Example 1
 protected virtual void OnSqlInfoMessage(BulkLoadContext context, object sender, SqlInfoMessageEventArgs args)
 {
     foreach (var line in args.Message.Split(new[] { Environment.NewLine }, StringSplitOptions.RemoveEmptyEntries))
     {
         context.Log.Debug($"SQL Info: {line}");
     }
 }
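
For context, Example 9 below shows where this handler is hooked up in LoadItems. The fragment here just isolates that wiring; context and conn are assumed to be the BulkLoadContext and the open SqlConnection from the surrounding code:

 // Wiring sketch based on Example 9; not a separate API.
 var infoMessageHandler = new SqlInfoMessageEventHandler((s, e) => OnSqlInfoMessage(context, s, e));
 conn.InfoMessage += infoMessageHandler;   // forward PRINT/RAISERROR output from SQL Server to the log
 try
 {
     // ... execute SQL batches ...
 }
 finally
 {
     conn.InfoMessage -= infoMessageHandler;
 }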
Example 2
        protected virtual void LookupIds(BulkLoadContext loadContext, BulkLoadSqlContext sqlContext,
                                         BulkItemsAndFieldsReader itemAndFieldReader)
        {
            var lookupBlobsSql = sqlContext.GetEmbeddedSql(loadContext, "Sql.02.LookupBlobs.sql");
            var lookupItemsSql = sqlContext.GetEmbeddedSql(loadContext, "Sql.03.LookupItems.sql");

            var stopwatch = Stopwatch.StartNew();

            if (loadContext.LookupBlobIds)
            {
                sqlContext.ExecuteSql(lookupBlobsSql);
            }

            if (loadContext.LookupItemIds)
            {
                // Using sql parameters resets temp tables.
                if (loadContext.Destination != null)
                {
                    lookupItemsSql = sqlContext.ReplaceOneLineSqlStringParameter(lookupItemsSql, "@destinationPath",
                                                                                 loadContext.Destination.ItemPath);
                    lookupItemsSql = sqlContext.ReplaceOneLineSqlStringParameter(lookupItemsSql, "@destinationId",
                                                                                 loadContext.Destination.ItemId.ToString("D"));
                }
                sqlContext.ExecuteSql(lookupItemsSql);
            }

            loadContext.Log.Info($"Looked up ids: {(int)stopwatch.Elapsed.TotalSeconds}s");
        }
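
The "Using sql parameters resets temp tables" comment hints at why values are spliced into the SQL text instead of being passed as SqlParameters: ADO.NET runs parameterized batches through sp_executesql, which executes in its own scope, so keeping the batches unparameterized preserves the session's # tables across the scripts. The sketch below shows roughly what a helper like ReplaceOneLineSqlStringParameter presumably does; the regex, the expected DECLARE line format and the quoting are assumptions for illustration, not the actual BulkLoadSqlContext implementation.

        // Hypothetical illustration only (requires System.Text.RegularExpressions).
        // Assumes the embedded script declares the parameter on a single line, e.g.:
        //   DECLARE @destinationPath NVARCHAR(MAX) = NULL
        private static string ReplaceOneLineStringParameterSketch(string sql, string parameterName, string value)
        {
            var pattern = $@"^\s*DECLARE\s+{Regex.Escape(parameterName)}\b.*$";
            var replacement = $"DECLARE {parameterName} NVARCHAR(MAX) = N'{value.Replace("'", "''")}'";
            return Regex.Replace(sql, pattern, replacement, RegexOptions.Multiline | RegexOptions.IgnoreCase);
        }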
Example 3
        protected virtual void MergeData(BulkLoadContext loadContext, BulkLoadSqlContext sqlContext,
                                         BulkItemsAndFieldsReader itemAndFieldReader)
        {
            var sql = sqlContext.GetEmbeddedSql(loadContext, "Sql.08.MergeTempData.sql");

            var stopwatch = Stopwatch.StartNew();

            // Execute merge and collect imported items.
            // Using sql parameters resets temp tables.
            sql = sqlContext.ReplaceOneLineSqlBitParameter(sql, "@ProcessDependingFields",
                                                           itemAndFieldReader.HasFieldDependencies);
            sql = sqlContext.ReplaceOneLineSqlBitParameter(sql, "@CleanupBlobs",
                                                           itemAndFieldReader.HasBlobFields);
            sql = sqlContext.ReplaceOneLineSqlBitParameter(sql, "@AllowTemplateChanges",
                                                           loadContext.AllowTemplateChanges);
            sql = sqlContext.ReplaceOneLineSqlStringParameter(sql, "@DefaultLanguage",
                                                              LanguageManager.DefaultLanguage.Name);

            using (var cmd = sqlContext.NewSqlCommand(sql))
            {
                cmd.CommandTimeout = int.MaxValue;
                using (var reader = cmd.ExecuteReader())
                {
                    while (reader.Read())
                    {
                        loadContext.ItemChanges.AddLast(new ItemChange(reader));
                    }
                }
            }
            loadContext.Log.Info($"Merged loaded data: {(int)stopwatch.Elapsed.TotalSeconds}s");
        }
Example 4
        protected virtual bool StageData(BulkLoadContext loadContext, BulkLoadSqlContext sqlContext,
                                         IEnumerable<BulkItem> items, out BulkItemsAndFieldsReader itemAndFieldReader)
        {
            var sql = sqlContext.GetEmbeddedSql(loadContext, "Sql.01.CreateTempTable.sql");

            // Cleanup left-over staging tables.
            if (loadContext.StageDataWithoutProcessing)
            {
                sqlContext.DropStageTables();
            }

            // Create temp table.
            // We don't use table valued parameters because we don't want to change the database schema.
            sqlContext.ExecuteSql(sql);

            // Load data into temp table.
            itemAndFieldReader = NewReader(items);
            if (!BulkCopyToTempTable(loadContext, sqlContext, itemAndFieldReader, NewFieldRulesReader(loadContext)))
            {
                return false;
            }

            loadContext.OnDataStaged?.Invoke(loadContext);
            Event.RaiseEvent("bulkloader:datastaged", loadContext);

            return true;
        }
Example 5
        protected virtual FieldRulesReader NewFieldRulesReader(BulkLoadContext context)
        {
            if (context.FieldRules == null || context.FieldRules.Count == 0)
            {
                return null;
            }

            return new FieldRulesReader(() => context.FieldRules.GetEnumerator());
        }
Example 6
        protected virtual bool ValidateAndPrepareData(BulkLoadContext loadContext, BulkLoadSqlContext sqlContext)
        {
            var splitTempTablesSql = sqlContext.GetEmbeddedSql(loadContext, "Sql.04.SplitTempTable.sql");
            var indexesSql         = sqlContext.GetEmbeddedSql(loadContext, "Sql.06.CreateIndexes.sql");

            var stopwatch = Stopwatch.StartNew();

            sqlContext.ExecuteSql(splitTempTablesSql);

            if (!OnStagedDataValidating.Execute(p => p.ValidateLoadStage(loadContext, sqlContext), breakOnDefault: false))
            {
                return false;
            }

            sqlContext.ExecuteSql(indexesSql);

            if (!OnTempDataValidating.Execute(p => p.ValidateLoadStage(loadContext, sqlContext), breakOnDefault: false))
            {
                return false;
            }

            loadContext.Log.Info($"Validated and prepared loaded data: {(int)stopwatch.Elapsed.TotalSeconds}s");
            return true;
        }
Example 7
        protected virtual bool BulkCopyToTempTable(BulkLoadContext loadContext, BulkLoadSqlContext sqlContext,
                                                   BulkItemsAndFieldsReader itemAndFieldReader, FieldRulesReader fieldRulesReader)
        {
            var stopwatch = Stopwatch.StartNew();

            // Bulk copy into temp tables, so that we don't have to buffer stuff,
            // blobs can give OutOfMemoryExceptions.
            using (var bulkCopy = new SqlBulkCopy(sqlContext.Connection))
            {
                bulkCopy.BulkCopyTimeout      = int.MaxValue;
                bulkCopy.EnableStreaming      = true;
                bulkCopy.DestinationTableName = sqlContext.PostProcessSql(loadContext, "#BulkItemsAndFields");
                try
                {
                    bulkCopy.WriteToServer(itemAndFieldReader);
                }
                catch (Exception exception)
                {
                    loadContext.StageFailed(Stage.Load, exception, $"Write to #BulkItemsAndFields failed with message: {exception.Message}");
                    return false;
                }

                if (fieldRulesReader != null)
                {
                    bulkCopy.DestinationTableName = sqlContext.PostProcessSql(loadContext, "#FieldRules");
                    try
                    {
                        bulkCopy.WriteToServer(fieldRulesReader);
                    }
                    catch (Exception exception)
                    {
                        loadContext.StageFailed(Stage.Load, exception, $"Write to #FieldRules failed with message: {exception.Message}");
                        return false;
                    }
                }
            }
            loadContext.Log.Info($"Loaded data in database: {(int)stopwatch.Elapsed.TotalSeconds}s");
            stopwatch.Restart();
            return true;
        }
Example 8
 protected virtual ICollection<ItemChange> GetChanges(BulkLoadContext loadContext, BulkLoadSqlContext sqlContext)
 {
     // By putting this in a virtual method, overriders can implement e.g. crash recovery.
     return loadContext.ItemChanges;
 }
Example 9
        public virtual void LoadItems(BulkLoadContext context, IEnumerable<BulkLoadItem> items)
        {
            if (context == null)
            {
                throw new ArgumentNullException(nameof(context));
            }
            if (items == null)
            {
                throw new ArgumentNullException(nameof(items));
            }

            //items = OnItemProcessing.Execute(items, (p, itms) => p.Process(context, itms));

            var db = Factory.GetDatabase(context.Database, true);
            var connectionString = ConfigurationManager.ConnectionStrings[db.ConnectionStringName].ConnectionString;

            var infoMessageHandler = new SqlInfoMessageEventHandler((s, e) => OnSqlInfoMessage(context, s, e));

            using (var conn = new SqlConnection(connectionString))
            {
                var            sqlContext  = new BulkLoadSqlContext(conn, typeof(BulkLoader));
                SqlTransaction transaction = null;
                try
                {
                    conn.InfoMessage += infoMessageHandler;
                    conn.Open();

                    BulkItemsAndFieldsReader itemAndFieldReader;
                    if (!StageData(context, sqlContext, items, out itemAndFieldReader))
                    {
                        return;
                    }

                    if (context.StageDataWithoutProcessing)
                    {
                        context.Log.Info("Data to import is staged in database, no processing done.");
                        context.StageSucceeded(Stage.Load);
                        return;
                    }

                    if (itemAndFieldReader.ReadItemCount > 0)
                    {
                        LookupIds(context, sqlContext, itemAndFieldReader);

                        if (!ValidateAndPrepareData(context, sqlContext))
                        {
                            return;
                        }

                        sqlContext.Transaction = transaction = conn.BeginTransaction();
                        MergeData(context, sqlContext, itemAndFieldReader);
                    }

                    OnTransactionCommitting.Execute(p => p.Process(context, sqlContext));

                    // After this point, there's no use in keeping the transaction around,
                    // because we cannot sync everything transactionally (e.g. indexes, publishing, ...).
                    // Be aware that after this point the process may halt and not everything is consistent.
                    // We mitigate this inconsistency with crash recovery, see below.
                    transaction?.Commit();
                    transaction = null;

                    // Allow clearing caches before raising the event, so that reading items through the API in the event results in fresh reads.
                    OnItemsLoading.Execute(p => p.Process(context, sqlContext, context.ItemChanges));

                    // Databases are now entirely in sync.
                    context.OnDataLoaded?.Invoke(context);
                    Event.RaiseEvent("bulkloader:dataloaded", context);

                    // Execute post processors.
                    var itemChanges = GetChanges(context, sqlContext);
                    OnItemsLoaded.Execute(p => p.Process(context, sqlContext, itemChanges));

                    context.StageSucceeded(Stage.Load);
                }
                catch (Exception ex)
                {
                    transaction?.Rollback();
                    context.StageFailed(Stage.Load, ex, ex.Message);
                }
                finally
                {
                    conn.InfoMessage -= infoMessageHandler;
                }
            }
        }
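
For completeness, a hypothetical caller could look like the sketch below. Only BulkLoader and the LoadItems signature come from the example above; the context constructor and the item source are assumptions for illustration, not the actual DataBlaster API.

        // Hypothetical usage sketch.
        var loader = new BulkLoader();
        var context = new BulkLoadContext("master");            // assumed constructor taking a database name
        IEnumerable<BulkLoadItem> items = CreateItemsToLoad();  // assumed helper producing the items to load
        loader.LoadItems(context, items);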