Example #1
        protected virtual bool BulkCopyToTempTable(BulkLoadContext loadContext, BulkLoadSqlContext sqlContext,
                                                   BulkItemsAndFieldsReader itemAndFieldReader, FieldRulesReader fieldRulesReader)
        {
            var stopwatch = Stopwatch.StartNew();

            // Bulk copy into temp tables so that we don't have to buffer data in memory;
            // large blobs can otherwise cause OutOfMemoryExceptions.
            using (var bulkCopy = new SqlBulkCopy(sqlContext.Connection))
            {
                bulkCopy.BulkCopyTimeout      = int.MaxValue;
                bulkCopy.EnableStreaming      = true;
                bulkCopy.DestinationTableName = sqlContext.PostProcessSql(loadContext, "#BulkItemsAndFields");
                try
                {
                    bulkCopy.WriteToServer(itemAndFieldReader);
                }
                catch (Exception exception)
                {
                    loadContext.StageFailed(Stage.Load, exception,
                                            $"Write to #BulkItemsAndFields failed with message: {exception.Message}");
                    return false;
                }

                if (fieldRulesReader != null)
                {
                    bulkCopy.DestinationTableName = sqlContext.PostProcessSql(loadContext, "#FieldRules");
                    try
                    {
                        bulkCopy.WriteToServer(fieldRulesReader);
                    }
                    catch (Exception exception)
                    {
                        loadContext.StageFailed(Stage.Load, exception,
                                                $"Write to #FieldRules failed with message: {exception}");
                    }
                }
            }
            loadContext.Log.Info($"Loaded data in database: {(int)stopwatch.Elapsed.TotalSeconds}s");
            stopwatch.Restart();
            return true;
        }
        public virtual void LoadItems(BulkLoadContext context, IEnumerable<BulkLoadItem> items)
        {
            if (context == null)
            {
                throw new ArgumentNullException(nameof(context));
            }
            if (items == null)
            {
                throw new ArgumentNullException(nameof(items));
            }

            items = OnItemProcessing.Execute(items, (p, itms) => p.Process(context, itms));

            var db = Factory.GetDatabase(context.Database, true);
            var connectionString = ConfigurationManager.ConnectionStrings[db.ConnectionStringName].ConnectionString;

            var infoMessageHandler = new SqlInfoMessageEventHandler((s, e) => OnSqlInfoMessage(context, s, e));

            using (var conn = new SqlConnection(connectionString))
            {
                var            sqlContext  = new BulkLoadSqlContext(conn, typeof(BulkLoader));
                SqlTransaction transaction = null;
                try
                {
                    conn.InfoMessage += infoMessageHandler;
                    conn.Open();

                    BulkItemsAndFieldsReader itemAndFieldReader;
                    if (!StageData(context, sqlContext, items, out itemAndFieldReader))
                    {
                        return;
                    }

                    if (context.StageDataWithoutProcessing)
                    {
                        context.Log.Info("Data to import is staged in database, no processing done.");
                        context.StageSucceeded(Stage.Load);
                        return;
                    }

                    if (itemAndFieldReader.ReadItemCount > 0)
                    {
                        LookupIds(context, sqlContext, itemAndFieldReader);

                        if (!ValidateAndPrepareData(context, sqlContext))
                        {
                            return;
                        }

                        sqlContext.Transaction = transaction = conn.BeginTransaction();
                        MergeData(context, sqlContext, itemAndFieldReader);
                    }

                    OnTransactionCommitting.Execute(p => p.Process(context, sqlContext));

                    // After this point, there's no use in keeping the transaction around,
                    // because we cannot sync everything transactionally (e.g. indexes, publishing, ...).
                    // Be aware that after this point the process may halt and not everything is consistent.
                    // We mitigate this inconsistency with crash recovery, see below.
                    transaction?.Commit();
                    transaction = null;

                    // Allow clearing caches before raising the event, so that reading items through the API in event handlers results in fresh reads.
                    OnItemsLoading.Execute(p => p.Process(context, sqlContext, context.ItemChanges));

                    // Databases are now entirely in sync.
                    context.OnDataLoaded?.Invoke(context);
                    Event.RaiseEvent("bulkloader:dataloaded", context);

                    // Execute post processors.
                    var itemChanges = GetChanges(context, sqlContext);
                    OnItemsLoaded.Execute(p => p.Process(context, sqlContext, itemChanges));

                    context.StageSucceeded(Stage.Load);
                }
                catch (Exception ex)
                {
                    transaction?.Rollback();
                    context.StageFailed(Stage.Load, ex.Message);
                }
                finally
                {
                    conn.InfoMessage -= infoMessageHandler;
                }
            }
        }
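
For reference, the streaming bulk-copy technique that BulkCopyToTempTable uses above can be reproduced in isolation with plain SqlBulkCopy. The sketch below is a minimal, hypothetical example: the #Items temp table, its columns, and the TempTableBulkCopySketch/CopyToTempTable names are assumptions made for illustration and are not part of the BulkLoader API.

// A minimal sketch of the streaming bulk-copy pattern shown in BulkCopyToTempTable.
// #Items, its columns and the IDataReader source are illustrative assumptions only.
using System.Data;
using System.Data.SqlClient;

static class TempTableBulkCopySketch
{
    public static void CopyToTempTable(SqlConnection openConnection, IDataReader source)
    {
        // The temp table must be created on the same connection (session) that the
        // bulk copy uses; otherwise #Items is not visible to SqlBulkCopy.
        using (var create = new SqlCommand(
            "CREATE TABLE #Items (Id UNIQUEIDENTIFIER NOT NULL, Name NVARCHAR(256) NOT NULL)",
            openConnection))
        {
            create.ExecuteNonQuery();
        }

        using (var bulkCopy = new SqlBulkCopy(openConnection))
        {
            bulkCopy.BulkCopyTimeout = int.MaxValue;  // effectively disable the timeout for large loads
            bulkCopy.EnableStreaming = true;          // stream rows from the reader instead of buffering them
            bulkCopy.DestinationTableName = "#Items";
            bulkCopy.WriteToServer(source);           // pulls rows from the IDataReader one by one
        }
    }
}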