Example #1
        public int Run(IRDMPPlatformRepositoryServiceLocator repositoryLocator, IDataLoadEventListener listener, ICheckNotifier checkNotifier, GracefulCancellationToken token)
        {
            // if we have no listener, use a throw-immediately one (it raises exceptions if things go badly)
            if (listener == null)
            {
                listener = new ThrowImmediatelyDataLoadEventListener();
            }

            // whatever happens, we want a listener that records the worst result for the return code (even if 'ignore all errors' listeners are also in use)
            var toMemory = new ToMemoryDataLoadEventListener(false);

            // User might have some additional listeners registered
            listener = new ForkDataLoadEventListener(AdditionalListeners.Union(new [] { toMemory, listener }).ToArray());

            // build the engine and run it
            var engine = UseCase.GetEngine(Pipeline, listener);

            engine.ExecutePipeline(token ?? new GracefulCancellationToken());

            // return -1 if it went badly, otherwise 0
            var exitCode = toMemory.GetWorst() >= ProgressEventType.Error ? -1 : 0;

            if (exitCode == 0)
            {
                PipelineExecutionFinishedsuccessfully?.Invoke(this, new PipelineEngineEventArgs(engine));
            }

            return exitCode;
        }
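The pattern above is worth spelling out: ForkDataLoadEventListener fans every event out to all of the wrapped listeners, so the in-memory listener always observes the worst event even when other listeners swallow errors, and the exit code can be derived from it afterwards. Below is a minimal sketch of that fan-out and worst-result tracking; the types are simplified stand-ins, not RDMP's real IDataLoadEventListener or ToMemoryDataLoadEventListener.

        using System;

        public enum EventType { Information, Warning, Error }

        public interface ISimpleListener
        {
            void OnNotify(object sender, EventType type, string message);
        }

        // Broadcasts every event to all wrapped listeners, as ForkDataLoadEventListener does.
        public class ForkListener : ISimpleListener
        {
            private readonly ISimpleListener[] _listeners;

            public ForkListener(params ISimpleListener[] listeners)
            {
                _listeners = listeners;
            }

            public void OnNotify(object sender, EventType type, string message)
            {
                foreach (var l in _listeners)
                    l.OnNotify(sender, type, message);
            }
        }

        // Remembers the most severe event seen, like ToMemoryDataLoadEventListener.GetWorst().
        public class WorstRecorder : ISimpleListener
        {
            public EventType Worst { get; private set; } = EventType.Information;

            public void OnNotify(object sender, EventType type, string message)
            {
                if (type > Worst)
                    Worst = type;
            }
        }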
Example #2
        public int Run(IRDMPPlatformRepositoryServiceLocator repositoryLocator, IDataLoadEventListener listener, ICheckNotifier checkNotifier, GracefulCancellationToken token)
        {
            const string dataLoadTask = "caching";

            CacheProgress cp = repositoryLocator.CatalogueRepository.GetObjectByID<CacheProgress>(_options.CacheProgress);

            var defaults      = repositoryLocator.CatalogueRepository.GetServerDefaults();
            var loggingServer = defaults.GetDefaultFor(PermissableDefaults.LiveLoggingServer_ID);

            if (loggingServer == null)
            {
                throw new NotSupportedException("No default logging server specified, you must specify one in ");
            }

            var logManager = new LogManager(loggingServer);

            logManager.CreateNewLoggingTaskIfNotExists(dataLoadTask);

            switch (_options.Command)
            {
            case CommandLineActivity.run:

                //Setup dual listeners for the Cache process: one ticks the lifeline on every message and one logs to the logging db
                var toLog        = new ToLoggingDatabaseDataLoadEventListener(this, logManager, dataLoadTask, "Caching " + cp);
                var forkListener = new ForkDataLoadEventListener(toLog, listener);
                try
                {
                    var cachingHost = new CachingHost(repositoryLocator.CatalogueRepository);
                    cachingHost.RetryMode         = _options.RetryMode;
                    cachingHost.CacheProgressList = new ICacheProgress[] { cp }.ToList();     //run the cp

                    //By default caching host will block
                    cachingHost.TerminateIfOutsidePermissionWindow = true;

                    cachingHost.Start(forkListener, token);
                }
                finally
                {
                    //finish everything
                    toLog.FinalizeTableLoadInfos();
                }

                break;

            case CommandLineActivity.check:
                var checkable = new CachingPreExecutionChecker(cp);
                checkable.Check(checkNotifier);
                break;

            default:
                throw new ArgumentOutOfRangeException();
            }

            return 0;
        }
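Note the try/finally around cachingHost.Start: ToLoggingDatabaseDataLoadEventListener buffers what it will write to the logging database, and FinalizeTableLoadInfos flushes that buffer, so it must run whether or not caching throws. The same guard in isolation, with BufferedDbLog as a hypothetical stand-in for the buffering listener:

        using System;
        using System.Collections.Generic;

        // BufferedDbLog is a hypothetical stand-in for a listener that
        // buffers entries and only persists them when Flush() is called.
        public class BufferedDbLog
        {
            private readonly List<string> _buffer = new List<string>();

            public void Write(string message) => _buffer.Add(message);

            public void Flush()
            {
                foreach (var m in _buffer)
                    Console.WriteLine(m); // stand-in for the database write

                _buffer.Clear();
            }
        }

        public static class FlushGuardDemo
        {
            public static void Main()
            {
                var log = new BufferedDbLog();
                try
                {
                    try
                    {
                        log.Write("starting cache run");
                        throw new InvalidOperationException("simulated cache failure");
                    }
                    finally
                    {
                        // Mirrors toLog.FinalizeTableLoadInfos(): runs on success or failure.
                        log.Flush();
                    }
                }
                catch (InvalidOperationException)
                {
                    // The failure still propagates after the flush; swallowed here for the demo.
                }
            }
        }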
Example #3
        public ConfigureAndExecutePipelineUI(PipelineUseCase useCase, IActivateItems activator)
        {
            _useCase = useCase;

            InitializeComponent();

            //designer mode
            if (useCase == null && activator == null)
            {
                return;
            }

            SetItemActivator(activator);
            progressUI1.ApplyTheme(activator.Theme);

            pipelineDiagram1 = new PipelineDiagramUI();

            pipelineDiagram1.Dock = DockStyle.Fill;
            panel_pipelineDiagram1.Controls.Add(pipelineDiagram1);

            fork = new ForkDataLoadEventListener(progressUI1);

            var context = useCase.GetContext();

            if (context.GetFlowType() != typeof(DataTable))
            {
                throw new NotSupportedException("Only DataTable flow contexts can be used with this class");
            }

            foreach (var o in useCase.GetInitializationObjects())
            {
                if (o is DatabaseEntity de)
                {
                    CommonFunctionality.Add(new ExecuteCommandShow(activator, de, 0, true));
                }
                else
                {
                    CommonFunctionality.Add(o.ToString());
                }

                _initializationObjects.Add(o);
            }

            SetPipelineOptions(activator.RepositoryLocator.CatalogueRepository);

            lblTask.Text = "Task: " + useCase.GetType().Name;
        }
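The early return when both arguments are null exists for the Visual Studio designer, which constructs the control without its runtime services; only the designer-generated layout from InitializeComponent is safe to run in that case. A standalone sketch of the same guard:

        using System.Windows.Forms;

        // Sketch of the designer-mode guard used above: skip runtime wiring
        // when the control is created without its dependencies.
        public class GuardedControl : UserControl
        {
            public GuardedControl(object activator)
            {
                // Designer-generated layout only; always safe to run.
                InitializeComponent();

                if (activator == null)
                    return; // designer mode: no runtime services available

                // ...runtime-only wiring (themes, diagrams, commands) goes here...
            }

            private void InitializeComponent() { }
        }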
Example #4
        public void TestWithEcho()
        {
            var source = new ProcessBasedCacheSource();

            if (IsLinux)
            {
                source.Command = "/bin/echo";
                source.Args    = "Hey Thomas go get %s and store in %d";
            }
            else
            {
                source.Command = "cmd.exe";
                source.Args    = "/c echo Hey Thomas go get %s and store in %d";
            }
            source.TimeFormat             = "dd/MM/yy";
            source.ThrowOnNonZeroExitCode = true;

            // What dates to load
            var cp = WhenIHaveA <CacheProgress>();

            cp.CacheFillProgress = new DateTime(2001, 12, 24);
            cp.SaveToDatabase();

            // Where to put files
            var lmd = cp.LoadProgress.LoadMetadata;

            var dir     = new DirectoryInfo(TestContext.CurrentContext.WorkDirectory);
            var loadDir = LoadDirectory.CreateDirectoryStructure(dir, "blah", true);

            lmd.LocationOfFlatFiles = loadDir.RootPath.FullName;
            lmd.SaveToDatabase();

            source.PreInitialize(new CacheFetchRequestProvider(cp), new ThrowImmediatelyDataLoadEventListener());
            source.PreInitialize(cp.CatalogueRepository, new ThrowImmediatelyDataLoadEventListener());
            source.PreInitialize(new PermissionWindow(cp.CatalogueRepository), new ThrowImmediatelyDataLoadEventListener());

            var toMem = new ToMemoryDataLoadEventListener(true);
            var fork  = new ForkDataLoadEventListener(toMem, new ThrowImmediatelyDataLoadEventListener()
            {
                WriteToConsole = true
            });

            source.GetChunk(fork, new GracefulCancellationToken());

            Assert.Contains($"Hey Thomas go get 24/12/01 and store in {Path.Combine(loadDir.Cache.FullName, "ALL")}", toMem.GetAllMessagesByProgressEventType()[ProgressEventType.Information].Select(v => v.Message).ToArray());
        }
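The expected message in the assertion implies that ProcessBasedCacheSource substitutes the fetch date (formatted with TimeFormat) for %s and the cache destination directory for %d before running the command. A minimal sketch of that substitution; the real component's rules may differ:

        using System;
        using System.IO;

        // Minimal sketch of the %s/%d substitution implied by the assertion
        // above; the real ProcessBasedCacheSource may apply further rules.
        public static class ArgTemplating
        {
            public static string Expand(string args, DateTime fetchDate, DirectoryInfo destination, string timeFormat)
            {
                return args
                       .Replace("%s", fetchDate.ToString(timeFormat))
                       .Replace("%d", destination.FullName);
            }

            public static void Main()
            {
                var expanded = Expand("Hey Thomas go get %s and store in %d",
                                      new DateTime(2001, 12, 24),
                                      new DirectoryInfo(Path.Combine(Path.GetTempPath(), "ALL")),
                                      "dd/MM/yy");

                Console.WriteLine(expanded); // Hey Thomas go get 24/12/01 and store in <temp>/ALL
            }
        }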
Example #5
 public void SetAdditionalProgressListener(IDataLoadEventListener listener)
 {
     fork = new ForkDataLoadEventListener(progressUI1, listener);
 }
Example #6
        public override void GenerateReport(ICatalogue c, IDataLoadEventListener listener, CancellationToken cancellationToken)
        {
            SetupLogging(c.CatalogueRepository);

            var toDatabaseLogger = new ToLoggingDatabaseDataLoadEventListener(this, _logManager, _loggingTask, "DQE evaluation of " + c);

            var forker = new ForkDataLoadEventListener(listener, toDatabaseLogger);

            try
            {
                _catalogue = c;
                var dqeRepository = new DQERepository(c.CatalogueRepository);

                byPivotCategoryCubesOverTime.Add("ALL", new PeriodicityCubesOverTime("ALL"));
                byPivotRowStatesOverDataLoadRunId.Add("ALL", new DQEStateOverDataLoadRunId("ALL"));

                Check(new FromDataLoadEventListenerToCheckNotifier(forker));

                var sw = Stopwatch.StartNew();
                using (var con = _server.GetConnection())
                {
                    con.Open();

                    var cmd = _server.GetCommand(_queryBuilder.SQL, con);
                    cmd.CommandTimeout = 500000;

                    var t = cmd.ExecuteReaderAsync(cancellationToken);
                    t.Wait(cancellationToken);

                    if (cancellationToken.IsCancellationRequested)
                    {
                        throw new OperationCanceledException("User cancelled DQE while fetching data");
                    }

                    var r = t.Result;

                    int progress = 0;

                    while (r.Read())
                    {
                        cancellationToken.ThrowIfCancellationRequested();

                        progress++;
                        //to start with, assume we will file the results under the 'unknown batch' (where data load run ID is null or not available)
                        int dataLoadRunIDOfCurrentRecord = 0;

                        //if the DataReader is likely to have a data load run ID column
                        if (_containsDataLoadID)
                        {
                            //get data load run id
                            int? runID = dqeRepository.ObjectToNullableInt(r[_dataLoadRunFieldName]);

                            //if it has a value use it (otherwise it is null so use 0 - ugh I know, it's a primary key constraint issue)
                            if (runID != null)
                            {
                                dataLoadRunIDOfCurrentRecord = (int)runID;
                            }
                        }

                        string pivotValue = null;

                        //if the user has a pivot category configured
                        if (_pivotCategory != null)
                        {
                            pivotValue = GetStringValueForPivotField(r[_pivotCategory], forker);

                            if (!haveComplainedAboutNullCategories && string.IsNullOrWhiteSpace(pivotValue))
                            {
                                forker.OnNotify(this,
                                                new NotifyEventArgs(ProgressEventType.Warning,
                                                                    "Found a null/empty value for pivot category '" + _pivotCategory +
                                                                    "', this record will ONLY be recorded under ALL and not its specific category; you will not be warned of further nulls because there are likely to be many if there are any"));
                                haveComplainedAboutNullCategories = true;
                                pivotValue = null;
                            }
                        }

                        //always increase the "ALL" category
                        ProcessRecord(dqeRepository, dataLoadRunIDOfCurrentRecord, r,
                                      byPivotCategoryCubesOverTime["ALL"], byPivotRowStatesOverDataLoadRunId["ALL"]);

                        //if there is a value in the current record for the pivot column
                        if (pivotValue != null)
                        {
                            //if it is a novel value (not seen before)
                            if (!byPivotCategoryCubesOverTime.ContainsKey(pivotValue))
                            {
                                //we will need to expand the dictionaries
                                if (byPivotCategoryCubesOverTime.Keys.Count > MaximumPivotValues)
                                {
                                    throw new OverflowException(
                                              "Encountered more than " + MaximumPivotValues + " values for the pivot column " + _pivotCategory +
                                              " this will result in crazy space usage since it is a multiplicative scale of DQE tesseracts");
                                }

                                //expand both the time periodicity and the state results
                                byPivotRowStatesOverDataLoadRunId.Add(pivotValue,
                                                                      new DQEStateOverDataLoadRunId(pivotValue));
                                byPivotCategoryCubesOverTime.Add(pivotValue, new PeriodicityCubesOverTime(pivotValue));
                            }

                            //now we are sure that the dictionaries have the category field we can increment it
                            ProcessRecord(dqeRepository, dataLoadRunIDOfCurrentRecord, r,
                                          byPivotCategoryCubesOverTime[pivotValue], byPivotRowStatesOverDataLoadRunId[pivotValue]);
                        }

                        if (progress % 5000 == 0)
                        {
                            forker.OnProgress(this,
                                              new ProgressEventArgs("Processing " + _catalogue,
                                                                    new ProgressMeasurement(progress, ProgressType.Records), sw.Elapsed));
                        }
                    }
                    //final value
                    forker.OnProgress(this,
                                      new ProgressEventArgs("Processing " + _catalogue,
                                                            new ProgressMeasurement(progress, ProgressType.Records), sw.Elapsed));
                    con.Close();
                }
                sw.Stop();

                foreach (var state in byPivotRowStatesOverDataLoadRunId.Values)
                {
                    state.CalculateFinalValues();
                }

                //now commit results
                using (var con = dqeRepository.BeginNewTransactedConnection())
                {
                    try
                    {
                        //mark down that we are beginning an evaluation on this the day of our lord etc...
                        Evaluation evaluation = new Evaluation(dqeRepository, _catalogue);

                        foreach (var state in byPivotRowStatesOverDataLoadRunId.Values)
                        {
                            state.CommitToDatabase(evaluation, _catalogue, con.Connection, con.Transaction);
                        }

                        if (_timePeriodicityField != null)
                        {
                            foreach (PeriodicityCubesOverTime periodicity in byPivotCategoryCubesOverTime.Values)
                            {
                                periodicity.CommitToDatabase(evaluation);
                            }
                        }

                        con.ManagedTransaction.CommitAndCloseConnection();
                    }
                    catch (Exception)
                    {
                        con.ManagedTransaction.AbandonAndCloseConnection();
                        throw;
                    }
                }

                forker.OnNotify(this,
                                new NotifyEventArgs(ProgressEventType.Information,
                                                    "CatalogueConstraintReport completed successfully  and committed results to DQE server"));
            }
            catch (Exception e)
            {
                if (!(e is OperationCanceledException))
                {
                    forker.OnNotify(this, new NotifyEventArgs(ProgressEventType.Error, "Fatal Crash", e));
                }
                else
                {
                    forker.OnNotify(this, new NotifyEventArgs(ProgressEventType.Warning, "DQE Execution Cancelled", e));
                }
            }
            finally
            {
                toDatabaseLogger.FinalizeTableLoadInfos();
            }
        }
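One detail worth copying from GenerateReport is its progress throttling: OnProgress fires every 5000 records inside the read loop, then once more after the loop so the final partial batch is still reported. The throttle in isolation:

        using System;
        using System.Diagnostics;

        // A minimal sketch of the progress-throttling loop above: report
        // every N records, then once more at the end so the final count is
        // not lost when the total is not a multiple of N.
        public static class ProgressThrottle
        {
            public static void Main()
            {
                const int reportEvery = 5000;
                var sw = Stopwatch.StartNew();
                int progress = 0;

                for (int i = 0; i < 12_345; i++) // stand-in for r.Read()
                {
                    progress++;

                    if (progress % reportEvery == 0)
                        Console.WriteLine($"Processed {progress} records in {sw.Elapsed}");
                }

                // Final report, mirroring the OnProgress call after the while loop.
                Console.WriteLine($"Processed {progress} records in {sw.Elapsed}");
            }
        }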
Example #7
        public DataTable ProcessPipelineData(DataTable toProcess, IDataLoadEventListener listener, GracefulCancellationToken cancellationToken)
        {
            if (toProcess == null)
            {
                return null;
            }

            IDatabaseColumnRequestAdjuster adjuster = null;

            if (Adjuster != null)
            {
                var constructor = new ObjectConstructor();
                adjuster = (IDatabaseColumnRequestAdjuster)constructor.Construct(Adjuster);
            }

            //work out the table name for the table we are going to create
            if (TargetTableName == null)
            {
                if (string.IsNullOrWhiteSpace(toProcess.TableName))
                {
                    throw new Exception("Chunk did not have a TableName, did not know what to call the newly created table");
                }

                TargetTableName = QuerySyntaxHelper.MakeHeaderNameSane(toProcess.TableName);
            }

            ClearPrimaryKeyFromDataTableAndExplicitWriteTypes(toProcess);

            StartAuditIfExists(TargetTableName);

            if (_loggingDatabaseListener != null)
            {
                listener = new ForkDataLoadEventListener(listener, _loggingDatabaseListener);
            }

            EnsureTableHasDataInIt(toProcess);

            bool createdTable = false;

            if (_firstTime)
            {
                bool tableAlreadyExistsButEmpty = false;

                if (!_database.Exists())
                {
                    throw new Exception("Database " + _database + " does not exist");
                }

                discoveredTable = _database.ExpectTable(TargetTableName);

                //table already exists
                if (discoveredTable.Exists())
                {
                    tableAlreadyExistsButEmpty = true;

                    if (!AllowLoadingPopulatedTables)
                    {
                        if (discoveredTable.IsEmpty())
                        {
                            listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Warning, "Found table " + TargetTableName + " already, normally this would forbid you from loading it (data duplication / no primary key etc) but it is empty so we are happy to load it, it will not be created"));
                        }
                        else
                        {
                            throw new Exception("There is already a table called " + TargetTableName + " at the destination " + _database);
                        }
                    }

                    if (AllowResizingColumnsAtUploadTime)
                    {
                        _dataTypeDictionary = discoveredTable.DiscoverColumns().ToDictionary(k => k.GetRuntimeName(), v => v.GetDataTypeComputer(), StringComparer.CurrentCultureIgnoreCase);
                    }
                }
                else
                {
                    listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information, "Determined that the table name " + TargetTableName + " is unique at destination " + _database));
                }

                //create connection to destination
                if (!tableAlreadyExistsButEmpty)
                {
                    createdTable = true;

                    if (AllowResizingColumnsAtUploadTime)
                    {
                        _database.CreateTable(out _dataTypeDictionary, TargetTableName, toProcess, ExplicitTypes.ToArray(), true, adjuster);
                    }
                    else
                    {
                        _database.CreateTable(TargetTableName, toProcess, ExplicitTypes.ToArray(), true, adjuster);
                    }

                    listener.OnNotify(this, new NotifyEventArgs(ProgressEventType.Information, "Created table " + TargetTableName + " successfully."));
                }

                _managedConnection = _server.BeginNewTransactedConnection();
                _bulkcopy          = discoveredTable.BeginBulkInsert(_managedConnection.ManagedTransaction);

                if (Culture != null)
                {
                    _bulkcopy.DateTimeDecider.Culture = Culture;
                }

                _firstTime = false;
            }

            try
            {
                if (AllowResizingColumnsAtUploadTime && !createdTable)
                {
                    ResizeColumnsIfRequired(toProcess, listener);
                }

                //push the data
                swTimeSpentWritting.Start();

                _affectedRows += _bulkcopy.Upload(toProcess);

                swTimeSpentWritting.Stop();
                listener.OnProgress(this, new ProgressEventArgs("Uploading to " + TargetTableName, new ProgressMeasurement(_affectedRows, ProgressType.Records), swTimeSpentWritting.Elapsed));
            }
            catch (Exception e)
            {
                _managedConnection.ManagedTransaction.AbandonAndCloseConnection();

                if (LoggingServer != null)
                {
                    _dataLoadInfo.LogFatalError(GetType().Name, ExceptionHelper.ExceptionToListOfInnerMessages(e, true));
                }

                throw new Exception("Failed to write rows (in transaction) to table " + TargetTableName, e);
            }

            return null;
        }
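The destination performs its expensive setup (existence checks, table creation, opening the transacted bulk insert) only on the first chunk, then reuses that state for every later chunk. A minimal sketch of the same _firstTime pattern, with hypothetical stand-ins for the FAnsi database calls used by the real class:

        using System;
        using System.Data;

        // Minimal sketch of the first-chunk initialization pattern above.
        // CreateTable/Upload are hypothetical stand-ins for the real
        // _database.CreateTable(...) and _bulkcopy.Upload(...) calls.
        public class ChunkedTableWriter
        {
            private bool _firstTime = true;
            private int _affectedRows;

            public void ProcessChunk(DataTable chunk)
            {
                if (chunk == null)
                    return; // end of pipeline, nothing left to write

                if (_firstTime)
                {
                    // Expensive setup happens exactly once, driven by the
                    // first chunk's schema (column names and types).
                    CreateTable(chunk);
                    _firstTime = false;
                }

                _affectedRows += Upload(chunk);
                Console.WriteLine($"{_affectedRows} rows written so far");
            }

            private void CreateTable(DataTable chunk) { /* stand-in for _database.CreateTable(...) */ }

            private int Upload(DataTable chunk) => chunk.Rows.Count; // stand-in for _bulkcopy.Upload(...)
        }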