Example #1
        public void should_test_that_an_item_is_contained()
        {
            var setToCheck = new ConcurrentSet<int>(Enumerable.Range(1, 5));

            setToCheck.Contains(3).ShouldBeTrue();
            setToCheck.Contains(7).ShouldBeFalse();
        }
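
ConcurrentSet<T> is not part of the .NET base class library; each codebase quoted on this page (Roslyn, RavenDB, and others) ships its own implementation. As a reference point, here is a minimal sketch of the surface these snippets rely on, assuming a ConcurrentDictionary<T, byte> backing store. Note that enumeration order is unspecified for a concurrent dictionary, so order-sensitive assertions such as ShouldEqual depend on the specific implementation under test.

        using System;
        using System.Collections;
        using System.Collections.Concurrent;
        using System.Collections.Generic;
        using System.Linq;

        public class ConcurrentSet<T> : IEnumerable<T>
        {
            // Only the keys matter; a byte value is the cheapest placeholder.
            private readonly ConcurrentDictionary<T, byte> _dictionary;

            public ConcurrentSet()
            {
                _dictionary = new ConcurrentDictionary<T, byte>();
            }

            public ConcurrentSet(IEnumerable<T> items)
            {
                _dictionary = new ConcurrentDictionary<T, byte>(
                    items.Select(item => new KeyValuePair<T, byte>(item, 0)));
            }

            public int Count => _dictionary.Count;

            // Returns false when the item was already present.
            public bool TryAdd(T item) => _dictionary.TryAdd(item, 0);

            public bool Add(T item) => TryAdd(item);

            // AddOrThrow (used in Example #12) layers on TryAdd.
            public void AddOrThrow(T item)
            {
                if (!TryAdd(item))
                    throw new InvalidOperationException("Duplicate item: " + item);
            }

            public bool Contains(T item) => _dictionary.ContainsKey(item);

            public bool Remove(T item) => _dictionary.TryRemove(item, out _);

            public void Clear() => _dictionary.Clear();

            public void CopyTo(T[] array, int arrayIndex) => _dictionary.Keys.CopyTo(array, arrayIndex);

            public IEnumerator<T> GetEnumerator() => _dictionary.Keys.GetEnumerator();

            IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
        }
Example #2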
        private async Task<ProjectMap> CreateProjectMapAsync(ConcurrentSet<SymbolAndProjectId> symbols)
        {
            using (Logger.LogBlock(FunctionId.FindReference_CreateProjectMapAsync, _cancellationToken))
            {
                var projectMap = new ProjectMap();

                var scope = _documents?.Select(d => d.Project).ToImmutableHashSet();
                foreach (var symbolAndProjectId in symbols)
                {
                    foreach (var finder in _finders)
                    {
                        _cancellationToken.ThrowIfCancellationRequested();

                        var projects = await finder.DetermineProjectsToSearchAsync(symbolAndProjectId.Symbol, _solution, scope, _cancellationToken).ConfigureAwait(false);
                        foreach (var project in projects.Distinct().WhereNotNull())
                        {
                            if (scope == null || scope.Contains(project))
                            {
                                projectMap.Add(project, (symbolAndProjectId, finder));
                            }
                        }
                    }
                }

                Contract.ThrowIfTrue(projectMap.Any(kvp => kvp.Value.Count != kvp.Value.ToSet().Count));
                return projectMap;
            }
        }
Example #3
		protected void HandleReduceForIndex(IndexToWorkOn indexToWorkOn)
		{
			var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexToWorkOn.IndexId);
			if (viewGenerator == null)
				return;

			bool operationCanceled = false;
			var itemsToDelete = new ConcurrentSet<object>();

			IList<ReduceTypePerKey> mappedResultsInfo = null;
			transactionalStorage.Batch(actions =>
			{
				mappedResultsInfo = actions.MapReduce.GetReduceTypesPerKeys(indexToWorkOn.IndexId,
					context.CurrentNumberOfItemsToReduceInSingleBatch,
					context.NumberOfItemsToExecuteReduceInSingleStep).ToList();
			});

			var singleStepReduceKeys = mappedResultsInfo.Where(x => x.OperationTypeToPerform == ReduceType.SingleStep).Select(x => x.ReduceKey).ToArray();
			var multiStepsReduceKeys = mappedResultsInfo.Where(x => x.OperationTypeToPerform == ReduceType.MultiStep).Select(x => x.ReduceKey).ToArray();

			currentlyProcessedIndexes.TryAdd(indexToWorkOn.IndexId, indexToWorkOn.Index);
			try
			{
				if (singleStepReduceKeys.Length > 0)
				{
					Log.Debug("SingleStep reduce for keys: {0}",singleStepReduceKeys.Select(x => x + ","));
					SingleStepReduce(indexToWorkOn, singleStepReduceKeys, viewGenerator, itemsToDelete);
				}

				if (multiStepsReduceKeys.Length > 0)
				{
					Log.Debug("MultiStep reduce for keys: {0}", singleStepReduceKeys.Select(x => x + ","));
					MultiStepReduce(indexToWorkOn, multiStepsReduceKeys, viewGenerator, itemsToDelete);
				}
			}
			catch (OperationCanceledException)
			{
				operationCanceled = true;
			}
			finally
			{
				if (operationCanceled == false)
				{
					// whether we succeeded in indexing or not, we have to update this
					// because otherwise we keep trying to re-index failed mapped results
					transactionalStorage.Batch(actions =>
					{
						var latest = actions.MapReduce.DeleteScheduledReduction(itemsToDelete);

						if (latest == null)
							return;
						actions.Indexing.UpdateLastReduced(indexToWorkOn.IndexId, latest.Etag, latest.Timestamp);
					});
				}

				Index _;
				currentlyProcessedIndexes.TryRemove(indexToWorkOn.IndexId, out _);
			}
		}
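Example #4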
		/// <summary>
		/// Create a work item queue that will try to pull items from a named RabbitMQ endpoint
		/// </summary>
		/// <param name="endpoint">Destination endpoint to pull messages from</param>
		/// <param name="messagingBase">RabbitMQ connection provider</param>
		/// <param name="sleeper">Sleeper to rate limit polling</param>
		public RabbitMqPollingNode(IRoutingEndpoint endpoint,
			IMessagingBase messagingBase, ISleepWrapper sleeper)
		{
			_endpoint = endpoint.ToString();
			_messagingBase = messagingBase;
			_sleeper = sleeper;
			_boundMessageTypes = new ConcurrentSet<Type>();
		}
Example #5
        public void should_add_an_item()
        {
            var setToAddTo = new ConcurrentSet<int>(Enumerable.Range(1, 5));

            setToAddTo.Add(6);

            setToAddTo.ShouldEqual(new[] { 1, 2, 3, 4, 5, 6 });
        }
Example #6
        public void should_clear_itself()
        {
            var setToClear = new ConcurrentSet<int>(Enumerable.Range(1, 5));

            setToClear.Clear();

            setToClear.Count.ShouldEqual(0);
        }
Example #7
        public void should_remove_an_item()
        {
            var setToRemoveFrom = new ConcurrentSet<int>(Enumerable.Range(1, 5));

            setToRemoveFrom.Remove(3);

            setToRemoveFrom.ShouldEqual(new[] { 1, 2, 4, 5 });
        }
Example #8
        public WatcherInfo(FileSystemWatcher watcher, bool isDirectory)
            : this()
        {
            Watcher = watcher;

            if (isDirectory)
                WatchedFiles = new ConcurrentSet<string>();
        }
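Example #9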
		public GetItemsToReduceParams(string index, IEnumerable<string> reduceKeys, int level, bool loadData, ConcurrentSet<object> itemsToDelete)
		{
			Index = index;
			Level = level;
			LoadData = loadData;
			ItemsToDelete = itemsToDelete;
			ItemsAlreadySeen = new HashSet<Tuple<string, int>>();
			ReduceKeys = new HashSet<string>(reduceKeys);
		}
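Example #10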
		/// <summary>
		/// Create a local polling node.
		/// <para>You should not use this yourself. Use:</para>
		/// <para>MessagingSystem.Configure.WithLocalQueue(...);</para>
		/// and receive messages as normal.
		/// </summary>
		public LocalQueuePollingNode(string dispatchPath, string incomingPath,
		                             IMessageSerialiser serialiser, ISleepWrapper sleeper)
		{
			_dispatchPath = dispatchPath;
			_incomingPath = incomingPath;
			_serialiser = serialiser;
			_sleeper = sleeper;
			_boundMessageTypes = new ConcurrentSet<Type>();
		}
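
A hedged usage sketch of the configuration path the doc comment above recommends; only the method name comes from that comment, and the argument (a queue storage path) is an assumption:

		// Assumed signature; the doc comment only names the method.
		MessagingSystem.Configure.WithLocalQueue("./local-queue");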
Example #11
        public void should_copy_itself_to_an_array()
        {
            var setToCopy = new ConcurrentSet<int>(Enumerable.Range(1, 5));
            var destinationArray = new int[5];

            setToCopy.CopyTo(destinationArray, 0);

            destinationArray.ShouldEqual(new[] { 1, 2, 3, 4, 5 });
        }
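Example #12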
      public async Task RunAsync() {
         Console.WriteLine(DateTime.Now);
         const int kMessagesPerWorker = 200000;
         var sink = courierFacades[0];
         var senders = courierFacades.Skip(1).ToArray();
         var counter = kMessagesPerWorker * senders.Length;
         var doneSignal = new AsyncLatch();
         int upCounter = 0;
         var set = new ConcurrentSet<int>();
         sink.InboundMessageRouter.RegisterHandler<string>(
            x => {
               set.AddOrThrow(int.Parse(x.Body));
               var newCounter = Interlocked.Decrement(ref counter);
               var newUpCounter = Interlocked.Increment(ref upCounter);
               if (newUpCounter % 500 == 0)
                  Console.WriteLine(newCounter + " " + newUpCounter);
               if (newCounter == 0) {
                  doneSignal.Set();
               }
               return Task.FromResult(false);
            });

         var sync = new AsyncCountdownLatch(senders.Length);
         var senderTasks = senders.Select((s, id) => Go(async () => {
            await s.PeerTable.GetOrAdd(sink.Identity.Id).WaitForDiscoveryAsync().ConfigureAwait(false);
            sync.Signal();
            await sync.WaitAsync().ConfigureAwait(false);
            Console.WriteLine("Sink discovered: " + DateTime.Now);

            const int kBatchFactor = 1;
            for (var batch = 0; batch < kBatchFactor; batch++) {
               var batchSize = kMessagesPerWorker / kBatchFactor;
               await Task.WhenAll(Util.Generate(
                  batchSize,
                  i => s.Messenger.SendReliableAsync(
                     "" + (batch * batchSize + i + id * kMessagesPerWorker),
                     sink.Identity.Id))
                  ).ConfigureAwait(false);
            }

            Console.WriteLine("Worker Done: " + DateTime.Now);
         }));
         await Task.WhenAll(senderTasks).ConfigureAwait(false);
         Console.WriteLine("Senders Done: " + DateTime.Now);

         await doneSignal.WaitAsync().ConfigureAwait(false);
         Console.WriteLine("Done Signalled: " + DateTime.Now);

         AssertCollectionDeepEquals(set, new ConcurrentSet<int>(Enumerable.Range(0, kMessagesPerWorker * senders.Length)));

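         // Note: this loop never returns; the original harness apparently parks the
         // process here, forcing collections, once the assertions complete.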
         while (true) {
            GC.Collect();
         }
      }
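Example #13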
        /// <summary>
        /// Creates a new compilation by attaching diagnostic analyzers to an existing compilation.
        /// </summary>
        /// <param name="compilation">The original compilation.</param>
        /// <param name="analyzers">The set of analyzers to include in future analyses.</param>
        /// <param name="options">Options that are passed to analyzers.</param>
        /// <param name="cancellationToken">A cancellation token that can be used to abort analysis.</param>
        public CompilationWithAnalyzers(Compilation compilation, ImmutableArray<DiagnosticAnalyzer> analyzers, AnalyzerOptions options, CancellationToken cancellationToken)
        {
            if (compilation == null)
            {
                throw new ArgumentNullException(nameof(compilation));
            }

            VerifyAnalyzersArgument(analyzers);

            _cancellationToken = cancellationToken;
            _exceptionDiagnostics = new ConcurrentSet<Diagnostic>();
            _driver = AnalyzerDriver.Create(compilation, analyzers, options, AnalyzerManager.Instance, AddExceptionDiagnostic, false, out _compilation, _cancellationToken);
        }
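
The constructor above is the older public entry point; current Roslyn reaches the same machinery through the Compilation.WithAnalyzers extension. A hedged usage sketch — the API names come from Microsoft.CodeAnalysis, while MyAnalyzer and the inputs are placeholders:

        // Sketch only: MyAnalyzer is a hypothetical DiagnosticAnalyzer.
        var compilation = CSharpCompilation.Create("demo", syntaxTrees, references);
        var withAnalyzers = compilation.WithAnalyzers(
            ImmutableArray.Create<DiagnosticAnalyzer>(new MyAnalyzer()),
            new AnalyzerOptions(ImmutableArray<AdditionalText>.Empty),
            cancellationToken);
        var diagnostics = await withAnalyzers.GetAnalyzerDiagnosticsAsync(cancellationToken);
Example #14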
        public CodeLensFindReferencesProgress(
            ISymbol queriedDefinition,
            SyntaxNode queriedNode,
            int searchCap,
            CancellationToken cancellationToken)
        {
            _queriedSymbol = queriedDefinition;
            _queriedNode = queriedNode;
            _aggregateCancellationTokenSource = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
            _locations = new ConcurrentSet<Location>();

            SearchCap = searchCap;
        }
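Example #15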
        private async Task<ConcurrentDictionary<Project, ConcurrentQueue<ValueTuple<SymbolAndProjectId, IReferenceFinder>>>> CreateProjectMapAsync(
            ConcurrentSet<SymbolAndProjectId> symbols)
        {
            using (Logger.LogBlock(FunctionId.FindReference_CreateProjectMapAsync, _cancellationToken))
            {
                Func<Project, ConcurrentQueue<ValueTuple<SymbolAndProjectId, IReferenceFinder>>> createQueue = 
                    p => new ConcurrentQueue<ValueTuple<SymbolAndProjectId, IReferenceFinder>>();

                var projectMap = new ConcurrentDictionary<Project, ConcurrentQueue<ValueTuple<SymbolAndProjectId, IReferenceFinder>>>();

#if PARALLEL
            Roslyn.Utilities.TaskExtensions.RethrowIncorrectAggregateExceptions(cancellationToken, () =>
                {
                    symbols.AsParallel().WithCancellation(cancellationToken).ForAll(s =>
                    {
                        finders.AsParallel().WithCancellation(cancellationToken).ForAll(f =>
                        {
                            var projects = f.DetermineProjectsToSearch(s, solution, cancellationToken) ?? SpecializedCollections.EmptyEnumerable<Project>();
                            foreach (var project in projects.Distinct())
                            {
                                projectMap.GetOrAdd(project, createQueue).Enqueue(ValueTuple.Create(s, f));
                            }
                        });
                    });
                });
#else

                var scope = _documents != null ? _documents.Select(d => d.Project).ToImmutableHashSet() : null;
                foreach (var s in symbols)
                {
                    foreach (var f in _finders)
                    {
                        _cancellationToken.ThrowIfCancellationRequested();

                        var projects = await f.DetermineProjectsToSearchAsync(s.Symbol, _solution, scope, _cancellationToken).ConfigureAwait(false);
                        foreach (var project in projects.Distinct().WhereNotNull())
                        {
                            if (scope == null || scope.Contains(project))
                            {
                                projectMap.GetOrAdd(project, createQueue).Enqueue(ValueTuple.Create(s, f));
                            }
                        }
                    }
                }
#endif

                Contract.ThrowIfTrue(projectMap.Any(kvp => kvp.Value.Count != kvp.Value.ToSet().Count));
                return projectMap;
            }
        }
Example #16
        public void ConcurrentTest()
        {
            var cset = new ConcurrentSet<int>();
            Action a1 = () => { for (int i = 0; i < 1000000; i++) cset.Add(i); };
            Action a2 = () => { for (int i = 1000000; i < 2000000; i++) cset.Add(i); };
            Action a3 = () => { for (int i = 2000000; i < 3000000; i++) cset.Add(i); };
            Action a4 = () => { for (int i = 3000000; i < 4000000; i++) cset.Add(i); };
            bool b1 = false;
            bool b2 = false;
            bool b3 = false;
            bool b4 = false;
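            // Note: delegate BeginInvoke/EndInvoke relies on .NET Framework remoting
            // support; on .NET Core these calls throw PlatformNotSupportedException.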
            a1.BeginInvoke(iar =>
            {
                a1.EndInvoke(iar);
                b1 = true;
            }, null);
            a2.BeginInvoke(iar =>
            {
                a2.EndInvoke(iar);
                b2 = true;
            }, null);
            a3.BeginInvoke(iar =>
            {
                a3.EndInvoke(iar);
                b3 = true;
            }, null);
            a4.BeginInvoke(iar =>
            {
                a4.EndInvoke(iar);
                b4 = true;
            }, null);

            while (!(b1 && b2 && b3 && b4))
            {
                Thread.Sleep(10);
            }

            Assert.AreEqual(4000000, cset.Count());
        }
Example #17
        private Task CreateTask(int baseInterval, ConcurrentSet<ICounterMetric> concurrentSet)
        {
            return Task.Run(async () =>
            {
                long elapsed = 0;

                while (true)
                {
                    try
                    {
                        if (cts.IsCancellationRequested)
                            return;

                        var milliseconds = baseInterval - elapsed;
                        if (milliseconds > 0)
                        {
                            await Task.Delay(TimeSpan.FromMilliseconds(milliseconds), cts.Token).ConfigureAwait(false);
                        }

                        var sp = Stopwatch.StartNew();
                        foreach (var ticker in concurrentSet)
                        {
                            if (cts.IsCancellationRequested)
                                return;

                            ticker.Tick();
                        }
                        elapsed = sp.ElapsedMilliseconds;
                    }
                    catch (TaskCanceledException)
                    {
                        return;
                    }
                }
            }, cts.Token);
        }
Example #18
        private ReducingPerformanceStats SingleStepReduce(IndexToWorkOn index, string[] keysToReduce, AbstractViewGenerator viewGenerator,
                                                          ConcurrentSet <object> itemsToDelete)
        {
            var needToMoveToSingleStepQueue = new ConcurrentQueue <HashSet <string> >();

            Log.Debug(() => string.Format("Executing single step reducing for {0} keys [{1}]", keysToReduce.Length, string.Join(", ", keysToReduce)));
            var batchTimeWatcher         = Stopwatch.StartNew();
            var reducingBatchThrottlerId = Guid.NewGuid();

            var reducePerformanceStats = new ReducingPerformanceStats(ReduceType.SingleStep);

            var reduceLevelStats = new ReduceLevelPeformanceStats()
            {
                Started = SystemTime.UtcNow,
                Level   = 2
            };

            try
            {
                var parallelOperations = new ConcurrentQueue <ParallelBatchStats>();

                var parallelProcessingStart = SystemTime.UtcNow;

                BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, keysToReduce, enumerator =>
                {
                    var parallelStats = new ParallelBatchStats
                    {
                        StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds
                    };

                    var localNeedToMoveToSingleStep = new HashSet <string>();
                    needToMoveToSingleStepQueue.Enqueue(localNeedToMoveToSingleStep);
                    var localKeys = new HashSet <string>();
                    while (enumerator.MoveNext())
                    {
                        localKeys.Add(enumerator.Current);
                    }

                    transactionalStorage.Batch(actions =>
                    {
                        var getItemsToReduceParams = new GetItemsToReduceParams(index: index.IndexId, reduceKeys: localKeys, level: 0,
                                                                                loadData: false,
                                                                                itemsToDelete: itemsToDelete)
                        {
                            Take = int.MaxValue                             // just get all, we do the rate limit when we load the number of keys to reduce, anyway
                        };

                        var getItemsToReduceDuration = Stopwatch.StartNew();

                        List <MappedResultInfo> scheduledItems;
                        using (StopwatchScope.For(getItemsToReduceDuration))
                        {
                            scheduledItems = actions.MapReduce.GetItemsToReduce(getItemsToReduceParams).ToList();
                        }

                        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetItemsToReduce, getItemsToReduceDuration.ElapsedMilliseconds));

                        autoTuner.CurrentlyUsedBatchSizesInBytes.GetOrAdd(reducingBatchThrottlerId, scheduledItems.Sum(x => x.Size));

                        if (scheduledItems.Count == 0)
                        {
                            if (Log.IsWarnEnabled)
                            {
                                Log.Warn("Found single reduce items ({0}) that didn't have any items to reduce. Deleting level 1 & level 2 items for those keys. (If you can reproduce this, please contact [email protected])",
                                         string.Join(", ", keysToReduce));
                            }
                            // Here we have an interesting issue. We have scheduled reductions, because GetReduceTypesPerKeys() returned them
                            // and at the same time, we don't have any at level 0. That probably means that we have them at level 1 or 2.
                            // They shouldn't be here, and indeed, we remove them just a little down from here in this function.
                            // That said, they might have smuggled in between versions, or something happened to cause them to be here.
                            // In order to avoid that, we forcibly delete those extra items from the scheduled reductions, and move on

                            var deletingScheduledReductionsDuration = Stopwatch.StartNew();

                            using (StopwatchScope.For(deletingScheduledReductionsDuration))
                            {
                                foreach (var reduceKey in keysToReduce)
                                {
                                    actions.MapReduce.DeleteScheduledReduction(index.IndexId, 1, reduceKey);
                                    actions.MapReduce.DeleteScheduledReduction(index.IndexId, 2, reduceKey);
                                }
                            }

                            parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds));
                        }

                        var removeReduceResultsDuration = new Stopwatch();

                        foreach (var reduceKey in localKeys)
                        {
                            var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexId, reduceKey);

                            if (lastPerformedReduceType != ReduceType.SingleStep)
                            {
                                localNeedToMoveToSingleStep.Add(reduceKey);
                            }

                            if (lastPerformedReduceType != ReduceType.MultiStep)
                            {
                                continue;
                            }

                            Log.Debug("Key {0} was moved from multi step to single step reduce, removing existing reduce results records",
                                      reduceKey);

                            // now we are in single step but previously multi step reduce was performed for the given key
                            var mappedBuckets = actions.MapReduce.GetMappedBuckets(index.IndexId, reduceKey).ToList();

                            // add scheduled items too to be sure we will delete reduce results of already deleted documents
                            mappedBuckets.AddRange(scheduledItems.Select(x => x.Bucket));

                            using (StopwatchScope.For(removeReduceResultsDuration))
                            {
                                foreach (var mappedBucket in mappedBuckets.Distinct())
                                {
                                    actions.MapReduce.RemoveReduceResults(index.IndexId, 1, reduceKey, mappedBucket);
                                    actions.MapReduce.RemoveReduceResults(index.IndexId, 2, reduceKey, mappedBucket / 1024);
                                }
                            }
                        }

                        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_RemoveReduceResults, removeReduceResultsDuration.ElapsedMilliseconds));

                        parallelOperations.Enqueue(parallelStats);
                    });
                });

                reduceLevelStats.Operations.Add(new ParallelPerformanceStats
                {
                    NumberOfThreads   = parallelOperations.Count,
                    DurationMs        = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds,
                    BatchedOperations = parallelOperations.ToList()
                });

                var getMappedResultsDuration = new Stopwatch();

                var keysLeftToReduce = new HashSet <string>(keysToReduce);

                var reductionPerformanceStats = new List <IndexingPerformanceStats>();

                while (keysLeftToReduce.Count > 0)
                {
                    List <MappedResultInfo> mappedResults = null;
                    var keysReturned = new HashSet <string>();

                    context.TransactionalStorage.Batch(actions =>
                    {
                        context.CancellationToken.ThrowIfCancellationRequested();
                        var take = context.CurrentNumberOfItemsToReduceInSingleBatch;

                        using (StopwatchScope.For(getMappedResultsDuration))
                        {
                            mappedResults = actions.MapReduce.GetMappedResults(
                                index.IndexId,
                                keysLeftToReduce,
                                true,
                                take,
                                keysReturned
                                ).ToList();
                        }
                    });

                    var count = mappedResults.Count;
                    var size  = mappedResults.Sum(x => x.Size);

                    mappedResults.ApplyIfNotNull(x => x.Bucket = 0);

                    var results = mappedResults.Where(x => x.Data != null).GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data)).ToArray();

                    context.MetricsCounters.ReducedPerSecond.Mark(results.Length);

                    context.CancellationToken.ThrowIfCancellationRequested();

                    var performance = context.IndexStorage.Reduce(index.IndexId, viewGenerator, results, 2, context, null, keysReturned, mappedResults.Count);

                    reductionPerformanceStats.Add(performance);

                    autoTuner.AutoThrottleBatchSize(count, size, batchTimeWatcher.Elapsed);
                }

                var needToMoveToSingleStep = new HashSet <string>();
                HashSet <string> set;
                while (needToMoveToSingleStepQueue.TryDequeue(out set))
                {
                    needToMoveToSingleStep.UnionWith(set);
                }

                foreach (var reduceKey in needToMoveToSingleStep)
                {
                    string localReduceKey = reduceKey;
                    transactionalStorage.Batch(actions =>
                                               actions.MapReduce.UpdatePerformedReduceType(index.IndexId, localReduceKey, ReduceType.SingleStep));
                }

                reduceLevelStats.Completed = SystemTime.UtcNow;
                reduceLevelStats.Duration  = reduceLevelStats.Completed - reduceLevelStats.Started;
                reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetMappedResults, getMappedResultsDuration.ElapsedMilliseconds));
                reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, 0));                 // in single step we write directly to Lucene index

                foreach (var stats in reductionPerformanceStats)
                {
                    reduceLevelStats.Add(stats);
                }

                reducePerformanceStats.LevelStats.Add(reduceLevelStats);
            }
            finally
            {
                long _;
                autoTuner.CurrentlyUsedBatchSizesInBytes.TryRemove(reducingBatchThrottlerId, out _);
            }

            return(reducePerformanceStats);
        }
Example #19
 public Unsubscriber(ConcurrentSet <IChainStateVisitor> visitors, IChainStateVisitor visitor)
 {
     this.visitors = visitors;
     this.visitor  = visitor;
 }
Example #20
        private int RunCore(TextWriter consoleOutput, ErrorLogger errorLogger, CancellationToken cancellationToken)
        {
            Debug.Assert(!Arguments.IsInteractive);

            cancellationToken.ThrowIfCancellationRequested();

            if (Arguments.DisplayLogo)
            {
                PrintLogo(consoleOutput);
            }

            if (Arguments.DisplayHelp)
            {
                PrintHelp(consoleOutput);
                return(Succeeded);
            }

            if (ReportErrors(Arguments.Errors, consoleOutput, errorLogger))
            {
                return(Failed);
            }

            var touchedFilesLogger = (Arguments.TouchedFilesPath != null) ? new TouchedFileLogger() : null;

            Compilation compilation = CreateCompilation(consoleOutput, touchedFilesLogger, errorLogger);

            if (compilation == null)
            {
                return(Failed);
            }

            var diagnostics         = new List <DiagnosticInfo>();
            var analyzers           = ResolveAnalyzersFromArguments(diagnostics, MessageProvider, touchedFilesLogger);
            var additionalTextFiles = ResolveAdditionalFilesFromArguments(diagnostics, MessageProvider, touchedFilesLogger);

            if (ReportErrors(diagnostics, consoleOutput, errorLogger))
            {
                return(Failed);
            }

            cancellationToken.ThrowIfCancellationRequested();

            CancellationTokenSource analyzerCts     = null;
            AnalyzerManager         analyzerManager = null;
            AnalyzerDriver          analyzerDriver  = null;

            try
            {
                Func <ImmutableArray <Diagnostic> > getAnalyzerDiagnostics       = null;
                ConcurrentSet <Diagnostic>          analyzerExceptionDiagnostics = null;
                if (!analyzers.IsDefaultOrEmpty)
                {
                    analyzerCts     = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
                    analyzerManager = new AnalyzerManager();
                    analyzerExceptionDiagnostics = new ConcurrentSet <Diagnostic>();
                    Action <Diagnostic> addExceptionDiagnostic = diagnostic => analyzerExceptionDiagnostics.Add(diagnostic);
                    var analyzerOptions = new AnalyzerOptions(ImmutableArray <AdditionalText> .CastUp(additionalTextFiles));
                    analyzerDriver         = AnalyzerDriver.Create(compilation, analyzers, analyzerOptions, analyzerManager, addExceptionDiagnostic, Arguments.ReportAnalyzer, out compilation, analyzerCts.Token);
                    getAnalyzerDiagnostics = () => analyzerDriver.GetDiagnosticsAsync().Result;
                }

                // Print the diagnostics produced during the parsing stage and exit if there were any errors.
                if (ReportErrors(compilation.GetParseDiagnostics(), consoleOutput, errorLogger))
                {
                    return(Failed);
                }

                if (ReportErrors(compilation.GetDeclarationDiagnostics(), consoleOutput, errorLogger))
                {
                    return(Failed);
                }

                EmitResult emitResult;

                // NOTE: as native compiler does, we generate the documentation file
                // NOTE: 'in place', replacing the contents of the file if it exists

                string finalOutputPath;
                string finalPdbFilePath;
                string finalXmlFilePath;

                Stream xmlStreamOpt = null;

                cancellationToken.ThrowIfCancellationRequested();

                finalXmlFilePath = Arguments.DocumentationPath;
                if (finalXmlFilePath != null)
                {
                    xmlStreamOpt = OpenFile(finalXmlFilePath, consoleOutput, PortableShim.FileMode.OpenOrCreate, PortableShim.FileAccess.Write, PortableShim.FileShare.ReadWriteBitwiseOrDelete);
                    if (xmlStreamOpt == null)
                    {
                        return(Failed);
                    }

                    xmlStreamOpt.SetLength(0);
                }

                cancellationToken.ThrowIfCancellationRequested();

                IEnumerable <DiagnosticInfo> errors;
                using (var win32ResourceStreamOpt = GetWin32Resources(Arguments, compilation, out errors))
                    using (xmlStreamOpt)
                    {
                        if (ReportErrors(errors, consoleOutput, errorLogger))
                        {
                            return(Failed);
                        }

                        cancellationToken.ThrowIfCancellationRequested();

                        string outputName = GetOutputFileName(compilation, cancellationToken);

                        finalOutputPath  = Path.Combine(Arguments.OutputDirectory, outputName);
                        finalPdbFilePath = Arguments.PdbPath ?? Path.ChangeExtension(finalOutputPath, ".pdb");

                        // NOTE: Unlike the PDB path, the XML doc path is not embedded in the assembly, so we don't need to pass it to emit.
                        var emitOptions = Arguments.EmitOptions.
                                          WithOutputNameOverride(outputName).
                                          WithPdbFilePath(finalPdbFilePath);

                        using (var peStreamProvider = new CompilerEmitStreamProvider(this, finalOutputPath))
                            using (var pdbStreamProviderOpt = Arguments.EmitPdb ? new CompilerEmitStreamProvider(this, finalPdbFilePath) : null)
                            {
                                emitResult = compilation.Emit(
                                    peStreamProvider,
                                    pdbStreamProviderOpt,
                                    (xmlStreamOpt != null) ? new Compilation.SimpleEmitStreamProvider(xmlStreamOpt) : null,
                                    (win32ResourceStreamOpt != null) ? new Compilation.SimpleEmitStreamProvider(win32ResourceStreamOpt) : null,
                                    Arguments.ManifestResources,
                                    emitOptions,
                                    getAnalyzerDiagnostics,
                                    cancellationToken);

                                if (emitResult.Success && touchedFilesLogger != null)
                                {
                                    if (pdbStreamProviderOpt != null)
                                    {
                                        touchedFilesLogger.AddWritten(finalPdbFilePath);
                                    }

                                    touchedFilesLogger.AddWritten(finalOutputPath);
                                }
                            }
                    }

                GenerateSqmData(Arguments.CompilationOptions, emitResult.Diagnostics);

                if (ReportErrors(emitResult.Diagnostics, consoleOutput, errorLogger))
                {
                    return(Failed);
                }

                cancellationToken.ThrowIfCancellationRequested();

                if (analyzerExceptionDiagnostics != null && ReportErrors(analyzerExceptionDiagnostics, consoleOutput, errorLogger))
                {
                    return(Failed);
                }

                bool errorsReadingAdditionalFiles = false;
                foreach (var additionalFile in additionalTextFiles)
                {
                    if (ReportErrors(additionalFile.Diagnostics, consoleOutput, errorLogger))
                    {
                        errorsReadingAdditionalFiles = true;
                    }
                }

                if (errorsReadingAdditionalFiles)
                {
                    return(Failed);
                }

                cancellationToken.ThrowIfCancellationRequested();

                if (Arguments.TouchedFilesPath != null)
                {
                    Debug.Assert(touchedFilesLogger != null);

                    if (finalXmlFilePath != null)
                    {
                        touchedFilesLogger.AddWritten(finalXmlFilePath);
                    }

                    var readStream = OpenFile(Arguments.TouchedFilesPath + ".read", consoleOutput, mode: PortableShim.FileMode.OpenOrCreate);
                    if (readStream == null)
                    {
                        return(Failed);
                    }

                    using (var writer = new StreamWriter(readStream))
                    {
                        touchedFilesLogger.WriteReadPaths(writer);
                    }

                    var writtenStream = OpenFile(Arguments.TouchedFilesPath + ".write", consoleOutput, mode: PortableShim.FileMode.OpenOrCreate);
                    if (writtenStream == null)
                    {
                        return(Failed);
                    }

                    using (var writer = new StreamWriter(writtenStream))
                    {
                        touchedFilesLogger.WriteWrittenPaths(writer);
                    }
                }
            }
            finally
            {
                // At this point analyzers are already complete in which case this is a no-op.  Or they are
                // still running because the compilation failed before all of the compilation events were
                // raised.  In the latter case the driver, and all its associated state, will be waiting around
                // for events that are never coming.  Cancel now and let the clean up process begin.
                if (analyzerCts != null)
                {
                    analyzerCts.Cancel();

                    if (analyzerManager != null)
                    {
                        // Clear cached analyzer descriptors and unregister exception handlers hooked up to the LocalizableString fields of the associated descriptors.
                        analyzerManager.ClearAnalyzerState(analyzers);
                    }

                    if (Arguments.ReportAnalyzer && analyzerDriver != null && compilation != null)
                    {
                        ReportAnalyzerExecutionTime(consoleOutput, analyzerDriver, Culture, compilation.Options.ConcurrentBuild);
                    }
                }
            }

            return(Succeeded);
        }
Example #21
 public TouchedFileLogger()
 {
     _readFiles    = new ConcurrentSet <string>();
     _writtenFiles = new ConcurrentSet <string>();
 }
Example #22
        private int RunCore(TextWriter consoleOutput, ErrorLogger2 errorLogger, CancellationToken cancellationToken)
        {
            Debug.Assert(!Arguments.GetFieldOrProperty <bool>("IsScriptRunner"));

            cancellationToken.ThrowIfCancellationRequested();

            if (Arguments.DisplayLogo)
            {
                PrintLogo(consoleOutput);
            }

            if (Arguments.DisplayHelp)
            {
                PrintHelp(consoleOutput);
                return(Succeeded);
            }

            if (ReportErrors(Arguments.Errors, consoleOutput, errorLogger))
            {
                return(Failed);
            }

            var touchedFilesLogger = (Arguments.TouchedFilesPath != null) ? new TouchedFileLogger() : null;

            Compilation compilation = CreateCompilation(consoleOutput, touchedFilesLogger, errorLogger);

            if (compilation == null)
            {
                return(Failed);
            }


            var diagnostics         = typeof(List <>).MakeGenericTypeFast(Refl.Type_DiagnosticInfo).InvokeFunction(".ctor");
            var analyzers           = ResolveAnalyzersAndGeneratorsFromArguments(diagnostics, MessageProvider, touchedFilesLogger);
            var additionalTextFiles = ResolveAdditionalFilesFromArguments(diagnostics, MessageProvider, touchedFilesLogger);

            if (ReportErrors((IEnumerable <DiagnosticInfo>)diagnostics, consoleOutput, errorLogger))
            {
                return(Failed);
            }

            cancellationToken.ThrowIfCancellationRequested();

            CancellationTokenSource analyzerCts     = null;
            AnalyzerManager         analyzerManager = null;
            AnalyzerDriver          analyzerDriver  = null;

            try
            {
                Func <ImmutableArray <Diagnostic> > getAnalyzerDiagnostics       = null;
                ConcurrentSet <Diagnostic>          analyzerExceptionDiagnostics = null;
                if (!analyzers.IsDefaultOrEmpty)
                {
                    analyzerCts     = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
                    analyzerManager = new AnalyzerManager();
                    analyzerExceptionDiagnostics = new ConcurrentSet <Diagnostic>();
                    Action <Diagnostic> addExceptionDiagnostic = diagnostic => analyzerExceptionDiagnostics.Add(diagnostic);
                    var analyzerOptions = new AnalyzerOptions(ImmutableArray.CreateRange(additionalTextFiles.Select(x => (AdditionalText)x)));

                    analyzerDriver = ReflAnalyzerDriver.CreateAndAttachToCompilation(
                        compilation,
                        analyzers,
                        analyzerOptions,
                        analyzerManager,
                        addExceptionDiagnostic,
                        Arguments.ReportAnalyzer,
                        out compilation,
                        analyzerCts.Token);
                    getAnalyzerDiagnostics = () => ((Task <ImmutableArray <Diagnostic> >)analyzerDriver.InvokeFunction("GetDiagnosticsAsync", compilation)).Result;
                }

                // Print the diagnostics produced during the parsing stage and exit if there were any errors.
                if (ReportErrors(compilation.GetParseDiagnostics(), consoleOutput, errorLogger))
                {
                    return(Failed);
                }

                if (ReportErrors(compilation.GetDeclarationDiagnostics(), consoleOutput, errorLogger))
                {
                    return(Failed);
                }
                consoleOutput.WriteLine("Rewriting LINQ to procedural code...");
                var originalCompilation = compilation;

                var rewrittenLinqInvocations = 0;
                var rewrittenMethods         = 0;
                foreach (var syntaxTree in compilation.SyntaxTrees)
                {
                    var        rewriter = new LinqRewriter(originalCompilation.GetSemanticModel(syntaxTree));
                    var        root     = syntaxTree.GetRoot();
                    SyntaxNode rewritten;
                    try
                    {
                        rewritten = rewriter.Visit(root);
                    }
                    catch (Exception ex)
                    {
                        ReportErrors(new[] { rewriter.CreateDiagnosticForException(ex, syntaxTree.FilePath) }, consoleOutput, errorLogger);

                        var m = ex;
                        while (m != null)
                        {
                            Console.WriteLine(m);
                            Console.WriteLine(m.StackTrace);
                            m = m.InnerException;
                        }


                        return(Failed);
                    }

                    if (rewritten != root)
                    {
                        OriginalPaths[syntaxTree] = syntaxTree.FilePath;
                        compilation = compilation.ReplaceSyntaxTree(syntaxTree, rewritten.SyntaxTree);

                        rewrittenLinqInvocations += rewriter.RewrittenLinqQueries;
                        rewrittenMethods         += rewriter.RewrittenMethods;
                    }
                }
                consoleOutput.WriteLine(string.Format("Rewritten {0} LINQ queries in {1} methods as procedural code.", rewrittenLinqInvocations, rewrittenMethods));

                var k = compilation.GetDiagnostics().Where(x => x.Severity == DiagnosticSeverity.Error).ToList();
                if (k.Count != 0)
                {
                    foreach (var err in k)
                    {
                        System.Console.WriteLine("Could not compile rewritten method. Consider applying a [Shaman.Runtime.NoLinqRewrite] attribute.");
                        var m = err.Location.SourceTree.GetRoot().FindNode(err.Location.SourceSpan);
                        System.Console.WriteLine(err.Location.SourceTree.FilePath);
                        //var z = m.FirstAncestorOrSelf<CSharp.Syntax.BaseMethodDeclarationSyntax>(x => x.Kind() == CSharp.SyntaxKind.MethodDeclaration);
                        //compilation.GetSemanticModel().GetEnclosingSymbol(err.Location.SourceSpan.Start)
                    }
                }
                if (ReportErrors(compilation.GetParseDiagnostics().Where(x => x.Severity == DiagnosticSeverity.Error), consoleOutput, errorLogger))
                {
                    return(Failed);
                }

                if (ReportErrors(compilation.GetDeclarationDiagnostics().Where(x => x.Severity == DiagnosticSeverity.Error), consoleOutput, errorLogger))
                {
                    return(Failed);
                }



                EmitResult emitResult;

                // NOTE: as native compiler does, we generate the documentation file
                // NOTE: 'in place', replacing the contents of the file if it exists

                string finalPeFilePath;
                string finalPdbFilePath;
                string finalXmlFilePath;

                Stream xmlStreamOpt = null;

                cancellationToken.ThrowIfCancellationRequested();

                finalXmlFilePath = Arguments.DocumentationPath;
                if (finalXmlFilePath != null)
                {
                    xmlStreamOpt = OpenFile(finalXmlFilePath, consoleOutput, FileMode.OpenOrCreate, FileAccess.Write, FileShare.ReadWrite | FileShare.Delete);
                    if (xmlStreamOpt == null)
                    {
                        return(Failed);
                    }

                    xmlStreamOpt.SetLength(0);
                }

                cancellationToken.ThrowIfCancellationRequested();
                ImmutableArray <Diagnostic>  emitDiagnostics;
                IEnumerable <DiagnosticInfo> errors;
                using (var win32ResourceStreamOpt = GetWin32Resources(Arguments, compilation, out errors))
                    using (xmlStreamOpt)
                    {
                        if (ReportErrors(errors, consoleOutput, errorLogger))
                        {
                            return(Failed);
                        }

                        cancellationToken.ThrowIfCancellationRequested();

                        string outputName = GetOutputFileName(compilation, cancellationToken);

                        finalPeFilePath  = Path.Combine(Arguments.OutputDirectory, outputName);
                        finalPdbFilePath = Arguments.PdbPath ?? Path.ChangeExtension(finalPeFilePath, ".pdb");

                        // NOTE: Unlike the PDB path, the XML doc path is not embedded in the assembly, so we don't need to pass it to emit.
                        var emitOptions = Arguments.EmitOptions.
                                          WithOutputNameOverride(outputName).
                                          WithPdbFilePath(finalPdbFilePath);

                        // The PDB path is emitted in its entirety into the PE.  This makes it impossible to have deterministic
                        // builds that occur in different source directories.  To enable this we shave all path information from
                        // the PDB when specified by the user.
                        //
                        // This is a temporary work around to allow us to make progress with determinism.  The following issue
                        // tracks getting an official solution here.
                        //
                        // https://github.com/dotnet/roslyn/issues/9813
                        if (Arguments.ParseOptions.Features.ContainsKey("pdb-path-determinism") && !string.IsNullOrEmpty(emitOptions.PdbFilePath))
                        {
                            emitOptions = emitOptions.WithPdbFilePath(Path.GetFileName(emitOptions.PdbFilePath));
                        }

                        //var dummyCompiler = Refl.Type_Csc.InvokeFunction(".ctor", backup_responseFile, (object)null, backup_args, backup_analyzerLoader);

                        var constr             = Refl.Type_Csc.GetConstructors(BindingFlags.Public | BindingFlags.NonPublic | BindingFlags.Instance).First();
                        var backup_buildPaths2 = Refl.Type_BuildPaths.InvokeFunction(".ctor", backup_buildPaths.ClientDirectory, backup_buildPaths.WorkingDirectory, backup_buildPaths.SdkDirectory);
                        var dummyCompiler      = constr.Invoke(new object[] { backup_responseFile, backup_buildPaths2, backup_args, backup_analyzerLoader });
                        //var dummyCompiler = Activator.CreateInstance(Refl.Type_Csc, BindingFlags.Public|BindingFlags.NonPublic|BindingFlags.CreateInstance|BindingFlags.Instance, null, new object[]{ backup_responseFile, (object)null, backup_args, backup_analyzerLoader}, null);
                        using (var peStreamProvider = (IDisposable)Refl.Type_CompilerEmitStreamProvider.InvokeFunction(".ctor", dummyCompiler, finalPeFilePath))
                            using (var pdbStreamProviderOpt = Arguments.EmitPdb ? (IDisposable)Refl.Type_CompilerEmitStreamProvider.InvokeFunction(".ctor", dummyCompiler, finalPdbFilePath) : null)
                            {
                                /*
                                 * internal EmitResult Emit(
                                 *  Compilation.EmitStreamProvider peStreamProvider,
                                 *  Compilation.EmitStreamProvider pdbStreamProvider,
                                 *  Compilation.EmitStreamProvider xmlDocumentationStreamProvider,
                                 *  Compilation.EmitStreamProvider win32ResourcesStreamProvider,
                                 *  IEnumerable<ResourceDescription> manifestResources,
                                 *  EmitOptions options,
                                 *  IMethodSymbol debugEntryPoint,
                                 *  CompilationTestData testData,
                                 *  Func<ImmutableArray<Diagnostic>> getHostDiagnostics,
                                 *  CancellationToken cancellationToken)
                                 */
                                var diagnosticBag = Refl.Type_DiagnosticBag.InvokeFunction(".ctor");

                                emitResult = null;
                                try
                                {
                                    var peStream  = ReflEmitStreamProvider.CreateStream(peStreamProvider, diagnosticBag);
                                    var pdbStream = pdbStreamProviderOpt != null ? ReflEmitStreamProvider.CreateStream(pdbStreamProviderOpt, diagnosticBag) : null;

                                    emitResult = compilation.Emit(
                                        peStream,
                                        pdbStream,
                                        xmlStreamOpt,
                                        win32ResourceStreamOpt,
                                        Arguments.ManifestResources,
                                        emitOptions,
                                        null,
                                        cancellationToken);
                                }
                                catch
                                {
                                    emitDiagnostics = GetEmitDiagnostic(diagnosticBag);
                                    if (emitDiagnostics.Length == 0)
                                    {
                                        throw;
                                    }
                                }
                                emitDiagnostics = GetEmitDiagnostic(diagnosticBag);

                                if (emitResult != null && emitResult.Success && touchedFilesLogger != null)
                                {
                                    if (pdbStreamProviderOpt != null)
                                    {
                                        touchedFilesLogger.InvokeAction("AddWritten", finalPdbFilePath);
                                    }

                                    touchedFilesLogger.InvokeAction("AddWritten", finalPeFilePath);
                                }
                            }
                    }


                if (ReportErrors((emitResult != null ? emitResult.Diagnostics : Enumerable.Empty <Diagnostic>()).Union(emitDiagnostics), consoleOutput, errorLogger))
                {
                    return(Failed);
                }

                cancellationToken.ThrowIfCancellationRequested();

                if (analyzerExceptionDiagnostics != null && ReportErrors(analyzerExceptionDiagnostics, consoleOutput, errorLogger))
                {
                    return(Failed);
                }

                bool errorsReadingAdditionalFiles = false;
                foreach (var additionalFile in additionalTextFiles)
                {
                    if (ReportErrors(additionalFile.GetFieldOrProperty <IEnumerable <Diagnostic> >("Diagnostics"), consoleOutput, errorLogger))
                    {
                        errorsReadingAdditionalFiles = true;
                    }
                }

                if (errorsReadingAdditionalFiles)
                {
                    return(Failed);
                }

                cancellationToken.ThrowIfCancellationRequested();

                if (Arguments.TouchedFilesPath != null)
                {
                    Debug.Assert(touchedFilesLogger != null);

                    if (finalXmlFilePath != null)
                    {
                        touchedFilesLogger.InvokeAction("AddWritten", finalXmlFilePath);
                    }

                    var readStream = OpenFile(Arguments.TouchedFilesPath + ".read", consoleOutput, mode: FileMode.OpenOrCreate);
                    if (readStream == null)
                    {
                        return(Failed);
                    }

                    using (var writer = new StreamWriter(readStream))
                    {
                        touchedFilesLogger.InvokeAction("WriteReadPaths", writer);
                    }

                    var writtenStream = OpenFile(Arguments.TouchedFilesPath + ".write", consoleOutput, mode: FileMode.OpenOrCreate);
                    if (writtenStream == null)
                    {
                        return(Failed);
                    }

                    using (var writer = new StreamWriter(writtenStream))
                    {
                        touchedFilesLogger.InvokeAction("WriteWrittenPaths", writer);
                    }
                }
            }
            finally
            {
                // At this point analyzers are already complete in which case this is a no-op.  Or they are
                // still running because the compilation failed before all of the compilation events were
                // raised.  In the latter case the driver, and all its associated state, will be waiting around
                // for events that are never coming.  Cancel now and let the clean up process begin.
                if (analyzerCts != null)
                {
                    analyzerCts.Cancel();

                    if (analyzerManager != null)
                    {
                        // Clear cached analyzer descriptors and unregister exception handlers hooked up to the LocalizableString fields of the associated descriptors.
                        analyzerManager.InvokeAction("ClearAnalyzerState", analyzers);
                    }

                    if (Arguments.ReportAnalyzer && analyzerDriver != null && compilation != null)
                    {
                        ReportAnalyzerExecutionTime(consoleOutput, analyzerDriver, Culture, compilation.Options.ConcurrentBuild);
                    }
                }
            }

            return(Succeeded);
        }
Example #23
        private static async Task AddDescendantSourceTypesInProjectAsync(
            SymbolSet currentSourceAndMetadataTypes,
            SymbolSet result,
            Project project,
            Func <INamedTypeSymbol, SymbolSet, bool> typeMatches,
            Func <INamedTypeSymbol, bool> shouldContinueSearching,
            bool transitive,
            CancellationToken cancellationToken)
        {
            cancellationToken.ThrowIfCancellationRequested();

            // We're going to be sweeping over this project over and over until we reach a
            // fixed point.  In order to limit GC and excess work, we cache all the semantic
            // models and DeclaredSymbolInfo for the documents we look at.
            // Because we're only processing a project at a time, this is not an issue.
            var cachedModels = new ConcurrentSet <SemanticModel>();

            using var _1 = GetSymbolSet(out var typesToSearchFor);
            using var _2 = GetSymbolSet(out var tempBuffer);

            typesToSearchFor.AddAll(currentSourceAndMetadataTypes);

            var projectIndex = await ProjectIndex.GetIndexAsync(project, cancellationToken).ConfigureAwait(false);

            // As long as there are new types to search for, keep looping.
            while (typesToSearchFor.Count > 0)
            {
                foreach (var type in typesToSearchFor)
                {
                    cancellationToken.ThrowIfCancellationRequested();

                    switch (type.SpecialType)
                    {
                    case SpecialType.System_Object:
                        await AddMatchingTypesAsync(
                            cachedModels,
                            projectIndex.ClassesThatMayDeriveFromSystemObject,
                            result: tempBuffer,
                            predicateOpt: n => n.BaseType?.SpecialType == SpecialType.System_Object,
                            cancellationToken).ConfigureAwait(false);

                        break;

                    case SpecialType.System_ValueType:
                        await AddMatchingTypesAsync(
                            cachedModels,
                            projectIndex.ValueTypes,
                            result: tempBuffer,
                            predicateOpt: null,
                            cancellationToken).ConfigureAwait(false);

                        break;

                    case SpecialType.System_Enum:
                        await AddMatchingTypesAsync(
                            cachedModels,
                            projectIndex.Enums,
                            result: tempBuffer,
                            predicateOpt: null,
                            cancellationToken).ConfigureAwait(false);

                        break;

                    case SpecialType.System_MulticastDelegate:
                        await AddMatchingTypesAsync(
                            cachedModels,
                            projectIndex.Delegates,
                            result: tempBuffer,
                            predicateOpt: null,
                            cancellationToken).ConfigureAwait(false);

                        break;
                    }

                    await AddSourceTypesThatDeriveFromNameAsync(
                        typeMatches,
                        cachedModels,
                        typesToSearchFor,
                        projectIndex,
                        result: tempBuffer,
                        type.Name,
                        cancellationToken).ConfigureAwait(false);
                }

                PropagateTemporaryResults(
                    result, typesToSearchFor, tempBuffer, transitive, shouldContinueSearching);
            }
        }
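
The loop above is a textbook fixed-point search: process the current frontier, collect newly discovered types into a temporary buffer, then promote only the not-yet-seen ones back into the frontier. A generic sketch of that propagation step (names are illustrative; the real PropagateTemporaryResults is Roslyn-internal):

        using System.Collections.Generic;

        static class FixedPoint
        {
            // Move items from 'buffer' into 'result'; anything unseen becomes
            // the next frontier when the search is transitive.
            public static void Propagate<T>(
                HashSet<T> result, HashSet<T> frontier, HashSet<T> buffer, bool transitive)
            {
                frontier.Clear();
                foreach (var item in buffer)
                {
                    // HashSet.Add returns false for duplicates, so every item is
                    // expanded at most once and the outer loop must terminate.
                    if (result.Add(item) && transitive)
                        frontier.Add(item);
                }
                buffer.Clear();
            }
        }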
Example #24
        public override IndexingPerformanceStats IndexDocuments(AbstractViewGenerator viewGenerator, IndexingBatch batch, IStorageActionsAccessor actions, DateTime minimumTimestamp, CancellationToken token)
        {
            token.ThrowIfCancellationRequested();

            var count            = 0;
            var sourceCount      = batch.Docs.Count;
            var deleted          = new Dictionary<ReduceKeyAndBucket, int>();
            var performance      = RecordCurrentBatch("Current Map", "Map", batch.Docs.Count);
            var performanceStats = new List <BasePerformanceStats>();

            var usedStorageAccessors = new ConcurrentSet<IStorageActionsAccessor>();

            if (usedStorageAccessors.TryAdd(actions))
            {
                var storageCommitDuration = new Stopwatch();

                actions.BeforeStorageCommit += storageCommitDuration.Start;

                actions.AfterStorageCommit += () =>
                {
                    storageCommitDuration.Stop();

                    performanceStats.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
                };
            }

            List<dynamic> documentsWrapped;

            if (actions.MapReduce.HasMappedResultsForIndex(indexId) == false)
            {
                // new index
                documentsWrapped = batch.Docs.Where(x => x is FilteredDocument == false).ToList();
            }
            else
            {
                var deleteMappedResultsDuration = new Stopwatch();
                documentsWrapped = batch.Docs.Select(doc =>
                {
                    token.ThrowIfCancellationRequested();

                    var documentId = doc.__document_id;

                    using (StopwatchScope.For(deleteMappedResultsDuration))
                    {
                        actions.MapReduce.DeleteMappedResultsForDocumentId((string)documentId, indexId, deleted);
                    }

                    return doc;
                })
                                   .Where(x => x is FilteredDocument == false)
                                   .ToList();

                performanceStats.Add(new PerformanceStats
                {
                    Name       = IndexingOperation.Map_DeleteMappedResults,
                    DurationMs = deleteMappedResultsDuration.ElapsedMilliseconds,
                });
            }

            var allReferencedDocs = new ConcurrentQueue<IDictionary<string, HashSet<string>>>();
            var allReferenceEtags = new ConcurrentQueue<IDictionary<string, Etag>>();
            var allState          = new ConcurrentQueue<Tuple<HashSet<ReduceKeyAndBucket>, IndexingWorkStats, Dictionary<string, int>>>();

            var parallelOperations = new ConcurrentQueue<ParallelBatchStats>();

            var parallelProcessingStart = SystemTime.UtcNow;

            if (context.Database.ThreadPool == null || context.RunReducing == false)
            {
                throw new OperationCanceledException();
            }

            context.Database.ThreadPool.ExecuteBatch(documentsWrapped, (IEnumerator<dynamic> partition) =>
            {
                token.ThrowIfCancellationRequested();
                var parallelStats = new ParallelBatchStats
                {
                    StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds
                };

                var localStats   = new IndexingWorkStats();
                var localChanges = new HashSet<ReduceKeyAndBucket>();
                var statsPerKey  = new Dictionary<string, int>();

                var linqExecutionDuration            = new Stopwatch();
                var reduceInMapLinqExecutionDuration = new Stopwatch();
                var putMappedResultsDuration         = new Stopwatch();
                var convertToRavenJObjectDuration    = new Stopwatch();

                allState.Enqueue(Tuple.Create(localChanges, localStats, statsPerKey));

                using (CurrentIndexingScope.Current = new CurrentIndexingScope(context.Database, PublicName))
                {
                    // we are writing to the transactional store from multiple threads here, in a
                    // streaming fashion, which should result in less memory use and better perf
                    context.TransactionalStorage.Batch(accessor =>
                    {
                        if (usedStorageAccessors.TryAdd(accessor))
                        {
                            var storageCommitDuration = new Stopwatch();

                            accessor.BeforeStorageCommit += storageCommitDuration.Start;

                            accessor.AfterStorageCommit += () =>
                            {
                                storageCommitDuration.Stop();

                                parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
                            };
                        }

                        var mapResults             = RobustEnumerationIndex(partition, viewGenerator.MapDefinitions, localStats, linqExecutionDuration);
                        var currentDocumentResults = new List<object>();
                        string currentKey          = null;
                        bool skipDocument          = false;

                        foreach (var currentDoc in mapResults)
                        {
                            token.ThrowIfCancellationRequested();

                            var documentId = GetDocumentId(currentDoc);
                            if (documentId != currentKey)
                            {
                                count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey, reduceInMapLinqExecutionDuration, putMappedResultsDuration, convertToRavenJObjectDuration);

                                currentDocumentResults.Clear();
                                currentKey = documentId;

                                // a new document starts with a fresh output count
                                skipDocument = false;
                            }
                            else if (skipDocument)
                            {
                                continue;
                            }

                            RavenJObject currentDocJObject;
                            using (StopwatchScope.For(convertToRavenJObjectDuration))
                            {
                                currentDocJObject = RavenJObject.FromObject(currentDoc, jsonSerializer);
                            }

                            currentDocumentResults.Add(new DynamicJsonObject(currentDocJObject));

                            if (EnsureValidNumberOfOutputsForDocument(documentId, currentDocumentResults.Count) == false)
                            {
                                skipDocument = true;
                                currentDocumentResults.Clear();
                                continue;
                            }

                            Interlocked.Increment(ref localStats.IndexingSuccesses);
                        }
                        count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey, reduceInMapLinqExecutionDuration, putMappedResultsDuration, convertToRavenJObjectDuration);

                        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.LoadDocument, CurrentIndexingScope.Current.LoadDocumentDuration.ElapsedMilliseconds));
                        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_MapExecution, linqExecutionDuration.ElapsedMilliseconds));
                        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_ReduceLinqExecution, reduceInMapLinqExecutionDuration.ElapsedMilliseconds));
                        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_PutMappedResults, putMappedResultsDuration.ElapsedMilliseconds));
                        parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ConvertToRavenJObject, convertToRavenJObjectDuration.ElapsedMilliseconds));

                        parallelOperations.Enqueue(parallelStats);
                    });

                    allReferenceEtags.Enqueue(CurrentIndexingScope.Current.ReferencesEtags);
                    allReferencedDocs.Enqueue(CurrentIndexingScope.Current.ReferencedDocuments);
                }
            }, description: $"Reducing index {PublicName} up to etag {batch.HighestEtagBeforeFiltering}, for {documentsWrapped.Count} documents", database: context.Database);

            performanceStats.Add(new ParallelPerformanceStats
            {
                NumberOfThreads   = parallelOperations.Count,
                DurationMs        = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds,
                BatchedOperations = parallelOperations.ToList()
            });

            var updateDocumentReferencesDuration = new Stopwatch();

            using (StopwatchScope.For(updateDocumentReferencesDuration))
            {
                UpdateDocumentReferences(actions, allReferencedDocs, allReferenceEtags);
            }
            performanceStats.Add(PerformanceStats.From(IndexingOperation.UpdateDocumentReferences, updateDocumentReferencesDuration.ElapsedMilliseconds));

            var changed = allState.SelectMany(x => x.Item1).Concat(deleted.Keys)
                          .Distinct()
                          .ToList();

            var stats          = new IndexingWorkStats(allState.Select(x => x.Item2));
            var reduceKeyStats = allState.SelectMany(x => x.Item3)
                                 .GroupBy(x => x.Key)
                                 .Select(g => new { g.Key, Count = g.Sum(x => x.Value) })
                                 .ToList();

            var reduceKeyToCount = new ConcurrentDictionary<string, int>();

            foreach (var singleDeleted in deleted)
            {
                var reduceKey = singleDeleted.Key.ReduceKey;
                reduceKeyToCount[reduceKey] = reduceKeyToCount.GetOrDefault(reduceKey) + singleDeleted.Value;
            }

            if (context.Database.ThreadPool == null || context.RunReducing == false)
            {
                throw new OperationCanceledException();
            }

            context.Database.ThreadPool.ExecuteBatch(reduceKeyStats, enumerator =>
                                                     context.TransactionalStorage.Batch(accessor =>
            {
                while (enumerator.MoveNext())
                {
                    var reduceKeyStat = enumerator.Current;
                    var value         = 0;
                    reduceKeyToCount.TryRemove(reduceKeyStat.Key, out value);

                    var changeValue = reduceKeyStat.Count - value;
                    if (changeValue == 0)
                    {
                        // nothing to change
                        continue;
                    }

                    accessor.MapReduce.IncrementReduceKeyCounter(indexId, reduceKeyStat.Key, changeValue);
                }
            }),
                                                     description: $"Incrementing reducing key counter fo index {PublicName} for operation " +
                                                     $"from etag {GetLastEtagFromStats()} to etag {batch.HighestEtagBeforeFiltering}", database: context.Database);

            foreach (var keyValuePair in reduceKeyToCount)
            {
                // those are the remaining keys that weren't used,
                // reduce keys that were replaced
                actions.MapReduce.IncrementReduceKeyCounter(indexId, keyValuePair.Key, -keyValuePair.Value);
            }

            actions.General.MaybePulseTransaction();

            var parallelReductionOperations = new ConcurrentQueue<ParallelBatchStats>();
            var parallelReductionStart      = SystemTime.UtcNow;

            if (context.Database.ThreadPool == null || context.RunReducing == false)
            {
                throw new OperationCanceledException();
            }

            context.Database.ThreadPool.ExecuteBatch(changed, enumerator =>
                                                     context.TransactionalStorage.Batch(accessor =>
            {
                var parallelStats = new ParallelBatchStats
                {
                    StartDelay = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds
                };

                var scheduleReductionsDuration = new Stopwatch();

                using (StopwatchScope.For(scheduleReductionsDuration))
                {
                    while (enumerator.MoveNext())
                    {
                        accessor.MapReduce.ScheduleReductions(indexId, 0, enumerator.Current);
                        accessor.General.MaybePulseTransaction();
                    }
                }

                parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds));
                parallelReductionOperations.Enqueue(parallelStats);
            }),
                                                     description: $"Schedule reductions for index {PublicName} after operation " +
                                                     $"from etag {GetLastEtagFromStats()} to etag {batch.HighestEtagBeforeFiltering}", database: context.Database);

            performanceStats.Add(new ParallelPerformanceStats
            {
                NumberOfThreads   = parallelReductionOperations.Count,
                DurationMs        = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds,
                BatchedOperations = parallelReductionOperations.ToList()
            });

            UpdateIndexingStats(context, stats);

            performance.OnCompleted = () => BatchCompleted("Current Map", "Map", sourceCount, count, performanceStats);
            if (logIndexing.IsDebugEnabled)
            {
                logIndexing.Debug("Mapped {0} documents for {1}", count, PublicName);
            }

            return performance;
        }
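
A detail worth calling out in the example above: usedStorageAccessors.TryAdd(accessor) doubles as a once-per-accessor guard, so the commit stopwatch handlers are attached only by the first thread to see a given accessor. The pattern in isolation (a sketch; ConcurrentDictionary stands in for ConcurrentSet):

        using System;
        using System.Collections.Concurrent;

        static class OnceGuard
        {
            private static readonly ConcurrentDictionary<object, byte> Seen =
                new ConcurrentDictionary<object, byte>();

            // Run 'attach' for a shared resource at most once, even when many
            // indexing threads race through here: only the TryAdd winner attaches.
            public static void InstrumentOnce(object resource, Action attach)
            {
                if (Seen.TryAdd(resource, 0))
                    attach();
            }
        }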
Example #25
        /// <summary>
        /// Constructor for Data Subscriptions
        /// </summary>
        /// <param name="objectType">Type of the data objects.</param>
        /// <param name="symbol">Symbol of the asset we're requesting</param>
        /// <param name="resolution">Resolution of the asset we're requesting</param>
        /// <param name="dataTimeZone">The time zone the raw data is time stamped in</param>
        /// <param name="exchangeTimeZone">Specifies the time zone of the exchange for the security this subscription is for. This
        /// is this output time zone, that is, the time zone that will be used on BaseData instances</param>
        /// <param name="fillForward">Fill in gaps with historical data</param>
        /// <param name="extendedHours">Equities only - send in data from 4am - 8pm</param>
        /// <param name="isInternalFeed">Set to true if this subscription is added for the sole purpose of providing currency conversion rates,
        /// setting this flag to true will prevent the data from being sent into the algorithm's OnData methods</param>
        /// <param name="isCustom">True if this is user supplied custom data, false for normal QC data</param>
        /// <param name="tickType">Specifies if trade or quote data is subscribed</param>
        /// <param name="isFilteredSubscription">True if this subscription should have filters applied to it (market hours/user filters from security), false otherwise</param>
        /// <param name="dataNormalizationMode">Specifies normalization mode used for this subscription</param>
        /// <param name="dataMappingMode">The contract mapping mode to use for the security</param>
        /// <param name="contractDepthOffset">The continuous contract desired offset from the current front month.
        /// For example, 0 (default) will use the front month, 1 will use the back month contract</param>
        public SubscriptionDataConfig(Type objectType,
                                      Symbol symbol,
                                      Resolution resolution,
                                      DateTimeZone dataTimeZone,
                                      DateTimeZone exchangeTimeZone,
                                      bool fillForward,
                                      bool extendedHours,
                                      bool isInternalFeed,
                                      bool isCustom               = false,
                                      TickType? tickType          = null,
                                      bool isFilteredSubscription = true,
                                      DataNormalizationMode dataNormalizationMode = DataNormalizationMode.Adjusted,
                                      DataMappingMode dataMappingMode             = DataMappingMode.OpenInterest,
                                      uint contractDepthOffset = 0)
        {
            if (objectType == null)
            {
                throw new ArgumentNullException(nameof(objectType));
            }
            if (symbol == null)
            {
                throw new ArgumentNullException(nameof(symbol));
            }
            if (dataTimeZone == null)
            {
                throw new ArgumentNullException(nameof(dataTimeZone));
            }
            if (exchangeTimeZone == null)
            {
                throw new ArgumentNullException(nameof(exchangeTimeZone));
            }

            Type                   = objectType;
            Resolution             = resolution;
            _sid                   = symbol.ID;
            Symbol                 = symbol;
            FillDataForward        = fillForward;
            ExtendedMarketHours    = extendedHours;
            PriceScaleFactor       = 1;
            IsInternalFeed         = isInternalFeed;
            IsCustomData           = isCustom;
            DataTimeZone           = dataTimeZone;
            DataMappingMode        = dataMappingMode;
            ExchangeTimeZone       = exchangeTimeZone;
            ContractDepthOffset    = contractDepthOffset;
            IsFilteredSubscription = isFilteredSubscription;
            Consolidators          = new ConcurrentSet<IDataConsolidator>();
            DataNormalizationMode  = dataNormalizationMode;

            TickType = tickType ?? LeanData.GetCommonTickTypeForCommonDataTypes(objectType, SecurityType);

            switch (resolution)
            {
            case Resolution.Tick:
                // Ticks are individual sales, so fill forward doesn't apply.
                Increment       = TimeSpan.FromSeconds(0);
                FillDataForward = false;
                break;

            case Resolution.Second:
                Increment = TimeSpan.FromSeconds(1);
                break;

            case Resolution.Minute:
                Increment = TimeSpan.FromMinutes(1);
                break;

            case Resolution.Hour:
                Increment = TimeSpan.FromHours(1);
                break;

            case Resolution.Daily:
                Increment = TimeSpan.FromDays(1);
                break;

            default:
                throw new InvalidEnumArgumentException(Invariant($"Unexpected Resolution: {resolution}"));
            }
        }
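
The switch above pins each Resolution to a fixed bar increment. For tests or table-driven code the same mapping can live in a dictionary; a sketch (with a local stand-in enum, not QuantConnect's Resolution type):

        using System;
        using System.Collections.Generic;

        // Local stand-in for the Resolution enum, for illustration only.
        enum Resolution { Tick, Second, Minute, Hour, Daily }

        static class ResolutionIncrements
        {
            // Mirrors the switch in the constructor above: Tick has no
            // fill-forward increment, everything else maps to a fixed period.
            private static readonly Dictionary<Resolution, TimeSpan> Map =
                new Dictionary<Resolution, TimeSpan>
                {
                    [Resolution.Tick]   = TimeSpan.Zero,
                    [Resolution.Second] = TimeSpan.FromSeconds(1),
                    [Resolution.Minute] = TimeSpan.FromMinutes(1),
                    [Resolution.Hour]   = TimeSpan.FromHours(1),
                    [Resolution.Daily]  = TimeSpan.FromDays(1),
                };

            public static TimeSpan ToIncrement(this Resolution resolution) =>
                Map.TryGetValue(resolution, out var increment)
                    ? increment
                    : throw new ArgumentOutOfRangeException(nameof(resolution));
        }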
Example #26
 public void HandleLowMemory()
 {
     documentIdsSetBuildersCache      = new ConditionalWeakTable<IndexReader, ConcurrentDictionary<Tuple<string, Predicate<string>>, Predicate<int>>>();
     fieldsStringValuesInReadersCache = new ConditionalWeakTable<IndexReader, ConcurrentDictionary<string, Dictionary<int, string>>>();
     _keys = new ConcurrentSet<WeakReference<IndexReader>>();
 }
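
Handling low memory by replacing whole collections, rather than clearing them, lets in-flight readers finish against the old instances while new lookups start cold. A minimal sketch of the swap, assuming a HandleLowMemory-style callback:

        using System;
        using System.Collections.Concurrent;

        sealed class QueryCache
        {
            // Replaced wholesale under memory pressure: readers holding the old
            // dictionary keep a consistent view, new lookups start empty.
            private volatile ConcurrentDictionary<string, string> _entries =
                new ConcurrentDictionary<string, string>();

            public string GetOrAdd(string key, Func<string, string> factory) =>
                _entries.GetOrAdd(key, factory);

            public void HandleLowMemory() =>
                _entries = new ConcurrentDictionary<string, string>();
        }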
Example #27
 protected NotificationsBase()
 {
     Watchers          = new ConcurrentSet<ConnectedWatcher>();
     BackgroundWorkers = new List<BackgroundWorkBase>();
 }
Example #28
        public async Task EditRollingIndexMultipleTimesWhileDocumentsModified()
        {
            DebuggerAttachedTimeout.DisableLongTimespan = true;
            var cluster = await CreateRaftCluster(3, watcherCluster: true);

            using (var store = GetDocumentStoreForRollingIndexes(
                       new Options
            {
                Server = cluster.Leader,
                ReplicationFactor = 3,
            }))
            {
                await CreateData(store);

                await store.ExecuteIndexAsync(new MyRollingIndex());

                WaitForIndexingInTheCluster(store, store.Database);

                await VerifyHistory(cluster, store);

                var runningIndexes = new ConcurrentSet<Index>();
                var violation      = new StringBuilder();
                var mre            = new ManualResetEventSlim();
                foreach (var server in Servers)
                {
                    var database = await server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(store.Database);

                    var indexStore = database.IndexStore;

                    indexStore.ForTestingPurposesOnly().BeforeRollingIndexStart = index => mre.Wait(index.IndexingProcessCancellationToken);

                    indexStore.ForTestingPurposesOnly().OnRollingIndexStart = index =>
                    {
                        if (index.Name != "ReplacementOf/MyRollingIndex")
                        {
                            return;
                        }

                        if (runningIndexes.TryAdd(index) == false)
                        {
                            violation.AppendLine($"{index} already exists");
                        }

                        var inc = runningIndexes.Count;
                        if (inc > 1)
                        {
                            violation.AppendLine($"{index} started concurrently (count: {inc})");
                        }
                    };
                    indexStore.ForTestingPurposesOnly().BeforeRollingIndexFinished = index =>
                    {
                        if (runningIndexes.TryRemove(index) == false)
                        {
                            violation.AppendLine($"{index} isn't found");
                        }

                        var dec = runningIndexes.Count;
                        if (dec != 0)
                        {
                            violation.AppendLine($"finishing {index} must be zero but is {dec} @ {server.ServerStore.NodeTag}");
                        }
                    };

                    indexStore.ForTestingPurposesOnly().BeforeIndexThreadExit = index =>
                    {
                        if (index.IndexingProcessCancellationToken.IsCancellationRequested)
                        {
                            runningIndexes.TryRemove(index);
                        }
                    };
                }

                using (var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10)))
                {
                    var t = ContinuouslyModifyDocuments(store, cts.Token);
                    try
                    {
                        try
                        {
                            await store.ExecuteIndexAsync(new MyEditedRollingIndex());

                            await store.ExecuteIndexAsync(new MyEditedRollingIndex2());

                            await store.ExecuteIndexAsync(new MyEditedRollingIndex());

                            await store.ExecuteIndexAsync(new MyEditedRollingIndex2());
                        }
                        finally
                        {
                            mre.Set();
                        }

                        WaitForIndexingInTheCluster(store, store.Database);

                        var v = violation.ToString();
                        Assert.True(string.IsNullOrEmpty(v), v);

                        await VerifyHistory(cluster, store);
                    }
                    finally
                    {
                        try
                        {
                            cts.Cancel();
                            await t;
                        }
                        catch
                        {
                            // ignore
                        }
                    }
                }
            }
        }
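
In this test the ConcurrentSet is used purely as a concurrency detector: a failed TryAdd means an index started twice, a failed TryRemove means it finished without starting, and Count > 1 means two rolling deployments overlapped. The invariant check extracted as a sketch (like the test, it tolerates a plain StringBuilder because violations are only read after the run):

        using System.Collections.Concurrent;
        using System.Text;

        sealed class SingleRunnerInvariant
        {
            private readonly ConcurrentDictionary<string, byte> _running =
                new ConcurrentDictionary<string, byte>();
            private readonly StringBuilder _violations = new StringBuilder();

            public void OnStart(string name)
            {
                if (!_running.TryAdd(name, 0))
                    _violations.AppendLine($"{name} started twice");
                if (_running.Count > 1)
                    _violations.AppendLine($"{name} ran concurrently with another deployment");
            }

            public void OnFinish(string name)
            {
                if (!_running.TryRemove(name, out _))
                    _violations.AppendLine($"{name} finished without starting");
            }

            // Read only after the run completes, as the test above does.
            public string Violations => _violations.ToString();
        }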
Example #29
        protected ReducingPerformanceStats[] HandleReduceForIndex(IndexToWorkOn indexToWorkOn, CancellationToken token)
        {
            var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexToWorkOn.IndexId);

            if (viewGenerator == null)
            {
                return null;
            }

            bool operationCanceled = false;
            var  itemsToDelete     = new ConcurrentSet<object>();

            var singleStepReduceKeys = new List<string>();
            var multiStepsReduceKeys = new List<string>();

            transactionalStorage.Batch(actions =>
            {
                var mappedResultsInfo = actions.MapReduce.GetReduceTypesPerKeys(indexToWorkOn.IndexId,
                                                                                context.CurrentNumberOfItemsToReduceInSingleBatch,
                                                                                context.NumberOfItemsToExecuteReduceInSingleStep, token);

                foreach (var key in mappedResultsInfo)
                {
                    token.ThrowIfCancellationRequested();

                    switch (key.OperationTypeToPerform)
                    {
                    case ReduceType.SingleStep:
                        singleStepReduceKeys.Add(key.ReduceKey);
                        break;

                    case ReduceType.MultiStep:
                        multiStepsReduceKeys.Add(key.ReduceKey);
                        break;
                    }
                }
            });

            currentlyProcessedIndexes.TryAdd(indexToWorkOn.IndexId, indexToWorkOn.Index);

            var performanceStats = new List<ReducingPerformanceStats>();

            try
            {
                if (singleStepReduceKeys.Count > 0)
                {
                    if (Log.IsDebugEnabled)
                    {
                        Log.Debug("SingleStep reduce for keys: {0}", string.Join(",", singleStepReduceKeys));
                    }

                    var singleStepStats = SingleStepReduce(indexToWorkOn, singleStepReduceKeys, viewGenerator, itemsToDelete, token);

                    performanceStats.Add(singleStepStats);
                }

                if (multiStepsReduceKeys.Count > 0)
                {
                    if (Log.IsDebugEnabled)
                    {
                        Log.Debug("MultiStep reduce for keys: {0}", string.Join(",", multiStepsReduceKeys));
                    }

                    var multiStepStats = MultiStepReduce(indexToWorkOn, multiStepsReduceKeys, viewGenerator, itemsToDelete, token);

                    performanceStats.Add(multiStepStats);
                }
            }
            catch (OperationCanceledException)
            {
                operationCanceled = true;
            }
            catch (AggregateException e)
            {
                var anyOperationsCanceled = e
                                            .InnerExceptions
                                            .OfType<OperationCanceledException>()
                                            .Any();

                if (anyOperationsCanceled == false)
                {
                    throw;
                }

                operationCanceled = true;
            }
            finally
            {
                var postReducingOperations = new ReduceLevelPeformanceStats
                {
                    Level   = -1,
                    Started = SystemTime.UtcNow
                };

                if (operationCanceled == false)
                {
                    // need to flush the changes made to the map-reduce index
                    // before committing the deletions of the scheduled reductions
                    context.IndexStorage.FlushIndex(indexToWorkOn.IndexId, onlyAddIndexError: true);

                    var deletingScheduledReductionsDuration = new Stopwatch();
                    var storageCommitDuration = new Stopwatch();

                    // whatever we succeeded in indexing or not, we have to update this
                    // because otherwise we keep trying to re-index failed mapped results
                    transactionalStorage.Batch(actions =>
                    {
                        actions.BeforeStorageCommit += storageCommitDuration.Start;
                        actions.AfterStorageCommit  += storageCommitDuration.Stop;

                        ScheduledReductionInfo latest;

                        using (StopwatchScope.For(deletingScheduledReductionsDuration))
                        {
                            latest = actions.MapReduce.DeleteScheduledReduction(itemsToDelete);
                        }

                        if (latest == null)
                        {
                            return;
                        }

                        actions.Indexing.UpdateLastReduced(indexToWorkOn.IndexId, latest.Etag, latest.Timestamp);
                    });

                    postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds));
                    postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
                }

                postReducingOperations.Completed = SystemTime.UtcNow;
                postReducingOperations.Duration  = postReducingOperations.Completed - postReducingOperations.Started;

                performanceStats.Add(new ReducingPerformanceStats(ReduceType.None)
                {
                    LevelStats = new List<ReduceLevelPeformanceStats> {
                        postReducingOperations
                    }
                });

                Index _;
                currentlyProcessedIndexes.TryRemove(indexToWorkOn.IndexId, out _);
            }

            return performanceStats.ToArray();
        }
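
Also note the bracketing: the index is published to currentlyProcessedIndexes before any reduce work and removed in the finally block, so whatever happens, monitoring always sees an accurate in-flight set. The shape of that pattern in isolation (a sketch; the dictionary stands in for the executer's field):

        using System;
        using System.Collections.Concurrent;

        static class InFlightTracking
        {
            private static readonly ConcurrentDictionary<int, string> CurrentlyProcessed =
                new ConcurrentDictionary<int, string>();

            // Publish the index for the duration of the work and guarantee
            // removal even when the work throws or is cancelled.
            public static void RunTracked(int indexId, string indexName, Action work)
            {
                CurrentlyProcessed.TryAdd(indexId, indexName);
                try
                {
                    work();
                }
                finally
                {
                    CurrentlyProcessed.TryRemove(indexId, out _);
                }
            }
        }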
Example #30
        protected ReducingPerformanceStats[] HandleReduceForIndex(
            IndexToWorkOn indexToWorkOn, bool skipIncreasingBatchSize, CancellationToken token)
        {
            var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexToWorkOn.IndexId);

            if (viewGenerator == null)
            {
                return null;
            }

            bool operationCanceled = false;
            var  itemsToDelete     = new ConcurrentSet<object>();

            var singleStepReduceKeys = new List<string>();
            var multiStepsReduceKeys = new List<string>();

            transactionalStorage.Batch(actions =>
            {
                var mappedResultsInfo = actions.MapReduce.GetReduceTypesPerKeys(indexToWorkOn.IndexId,
                                                                                context.CurrentNumberOfItemsToReduceInSingleBatch,
                                                                                context.NumberOfItemsToExecuteReduceInSingleStep, token);

                foreach (var key in mappedResultsInfo)
                {
                    token.ThrowIfCancellationRequested();

                    switch (key.OperationTypeToPerform)
                    {
                    case ReduceType.SingleStep:
                        singleStepReduceKeys.Add(key.ReduceKey);
                        break;

                    case ReduceType.MultiStep:
                        multiStepsReduceKeys.Add(key.ReduceKey);
                        break;
                    }
                }
            });

            var performanceStats = new List<ReducingPerformanceStats>();

            try
            {
                if (singleStepReduceKeys.Count > 0)
                {
                    if (Log.IsDebugEnabled)
                    {
                        Log.Debug("SingleStep reduce for keys: {0}", string.Join(",", singleStepReduceKeys));
                    }

                    var singleStepStats = SingleStepReduce(indexToWorkOn, singleStepReduceKeys, viewGenerator, itemsToDelete, skipIncreasingBatchSize, token);

                    performanceStats.Add(singleStepStats);
                }

                if (multiStepsReduceKeys.Count > 0)
                {
                    if (Log.IsDebugEnabled)
                    {
                        Log.Debug("MultiStep reduce for keys: {0}", string.Join(",", multiStepsReduceKeys));
                    }

                    var multiStepStats = MultiStepReduce(indexToWorkOn, multiStepsReduceKeys, viewGenerator, itemsToDelete, skipIncreasingBatchSize, token);

                    performanceStats.Add(multiStepStats);
                }
            }
            catch (IndexDoesNotExistsException)
            {
                // race condition -> index was deleted
                // we can ignore this
                operationCanceled = true;
            }
            catch (ObjectDisposedException)
            {
                // index was disposed
                // we can ignore this
                operationCanceled = true;
            }
            catch (Exception e)
            {
                if (HandleIfOutOfMemory(e, new OutOfMemoryDetails
                {
                    Index = indexToWorkOn.Index,
                    FailedItemsToProcessCount = singleStepReduceKeys.Count + multiStepsReduceKeys.Count,
                    IsReducing = true
                }))
                {
                    //if we got a OOME we need to decrease the batch size
                    operationCanceled = true;
                    return null;
                }

                if (IsOperationCanceledException(e))
                {
                    operationCanceled = true;
                    return null;
                }

                var message = $"Failed to reduce index: {indexToWorkOn.Index.PublicName} (id: {indexToWorkOn.IndexId}) " +
                              $"{singleStepReduceKeys.Count} single step keys and {multiStepsReduceKeys.Count} multi step keys. " +
                              "Skipping this batch (it won't be reduced)";

                indexToWorkOn.Index.AddIndexingError(e, message);
            }
            finally
            {
                var postReducingOperations = new ReduceLevelPeformanceStats
                {
                    Level   = -1,
                    Started = SystemTime.UtcNow
                };

                if (operationCanceled == false)
                {
                    // need to flush the changes made to the map-reduce index
                    // before committing the deletions of the scheduled reductions
                    context.IndexStorage.FlushIndex(indexToWorkOn.IndexId, onlyAddIndexError: true);

                    var deletingScheduledReductionsDuration = new Stopwatch();
                    var storageCommitDuration = new Stopwatch();

                    // whatever we succeeded in indexing or not, we have to update this
                    // because otherwise we keep trying to re-index failed mapped results
                    transactionalStorage.Batch(actions =>
                    {
                        actions.BeforeStorageCommit += storageCommitDuration.Start;
                        actions.AfterStorageCommit  += storageCommitDuration.Stop;

                        ScheduledReductionInfo latest;

                        using (StopwatchScope.For(deletingScheduledReductionsDuration))
                        {
                            latest = actions.MapReduce.DeleteScheduledReduction(itemsToDelete, token);
                        }

                        if (latest == null)
                        {
                            return;
                        }

                        actions.Indexing.UpdateLastReduced(indexToWorkOn.IndexId, latest.Etag, latest.Timestamp);
                    });

                    postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds));
                    postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
                }

                postReducingOperations.Completed = SystemTime.UtcNow;
                postReducingOperations.Duration  = postReducingOperations.Completed - postReducingOperations.Started;

                performanceStats.Add(new ReducingPerformanceStats(ReduceType.None)
                {
                    LevelStats = new List<ReduceLevelPeformanceStats> {
                        postReducingOperations
                    }
                });
            }

            return performanceStats.ToArray();
        }
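
This variant's catch chain treats deleted indexes, disposed indexes, and cancellation as benign, and defers everything else to OOM handling or an indexing error. A sketch of an IsOperationCanceledException-style helper that also unwraps AggregateException, matching what the earlier variant does inline (the real helper's behaviour is an assumption):

        using System;
        using System.Linq;

        static class CancellationClassifier
        {
            // Treat a plain OperationCanceledException, or an AggregateException
            // containing at least one, as a cancellation rather than a failure.
            public static bool IsCancellation(Exception e)
            {
                switch (e)
                {
                    case OperationCanceledException _:
                        return true;
                    case AggregateException aggregate:
                        return aggregate.Flatten().InnerExceptions
                            .OfType<OperationCanceledException>().Any();
                    default:
                        return false;
                }
            }
        }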
Example #31
        private ReducingPerformanceStats SingleStepReduce(IndexToWorkOn index, List<string> keysToReduce, AbstractViewGenerator viewGenerator,
                                                          ConcurrentSet<object> itemsToDelete, bool skipIncreasingBatchSize, CancellationToken token)
        {
            var needToMoveToSingleStepQueue = new ConcurrentQueue<HashSet<string>>();
            var alreadySingleStepQueue      = new ConcurrentQueue<HashSet<string>>();

            if (Log.IsDebugEnabled)
            {
                Log.Debug(() => $"Executing single step reducing for {keysToReduce.Count} keys [{string.Join(", ", keysToReduce)}]");
            }

            var batchTimeWatcher = Stopwatch.StartNew();

            var reducingBatchThrottlerId = Guid.NewGuid();
            var reducePerformanceStats   = new ReducingPerformanceStats(ReduceType.SingleStep);
            var reduceLevelStats         = new ReduceLevelPeformanceStats
            {
                Started = SystemTime.UtcNow,
                Level   = 2
            };

            try
            {
                var parallelOperations = new ConcurrentQueue<ParallelBatchStats>();

                var parallelProcessingStart = SystemTime.UtcNow;

                if (context.Database.ThreadPool == null || context.RunReducing == false)
                {
                    throw new OperationCanceledException();
                }

                context.Database.ThreadPool.ExecuteBatch(keysToReduce, enumerator =>
                {
                    var parallelStats = new ParallelBatchStats
                    {
                        StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds
                    };

                    while (enumerator.MoveNext())
                    {
                        // fill a page-sized chunk, stopping before we advance past
                        // the first key of the following chunk so no key is skipped
                        var localKeys = new HashSet<string>();
                        do
                        {
                            token.ThrowIfCancellationRequested();

                            localKeys.Add(enumerator.Current);
                        }
                        while (localKeys.Count < RavenThreadPool.DefaultPageSize && enumerator.MoveNext());

                        if (localKeys.Count == 0)
                        {
                            return;
                        }

                        var localNeedToMoveToSingleStep = new HashSet<string>();
                        needToMoveToSingleStepQueue.Enqueue(localNeedToMoveToSingleStep);
                        var localAlreadySingleStep = new HashSet<string>();
                        alreadySingleStepQueue.Enqueue(localAlreadySingleStep);

                        transactionalStorage.Batch(actions =>
                        {
                            var getItemsToReduceParams = new GetItemsToReduceParams(index: index.IndexId, reduceKeys: new HashSet<string>(localKeys), level: 0, loadData: false, itemsToDelete: itemsToDelete)
                            {
                                Take = int.MaxValue // just get all, we do the rate limit when we load the number of keys to reduce, anyway
                            };

                            var getItemsToReduceDuration = new Stopwatch();

                            int scheduledItemsSum   = 0;
                            int scheduledItemsCount = 0;
                            List<int> scheduledItemsMappedBuckets = new List<int>();
                            using (StopwatchScope.For(getItemsToReduceDuration))
                            {
                                foreach (var item in actions.MapReduce.GetItemsToReduce(getItemsToReduceParams, token))
                                {
                                    scheduledItemsMappedBuckets.Add(item.Bucket);
                                    scheduledItemsSum += item.Size;
                                    scheduledItemsCount++;
                                }
                            }

                            parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetItemsToReduce, getItemsToReduceDuration.ElapsedMilliseconds));

                            autoTuner.CurrentlyUsedBatchSizesInBytes.GetOrAdd(reducingBatchThrottlerId, scheduledItemsSum);

                            if (scheduledItemsCount == 0)
                            {
                                // Here we have an interesting issue. We have scheduled reductions, because GetReduceTypesPerKeys() returned them
                                // and at the same time, we don't have any at level 0. That probably means that we have them at level 1 or 2.
                                // They shouldn't be here, and indeed, we remove them just a little down from here in this function.
                                // That said, they might have smuggled in between versions, or something happened to cause them to be here.
                                // In order to avoid that, we forcibly delete those extra items from the scheduled reductions, and move on

                                Log.Warn("Found single reduce items ({0}) that didn't have any items to reduce. Deleting level 1 & level 2 items for those keys. (If you can reproduce this, please contact [email protected])", string.Join(", ", keysToReduce));

                                var deletingScheduledReductionsDuration = Stopwatch.StartNew();

                                using (StopwatchScope.For(deletingScheduledReductionsDuration))
                                {
                                    foreach (var reduceKey in keysToReduce)
                                    {
                                        token.ThrowIfCancellationRequested();

                                        actions.MapReduce.DeleteScheduledReduction(index.IndexId, 1, reduceKey);
                                        actions.MapReduce.DeleteScheduledReduction(index.IndexId, 2, reduceKey);
                                    }
                                }

                                parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds));
                            }

                            var removeReduceResultsDuration = new Stopwatch();

                            foreach (var reduceKey in localKeys)
                            {
                                token.ThrowIfCancellationRequested();

                                var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexId, reduceKey);

                                if (lastPerformedReduceType != ReduceType.SingleStep)
                                {
                                    localNeedToMoveToSingleStep.Add(reduceKey);
                                }

                                if (lastPerformedReduceType == ReduceType.SingleStep)
                                {
                                    localAlreadySingleStep.Add(reduceKey);
                                }

                                if (lastPerformedReduceType != ReduceType.MultiStep)
                                {
                                    continue;
                                }

                                if (Log.IsDebugEnabled)
                                {
                                    Log.Debug("Key {0} was moved from multi step to single step reduce, removing existing reduce results records", reduceKey);
                                }

                                using (StopwatchScope.For(removeReduceResultsDuration))
                                {
                                    // now we are in single step but previously multi step reduce was performed for the given key
                                    var mappedBuckets = actions.MapReduce.GetMappedBuckets(index.IndexId, reduceKey, token);

                                    // add scheduled items too to be sure we will delete reduce results of already deleted documents
                                    foreach (var mappedBucket in mappedBuckets.Union(scheduledItemsMappedBuckets))
                                    {
                                        actions.MapReduce.RemoveReduceResults(index.IndexId, 1, reduceKey, mappedBucket);
                                        actions.MapReduce.RemoveReduceResults(index.IndexId, 2, reduceKey, mappedBucket / 1024);
                                    }
                                }
                            }

                            parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_RemoveReduceResults, removeReduceResultsDuration.ElapsedMilliseconds));
                        });

                        parallelOperations.Enqueue(parallelStats);
                    }
                }, description: $"Performing single step reduction for index {index.Index.PublicName} from etag {index.Index.GetLastEtagFromStats()} for {keysToReduce.Count} keys", database: context.Database);

                reduceLevelStats.Operations.Add(new ParallelPerformanceStats
                {
                    NumberOfThreads   = parallelOperations.Count,
                    DurationMs        = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds,
                    BatchedOperations = parallelOperations.ToList()
                });

                var getMappedResultsDuration = new Stopwatch();

                var reductionPerformanceStats = new List<IndexingPerformanceStats>();

                var keysLeftToReduce = new HashSet<string>(keysToReduce);
                while (keysLeftToReduce.Count > 0)
                {
                    var keysReturned = new HashSet<string>();

                    // Try to diminish the allocations happening because of .Resize()
                    var mappedResults = new List<MappedResultInfo>(keysLeftToReduce.Count);

                    context.TransactionalStorage.Batch(actions =>
                    {
                        var take = context.CurrentNumberOfItemsToReduceInSingleBatch;

                        using (StopwatchScope.For(getMappedResultsDuration))
                        {
                            mappedResults = actions.MapReduce.GetMappedResults(index.IndexId, keysLeftToReduce, true, take, keysReturned, token, mappedResults);
                        }
                    });

                    var count = mappedResults.Count;

                    int size = 0;
                    foreach (var item in mappedResults)
                    {
                        // collapse everything into bucket 0: single step reduces
                        // all mapped results for a key in one go
                        item.Bucket = 0;
                        size       += item.Size;
                    }

                    var results = mappedResults.GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data)).ToArray();

                    context.MetricsCounters.ReducedPerSecond.Mark(results.Length);

                    token.ThrowIfCancellationRequested();

                    var performance = context.IndexStorage.Reduce(index.IndexId, viewGenerator, results, 2, context, null, keysReturned, count);

                    reductionPerformanceStats.Add(performance);

                    autoTuner.AutoThrottleBatchSize(count, size, batchTimeWatcher.Elapsed, skipIncreasingBatchSize);
                }

                // update the newly performed single step keys
                UpdatePerformedSingleStep(index.IndexId, needToMoveToSingleStepQueue);

                // already single step:
                // if the multi step keys were already removed,
                // the reduce types for those keys need to be removed as well
                UpdatePerformedSingleStep(index.IndexId, alreadySingleStepQueue, skipAdd: true);

                reduceLevelStats.Completed = SystemTime.UtcNow;
                reduceLevelStats.Duration  = reduceLevelStats.Completed - reduceLevelStats.Started;
                reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetMappedResults, getMappedResultsDuration.ElapsedMilliseconds));
                reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, 0)); // in single step we write directly to Lucene index

                foreach (var stats in reductionPerformanceStats)
                {
                    reduceLevelStats.Add(stats);
                }

                reducePerformanceStats.LevelStats.Add(reduceLevelStats);
            }
            finally
            {
                long _;
                autoTuner.CurrentlyUsedBatchSizesInBytes.TryRemove(reducingBatchThrottlerId, out _);
            }

            return reducePerformanceStats;
        }
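
One more pattern here: the batch registers its size in autoTuner.CurrentlyUsedBatchSizesInBytes under a fresh GUID and unconditionally removes it in the finally, so the throttler's view of in-use memory can never leak entries. A sketch of that budget bookkeeping (names are illustrative):

        using System;
        using System.Collections.Concurrent;
        using System.Linq;

        sealed class BatchBudget
        {
            private readonly ConcurrentDictionary<Guid, long> _inUse =
                new ConcurrentDictionary<Guid, long>();

            public long TotalInUseBytes => _inUse.Values.Sum();

            // Register the batch under a fresh ticket and guarantee removal,
            // mirroring the try/finally around SingleStepReduce above.
            public void RunThrottled(long sizeInBytes, Action work)
            {
                var ticket = Guid.NewGuid();
                _inUse.TryAdd(ticket, sizeInBytes);
                try
                {
                    work();
                }
                finally
                {
                    _inUse.TryRemove(ticket, out _);
                }
            }
        }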
Example #32
        private static async Task GetDependentTypesInProjectAsync(
            INamedTypeSymbol type,
            Project project,
            Solution solution,
            Func<INamedTypeSymbol, INamedTypeSymbol, bool> predicate,
            ConditionalWeakTable<Compilation, ConcurrentDictionary<SymbolKey, List<SymbolKey>>> cache,
            bool locationsInMetadata,
            ConcurrentSet<ISymbol> results,
            CancellationToken cancellationToken)
        {
            cancellationToken.ThrowIfCancellationRequested();

            var compilation = await project.GetCompilationAsync(cancellationToken).ConfigureAwait(false);

            var typeId = type.GetSymbolKey();

            List<SymbolKey> dependentTypeIds;

            if (!TryGetDependentTypes(cache, compilation, typeId, out dependentTypeIds))
            {
                List<INamedTypeSymbol> allTypes;
                if (locationsInMetadata)
                {
                    // From metadata, have to check other (non private) metadata types, as well as
                    // source types.
                    allTypes = GetAllSourceAndAccessibleTypesInCompilation(compilation, cancellationToken);
                }
                else
                {
                    // It's from source, so only other source types could derive from it.
                    allTypes = GetAllSourceTypesInCompilation(compilation, cancellationToken);
                }

                dependentTypeIds = new List<SymbolKey>();
                foreach (var t in allTypes)
                {
                    cancellationToken.ThrowIfCancellationRequested();

                    if (predicate(t, type))
                    {
                        dependentTypeIds.Add(t.GetSymbolKey());
                    }
                }

                dependentTypeIds = GetOrAddDependentTypes(cache, compilation, typeId, dependentTypeIds);
            }

            foreach (var id in dependentTypeIds)
            {
                cancellationToken.ThrowIfCancellationRequested();

                var resolvedSymbols = id.Resolve(compilation, cancellationToken: cancellationToken).GetAllSymbols();
                foreach (var resolvedSymbol in resolvedSymbols)
                {
                    var mappedSymbol = await SymbolFinder.FindSourceDefinitionAsync(resolvedSymbol, solution, cancellationToken).ConfigureAwait(false) ?? resolvedSymbol;

                    results.Add(mappedSymbol);
                }
            }
        }
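
Keying the cache with a ConditionalWeakTable over the Compilation means cached dependent-type lists live exactly as long as the compilation they describe and vanish with it, with no eviction code. A simplified sketch of that get-or-compute shape (TryGetDependentTypes/GetOrAddDependentTypes above are Roslyn-internal; this is an assumption about their shape):

        using System;
        using System.Collections.Concurrent;
        using System.Runtime.CompilerServices;

        static class WeakKeyedCache
        {
            // Entries live exactly as long as their owner (here: whatever plays
            // the Compilation role); when the owner is collected, so is its map.
            private static readonly ConditionalWeakTable<object, ConcurrentDictionary<string, object>> Table =
                new ConditionalWeakTable<object, ConcurrentDictionary<string, object>>();

            public static object GetOrCompute(object owner, string key, Func<string, object> compute)
            {
                // GetOrCreateValue relies on the parameterless constructor,
                // which ConcurrentDictionary provides.
                var map = Table.GetOrCreateValue(owner);
                return map.GetOrAdd(key, compute);
            }
        }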
Example #33
        protected override void ExecuteIndexingWork(IList<IndexToWorkOn> indexes)
        {
            var indexingGroups = context.Configuration.IndexingClassifier.GroupMapIndexes(indexes);

            indexingGroups = indexingGroups.OrderByDescending(x => x.Key).ToDictionary(x => x.Key, x => x.Value);

            if (indexingGroups.Count == 0)
            {
                return;
            }

            var usedPrefetchers = new ConcurrentSet<PrefetchingBehavior>();

            var groupedIndexes = indexingGroups.Select(x =>
            {
                var result = new IndexingGroup
                {
                    LastIndexedEtag = x.Key,
                    Indexes         = x.Value,
                    LastQueryTime   = x.Value.Max(y => y.Index.LastQueryTime),
                };

                SetPrefetcherForIndexingGroup(result, usedPrefetchers);

                return result;
            }).OrderByDescending(x => x.LastQueryTime).ToList();


            var maxIndexOutputsPerDoc    = groupedIndexes.Max(x => x.Indexes.Max(y => y.Index.MaxIndexOutputsPerDocument));
            var containsMapReduceIndexes = groupedIndexes.Any(x => x.Indexes.Any(y => y.Index.IsMapReduce));

            var recoverTunerState = ((IndexBatchSizeAutoTuner)autoTuner).ConsiderLimitingNumberOfItemsToProcessForThisBatch(maxIndexOutputsPerDoc, containsMapReduceIndexes);

            BackgroundTaskExecuter.Instance.ExecuteAll(context, groupedIndexes, (indexingGroup, i) =>
            {
                context.CancellationToken.ThrowIfCancellationRequested();
                using (LogContext.WithDatabase(context.DatabaseName))
                {
                    var prefetchingBehavior = indexingGroup.PrefetchingBehavior;
                    var indexesToWorkOn     = indexingGroup.Indexes;

                    var operationCanceled     = false;
                    TimeSpan indexingDuration = TimeSpan.Zero;
                    var lastEtag = Etag.Empty;

                    List <JsonDocument> jsonDocs;
                    IndexingBatchInfo batchInfo = null;

                    using (MapIndexingInProgress(indexesToWorkOn))
                        using (prefetchingBehavior.DocumentBatchFrom(indexingGroup.LastIndexedEtag, out jsonDocs))
                        {
                            try
                            {
                                if (Log.IsDebugEnabled)
                                {
                                    Log.Debug("Found a total of {0} documents that requires indexing since etag: {1}: ({2})",
                                              jsonDocs.Count, indexingGroup.LastIndexedEtag, string.Join(", ", jsonDocs.Select(x => x.Key)));
                                }

                                batchInfo = context.ReportIndexingBatchStarted(jsonDocs.Count, jsonDocs.Sum(x => x.SerializedSizeOnDisk), indexesToWorkOn.Select(x => x.Index.PublicName).ToList());

                                context.CancellationToken.ThrowIfCancellationRequested();

                                if (jsonDocs.Count <= 0)
                                {
                                    return;
                                }

                                var sw = Stopwatch.StartNew();

                                lastEtag = DoActualIndexing(indexesToWorkOn, jsonDocs, batchInfo);

                                indexingDuration = sw.Elapsed;
                            }
                            catch (InvalidDataException e)
                            {
                                Log.ErrorException("Failed to index because of data corruption. ", e);
                                indexesToWorkOn.ForEach(index =>
                                                        context.AddError(index.IndexId, index.Index.PublicName, null, string.Format("Failed to index because of data corruption. Reason: {0}", e.Message)));
                            }
                            catch (OperationCanceledException)
                            {
                                operationCanceled = true;
                            }
                            catch (AggregateException e)
                            {
                                var anyOperationsCanceled = e
                                                            .InnerExceptions
                                                            .OfType <OperationCanceledException>()
                                                            .Any();

                                if (anyOperationsCanceled == false)
                                {
                                    throw;
                                }

                                operationCanceled = true;
                            }
                            finally
                            {
                                if (operationCanceled == false && jsonDocs != null && jsonDocs.Count > 0)
                                {
                                    prefetchingBehavior.CleanupDocuments(lastEtag);
                                    prefetchingBehavior.UpdateAutoThrottler(jsonDocs, indexingDuration);
                                }

                                prefetchingBehavior.BatchProcessingComplete();
                                if (batchInfo != null)
                                {
                                    context.ReportIndexingBatchCompleted(batchInfo);
                                }
                            }
                        }
                }
            });

            if (recoverTunerState != null)
            {
                recoverTunerState();
            }

            RemoveUnusedPrefetchers(usedPrefetchers);
        }
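
SetPrefetcherForIndexingGroup is referenced above but not shown. A plausible sketch of how the usedPrefetchers set gates prefetcher reuse; the helper and member names here are assumptions, not RavenDB's actual API:

        private void SetPrefetcherForIndexingGroup(IndexingGroup group, ConcurrentSet<PrefetchingBehavior> usedPrefetchers)
        {
            foreach (var prefetcher in prefetchingBehaviors) // assumed field holding all live prefetchers
            {
                // TryAdd is the gate: it returns false when another indexing group has
                // already claimed this prefetcher, so each one serves a single group per batch.
                if (prefetcher.CanLoadDocumentsFrom(group.LastIndexedEtag) && usedPrefetchers.TryAdd(prefetcher)) // hypothetical predicate
                {
                    group.PrefetchingBehavior = prefetcher;
                    return;
                }
            }

            // Nothing reusable: create a fresh prefetcher and mark it as used so that
            // RemoveUnusedPrefetchers keeps it alive at the end of the batch.
            var created = CreatePrefetchingBehavior(); // hypothetical factory
            usedPrefetchers.TryAdd(created);
            group.PrefetchingBehavior = created;
        }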
Example #34
0
        private static async Task<IEnumerable<INamedTypeSymbol>> GetDependentTypesAsync(
            INamedTypeSymbol type,
            Solution solution,
            IImmutableSet<Project> projects,
            Func<INamedTypeSymbol, INamedTypeSymbol, bool> predicate,
            ConditionalWeakTable<Compilation, ConcurrentDictionary<SymbolKey, List<SymbolKey>>> cache,
            CancellationToken cancellationToken)
        {
            cancellationToken.ThrowIfCancellationRequested();

            var dependentProjects = await DependentProjectsFinder.GetDependentProjectsAsync(type, solution, projects, cancellationToken).ConfigureAwait(false);

            // If it's a type from source, then only other types from source could derive from
            // it.  If it's a type from metadata then unfortunately anything could derive from
            // it.
            bool locationsInMetadata = type.Locations.Any(loc => loc.IsInMetadata);

            ConcurrentSet<ISymbol> results = new ConcurrentSet<ISymbol>(SymbolEquivalenceComparer.Instance);

            cancellationToken.ThrowIfCancellationRequested();

            var projectTasks = new List<Task>();
            foreach (var project in dependentProjects)
            {
                projectTasks.Add(Task.Run(
                    async () => await GetDependentTypesInProjectAsync(type, project, solution, predicate, cache, locationsInMetadata, results, cancellationToken).ConfigureAwait(false), cancellationToken));
            }

            await Task.WhenAll(projectTasks).ConfigureAwait(false);

            if (results.Any())
            {
                return results.OfType<INamedTypeSymbol>();
            }
            else
            {
                return SpecializedCollections.EmptyEnumerable<INamedTypeSymbol>();
            }
        }
        public async Task <IRyuContainer> CreateAsync(IReadOnlySet <ITransportFactory> transportFactories, Guid? forceId = null)
        {
            var container      = root.CreateChildContainer();
            var proxyGenerator = container.GetOrDefault <ProxyGenerator>() ?? new ProxyGenerator();
            var shutdownCancellationTokenSource = new CancellationTokenSource();

            // Auditing Subsystem
            var auditService = new AuditService(shutdownCancellationTokenSource.Token);

            auditService.Initialize();

            // management tier containers
            var mobContextContainer = new MobContextContainer();
            var mobContextFactory   = new MobContextFactory(auditService);
            var mobOperations       = new MobOperations(mobContextFactory, mobContextContainer);

            // Other Courier Stuff
            var identity              = Identity.Create(forceId);
            var routingTable          = new RoutingTable();
            var peerDiscoveryEventBus = new AsyncBus <PeerDiscoveryEvent>();
            var peerTable             = new PeerTable(container, (table, peerId) => new PeerContext(table, peerId, peerDiscoveryEventBus));

            var inboundMessageRouter     = new InboundMessageRouter();
            var inboundMessageDispatcher = new InboundMessageDispatcher(identity, peerTable, inboundMessageRouter);

            var transports = new ConcurrentSet <ITransport>();

            foreach (var transportFactory in transportFactories)
            {
                var transport = await transportFactory.CreateAsync(mobOperations, identity, routingTable, peerTable, inboundMessageDispatcher, auditService).ConfigureAwait(false);

                transports.TryAdd(transport);
            }

            var messenger = new Messenger(identity, transports, routingTable);

            container.Set(identity);
            container.Set(routingTable);
            container.Set(peerTable);
            container.Set(inboundMessageRouter);
            container.Set(messenger);

            //----------------------------------------------------------------------------------------
            // Service Tier - Service Discovery, Remote Method Invocation
            //----------------------------------------------------------------------------------------
            var localServiceRegistry        = new LocalServiceRegistry(identity, messenger);
            var remoteServiceInvoker        = new RemoteServiceInvoker(identity, messenger);
            var remoteServiceProxyContainer = new RemoteServiceProxyContainer(proxyGenerator, remoteServiceInvoker);

            inboundMessageRouter.RegisterHandler <RmiRequestDto>(localServiceRegistry.HandleInvocationRequestAsync);
            inboundMessageRouter.RegisterHandler <RmiResponseDto>(remoteServiceInvoker.HandleInvocationResponse);
            container.Set(localServiceRegistry);
            container.Set(remoteServiceProxyContainer);

            //----------------------------------------------------------------------------------------
            // Management Tier - DMI - Services
            //----------------------------------------------------------------------------------------
            var managementObjectService = new ManagementObjectService(mobContextContainer, mobOperations);

            localServiceRegistry.RegisterService <IManagementObjectService>(managementObjectService);
            container.Set(mobOperations);
            container.Set(managementObjectService);

            var facade = new CourierFacade(transports, container);

            container.Set(facade);

            return(container);
        }
Example #36
0
		private ReducingPerformanceStats MultiStepReduce(IndexToWorkOn index, List<string> keysToReduce, AbstractViewGenerator viewGenerator, ConcurrentSet<object> itemsToDelete, CancellationToken token)
		{
			var needToMoveToMultiStep = new HashSet<string>();
			transactionalStorage.Batch(actions =>
			{
				foreach (var localReduceKey in keysToReduce)
				{
					token.ThrowIfCancellationRequested();

					var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexId, localReduceKey);

					if (lastPerformedReduceType != ReduceType.MultiStep)
						needToMoveToMultiStep.Add(localReduceKey);

					if (lastPerformedReduceType != ReduceType.SingleStep)
						continue;

					// we exceeded the limit of items to reduce in single step
					// now we need to schedule reductions at level 0 for all map results with given reduce key
					var mappedItems = actions.MapReduce.GetMappedBuckets(index.IndexId, localReduceKey, token).ToList();
					foreach (var result in mappedItems.Select(x => new ReduceKeyAndBucket(x, localReduceKey)))
					{
						actions.MapReduce.ScheduleReductions(index.IndexId, 0, result);
					}
				}
			});

			var reducePerformance = new ReducingPerformanceStats(ReduceType.MultiStep);

            var keysToReduceSet = new HashSet<string>(keysToReduce);

			for (int i = 0; i < 3; i++)
			{
				var level = i;

				var reduceLevelStats = new ReduceLevelPeformanceStats()
				{
					Level = level,
					Started = SystemTime.UtcNow,
				};

				var reduceParams = new GetItemsToReduceParams(
					index.IndexId,
                    keysToReduceSet,
					level,
					true,
					itemsToDelete);

				var gettingItemsToReduceDuration = new Stopwatch();
				var scheduleReductionsDuration = new Stopwatch();
				var removeReduceResultsDuration = new Stopwatch();
				var storageCommitDuration = new Stopwatch();

				bool retry = true;
				while (retry && reduceParams.ReduceKeys.Count > 0)
				{
					var reduceBatchAutoThrottlerId = Guid.NewGuid();
					try
					{
						transactionalStorage.Batch(actions =>
						{
							token.ThrowIfCancellationRequested();

							actions.BeforeStorageCommit += storageCommitDuration.Start;
							actions.AfterStorageCommit += storageCommitDuration.Stop;

							var batchTimeWatcher = Stopwatch.StartNew();

							reduceParams.Take = context.CurrentNumberOfItemsToReduceInSingleBatch;

                            int size = 0;

                            IList<MappedResultInfo> persistedResults;
                            var reduceKeys = new HashSet<string>(StringComparer.InvariantCultureIgnoreCase);
							using (StopwatchScope.For(gettingItemsToReduceDuration))
							{
                                persistedResults = actions.MapReduce.GetItemsToReduce(reduceParams, token);                                

                                foreach (var item in persistedResults)
                                {
                                    reduceKeys.Add(item.ReduceKey);
                                    size += item.Size;
                                }
							}

                            if (persistedResults.Count == 0)
							{
								retry = false;
								return;
							}

                            var count = persistedResults.Count;

							autoTuner.CurrentlyUsedBatchSizesInBytes.GetOrAdd(reduceBatchAutoThrottlerId, size);

							if (Log.IsDebugEnabled)
							{
                                if (persistedResults.Count > 0)
                                {
                                    Log.Debug(() => string.Format("Found {0} results for keys [{1}] for index {2} at level {3} in {4}",
                                        persistedResults.Count,
                                        string.Join(", ", persistedResults.Select(x => x.ReduceKey).Distinct()),
                                        index.IndexId, level, batchTimeWatcher.Elapsed));
                                }
								else
                                {
                                    Log.Debug("No reduce keys found for {0}", index.IndexId);
                                }									
							}

							token.ThrowIfCancellationRequested();


                            var requiredReduceNextTimeSet = new HashSet<ReduceKeyAndBucket>(persistedResults.Select(x => new ReduceKeyAndBucket(x.Bucket, x.ReduceKey)), ReduceKeyAndBucketEqualityComparer.Instance);

							using (StopwatchScope.For(removeReduceResultsDuration))
							{
                                foreach (var mappedResultInfo in requiredReduceNextTimeSet)
								{
									token.ThrowIfCancellationRequested();

									actions.MapReduce.RemoveReduceResults(index.IndexId, level + 1, mappedResultInfo.ReduceKey, mappedResultInfo.Bucket);
								}
							}

							if (level != 2)
							{
                                var reduceKeysAndBucketsSet = new HashSet<ReduceKeyAndBucket>(requiredReduceNextTimeSet.Select(x => new ReduceKeyAndBucket(x.Bucket / 1024, x.ReduceKey)), ReduceKeyAndBucketEqualityComparer.Instance);

								using (StopwatchScope.For(scheduleReductionsDuration))
								{
                                    foreach (var reduceKeysAndBucket in reduceKeysAndBucketsSet)
									{
										token.ThrowIfCancellationRequested();

										actions.MapReduce.ScheduleReductions(index.IndexId, level + 1, reduceKeysAndBucket);
									}
								}
							}

							token.ThrowIfCancellationRequested();

							var reduceTimeWatcher = Stopwatch.StartNew();

                            var results = persistedResults.Where(x => x.Data != null)
                                                          .GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data));                            

                            var performance = context.IndexStorage.Reduce(index.IndexId, viewGenerator, results, level, context, actions, reduceKeys, persistedResults.Count);

                            context.MetricsCounters.ReducedPerSecond.Mark(results.Count());

							reduceLevelStats.Add(performance);

							var batchDuration = batchTimeWatcher.Elapsed;

                            if ( Log.IsDebugEnabled )
                            {
                                Log.Debug("Indexed {0} reduce keys in {1} with {2} results for index {3} in {4} on level {5}", reduceKeys.Count, batchDuration, performance.ItemsCount, index.IndexId, reduceTimeWatcher.Elapsed, level);
                            }

							autoTuner.AutoThrottleBatchSize(count, size, batchDuration);
						});
					}
					finally
					{
						long _;
						autoTuner.CurrentlyUsedBatchSizesInBytes.TryRemove(reduceBatchAutoThrottlerId, out _);
					}
				}

				reduceLevelStats.Completed = SystemTime.UtcNow;
				reduceLevelStats.Duration = reduceLevelStats.Completed - reduceLevelStats.Started;

				reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetItemsToReduce, gettingItemsToReduceDuration.ElapsedMilliseconds));
				reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds));
				reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_RemoveReduceResults, removeReduceResultsDuration.ElapsedMilliseconds));
				reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));

				reducePerformance.LevelStats.Add(reduceLevelStats);
			}

			foreach (var reduceKey in needToMoveToMultiStep)
			{
				token.ThrowIfCancellationRequested();

				string localReduceKey = reduceKey;
				transactionalStorage.Batch(actions =>
										   actions.MapReduce.UpdatePerformedReduceType(index.IndexId, localReduceKey,
																					   ReduceType.MultiStep));
			}

			return reducePerformance;
		}
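
The Bucket / 1024 divisions above encode a three-level reduce tree: each level groups 1024 child buckets under one parent bucket, which is why the outer loop runs for levels 0 through 2. A small worked example of that bucket arithmetic, inferred from the code rather than a documented contract:

		// Each division by 1024 moves one level up the reduce tree.
		int level0Bucket = 1048577;             // a map-result bucket at level 0
		int level1Bucket = level0Bucket / 1024; // 1024: scheduled for the level-1 pass
		int level2Bucket = level1Bucket / 1024; // 1: the level-2 pass yields the final reduce result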
Example #37
0
        public void OnEndOfTimeStep()
        {
            if (_pendingUniverseAdditions.Count + _pendingUserDefinedUniverseSecurityAdditions.Count == 0)
            {
                // no point in looping through everything if there's no pending changes
                return;
            }

            var requiredHistoryRequests = new Dictionary <Security, Resolution>();

            // rewrite securities w/ derivatives to be in raw mode
            lock (_pendingUniverseAdditionsLock)
            {
                foreach (var security in Securities.Select(kvp => kvp.Value).Union(
                             _pendingUserDefinedUniverseSecurityAdditions.Select(x => x.Security)))
                {
                    // check for any derivative securities and mark the underlying as raw
                    if (Securities.Any(skvp => skvp.Key.SecurityType != SecurityType.Base && skvp.Key.HasUnderlyingSymbol(security.Symbol)))
                    {
                        // set data mode raw and default volatility model
                        ConfigureUnderlyingSecurity(security);
                    }

                    var configs = SubscriptionManager.SubscriptionDataConfigService
                                  .GetSubscriptionDataConfigs(security.Symbol);
                    if (security.Symbol.HasUnderlying && security.Symbol.SecurityType != SecurityType.Base)
                    {
                        Security underlyingSecurity;
                        var      underlyingSymbol = security.Symbol.Underlying;
                        var      resolution       = configs.GetHighestResolution();

                        // create the underlying security object if it doesn't already exist
                        if (!Securities.TryGetValue(underlyingSymbol, out underlyingSecurity))
                        {
                            underlyingSecurity = AddSecurity(underlyingSymbol.SecurityType,
                                                             underlyingSymbol.Value,
                                                             resolution,
                                                             underlyingSymbol.ID.Market,
                                                             false,
                                                             0,
                                                             configs.IsExtendedMarketHours());
                        }

                        // set data mode raw and default volatility model
                        ConfigureUnderlyingSecurity(underlyingSecurity);

                        if (LiveMode && underlyingSecurity.GetLastData() == null)
                        {
                            if (requiredHistoryRequests.ContainsKey(underlyingSecurity))
                            {
                                // lets request the higher resolution
                                var currentResolutionRequest = requiredHistoryRequests[underlyingSecurity];
                                if (currentResolutionRequest != Resolution.Minute && // Can not be less than Minute
                                    resolution < currentResolutionRequest)
                                {
                                    requiredHistoryRequests[underlyingSecurity] = (Resolution)Math.Max((int)resolution, (int)Resolution.Minute);
                                }
                            }
                            else
                            {
                                requiredHistoryRequests.Add(underlyingSecurity, (Resolution)Math.Max((int)resolution, (int)Resolution.Minute));
                            }
                        }
                        // set the underlying security on the derivative -- we do this in two places since it's possible
                        // to do AddOptionContract w/out the underlying already added and normalized properly
                        var derivative = security as IDerivativeSecurity;
                        if (derivative != null)
                        {
                            derivative.Underlying = underlyingSecurity;
                        }
                    }
                }

                if (!requiredHistoryRequests.IsNullOrEmpty())
                {
                    // Create requests
                    var historyRequests = Enumerable.Empty <HistoryRequest>();
                    foreach (var byResolution in requiredHistoryRequests.GroupBy(x => x.Value))
                    {
                        historyRequests = historyRequests.Concat(
                            CreateBarCountHistoryRequests(byResolution.Select(x => x.Key.Symbol), 3, byResolution.Key));
                    }
                    // Request data
                    var historicLastData = History(historyRequests);
                    historicLastData.PushThrough(x =>
                    {
                        var security = requiredHistoryRequests.Keys.FirstOrDefault(y => y.Symbol == x.Symbol);
                        security?.Cache.AddData(x);
                    });
                }

                // add subscriptionDataConfig to their respective user defined universes
                foreach (var userDefinedUniverseAddition in _pendingUserDefinedUniverseSecurityAdditions)
                {
                    foreach (var subscriptionDataConfig in userDefinedUniverseAddition.SubscriptionDataConfigs)
                    {
                        userDefinedUniverseAddition.Universe.Add(subscriptionDataConfig);
                    }
                }

                // finally add any pending universes, this will make them available to the data feed
                foreach (var universe in _pendingUniverseAdditions)
                {
                    UniverseManager.Add(universe.Configuration.Symbol, universe);
                }

                _pendingUniverseAdditions.Clear();
                _pendingUserDefinedUniverseSecurityAdditions.Clear();
            }

            if (!_rawNormalizationWarningSymbols.IsNullOrEmpty())
            {
                // Log our securities being set to raw price mode
                Debug($"Warning: The following securities were set to raw price normalization mode to work with options: " +
                      $"{string.Join(", ", _rawNormalizationWarningSymbols.Take(_rawNormalizationWarningSymbolsMaxCount).Select(x => x.Value))}...");

                // Set our warning list to null to stop emitting these warnings after its done once
                _rawNormalizationWarningSymbols = null;
            }
        }
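
The (Resolution)Math.Max((int)resolution, (int)Resolution.Minute) casts above clamp underlying-history requests so they are never finer than minute resolution (in LEAN's Resolution enum, smaller values mean finer data). The same rule as a small helper with a hypothetical name:

        // Underlying-security history is requested at the security's own resolution,
        // but never finer than Resolution.Minute.
        private static Resolution ClampToMinuteOrCoarser(Resolution requested)
        {
            return (Resolution)Math.Max((int)requested, (int)Resolution.Minute);
        }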
Example #38
0
		private ReducingPerformanceStats SingleStepReduce(IndexToWorkOn index, List<string> keysToReduce, AbstractViewGenerator viewGenerator,
												          ConcurrentSet<object> itemsToDelete, CancellationToken token)
		{
			var needToMoveToSingleStepQueue = new ConcurrentQueue<HashSet<string>>();

            if ( Log.IsDebugEnabled )
			    Log.Debug(() => string.Format("Executing single step reducing for {0} keys [{1}]", keysToReduce.Count, string.Join(", ", keysToReduce)));

			var batchTimeWatcher = Stopwatch.StartNew();

			var reducingBatchThrottlerId = Guid.NewGuid();
			var reducePerformanceStats = new ReducingPerformanceStats(ReduceType.SingleStep);
			var reduceLevelStats = new ReduceLevelPeformanceStats
			{
				Started = SystemTime.UtcNow,
				Level = 2
			};

			try
			{
				var parallelOperations = new ConcurrentQueue<ParallelBatchStats>();

				var parallelProcessingStart = SystemTime.UtcNow;

				BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, keysToReduce, enumerator =>
				{
					var parallelStats = new ParallelBatchStats
					{
						StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds
					};

					var localNeedToMoveToSingleStep = new HashSet<string>();
					needToMoveToSingleStepQueue.Enqueue(localNeedToMoveToSingleStep);
					var localKeys = new HashSet<string>();
					while (enumerator.MoveNext())
					{
						token.ThrowIfCancellationRequested();

						localKeys.Add(enumerator.Current);
					}

					transactionalStorage.Batch(actions =>
					{
						var getItemsToReduceParams = new GetItemsToReduceParams(index: index.IndexId, reduceKeys: localKeys, level: 0, loadData: false, itemsToDelete: itemsToDelete)
						{
							Take = int.MaxValue // just get all, we do the rate limit when we load the number of keys to reduce, anyway
						};

						var getItemsToReduceDuration = Stopwatch.StartNew();

                        int scheduledItemsSum = 0;
                        int scheduledItemsCount = 0;
                        List<int> scheduledItemsMappedBuckets = new List<int>();
						using (StopwatchScope.For(getItemsToReduceDuration))
						{
                            foreach (var item in actions.MapReduce.GetItemsToReduce(getItemsToReduceParams, token))
                            {
                                scheduledItemsMappedBuckets.Add(item.Bucket);
                                scheduledItemsSum += item.Size;
                                scheduledItemsCount++;
                            }
						}

						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetItemsToReduce, getItemsToReduceDuration.ElapsedMilliseconds));

						autoTuner.CurrentlyUsedBatchSizesInBytes.GetOrAdd(reducingBatchThrottlerId, scheduledItemsSum);

                        if (scheduledItemsCount == 0)
						{						    
							// Here we have an interesting issue. We have scheduled reductions, because GetReduceTypesPerKeys() returned them
							// and at the same time, we don't have any at level 0. That probably means that we have them at level 1 or 2.
							// They shouldn't be here, and indeed, we remove them just a little down from here in this function.
							// That said, they might have smuggled in between versions, or something happened to cause them to be here.
							// In order to avoid that, we forcibly delete those extra items from the scheduled reductions, and move on

                            Log.Warn("Found single reduce items ({0}) that didn't have any items to reduce. Deleting level 1 & level 2 items for those keys. (If you can reproduce this, please contact [email protected])", string.Join(", ", keysToReduce));

							var deletingScheduledReductionsDuration = Stopwatch.StartNew();

							using (StopwatchScope.For(deletingScheduledReductionsDuration))
							{
								foreach (var reduceKey in keysToReduce)
								{
									token.ThrowIfCancellationRequested();

									actions.MapReduce.DeleteScheduledReduction(index.IndexId, 1, reduceKey);
									actions.MapReduce.DeleteScheduledReduction(index.IndexId, 2, reduceKey);
								}
							}

							parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds));
						}

						var removeReduceResultsDuration = new Stopwatch();

						foreach (var reduceKey in localKeys)
						{
							token.ThrowIfCancellationRequested();

							var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexId, reduceKey);

							if (lastPerformedReduceType != ReduceType.SingleStep)
								localNeedToMoveToSingleStep.Add(reduceKey);

							if (lastPerformedReduceType != ReduceType.MultiStep)
								continue;

                            if ( Log.IsDebugEnabled )
                            {
                                Log.Debug("Key {0} was moved from multi step to single step reduce, removing existing reduce results records", reduceKey);
                            }

							using (StopwatchScope.For(removeReduceResultsDuration))
							{
                                // now we are in single step but previously multi step reduce was performed for the given key
                                var mappedBuckets = actions.MapReduce.GetMappedBuckets(index.IndexId, reduceKey, token);        

                                // add scheduled items too to be sure we will delete reduce results of already deleted documents
								foreach (var mappedBucket in mappedBuckets.Union(scheduledItemsMappedBuckets))
								{
									actions.MapReduce.RemoveReduceResults(index.IndexId, 1, reduceKey, mappedBucket);
									actions.MapReduce.RemoveReduceResults(index.IndexId, 2, reduceKey, mappedBucket / 1024);
								}
							}
						}

						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_RemoveReduceResults, removeReduceResultsDuration.ElapsedMilliseconds));

						parallelOperations.Enqueue(parallelStats);
					});
				});

				reduceLevelStats.Operations.Add(new ParallelPerformanceStats
				{
					NumberOfThreads = parallelOperations.Count,
					DurationMs = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds,
					BatchedOperations = parallelOperations.ToList()
				});

				var getMappedResultsDuration = new Stopwatch();				

				var reductionPerformanceStats = new List<IndexingPerformanceStats>();
                
                var keysLeftToReduce = new HashSet<string>(keysToReduce);                                              
				while (keysLeftToReduce.Count > 0)
				{
                    var keysReturned = new HashSet<string>();       

                    // Try to diminish the allocations happening because of .Resize()
                    var mappedResults = new List<MappedResultInfo>(keysLeftToReduce.Count);                             
                    
                    context.TransactionalStorage.Batch(actions =>
					{
						var take = context.CurrentNumberOfItemsToReduceInSingleBatch;

						using (StopwatchScope.For(getMappedResultsDuration))
						{
                            mappedResults = actions.MapReduce.GetMappedResults(index.IndexId, keysLeftToReduce, true, take, keysReturned, token, mappedResults);
						}
					});

					var count = mappedResults.Count;

                    int size = 0;
                    foreach ( var item in mappedResults )
                    {
                        item.Bucket = 0;
                        size += item.Size;
                    }
                        
                    var results = mappedResults.GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data)).ToArray();

					context.MetricsCounters.ReducedPerSecond.Mark(results.Length);

					token.ThrowIfCancellationRequested();

                    var performance = context.IndexStorage.Reduce(index.IndexId, viewGenerator, results, 2, context, null, keysReturned, count);

					reductionPerformanceStats.Add(performance);

					autoTuner.AutoThrottleBatchSize(count, size, batchTimeWatcher.Elapsed);
				}

				var needToMoveToSingleStep = new HashSet<string>();

				HashSet<string> set;
				while (needToMoveToSingleStepQueue.TryDequeue(out set))
				{
					needToMoveToSingleStep.UnionWith(set);
				}

				foreach (var reduceKey in needToMoveToSingleStep)
				{
					string localReduceKey = reduceKey;
					transactionalStorage.Batch(actions =>
						actions.MapReduce.UpdatePerformedReduceType(index.IndexId, localReduceKey, ReduceType.SingleStep));
				}

				reduceLevelStats.Completed = SystemTime.UtcNow;
				reduceLevelStats.Duration = reduceLevelStats.Completed - reduceLevelStats.Started;
				reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetMappedResults, getMappedResultsDuration.ElapsedMilliseconds));
				reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, 0)); // in single step we write directly to Lucene index

				foreach (var stats in reductionPerformanceStats)
				{
					reduceLevelStats.Add(stats);
				}

				reducePerformanceStats.LevelStats.Add(reduceLevelStats);
			}
			finally
			{
				long _;
				autoTuner.CurrentlyUsedBatchSizesInBytes.TryRemove(reducingBatchThrottlerId, out _);
			}

			return reducePerformanceStats;
		}
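
The StopwatchScope.For(...) pattern used throughout these methods pairs a Stopwatch with a using block, so timing cannot be left running on an early return or exception. A minimal sketch of such a helper; the real RavenDB type may differ in detail:

		// Disposable timing scope: starts the stopwatch on creation, stops it on Dispose.
		// Dispose stops but does not reset, so repeated scopes accumulate elapsed time.
		public struct StopwatchScope : IDisposable
		{
			private readonly Stopwatch stopwatch;

			private StopwatchScope(Stopwatch stopwatch)
			{
				this.stopwatch = stopwatch;
				this.stopwatch.Start();
			}

			public static StopwatchScope For(Stopwatch stopwatch)
			{
				return new StopwatchScope(stopwatch);
			}

			public void Dispose()
			{
				stopwatch.Stop();
			}
		}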
Example #39
0
 public GetItemsToReduceParams(string index, IEnumerable <string> reduceKeys, int level, bool loadData, ConcurrentSet <object> itemsToDelete)
 {
     Index            = index;
     Level            = level;
     LoadData         = loadData;
     ItemsToDelete    = itemsToDelete;
     ItemsAlreadySeen = new HashSet <Tuple <string, int> >();
     ReduceKeys       = new HashSet <string>(reduceKeys);
 }
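
For context, a hypothetical stand-alone construction of this parameter object; the index name and reduce key below are made up for illustration:

     var itemsToDelete = new ConcurrentSet <object>();
     var reduceParams  = new GetItemsToReduceParams(
         index: "Orders/ByCompany",           // hypothetical index name
         reduceKeys: new[] { "companies/1" }, // hypothetical reduce key
         level: 0,
         loadData: true,
         itemsToDelete: itemsToDelete)
     {
         Take = 1024 // batch size; the callers above set this from configuration
     };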
 private async Task<ConcurrentSet<ISymbol>> DetermineAllSymbolsAsync(ISymbol symbol)
 {
     using (Logger.LogBlock(FeatureId.FindReference, FunctionId.FindReference_DetermineAllSymbolsAsync, this.cancellationToken))
     {
         var result = new ConcurrentSet<ISymbol>(SymbolEquivalenceComparer.Instance);
         await DetermineAllSymbolsCoreAsync(symbol, result).ConfigureAwait(false);
         return result;
     }
 }
Example #41
0
        private static async Task <IEnumerable <INamedTypeSymbol> > FindSourceTypesInProjectAsync(
            HashSet <INamedTypeSymbol> sourceAndMetadataTypes,
            Project project,
            Func <HashSet <INamedTypeSymbol>, INamedTypeSymbol, bool> sourceTypeImmediatelyMatches,
            Func <INamedTypeSymbol, bool> shouldContinueSearching,
            bool transitive,
            CancellationToken cancellationToken)
        {
            // We're going to be sweeping over this project over and over until we reach a
            // fixed point.  In order to limit GC and excess work, we cache all the semantic
            // models and DeclaredSymbolInfo for the documents we look at.
            // Because we're only processing a project at a time, this is not an issue.
            var cachedModels = new ConcurrentSet <SemanticModel>();
            var cachedInfos  = new ConcurrentSet <IDeclarationInfo>();

            var finalResult = new HashSet <INamedTypeSymbol>(SymbolEquivalenceComparer.Instance);

            var typesToSearchFor = new HashSet <INamedTypeSymbol>(SymbolEquivalenceComparer.Instance);

            typesToSearchFor.AddAll(sourceAndMetadataTypes);

            var inheritanceQuery = new InheritanceQuery(sourceAndMetadataTypes);

            // As long as there are new types to search for, keep looping.
            while (typesToSearchFor.Count > 0)
            {
                // Compute the set of names to look for in the base/interface lists.
                inheritanceQuery.TypeNames.AddRange(typesToSearchFor.Select(c => c.Name));

                // Search all the documents of this project in parallel.
                var tasks = project.Documents.Select(d => FindImmediatelyInheritingTypesInDocumentAsync(
                                                         d, typesToSearchFor, inheritanceQuery,
                                                         cachedModels, cachedInfos,
                                                         sourceTypeImmediatelyMatches, cancellationToken)).ToArray();

                await Task.WhenAll(tasks).ConfigureAwait(false);

                // Clear out the information about the types we're looking for.  We'll
                // fill these in if we discover any more types that we need to keep searching
                // for.
                typesToSearchFor.Clear();
                inheritanceQuery.TypeNames.Clear();

                foreach (var task in tasks)
                {
                    if (task.Result != null)
                    {
                        foreach (var derivedType in task.Result)
                        {
                            if (finalResult.Add(derivedType))
                            {
                                if (transitive && shouldContinueSearching(derivedType))
                                {
                                    typesToSearchFor.Add(derivedType);
                                }
                            }
                        }
                    }
                }
            }

            return(finalResult);
        }
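
The loop above is a classic fixed-point sweep: search with the current frontier, add newly discovered types to the result, and repeat with only the new types until nothing further turns up. The same pattern in a generic, self-contained form (illustrative only, not Roslyn code):

        static HashSet<T> TransitiveClosure<T>(IEnumerable<T> seeds, Func<T, IEnumerable<T>> expandOneStep)
        {
            var result   = new HashSet<T>();
            var frontier = new HashSet<T>(seeds);

            while (frontier.Count > 0)
            {
                var discovered = new HashSet<T>();
                foreach (var item in frontier)
                {
                    foreach (var next in expandOneStep(item))
                    {
                        // Only first-time items feed the next sweep, which
                        // guarantees termination over a finite universe of types.
                        if (result.Add(next))
                        {
                            discovered.Add(next);
                        }
                    }
                }
                frontier = discovered;
            }

            return result;
        }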
Example #42
0
		private void SingleStepReduce(IndexToWorkOn index, string[] keysToReduce, AbstractViewGenerator viewGenerator,
												ConcurrentSet<object> itemsToDelete)
		{
			var needToMoveToSingleStepQueue = new ConcurrentQueue<HashSet<string>>();

			Log.Debug(() => string.Format("Executing single step reducing for {0} keys [{1}]", keysToReduce.Length, string.Join(", ", keysToReduce)));
			var batchTimeWatcher = Stopwatch.StartNew();
			var count = 0;
			var size = 0;
			var state = new ConcurrentQueue<Tuple<HashSet<string>, List<MappedResultInfo>>>();
			var reducingBatchThrottlerId = Guid.NewGuid();
			try
			{
				BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, keysToReduce, enumerator =>
				{
					var localNeedToMoveToSingleStep = new HashSet<string>();
					needToMoveToSingleStepQueue.Enqueue(localNeedToMoveToSingleStep);
					var localKeys = new HashSet<string>();
					while (enumerator.MoveNext())
					{
						localKeys.Add(enumerator.Current);
					}

					transactionalStorage.Batch(actions =>
					{
						var getItemsToReduceParams = new GetItemsToReduceParams(index: index.IndexName, reduceKeys: localKeys, level: 0,
							loadData: false,
							itemsToDelete: itemsToDelete)
						{
							Take = int.MaxValue// just get all, we do the rate limit when we load the number of keys to reduce, anyway
						};

					
						var scheduledItems = actions.MapReduce.GetItemsToReduce(getItemsToReduceParams).ToList();
						autoTuner.CurrentlyUsedBatchSizes.GetOrAdd(reducingBatchThrottlerId, scheduledItems.Sum(x => x.Size));
						if (scheduledItems.Count == 0)
						{
							if (Log.IsWarnEnabled)
							{
								Log.Warn("Found single reduce items ({0}) that didn't have any items to reduce. Deleting level 1 & level 2 items for those keys. (If you can reproduce this, please contact [email protected])",
									string.Join(", ", keysToReduce));
							}
							// Here we have an interesting issue. We have scheduled reductions, because GetReduceTypesPerKeys() returned them
							// and at the same time, we don't have any at level 0. That probably means that we have them at level 1 or 2.
							// They shouldn't be here, and indeed, we remove them just a little down from here in this function.
							// That said, they might have smuggled in between versions, or something happened to cause them to be here.
							// In order to avoid that, we forcibly delete those extra items from the scheduled reductions, and move on
							foreach (var reduceKey in keysToReduce)
							{
								actions.MapReduce.DeleteScheduledReduction(index.IndexName, 1, reduceKey);
								actions.MapReduce.DeleteScheduledReduction(index.IndexName, 2, reduceKey);
							}
						}

						foreach (var reduceKey in localKeys)
						{
							var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexName, reduceKey);

							if (lastPerformedReduceType != ReduceType.SingleStep)
								localNeedToMoveToSingleStep.Add(reduceKey);

							if (lastPerformedReduceType != ReduceType.MultiStep)
								continue;

							Log.Debug("Key {0} was moved from multi step to single step reduce, removing existing reduce results records",
								reduceKey);

							// now we are in single step but previously multi step reduce was performed for the given key
							var mappedBuckets = actions.MapReduce.GetMappedBuckets(index.IndexName, reduceKey).ToList();

							// add scheduled items too to be sure we will delete reduce results of already deleted documents
							mappedBuckets.AddRange(scheduledItems.Select(x => x.Bucket));

							foreach (var mappedBucket in mappedBuckets.Distinct())
							{
								actions.MapReduce.RemoveReduceResults(index.IndexName, 1, reduceKey, mappedBucket);
								actions.MapReduce.RemoveReduceResults(index.IndexName, 2, reduceKey, mappedBucket / 1024);
							}
						}

						var mappedResults = actions.MapReduce.GetMappedResults(
							index.IndexName,
							localKeys,
							loadData: true
							).ToList();

						Interlocked.Add(ref count, mappedResults.Count);
						Interlocked.Add(ref size, mappedResults.Sum(x => x.Size));

						mappedResults.ApplyIfNotNull(x => x.Bucket = 0);

						state.Enqueue(Tuple.Create(localKeys, mappedResults));
					});
				});

				var reduceKeys = new HashSet<string>(state.SelectMany(x => x.Item1));

				var results = state.SelectMany(x => x.Item2)
					.Where(x => x.Data != null)
					.GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data))
					.ToArray();
				context.PerformanceCounters.ReducedPerSecond.IncrementBy(results.Length);

				context.TransactionalStorage.Batch(actions =>
					context.IndexStorage.Reduce(index.IndexName, viewGenerator, results, 2, context, actions, reduceKeys, state.Sum(x=>x.Item2.Count))
					);

				autoTuner.AutoThrottleBatchSize(count, size, batchTimeWatcher.Elapsed);

				var needToMoveToSingleStep = new HashSet<string>();
				HashSet<string> set;
				while (needToMoveToSingleStepQueue.TryDequeue(out set))
				{
					needToMoveToSingleStep.UnionWith(set);
				}

				foreach (var reduceKey in needToMoveToSingleStep)
				{
					string localReduceKey = reduceKey;
					transactionalStorage.Batch(actions =>
						actions.MapReduce.UpdatePerformedReduceType(index.IndexName, localReduceKey, ReduceType.SingleStep));
				}
			}
			finally
			{
				long _;
				autoTuner.CurrentlyUsedBatchSizes.TryRemove(reducingBatchThrottlerId, out _);
			}
		}
Example #43
0
        private void SingleStepReduce(IndexToWorkOn index, string[] keysToReduce, AbstractViewGenerator viewGenerator,
                                      ConcurrentSet <object> itemsToDelete)
        {
            var needToMoveToSingleStepQueue = new ConcurrentQueue <HashSet <string> >();

            Log.Debug(() => string.Format("Executing single step reducing for {0} keys [{1}]", keysToReduce.Length, string.Join(", ", keysToReduce)));
            var batchTimeWatcher = Stopwatch.StartNew();
            var count            = 0;
            var size             = 0;
            var state            = new ConcurrentQueue <Tuple <HashSet <string>, List <MappedResultInfo> > >();

            BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, keysToReduce, enumerator =>
            {
                var localNeedToMoveToSingleStep = new HashSet <string>();
                needToMoveToSingleStepQueue.Enqueue(localNeedToMoveToSingleStep);
                var localKeys = new HashSet <string>();
                while (enumerator.MoveNext())
                {
                    localKeys.Add(enumerator.Current);
                }
                transactionalStorage.Batch(actions =>
                {
                    var getItemsToReduceParams = new GetItemsToReduceParams(index: index.IndexName, reduceKeys: localKeys, level: 0,
                                                                            loadData: false,
                                                                            itemsToDelete: itemsToDelete)
                    {
                        Take = int.MaxValue                        // just get all, we do the rate limit when we load the number of keys to reduce, anyway
                    };
                    var scheduledItems = actions.MapReduce.GetItemsToReduce(getItemsToReduceParams).ToList();

                    if (scheduledItems.Count == 0)
                    {
                        if (Log.IsWarnEnabled)
                        {
                            Log.Warn("Found single reduce items ({0}) that didn't have any items to reduce. Deleting level 1 & level 2 items for those keys. (If you can reproduce this, please contact [email protected])",
                                     string.Join(", ", keysToReduce));
                        }
                        // Here we have an interesting issue. We have scheduled reductions, because GetReduceTypesPerKeys() returned them
                        // and at the same time, we don't have any at level 0. That probably means that we have them at level 1 or 2.
                        // They shouldn't be here, and indeed, we remove them just a little down from here in this function.
                        // That said, they might have smuggled in between versions, or something happened to cause them to be here.
                        // In order to avoid that, we forcibly delete those extra items from the scheduled reductions, and move on
                        foreach (var reduceKey in keysToReduce)
                        {
                            actions.MapReduce.DeleteScheduledReduction(index.IndexName, 1, reduceKey);
                            actions.MapReduce.DeleteScheduledReduction(index.IndexName, 2, reduceKey);
                        }
                    }

                    foreach (var reduceKey in localKeys)
                    {
                        var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexName, reduceKey);

                        if (lastPerformedReduceType != ReduceType.SingleStep)
                        {
                            localNeedToMoveToSingleStep.Add(reduceKey);
                        }

                        if (lastPerformedReduceType != ReduceType.MultiStep)
                        {
                            continue;
                        }

                        Log.Debug("Key {0} was moved from multi step to single step reduce, removing existing reduce results records",
                                  reduceKey);

                        // now we are in single step but previously multi step reduce was performed for the given key
                        var mappedBuckets = actions.MapReduce.GetMappedBuckets(index.IndexName, reduceKey).ToList();

                        // add scheduled items too to be sure we will delete reduce results of already deleted documents
                        mappedBuckets.AddRange(scheduledItems.Select(x => x.Bucket));

                        foreach (var mappedBucket in mappedBuckets.Distinct())
                        {
                            actions.MapReduce.RemoveReduceResults(index.IndexName, 1, reduceKey, mappedBucket);
                            actions.MapReduce.RemoveReduceResults(index.IndexName, 2, reduceKey, mappedBucket / 1024);
                        }
                    }

                    var mappedResults = actions.MapReduce.GetMappedResults(
                        index.IndexName,
                        localKeys,
                        loadData: true
                        ).ToList();

                    Interlocked.Add(ref count, mappedResults.Count);
                    Interlocked.Add(ref size, mappedResults.Sum(x => x.Size));

                    mappedResults.ApplyIfNotNull(x => x.Bucket = 0);

                    state.Enqueue(Tuple.Create(localKeys, mappedResults));
                });
            });

            var reduceKeys = new HashSet <string>(state.SelectMany(x => x.Item1));

            var results = state.SelectMany(x => x.Item2)
                          .Where(x => x.Data != null)
                          .GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data))
                          .ToArray();

            context.PerformanceCounters.ReducedPerSecond.IncrementBy(results.Length);

            context.TransactionalStorage.Batch(actions =>
                                               context.IndexStorage.Reduce(index.IndexName, viewGenerator, results, 2, context, actions, reduceKeys, state.Sum(x => x.Item2.Count))
                                               );

            autoTuner.AutoThrottleBatchSize(count, size, batchTimeWatcher.Elapsed);

            var needToMoveToSingleStep = new HashSet <string>();
            HashSet <string> set;

            while (needToMoveToSingleStepQueue.TryDequeue(out set))
            {
                needToMoveToSingleStep.UnionWith(set);
            }

            foreach (var reduceKey in needToMoveToSingleStep)
            {
                string localReduceKey = reduceKey;
                transactionalStorage.Batch(actions =>
                                           actions.MapReduce.UpdatePerformedReduceType(index.IndexName, localReduceKey, ReduceType.SingleStep));
            }
        }
Example #44
0
 public ChainStateMonitor(Logger logger)
     : base("ChainStateMonitor", isConcurrent: false, logger: logger)
 {
     this.visitors = new ConcurrentSet <IChainStateVisitor>();
 }
 private void AddSymbolTasks(
     ConcurrentSet<SymbolAndProjectId> result,
     IEnumerable<SymbolAndProjectId> symbols,
     List<Task> symbolTasks)
 {
     if (symbols != null)
     {
         foreach (var child in symbols)
         {
             _cancellationToken.ThrowIfCancellationRequested();
             symbolTasks.Add(Task.Run(() => DetermineAllSymbolsCoreAsync(child, result), _cancellationToken));
         }
     }
 }
Example #46
0
        private void MultiStepReduce(IndexToWorkOn index, string[] keysToReduce, AbstractViewGenerator viewGenerator, ConcurrentSet <object> itemsToDelete)
        {
            var needToMoveToMultiStep = new HashSet <string>();

            transactionalStorage.Batch(actions =>
            {
                foreach (var localReduceKey in keysToReduce)
                {
                    var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexName, localReduceKey);

                    if (lastPerformedReduceType != ReduceType.MultiStep)
                    {
                        needToMoveToMultiStep.Add(localReduceKey);
                    }

                    if (lastPerformedReduceType != ReduceType.SingleStep)
                    {
                        continue;
                    }
                    // we exceeded the limit of items to reduce in single step
                    // now we need to schedule reductions at level 0 for all map results with given reduce key
                    var mappedItems = actions.MapReduce.GetMappedBuckets(index.IndexName, localReduceKey).ToList();
                    foreach (var result in mappedItems.Select(x => new ReduceKeyAndBucket(x, localReduceKey)))
                    {
                        actions.MapReduce.ScheduleReductions(index.IndexName, 0, result);
                    }
                }
            });

            for (int i = 0; i < 3; i++)
            {
                var level = i;

                var reduceParams = new GetItemsToReduceParams(
                    index.IndexName,
                    keysToReduce,
                    level,
                    true,
                    itemsToDelete);

                bool retry = true;
                while (retry && reduceParams.ReduceKeys.Count > 0)
                {
                    transactionalStorage.Batch(actions =>
                    {
                        context.CancellationToken.ThrowIfCancellationRequested();

                        var batchTimeWatcher = Stopwatch.StartNew();

                        reduceParams.Take    = context.CurrentNumberOfItemsToReduceInSingleBatch;
                        var persistedResults = actions.MapReduce.GetItemsToReduce(reduceParams).ToList();
                        if (persistedResults.Count == 0)
                        {
                            retry = false;
                            return;
                        }

                        var count = persistedResults.Count;
                        var size  = persistedResults.Sum(x => x.Size);

                        if (Log.IsDebugEnabled)
                        {
                            if (persistedResults.Count > 0)
                            {
                                Log.Debug(() => string.Format("Found {0} results for keys [{1}] for index {2} at level {3} in {4}",
                                                              persistedResults.Count,
                                                              string.Join(", ", persistedResults.Select(x => x.ReduceKey).Distinct()),
                                                              index.IndexName, level, batchTimeWatcher.Elapsed));
                            }
                            else
                            {
                                Log.Debug("No reduce keys found for {0}", index.IndexName);
                            }
                        }

                        context.CancellationToken.ThrowIfCancellationRequested();

                        var requiredReduceNextTime = persistedResults.Select(x => new ReduceKeyAndBucket(x.Bucket, x.ReduceKey))
                                                     .OrderBy(x => x.Bucket)
                                                     .Distinct()
                                                     .ToArray();
                        foreach (var mappedResultInfo in requiredReduceNextTime)
                        {
                            actions.MapReduce.RemoveReduceResults(index.IndexName, level + 1, mappedResultInfo.ReduceKey,
                                                                  mappedResultInfo.Bucket);
                        }

                        if (level != 2)
                        {
                            var reduceKeysAndBuckets = requiredReduceNextTime
                                                       .Select(x => new ReduceKeyAndBucket(x.Bucket / 1024, x.ReduceKey))
                                                       .Distinct()
                                                       .ToArray();
                            foreach (var reduceKeysAndBucket in reduceKeysAndBuckets)
                            {
                                actions.MapReduce.ScheduleReductions(index.IndexName, level + 1, reduceKeysAndBucket);
                            }
                        }

                        var results = persistedResults
                                      .Where(x => x.Data != null)
                                      .GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data))
                                      .ToArray();
                        var reduceKeys = new HashSet <string>(persistedResults.Select(x => x.ReduceKey),
                                                              StringComparer.InvariantCultureIgnoreCase);
                        context.PerformanceCounters.ReducedPerSecond.IncrementBy(results.Length);

                        context.CancellationToken.ThrowIfCancellationRequested();
                        var reduceTimeWatcher = Stopwatch.StartNew();

                        context.IndexStorage.Reduce(index.IndexName, viewGenerator, results, level, context, actions, reduceKeys, persistedResults.Count);

                        var batchDuration = batchTimeWatcher.Elapsed;
                        Log.Debug("Indexed {0} reduce keys in {1} with {2} results for index {3} in {4} on level {5}", reduceKeys.Count, batchDuration,
                                  results.Length, index.IndexName, reduceTimeWatcher.Elapsed, level);

                        autoTuner.AutoThrottleBatchSize(count, size, batchDuration);
                    });
                }
            }

            foreach (var reduceKey in needToMoveToMultiStep)
            {
                string localReduceKey = reduceKey;
                transactionalStorage.Batch(actions =>
                                           actions.MapReduce.UpdatePerformedReduceType(index.IndexName, localReduceKey,
                                                                                       ReduceType.MultiStep));
            }
        }
Example #47
			public void HandleLowMemory()
			{
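				// replacing the tables wholesale drops every cached entry at once under memory
				// pressure; subsequent reads repopulate the fresh instances on demand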
				documentIdsSetBuildersCache = new ConditionalWeakTable<IndexReader, ConcurrentDictionary<Tuple<string, Predicate<string>>, Predicate<int>>>();
				fieldsStringValuesInReadersCache = new ConditionalWeakTable<IndexReader, ConcurrentDictionary<string, Dictionary<int, string>>>();
				_keys = new ConcurrentSet<WeakReference<IndexReader>>();
			}
Example #48
 public LoggerExecutionWrapper(ILog logger, string loggerName, ConcurrentSet <Target> targets)
 {
     this.logger     = logger;
     this.loggerName = loggerName;
     this.targets    = targets;
 }
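
A minimal usage sketch for the constructor above; NullLog is a hypothetical ILog implementation and the logger name is illustrative, since the example shows only how the wrapper is constructed:

 var targets = new ConcurrentSet <Target>();                 // targets can be added concurrently later
 var wrapped = new LoggerExecutionWrapper(new NullLog(),     // hypothetical ILog implementation
                                          "MyApp.Indexing",
                                          targets);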
Example #49
		public override IndexingPerformanceStats IndexDocuments(AbstractViewGenerator viewGenerator, IndexingBatch batch, IStorageActionsAccessor actions, DateTime minimumTimestamp, CancellationToken token)
		{
			token.ThrowIfCancellationRequested();

			var count = 0;
			var sourceCount = 0;
			var deleted = new Dictionary<ReduceKeyAndBucket, int>();
			var performance = RecordCurrentBatch("Current Map", "Map", batch.Docs.Count);
			var performanceStats = new List<BasePerformanceStats>();

			var usedStorageAccessors = new ConcurrentSet<IStorageActionsAccessor>();

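			// TryAdd makes the registration idempotent: commit-duration handlers are attached
			// at most once per storage accessor, even when partitions share one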
			if (usedStorageAccessors.TryAdd(actions))
			{
				var storageCommitDuration = new Stopwatch();

				actions.BeforeStorageCommit += storageCommitDuration.Start;

				actions.AfterStorageCommit += () =>
				{
					storageCommitDuration.Stop();

					performanceStats.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
				};
			}

			var deleteMappedResultsDuration = new Stopwatch();
			var documentsWrapped = batch.Docs.Select(doc =>
			{
				token.ThrowIfCancellationRequested();

				sourceCount++;
				var documentId = doc.__document_id;

				using (StopwatchScope.For(deleteMappedResultsDuration))
				{
					actions.MapReduce.DeleteMappedResultsForDocumentId((string)documentId, indexId, deleted);
				}
				
				return doc;
			})
			.Where(x => x is FilteredDocument == false)
			.ToList();

			performanceStats.Add(new PerformanceStats
			{
				Name = IndexingOperation.Map_DeleteMappedResults,
				DurationMs = deleteMappedResultsDuration.ElapsedMilliseconds,
			});

			var allReferencedDocs = new ConcurrentQueue<IDictionary<string, HashSet<string>>>();
			var allReferenceEtags = new ConcurrentQueue<IDictionary<string, Etag>>();
			var allState = new ConcurrentQueue<Tuple<HashSet<ReduceKeyAndBucket>, IndexingWorkStats, Dictionary<string, int>>>();

			var parallelOperations = new ConcurrentQueue<ParallelBatchStats>();

			var parallelProcessingStart = SystemTime.UtcNow;

			BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, documentsWrapped, partition =>
			{
				token.ThrowIfCancellationRequested();
				var parallelStats = new ParallelBatchStats
				{
					StartDelay = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds
				};

				var localStats = new IndexingWorkStats();
				var localChanges = new HashSet<ReduceKeyAndBucket>();
				var statsPerKey = new Dictionary<string, int>();

				var linqExecutionDuration = new Stopwatch();
				var reduceInMapLinqExecutionDuration = new Stopwatch();
				var putMappedResultsDuration = new Stopwatch();
				var convertToRavenJObjectDuration = new Stopwatch();

				allState.Enqueue(Tuple.Create(localChanges, localStats, statsPerKey));

				using (CurrentIndexingScope.Current = new CurrentIndexingScope(context.Database, PublicName))
				{
					// we are writing to the transactional store from multiple threads here, and in a streaming fashion
					// should result in less memory and better perf
					context.TransactionalStorage.Batch(accessor =>
					{
						if (usedStorageAccessors.TryAdd(accessor))
						{
							var storageCommitDuration = new Stopwatch();

							accessor.BeforeStorageCommit += storageCommitDuration.Start;

							accessor.AfterStorageCommit += () =>
							{
								storageCommitDuration.Stop();

								parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
							};
						}

						var mapResults = RobustEnumerationIndex(partition, viewGenerator.MapDefinitions, localStats, linqExecutionDuration);
						var currentDocumentResults = new List<object>();
						string currentKey = null;
						bool skipDocument = false;
						
						foreach (var currentDoc in mapResults)
						{
							token.ThrowIfCancellationRequested();

							var documentId = GetDocumentId(currentDoc);
							if (documentId != currentKey)
							{
								count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey, reduceInMapLinqExecutionDuration, putMappedResultsDuration, convertToRavenJObjectDuration);

								currentDocumentResults.Clear();
								currentKey = documentId;
							}
							else if (skipDocument)
							{
								continue;
							}

							RavenJObject currentDocJObject;
							using (StopwatchScope.For(convertToRavenJObjectDuration))
							{
								currentDocJObject = RavenJObject.FromObject(currentDoc, jsonSerializer);
							}

							currentDocumentResults.Add(new DynamicJsonObject(currentDocJObject));

							if (EnsureValidNumberOfOutputsForDocument(documentId, currentDocumentResults.Count) == false)
							{
								skipDocument = true;
								currentDocumentResults.Clear();
								continue;
							}

							Interlocked.Increment(ref localStats.IndexingSuccesses);
						}
						count += ProcessBatch(viewGenerator, currentDocumentResults, currentKey, localChanges, accessor, statsPerKey, reduceInMapLinqExecutionDuration, putMappedResultsDuration, convertToRavenJObjectDuration);

						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.LoadDocument, CurrentIndexingScope.Current.LoadDocumentDuration.ElapsedMilliseconds));
						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_MapExecution, linqExecutionDuration.ElapsedMilliseconds));
						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Linq_ReduceLinqExecution, reduceInMapLinqExecutionDuration.ElapsedMilliseconds));
						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_PutMappedResults, putMappedResultsDuration.ElapsedMilliseconds));
						parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ConvertToRavenJObject, convertToRavenJObjectDuration.ElapsedMilliseconds));

						parallelOperations.Enqueue(parallelStats);
					});

					allReferenceEtags.Enqueue(CurrentIndexingScope.Current.ReferencesEtags);
					allReferencedDocs.Enqueue(CurrentIndexingScope.Current.ReferencedDocuments);
				}
			});

			performanceStats.Add(new ParallelPerformanceStats
			{
				NumberOfThreads = parallelOperations.Count,
				DurationMs = (long)(SystemTime.UtcNow - parallelProcessingStart).TotalMilliseconds,
				BatchedOperations = parallelOperations.ToList()
			});

			var updateDocumentReferencesDuration = new Stopwatch();
			using (StopwatchScope.For(updateDocumentReferencesDuration))
			{
				UpdateDocumentReferences(actions, allReferencedDocs, allReferenceEtags);
			}
			performanceStats.Add(PerformanceStats.From(IndexingOperation.UpdateDocumentReferences, updateDocumentReferencesDuration.ElapsedMilliseconds));

			var changed = allState.SelectMany(x => x.Item1).Concat(deleted.Keys)
					.Distinct()
					.ToList();

			var stats = new IndexingWorkStats(allState.Select(x => x.Item2));
			var reduceKeyStats = allState.SelectMany(x => x.Item3)
										 .GroupBy(x => x.Key)
										 .Select(g => new { g.Key, Count = g.Sum(x => x.Value) })
										 .ToList();

			BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, reduceKeyStats, enumerator => context.TransactionalStorage.Batch(accessor =>
			{
				while (enumerator.MoveNext())
				{
					var reduceKeyStat = enumerator.Current;
					accessor.MapReduce.IncrementReduceKeyCounter(indexId, reduceKeyStat.Key, reduceKeyStat.Count);
				}
			}));


			var parallelReductionOperations = new ConcurrentQueue<ParallelBatchStats>();
			var parallelReductionStart = SystemTime.UtcNow;

			BackgroundTaskExecuter.Instance.ExecuteAllBuffered(context, changed, enumerator => context.TransactionalStorage.Batch(accessor =>
			{
				var parallelStats = new ParallelBatchStats
				{
					StartDelay = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds
				};

				var scheduleReductionsDuration = new Stopwatch();

				using (StopwatchScope.For(scheduleReductionsDuration))
				{
					while (enumerator.MoveNext())
					{
						accessor.MapReduce.ScheduleReductions(indexId, 0, enumerator.Current);
					}
				}

				parallelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Map_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds));
				parallelReductionOperations.Enqueue(parallelStats);
			}));

			performanceStats.Add(new ParallelPerformanceStats
			{
				NumberOfThreads = parallelReductionOperations.Count,
				DurationMs = (long)(SystemTime.UtcNow - parallelReductionStart).TotalMilliseconds,
				BatchedOperations = parallelReductionOperations.ToList()
			});

			UpdateIndexingStats(context, stats);

			performance.OnCompleted = () => BatchCompleted("Current Map", "Map", sourceCount, count, performanceStats);

			logIndexing.Debug("Mapped {0} documents for {1}", count, indexId);

			return performance;
		}
Example #50
 public MemoryBlockStorage()
 {
     this.chainedHeaders            = new ConcurrentDictionary <UInt256, ChainedHeader>();
     this.invalidBlocks             = new ConcurrentSet <UInt256>();
     this.chainedHeadersByTotalWork = new SortedDictionary <BigInteger, List <ChainedHeader> >(new ReverseBigIntegerComparer());
 }
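
The constructor above references a ReverseBigIntegerComparer that the example does not include; a plausible sketch, assuming it simply inverts the natural BigInteger ordering so the SortedDictionary enumerates chained headers from most to least total work:

 // Hypothetical reconstruction (requires System.Numerics and System.Collections.Generic):
 // orders BigInteger keys descending, so enumeration starts at the heaviest chain.
 internal sealed class ReverseBigIntegerComparer : IComparer <BigInteger>
 {
     public int Compare(BigInteger x, BigInteger y)
     {
         return y.CompareTo(x); // arguments reversed => descending order
     }
 }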
Example #51
        private static async Task GetDependentTypesInProjectAsync(
            INamedTypeSymbol type, Project project, Solution solution, Func<INamedTypeSymbol, INamedTypeSymbol, bool> predicate, ConditionalWeakTable<Compilation, ConcurrentDictionary<SymbolKey, List<SymbolKey>>> cache, bool locationsInMetadata, ConcurrentSet<ISymbol> results, CancellationToken cancellationToken)
        {
            cancellationToken.ThrowIfCancellationRequested();

            var compilation = await project.GetCompilationAsync(cancellationToken).ConfigureAwait(false);

            var typeId = type.GetSymbolKey();

            List<SymbolKey> dependentTypeIds;
            if (!TryGetDependentTypes(cache, compilation, typeId, out dependentTypeIds))
            {
                List<INamedTypeSymbol> allTypes;
                if (locationsInMetadata)
                {
                    // From metadata, have to check other (non private) metadata types, as well as
                    // source types.
                    allTypes = GetAllSourceAndAccessibleTypesInCompilation(compilation, cancellationToken);
                }
                else
                {
                    // It's from source, so only other source types could derive from it.
                    allTypes = GetAllSourceTypesInCompilation(compilation, cancellationToken);
                }

                dependentTypeIds = new List<SymbolKey>();
                foreach (var t in allTypes)
                {
                    cancellationToken.ThrowIfCancellationRequested();

                    if (predicate(t, type))
                    {
                        dependentTypeIds.Add(t.GetSymbolKey());
                    }
                }

                dependentTypeIds = GetOrAddDependentTypes(cache, compilation, typeId, dependentTypeIds);
            }

            foreach (var id in dependentTypeIds)
            {
                cancellationToken.ThrowIfCancellationRequested();

                var resolvedSymbols = id.Resolve(compilation, cancellationToken: cancellationToken).GetAllSymbols();
                foreach (var resolvedSymbol in resolvedSymbols)
                {
                    var mappedSymbol = await SymbolFinder.FindSourceDefinitionAsync(resolvedSymbol, solution, cancellationToken).ConfigureAwait(false) ?? resolvedSymbol;
                    results.Add(mappedSymbol);
                }
            }
        }
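
TryGetDependentTypes and GetOrAddDependentTypes are not shown in this example; a plausible sketch, assuming they wrap the ConditionalWeakTable keyed by compilation so cached results die together with the compilation they were computed for:

        // Hypothetical helpers: cache dependent-type ids per compilation so repeated
        // queries for the same type don't rescan every type in the compilation.
        private static bool TryGetDependentTypes(
            ConditionalWeakTable<Compilation, ConcurrentDictionary<SymbolKey, List<SymbolKey>>> cache,
            Compilation compilation, SymbolKey typeId, out List<SymbolKey> dependentTypeIds)
        {
            return cache.GetOrCreateValue(compilation).TryGetValue(typeId, out dependentTypeIds);
        }

        private static List<SymbolKey> GetOrAddDependentTypes(
            ConditionalWeakTable<Compilation, ConcurrentDictionary<SymbolKey, List<SymbolKey>>> cache,
            Compilation compilation, SymbolKey typeId, List<SymbolKey> dependentTypeIds)
        {
            return cache.GetOrCreateValue(compilation).GetOrAdd(typeId, dependentTypeIds);
        }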
Example #52
        /// <summary>
        /// Executes the given action in batches over the received collection
        /// </summary>
        /// <typeparam name="T">type of the items in the source collection</typeparam>
        /// <param name="src">source collection to process</param>
        /// <param name="action">action invoked with an enumerator over each batch of items</param>
        /// <param name="pageSize">maximum number of items per batch</param>
        /// <param name="description">optional description used when reporting the operation</param>
        public void ExecuteBatch <T>(IList <T> src, Action <IEnumerator <T> > action,
                                     int pageSize = defaultPageSize, string description = null)
        {
            if (src.Count == 0)
            {
                return;
            }

            if (src.Count <= pageSize)
            {
                //if we have no more than a single page of items,
                //we should execute it in the current thread, without using RTP threads
                ExecuteSingleBatchSynchronously(src, action, description);
                return;
            }

            var ranges       = new ConcurrentQueue <Tuple <int, int> >();
            var now          = DateTime.UtcNow;
            var numOfBatches = (src.Count / pageSize) + (src.Count % pageSize == 0 ? 0 : 1);

            CountdownEvent batchesCountdown;

            if (_concurrentEvents.TryDequeue(out batchesCountdown) == false)
            {
                batchesCountdown = new CountdownEvent(numOfBatches);
            }
            else
            {
                batchesCountdown.Reset(numOfBatches);
            }

            var localTasks = new List <ThreadTask>();
            var exceptions = new ConcurrentSet <Exception>();

            for (var i = 0; i < src.Count; i += pageSize)
            {
                var rangeStart = i;
                var rangeEnd   = i + pageSize - 1 < src.Count ? i + pageSize - 1 : src.Count - 1;
                ranges.Enqueue(Tuple.Create(rangeStart, rangeEnd));

                var threadTask = new ThreadTask
                {
                    Action = () =>
                    {
                        var numOfBatchesUsed = new Reference <int>();
                        try
                        {
                            Tuple <int, int> range;
                            if (ranges.TryDequeue(out range) == false)
                            {
                                return;
                            }

                            action(YieldFromRange(ranges, range, src, numOfBatchesUsed));
                        }
                        catch (Exception e)
                        {
                            exceptions.Add(e);
                            throw;
                        }
                        finally
                        {
                            var numOfBatchesUsedValue = numOfBatchesUsed.Value;
                            //handle the case when we are out of ranges
                            if (numOfBatchesUsedValue > 0)
                            {
                                batchesCountdown.Signal(numOfBatchesUsedValue);
                            }
                        }
                    },
                    Description = new OperationDescription
                    {
                        Type      = OperationDescription.OperationType.Range,
                        PlainText = description,
                        From      = rangeStart,
                        To        = rangeEnd,
                        Total     = src.Count
                    },
                    BatchStats = new BatchStatistics
                    {
                        Total     = rangeEnd - rangeStart,
                        Completed = 0
                    },
                    QueuedAt  = now,
                    DoneEvent = batchesCountdown
                };
                localTasks.Add(threadTask);
            }

            // we must add the tasks to the global tasks after we added all the ranges
            // to prevent the tasks from completing the range fast enough that it won't
            // see the next range, see: http://issues.hibernatingrhinos.com/issue/RavenDB-4829
            foreach (var threadTask in localTasks)
            {
                _tasks.Add(threadTask, _ct);
            }

            WaitForBatchToCompletion(batchesCountdown);

            switch (exceptions.Count)
            {
            case 0:
                return;

            case 1:
                ExceptionDispatchInfo.Capture(exceptions.First()).Throw();
                break;

            default:
                throw new AggregateException(exceptions);
            }
        }
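
A usage sketch, assuming an instance of the surrounding thread-pool class (here called executer); the action receives an enumerator that yields one page of items and, per YieldFromRange, may pull further queued ranges:

 // hypothetical call site
 executer.ExecuteBatch(documentIds, enumerator =>
 {
     while (enumerator.MoveNext())
         ProcessDocument(enumerator.Current);   // hypothetical per-item work
 }, pageSize: 1024, description: "processing documents");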
Example #53
		protected ReducingPerformanceStats[] HandleReduceForIndex(IndexToWorkOn indexToWorkOn, CancellationToken token)
		{
			var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexToWorkOn.IndexId);
			if (viewGenerator == null)
				return null;

			bool operationCanceled = false;
			var itemsToDelete = new ConcurrentSet<object>();

			var singleStepReduceKeys = new List<string>();
			var multiStepsReduceKeys = new List<string>();

			transactionalStorage.Batch(actions =>
			{
                var mappedResultsInfo = actions.MapReduce.GetReduceTypesPerKeys(indexToWorkOn.IndexId, 
                                                                    context.CurrentNumberOfItemsToReduceInSingleBatch, 
                                                                    context.NumberOfItemsToExecuteReduceInSingleStep, token);

			    foreach (var key in mappedResultsInfo)
			    {
				    token.ThrowIfCancellationRequested();

				    switch (key.OperationTypeToPerform)
				    {
					    case ReduceType.SingleStep:
						    singleStepReduceKeys.Add(key.ReduceKey);
						    break;
					    case ReduceType.MultiStep:
						    multiStepsReduceKeys.Add(key.ReduceKey);
						    break;
				    }
			    }
			});

			currentlyProcessedIndexes.TryAdd(indexToWorkOn.IndexId, indexToWorkOn.Index);

			var performanceStats = new List<ReducingPerformanceStats>();

			try
			{
				if (singleStepReduceKeys.Count > 0)
				{
                    if (Log.IsDebugEnabled)
                        Log.Debug("SingleStep reduce for keys: {0}", string.Join(", ", singleStepReduceKeys));
                    
					var singleStepStats = SingleStepReduce(indexToWorkOn, singleStepReduceKeys, viewGenerator, itemsToDelete, token);

					performanceStats.Add(singleStepStats);
				}

				if (multiStepsReduceKeys.Count > 0)
				{
                    if (Log.IsDebugEnabled)
                        Log.Debug("MultiStep reduce for keys: {0}", string.Join(", ", multiStepsReduceKeys));

					var multiStepStats = MultiStepReduce(indexToWorkOn, multiStepsReduceKeys, viewGenerator, itemsToDelete, token);

					performanceStats.Add(multiStepStats);
				}
			}
			catch (OperationCanceledException)
			{
				operationCanceled = true;
			}
			catch (AggregateException e)
			{
				var anyOperationsCanceled = e
					.InnerExceptions
					.OfType<OperationCanceledException>()
					.Any();

				if (anyOperationsCanceled == false) 
					throw;

				operationCanceled = true;
			}
			finally
			{
				var postReducingOperations = new ReduceLevelPeformanceStats
				{
					Level = -1,
					Started = SystemTime.UtcNow
				};

				if (operationCanceled == false)
				{
					var deletingScheduledReductionsDuration = new Stopwatch();
					var storageCommitDuration = new Stopwatch();

					// whatever we succeeded in indexing or not, we have to update this
					// because otherwise we keep trying to re-index failed mapped results
					transactionalStorage.Batch(actions =>
					{
						actions.BeforeStorageCommit += storageCommitDuration.Start;
						actions.AfterStorageCommit += storageCommitDuration.Stop;

						ScheduledReductionInfo latest;

						using (StopwatchScope.For(deletingScheduledReductionsDuration))
						{
							latest = actions.MapReduce.DeleteScheduledReduction(itemsToDelete);
						}

						if (latest == null)
							return;
						actions.Indexing.UpdateLastReduced(indexToWorkOn.Index.indexId, latest.Etag, latest.Timestamp);
					});

					postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds));
					postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
				}

				postReducingOperations.Completed = SystemTime.UtcNow;
				postReducingOperations.Duration = postReducingOperations.Completed - postReducingOperations.Started;

				performanceStats.Add(new ReducingPerformanceStats(ReduceType.None)
				{
					LevelStats = new List<ReduceLevelPeformanceStats> { postReducingOperations }
				});

				Index _;
				currentlyProcessedIndexes.TryRemove(indexToWorkOn.IndexId, out _);
			}

			return performanceStats.ToArray();
		}
Example #54
        public void ExecuteBatch <T>(IList <T> src, Action <T> action, string description = null,
                                     bool allowPartialBatchResumption = false, int completedMultiplier = 2, int freeThreadsMultiplier = 2, int maxWaitMultiplier = 1)
        {
            switch (src.Count)
            {
            case 0:
                return;

            case 1:
            //if we have only one item to process,
            //we should execute it in the current thread, without using RTP threads
                ExecuteSingleBatchSynchronously(src, action, description);
                return;
            }

            var            now = DateTime.UtcNow;
            CountdownEvent lastEvent;
            var            itemsCount = 0;

            var batch = new BatchStatistics
            {
                Total     = src.Count,
                Completed = 0
            };

            if (_concurrentEvents.TryDequeue(out lastEvent) == false)
            {
                lastEvent = new CountdownEvent(src.Count);
            }
            else
            {
                lastEvent.Reset(src.Count);
            }

            var exceptions = new ConcurrentSet <Exception>();

            for (; itemsCount < src.Count && _ct.IsCancellationRequested == false; itemsCount++)
            {
                var copy       = itemsCount;
                var threadTask = new ThreadTask
                {
                    Action = () =>
                    {
                        try
                        {
                            action(src[copy]);
                        }
                        catch (Exception e)
                        {
                            exceptions.Add(e);
                            throw;
                        }
                        finally
                        {
                            lastEvent.Signal();
                        }
                    },
                    Description = new OperationDescription
                    {
                        Type      = OperationDescription.OperationType.Atomic,
                        PlainText = description,
                        From      = itemsCount + 1,
                        To        = itemsCount + 1,
                        Total     = src.Count
                    },
                    BatchStats = batch,
                    EarlyBreak = allowPartialBatchResumption,
                    QueuedAt   = now,
                    DoneEvent  = lastEvent
                };

                _tasks.Add(threadTask);
            }

            if (allowPartialBatchResumption == false)
            {
                WaitForBatchToCompletion(lastEvent);
            }
            else
            {
                WaitForBatchAllowingPartialBatchResumption(lastEvent, batch, completedMultiplier, freeThreadsMultiplier, maxWaitMultiplier);
            }

            switch (exceptions.Count)
            {
            case 0:
                return;

            case 1:
                ExceptionDispatchInfo.Capture(exceptions.First()).Throw();
                break;

            default:
                throw new AggregateException(exceptions);
            }
        }
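
A usage sketch for the per-item overload above, again assuming an executer instance; with allowPartialBatchResumption the call may return once enough items have completed, leaving stragglers to finish in the background:

 // hypothetical call site
 executer.ExecuteBatch(documents,
                       doc => IndexDocument(doc),        // hypothetical per-item work
                       description: "indexing documents",
                       allowPartialBatchResumption: true);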
Example #55
        private int RunCore(TextWriter consoleOutput, ErrorLogger errorLogger, CancellationToken cancellationToken)
        {
            Debug.Assert(!Arguments.IsScriptRunner);

            cancellationToken.ThrowIfCancellationRequested();

            if (Arguments.DisplayLogo)
            {
                PrintLogo(consoleOutput);
            }

            if (Arguments.DisplayHelp)
            {
                PrintHelp(consoleOutput);
                return Succeeded;
            }

            if (ReportErrors(Arguments.Errors, consoleOutput, errorLogger))
            {
                return Failed;
            }

            var touchedFilesLogger = (Arguments.TouchedFilesPath != null) ? new TouchedFileLogger() : null;

            Compilation compilation = CreateCompilation(consoleOutput, touchedFilesLogger, errorLogger);
            if (compilation == null)
            {
                return Failed;
            }


            var diagnostics = new List<DiagnosticInfo>();
            var analyzers = ResolveAnalyzersFromArguments(diagnostics, MessageProvider, touchedFilesLogger);
            var additionalTextFiles = ResolveAdditionalFilesFromArguments(diagnostics, MessageProvider, touchedFilesLogger);
            if (ReportErrors(diagnostics, consoleOutput, errorLogger))
            {
                return Failed;
            }

            cancellationToken.ThrowIfCancellationRequested();

            CancellationTokenSource analyzerCts = null;
            AnalyzerManager analyzerManager = null;
            AnalyzerDriver analyzerDriver = null;
            try
            {
                Func<ImmutableArray<Diagnostic>> getAnalyzerDiagnostics = null;
                ConcurrentSet<Diagnostic> analyzerExceptionDiagnostics = null;
                if (!analyzers.IsDefaultOrEmpty)
                {
                    analyzerCts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
                    analyzerManager = new AnalyzerManager();
                    analyzerExceptionDiagnostics = new ConcurrentSet<Diagnostic>();
                    Action<Diagnostic> addExceptionDiagnostic = diagnostic => analyzerExceptionDiagnostics.Add(diagnostic);
                    var analyzerOptions = new AnalyzerOptions(ImmutableArray<AdditionalText>.CastUp(additionalTextFiles));
                    
                    analyzerDriver = AnalyzerDriver.CreateAndAttachToCompilation(compilation, analyzers, analyzerOptions, analyzerManager, addExceptionDiagnostic, Arguments.ReportAnalyzer, out compilation, analyzerCts.Token);
                    getAnalyzerDiagnostics = () => analyzerDriver.GetDiagnosticsAsync(compilation).Result;
                }

                // Print the diagnostics produced during the parsing stage and exit if there were any errors.
                if (ReportErrors(compilation.GetParseDiagnostics(), consoleOutput, errorLogger))
                {
                    return Failed;
                }

                if (ReportErrors(compilation.GetDeclarationDiagnostics(), consoleOutput, errorLogger))
                {
                    return Failed;
                }

                EmitResult emitResult;

                // NOTE: as native compiler does, we generate the documentation file
                // NOTE: 'in place', replacing the contents of the file if it exists

                string finalPeFilePath;
                string finalPdbFilePath;
                string finalXmlFilePath;

                Stream xmlStreamOpt = null;

                cancellationToken.ThrowIfCancellationRequested();

                finalXmlFilePath = Arguments.DocumentationPath;
                if (finalXmlFilePath != null)
                {
                    xmlStreamOpt = OpenFile(finalXmlFilePath, consoleOutput, PortableShim.FileMode.OpenOrCreate, PortableShim.FileAccess.Write, PortableShim.FileShare.ReadWriteBitwiseOrDelete);
                    if (xmlStreamOpt == null)
                    {
                        return Failed;
                    }

                    xmlStreamOpt.SetLength(0);
                }

                cancellationToken.ThrowIfCancellationRequested();

                IEnumerable<DiagnosticInfo> errors;
                using (var win32ResourceStreamOpt = GetWin32Resources(Arguments, compilation, out errors))
                using (xmlStreamOpt)
                {
                    if (ReportErrors(errors, consoleOutput, errorLogger))
                    {
                        return Failed;
                    }

                    cancellationToken.ThrowIfCancellationRequested();

                    string outputName = GetOutputFileName(compilation, cancellationToken);

                    finalPeFilePath = Path.Combine(Arguments.OutputDirectory, outputName);
                    finalPdbFilePath = Arguments.PdbPath ?? Path.ChangeExtension(finalPeFilePath, ".pdb");

                    // NOTE: Unlike the PDB path, the XML doc path is not embedded in the assembly, so we don't need to pass it to emit.
                    var emitOptions = Arguments.EmitOptions.
                        WithOutputNameOverride(outputName).
                        WithPdbFilePath(finalPdbFilePath);

                    using (var peStreamProvider = new CompilerEmitStreamProvider(this, finalPeFilePath))
                    using (var pdbStreamProviderOpt = Arguments.EmitPdb ? new CompilerEmitStreamProvider(this, finalPdbFilePath) : null)
                    {
                        emitResult = compilation.Emit(
                            peStreamProvider,
                            pdbStreamProviderOpt,
                            (xmlStreamOpt != null) ? new Compilation.SimpleEmitStreamProvider(xmlStreamOpt) : null,
                            (win32ResourceStreamOpt != null) ? new Compilation.SimpleEmitStreamProvider(win32ResourceStreamOpt) : null,
                            Arguments.ManifestResources,
                            emitOptions,
                            debugEntryPoint: null,
                            getHostDiagnostics: getAnalyzerDiagnostics,
                            cancellationToken: cancellationToken);

                        if (emitResult.Success && touchedFilesLogger != null)
                        {
                            if (pdbStreamProviderOpt != null)
                            {
                                touchedFilesLogger.AddWritten(finalPdbFilePath);
                            }

                            touchedFilesLogger.AddWritten(finalPeFilePath);
                        }
                    }
                }

                GenerateSqmData(Arguments.CompilationOptions, emitResult.Diagnostics);

                if (ReportErrors(emitResult.Diagnostics, consoleOutput, errorLogger))
                {
                    return Failed;
                }

                cancellationToken.ThrowIfCancellationRequested();

                if (analyzerExceptionDiagnostics != null && ReportErrors(analyzerExceptionDiagnostics, consoleOutput, errorLogger))
                {
                    return Failed;
                }

                bool errorsReadingAdditionalFiles = false;
                foreach (var additionalFile in additionalTextFiles)
                {
                    if (ReportErrors(additionalFile.Diagnostics, consoleOutput, errorLogger))
                    {
                        errorsReadingAdditionalFiles = true;
                    }
                }

                if (errorsReadingAdditionalFiles)
                {
                    return Failed;
                }

                cancellationToken.ThrowIfCancellationRequested();

                if (Arguments.TouchedFilesPath != null)
                {
                    Debug.Assert(touchedFilesLogger != null);

                    if (finalXmlFilePath != null)
                    {
                        touchedFilesLogger.AddWritten(finalXmlFilePath);
                    }

                    var readStream = OpenFile(Arguments.TouchedFilesPath + ".read", consoleOutput, mode: PortableShim.FileMode.OpenOrCreate);
                    if (readStream == null)
                    {
                        return Failed;
                    }

                    using (var writer = new StreamWriter(readStream))
                    {
                        touchedFilesLogger.WriteReadPaths(writer);
                    }

                    var writtenStream = OpenFile(Arguments.TouchedFilesPath + ".write", consoleOutput, mode: PortableShim.FileMode.OpenOrCreate);
                    if (writtenStream == null)
                    {
                        return Failed;
                    }

                    using (var writer = new StreamWriter(writtenStream))
                    {
                        touchedFilesLogger.WriteWrittenPaths(writer);
                    }
                }
            }
            finally
            {
                // At this point analyzers are already complete in which case this is a no-op.  Or they are 
                // still running because the compilation failed before all of the compilation events were 
                // raised.  In the latter case the driver, and all its associated state, will be waiting around 
                // for events that are never coming.  Cancel now and let the clean up process begin.
                if (analyzerCts != null)
                {
                    analyzerCts.Cancel();

                    if (analyzerManager != null)
                    {
                        // Clear cached analyzer descriptors and unregister exception handlers hooked up to the LocalizableString fields of the associated descriptors.
                        analyzerManager.ClearAnalyzerState(analyzers);
                    }

                    if (Arguments.ReportAnalyzer && analyzerDriver != null && compilation != null)
                    {
                        ReportAnalyzerExecutionTime(consoleOutput, analyzerDriver, Culture, compilation.Options.ConcurrentBuild);
                    }
                }
            }

            return Succeeded;
        }
Example #56
 public GetItemsToReduceParams(int index, HashSet <string> reduceKeys, int level, bool loadData, ConcurrentSet <object> itemsToDelete)
 {
     Index         = index;
     Level         = level;
     LoadData      = loadData;
     ItemsToDelete = itemsToDelete;
     ReduceKeys    = reduceKeys;
 }
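
A construction sketch mirroring how Example #58 below builds the same parameter object once per reduce level; the concrete values here are illustrative:

 var reduceParams = new GetItemsToReduceParams(index, new HashSet <string>(keysToReduce),
                                               level: 0, loadData: true,
                                               itemsToDelete: new ConcurrentSet <object>());
 reduceParams.Take = 512; // batch size, normally taken from configuration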
Example #57
        private async Task DetermineAllSymbolsCoreAsync(ISymbol symbol, ConcurrentSet<ISymbol> result)
        {
            this.cancellationToken.ThrowIfCancellationRequested();

            var searchSymbol = MapToAppropriateSymbol(symbol);

            // 2) Try to map this back to source symbol if this was a metadata symbol.
            searchSymbol = await SymbolFinder.FindSourceDefinitionAsync(searchSymbol, solution, cancellationToken).ConfigureAwait(false) ?? searchSymbol;

            if (searchSymbol != null && result.Add(searchSymbol))
            {
                this.progress.OnDefinitionFound(searchSymbol);

                this.foundReferences.GetOrAdd(searchSymbol, createSymbolLocations);

                // get project to search
                var projects = GetProjectScope();

                this.cancellationToken.ThrowIfCancellationRequested();

                List<Task> finderTasks = new List<Task>();
                foreach (var f in finders)
                {
                    finderTasks.Add(Task.Run(async () =>
                    {
                        var symbols = await f.DetermineCascadedSymbolsAsync(searchSymbol, solution, projects, cancellationToken).ConfigureAwait(false) ?? SpecializedCollections.EmptyEnumerable<ISymbol>();

                        this.cancellationToken.ThrowIfCancellationRequested();

                        List<Task> symbolTasks = new List<Task>();
                        foreach (var child in symbols)
                        {
                            symbolTasks.Add(Task.Run(() => DetermineAllSymbolsCoreAsync(child, result), this.cancellationToken));
                        }

                        await Task.WhenAll(symbolTasks).ConfigureAwait(false);
                    }, this.cancellationToken));
                }

                await Task.WhenAll(finderTasks).ConfigureAwait(false);
            }
        }
Example #58
        private ReducingPerformanceStats MultiStepReduce(IndexToWorkOn index, string[] keysToReduce, AbstractViewGenerator viewGenerator, ConcurrentSet <object> itemsToDelete)
        {
            var needToMoveToMultiStep = new HashSet <string>();

            transactionalStorage.Batch(actions =>
            {
                foreach (var localReduceKey in keysToReduce)
                {
                    var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexId, localReduceKey);

                    if (lastPerformedReduceType != ReduceType.MultiStep)
                    {
                        needToMoveToMultiStep.Add(localReduceKey);
                    }

                    if (lastPerformedReduceType != ReduceType.SingleStep)
                    {
                        continue;
                    }
                    // we exceeded the limit of items to reduce in single step
                    // now we need to schedule reductions at level 0 for all map results with given reduce key
                    var mappedItems = actions.MapReduce.GetMappedBuckets(index.IndexId, localReduceKey).ToList();
                    foreach (var result in mappedItems.Select(x => new ReduceKeyAndBucket(x, localReduceKey)))
                    {
                        actions.MapReduce.ScheduleReductions(index.IndexId, 0, result);
                    }
                }
            });

            var reducePerformance = new ReducingPerformanceStats(ReduceType.MultiStep);

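            // reduce runs through levels 0, 1 and 2; level 2 holds the final reduce
            // results, so nothing is scheduled beyond it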
            for (int i = 0; i < 3; i++)
            {
                var level = i;

                var reduceLevelStats = new ReduceLevelPeformanceStats()
                {
                    Level   = level,
                    Started = SystemTime.UtcNow,
                };

                var reduceParams = new GetItemsToReduceParams(
                    index.IndexId,
                    keysToReduce,
                    level,
                    true,
                    itemsToDelete);

                var gettingItemsToReduceDuration = new Stopwatch();
                var scheduleReductionsDuration   = new Stopwatch();
                var removeReduceResultsDuration  = new Stopwatch();
                var storageCommitDuration        = new Stopwatch();

                bool retry = true;
                while (retry && reduceParams.ReduceKeys.Count > 0)
                {
                    var reduceBatchAutoThrottlerId = Guid.NewGuid();
                    try
                    {
                        transactionalStorage.Batch(actions =>
                        {
                            context.CancellationToken.ThrowIfCancellationRequested();

                            actions.BeforeStorageCommit += storageCommitDuration.Start;
                            actions.AfterStorageCommit  += storageCommitDuration.Stop;

                            var batchTimeWatcher = Stopwatch.StartNew();

                            reduceParams.Take = context.CurrentNumberOfItemsToReduceInSingleBatch;

                            List <MappedResultInfo> persistedResults;
                            using (StopwatchScope.For(gettingItemsToReduceDuration))
                            {
                                persistedResults = actions.MapReduce.GetItemsToReduce(reduceParams).ToList();
                            }

                            if (persistedResults.Count == 0)
                            {
                                retry = false;
                                return;
                            }

                            var count = persistedResults.Count;
                            var size  = persistedResults.Sum(x => x.Size);
                            autoTuner.CurrentlyUsedBatchSizesInBytes.GetOrAdd(reduceBatchAutoThrottlerId, size);

                            if (Log.IsDebugEnabled)
                            {
                                if (persistedResults.Count > 0)
                                {
                                    Log.Debug(() => string.Format("Found {0} results for keys [{1}] for index {2} at level {3} in {4}",
                                                                  persistedResults.Count,
                                                                  string.Join(", ", persistedResults.Select(x => x.ReduceKey).Distinct()),
                                                                  index.IndexId, level, batchTimeWatcher.Elapsed));
                                }
                                else
                                {
                                    Log.Debug("No reduce keys found for {0}", index.IndexId);
                                }
                            }

                            context.CancellationToken.ThrowIfCancellationRequested();

                            var requiredReduceNextTime = persistedResults.Select(x => new ReduceKeyAndBucket(x.Bucket, x.ReduceKey))
                                                         .OrderBy(x => x.Bucket)
                                                         .Distinct()
                                                         .ToArray();

                            using (StopwatchScope.For(removeReduceResultsDuration))
                            {
                                foreach (var mappedResultInfo in requiredReduceNextTime)
                                {
                                    actions.MapReduce.RemoveReduceResults(index.IndexId, level + 1, mappedResultInfo.ReduceKey,
                                                                          mappedResultInfo.Bucket);
                                }
                            }

                            if (level != 2)
                            {
                                var reduceKeysAndBuckets = requiredReduceNextTime
                                                           .Select(x => new ReduceKeyAndBucket(x.Bucket / 1024, x.ReduceKey))
                                                           .Distinct()
                                                           .ToArray();

                                using (StopwatchScope.For(scheduleReductionsDuration))
                                {
                                    foreach (var reduceKeysAndBucket in reduceKeysAndBuckets)
                                    {
                                        actions.MapReduce.ScheduleReductions(index.IndexId, level + 1, reduceKeysAndBucket);
                                    }
                                }
                            }

                            var results = persistedResults
                                          .Where(x => x.Data != null)
                                          .GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data))
                                          .ToArray();
                            var reduceKeys = new HashSet <string>(persistedResults.Select(x => x.ReduceKey),
                                                                  StringComparer.InvariantCultureIgnoreCase);

                            context.MetricsCounters.ReducedPerSecond.Mark(results.Length);

                            context.CancellationToken.ThrowIfCancellationRequested();
                            var reduceTimeWatcher = Stopwatch.StartNew();

                            var performance = context.IndexStorage.Reduce(index.IndexId, viewGenerator, results, level, context, actions, reduceKeys, persistedResults.Count);

                            reduceLevelStats.Add(performance);

                            var batchDuration = batchTimeWatcher.Elapsed;
                            Log.Debug("Indexed {0} reduce keys in {1} with {2} results for index {3} in {4} on level {5}", reduceKeys.Count, batchDuration,
                                      results.Length, index.IndexId, reduceTimeWatcher.Elapsed, level);

                            autoTuner.AutoThrottleBatchSize(count, size, batchDuration);
                        });
                    }
                    finally
                    {
                        long _;
                        autoTuner.CurrentlyUsedBatchSizesInBytes.TryRemove(reduceBatchAutoThrottlerId, out _);
                    }
                }

                reduceLevelStats.Completed = SystemTime.UtcNow;
                reduceLevelStats.Duration  = reduceLevelStats.Completed - reduceLevelStats.Started;

                reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_GetItemsToReduce, gettingItemsToReduceDuration.ElapsedMilliseconds));
                reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_ScheduleReductions, scheduleReductionsDuration.ElapsedMilliseconds));
                reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_RemoveReduceResults, removeReduceResultsDuration.ElapsedMilliseconds));
                reduceLevelStats.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));

                reducePerformance.LevelStats.Add(reduceLevelStats);
            }

            foreach (var reduceKey in needToMoveToMultiStep)
            {
                string localReduceKey = reduceKey;
                transactionalStorage.Batch(actions =>
                                           actions.MapReduce.UpdatePerformedReduceType(index.IndexId, localReduceKey,
                                                                                       ReduceType.MultiStep));
            }

            return(reducePerformance);
        }
Example #59
		private void MultiStepReduce(IndexToWorkOn index, string[] keysToReduce, AbstractViewGenerator viewGenerator, ConcurrentSet<object> itemsToDelete)
		{
			var needToMoveToMultiStep = new HashSet<string>();
			transactionalStorage.Batch(actions =>
			{
				foreach (var localReduceKey in keysToReduce)
				{
					var lastPerformedReduceType = actions.MapReduce.GetLastPerformedReduceType(index.IndexName, localReduceKey);

					if (lastPerformedReduceType != ReduceType.MultiStep)
						needToMoveToMultiStep.Add(localReduceKey);

					if (lastPerformedReduceType != ReduceType.SingleStep)
						continue;
					// we exceeded the limit of items to reduce in single step
					// now we need to schedule reductions at level 0 for all map results with given reduce key
					var mappedItems = actions.MapReduce.GetMappedBuckets(index.IndexName, localReduceKey).ToList();
					foreach (var result in mappedItems.Select(x => new ReduceKeyAndBucket(x, localReduceKey)))
					{
						actions.MapReduce.ScheduleReductions(index.IndexName, 0, result);
					}
				}
			});

			for (int i = 0; i < 3; i++)
			{
				var level = i;

				var reduceParams = new GetItemsToReduceParams(
					index.IndexName,
					keysToReduce,
					level,
					true,
					itemsToDelete);

				bool retry = true;
				while (retry && reduceParams.ReduceKeys.Count > 0)
				{
					var reduceBatchAutoThrottlerId = Guid.NewGuid();
					try
					{
						transactionalStorage.Batch(actions =>
						{
							context.CancellationToken.ThrowIfCancellationRequested();

							var batchTimeWatcher = Stopwatch.StartNew();

							reduceParams.Take = context.CurrentNumberOfItemsToReduceInSingleBatch;
							var persistedResults = actions.MapReduce.GetItemsToReduce(reduceParams).ToList();
							if (persistedResults.Count == 0)
							{
								retry = false;
								return;
							}

							var count = persistedResults.Count;
							var size = persistedResults.Sum(x => x.Size);
							autoTuner.CurrentlyUsedBatchSizes.GetOrAdd(reduceBatchAutoThrottlerId, size);

							if (Log.IsDebugEnabled)
							{
								if (persistedResults.Count > 0)
									Log.Debug(() => string.Format("Found {0} results for keys [{1}] for index {2} at level {3} in {4}",
										persistedResults.Count,
										string.Join(", ", persistedResults.Select(x => x.ReduceKey).Distinct()),
										index.IndexName, level, batchTimeWatcher.Elapsed));
								else
									Log.Debug("No reduce keys found for {0}", index.IndexName);
							}

							context.CancellationToken.ThrowIfCancellationRequested();

							var requiredReduceNextTime = persistedResults.Select(x => new ReduceKeyAndBucket(x.Bucket, x.ReduceKey))
								.OrderBy(x => x.Bucket)
								.Distinct()
								.ToArray();
							foreach (var mappedResultInfo in requiredReduceNextTime)
							{
								actions.MapReduce.RemoveReduceResults(index.IndexName, level + 1, mappedResultInfo.ReduceKey,
									mappedResultInfo.Bucket);
							}

							if (level != 2)
							{
								var reduceKeysAndBuckets = requiredReduceNextTime
									.Select(x => new ReduceKeyAndBucket(x.Bucket / 1024, x.ReduceKey))
									.Distinct()
									.ToArray();
								foreach (var reduceKeysAndBucket in reduceKeysAndBuckets)
								{
									actions.MapReduce.ScheduleReductions(index.IndexName, level + 1, reduceKeysAndBucket);
								}
							}

							var results = persistedResults
								.Where(x => x.Data != null)
								.GroupBy(x => x.Bucket, x => JsonToExpando.Convert(x.Data))
								.ToArray();
							var reduceKeys = new HashSet<string>(persistedResults.Select(x => x.ReduceKey),
								StringComparer.InvariantCultureIgnoreCase);
							context.PerformanceCounters.ReducedPerSecond.IncrementBy(results.Length);

							context.CancellationToken.ThrowIfCancellationRequested();
							var reduceTimeWatcher = Stopwatch.StartNew();

							context.IndexStorage.Reduce(index.IndexName, viewGenerator, results, level, context, actions, reduceKeys, persistedResults.Count);

							var batchDuration = batchTimeWatcher.Elapsed;
							Log.Debug("Indexed {0} reduce keys in {1} with {2} results for index {3} in {4} on level {5}", reduceKeys.Count, batchDuration,
								results.Length, index.IndexName, reduceTimeWatcher.Elapsed, level);

							autoTuner.AutoThrottleBatchSize(count, size, batchDuration);
						});
					}
					finally
					{
						long _;
						autoTuner.CurrentlyUsedBatchSizes.TryRemove(reduceBatchAutoThrottlerId, out _);
					}
				}
			}

			foreach (var reduceKey in needToMoveToMultiStep)
			{
				string localReduceKey = reduceKey;
				transactionalStorage.Batch(actions =>
										   actions.MapReduce.UpdatePerformedReduceType(index.IndexName, localReduceKey,
																					   ReduceType.MultiStep));
			}
		}
Example #60
        protected ReducingPerformanceStats[] HandleReduceForIndex(IndexToWorkOn indexToWorkOn)
        {
            var viewGenerator = context.IndexDefinitionStorage.GetViewGenerator(indexToWorkOn.IndexId);

            if (viewGenerator == null)
            {
                return(null);
            }

            bool operationCanceled = false;
            var  itemsToDelete     = new ConcurrentSet <object>();

            IList <ReduceTypePerKey> mappedResultsInfo = null;

            transactionalStorage.Batch(actions =>
            {
                mappedResultsInfo = actions.MapReduce.GetReduceTypesPerKeys(indexToWorkOn.IndexId,
                                                                            context.CurrentNumberOfItemsToReduceInSingleBatch,
                                                                            context.NumberOfItemsToExecuteReduceInSingleStep).ToList();
            });

            var singleStepReduceKeys = mappedResultsInfo.Where(x => x.OperationTypeToPerform == ReduceType.SingleStep).Select(x => x.ReduceKey).ToArray();
            var multiStepsReduceKeys = mappedResultsInfo.Where(x => x.OperationTypeToPerform == ReduceType.MultiStep).Select(x => x.ReduceKey).ToArray();

            currentlyProcessedIndexes.TryAdd(indexToWorkOn.IndexId, indexToWorkOn.Index);

            var performanceStats = new List <ReducingPerformanceStats>();

            try
            {
                if (singleStepReduceKeys.Length > 0)
                {
                    Log.Debug("SingleStep reduce for keys: {0}", singleStepReduceKeys.Select(x => x + ","));
                    var singleStepStats = SingleStepReduce(indexToWorkOn, singleStepReduceKeys, viewGenerator, itemsToDelete);

                    performanceStats.Add(singleStepStats);
                }

                if (multiStepsReduceKeys.Length > 0)
                {
                    Log.Debug("MultiStep reduce for keys: {0}", multiStepsReduceKeys.Select(x => x + ","));
                    var multiStepStats = MultiStepReduce(indexToWorkOn, multiStepsReduceKeys, viewGenerator, itemsToDelete);

                    performanceStats.Add(multiStepStats);
                }
            }
            catch (OperationCanceledException)
            {
                operationCanceled = true;
            }
            finally
            {
                var postReducingOperations = new ReduceLevelPeformanceStats
                {
                    Level   = -1,
                    Started = SystemTime.UtcNow
                };

                if (operationCanceled == false)
                {
                    var deletingScheduledReductionsDuration = new Stopwatch();
                    var storageCommitDuration = new Stopwatch();

                    // whatever we succeeded in indexing or not, we have to update this
                    // because otherwise we keep trying to re-index failed mapped results
                    transactionalStorage.Batch(actions =>
                    {
                        actions.BeforeStorageCommit += storageCommitDuration.Start;
                        actions.AfterStorageCommit  += storageCommitDuration.Stop;

                        ScheduledReductionInfo latest;

                        using (StopwatchScope.For(deletingScheduledReductionsDuration))
                        {
                            latest = actions.MapReduce.DeleteScheduledReduction(itemsToDelete);
                        }

                        if (latest == null)
                        {
                            return;
                        }
                        actions.Indexing.UpdateLastReduced(indexToWorkOn.Index.indexId, latest.Etag, latest.Timestamp);
                    });

                    postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.Reduce_DeleteScheduledReductions, deletingScheduledReductionsDuration.ElapsedMilliseconds));
                    postReducingOperations.Operations.Add(PerformanceStats.From(IndexingOperation.StorageCommit, storageCommitDuration.ElapsedMilliseconds));
                }

                postReducingOperations.Completed = SystemTime.UtcNow;
                postReducingOperations.Duration  = postReducingOperations.Completed - postReducingOperations.Started;

                performanceStats.Add(new ReducingPerformanceStats(ReduceType.None)
                {
                    LevelStats = new List <ReduceLevelPeformanceStats> {
                        postReducingOperations
                    }
                });

                Index _;
                currentlyProcessedIndexes.TryRemove(indexToWorkOn.IndexId, out _);
            }

            return(performanceStats.ToArray());
        }