Example #1
        public async Task IncreaseConcurrency(bool useChannelBasedImpl)
        {
            int count = 0;

            var waitForFirstTwoItems = new TaskCompletionSource <object>();
            // Event that will hold first two workers.
            var mre         = new ManualResetEventSlim(false);
            var actionBlock = ActionBlockSlim.Create <int>(
                2,
                n =>
            {
                var currentCount = Interlocked.Increment(ref count);
                if (currentCount == 2)
                {
                    // Notify the test that 2 items are processed.
                    waitForFirstTwoItems.SetResult(null);
                }

                if (currentCount <= 2)
                {
                    // This is the first or the second thread that should be blocked before we increase the number of threads.
                    mre.Wait(TimeSpan.FromSeconds(100));
                }

                Thread.Sleep(1);
            },
                useChannelBasedImpl: useChannelBasedImpl);

            // Schedule work
            actionBlock.Post(1);
            actionBlock.Post(2);

            await waitForFirstTwoItems.Task;

            // The first 2 threads should be blocked in the callback in the action block,
            // but the count should be incremented
            Assert.Equal(2, count);

            var task = actionBlock.CompletionAsync();

            // The task should not be completed yet!
            Assert.NotEqual(TaskStatus.RanToCompletion, task.Status);

            // This will cause another thread to spawn
            actionBlock.IncreaseConcurrencyTo(3);

            // Add more work
            actionBlock.Post(3);

            actionBlock.Complete();

            // Release the first 2 threads
            mre.Set();

            // Waiting for completion
            await task;

            // The new thread should run and increment the count
            Assert.Equal(3, count);
        }
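The test above exercises the full ActionBlockSlim lifecycle: create a block with an initial degree of parallelism, post work, raise the concurrency with IncreaseConcurrencyTo, then call Complete and await CompletionAsync. Below is a minimal sketch of that flow, using only the members shown in these examples; the processing delegate is a placeholder, not taken from the source.
        public async Task LifecycleSketch()
        {
            // Minimal sketch of the ActionBlockSlim lifecycle exercised by the test above.
            var block = ActionBlockSlim.Create<int>(
                2,                              // initial degree of parallelism
                n => { /* process n */ });      // placeholder processing delegate

            block.Post(1);
            block.Post(2);

            block.IncreaseConcurrencyTo(4);     // optionally add workers later

            block.Complete();                   // stop accepting new items
            await block.CompletionAsync();      // wait for all posted items to finish
        }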
Example #2
        public void CompletionTaskIsDoneWhenCompletedWith0ConcurrencyIsCalled(bool useChannelBasedImpl)
        {
            var actionBlock = ActionBlockSlim.Create <int>(0, n => { }, useChannelBasedImpl: useChannelBasedImpl);
            var task        = actionBlock.CompletionAsync();

            Assert.Equal(TaskStatus.RanToCompletion, task.Status);
        }
Example #3
 /// <summary>
 /// Construct a WorkerAnalyzer.
 /// </summary>
 public ConcurrentPipProcessor(PackedExecutionExporter exporter)
 {
     m_exporter        = exporter;
     m_processingBlock = ActionBlockSlim.Create <ProcessFingerprintComputationEventData>(
         degreeOfParallelism: -1, // default
         processItemAction: ProcessFingerprintComputedCore);
 }
Example #4
            /// <summary>
            /// Construct a WorkerAnalyzer.
            /// </summary>
            public WorkerAnalyzer(PackedExecutionExporter exporter, string name, WorkerId workerId)
            {
                m_exporter = exporter;
                Name       = name;
                m_workerId = workerId;

                m_processingBlock = new ActionBlockSlim <ProcessFingerprintComputationEventData>(1, ProcessFingerprintComputedCore);
            }
Example #5
        /// <summary>
        /// Needs to take the flushing lock. Called only from <see cref="FlushAsync(OperationContext)"/>. Refactored
        /// out for clarity.
        /// </summary>
        private void PerformFlush(OperationContext context)
        {
            _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCacheFlushes].Increment();

            using (_database.Counters[ContentLocationDatabaseCounters.CacheFlush].Start())
            {
                using (_exchangeLock.AcquireWriteLock())
                {
                    _flushingCache = _cache;
                    _cache         = new ConcurrentBigMap <ShortHash, ContentLocationEntry>();
                }

                if (_configuration.FlushSingleTransaction)
                {
                    _database.PersistBatch(context, _flushingCache);
                }
                else
                {
                    var actionBlock = new ActionBlockSlim <KeyValuePair <ShortHash, ContentLocationEntry> >(_configuration.FlushDegreeOfParallelism, kv =>
                    {
                        // Do not lock on GetLock here, as it will cause a deadlock with
                        // SetMachineExistenceAndUpdateDatabase. It is also correct not to take any locks, because
                        // no Store can happen while a flush is running.
                        _database.Persist(context, kv.Key, kv.Value);
                    });

                    foreach (var kv in _flushingCache)
                    {
                        actionBlock.Post(kv);
                    }

                    actionBlock.Complete();
                    actionBlock.CompletionAsync().Wait();
                }

                _database.Counters[ContentLocationDatabaseCounters.NumberOfPersistedEntries].Add(_flushingCache.Count);

                if (_configuration.FlushPreservePercentInMemory > 0)
                {
                    int targetFlushingSize = (int)(_flushingCache.Count * _configuration.FlushPreservePercentInMemory);
                    int removeAmount       = _flushingCache.Count - targetFlushingSize;

                    foreach (var key in _flushingCache.Keys.Take(removeAmount))
                    {
                        _flushingCache.RemoveKey(key);
                    }
                }
                else
                {
                    using (_exchangeLock.AcquireWriteLock())
                    {
                        _flushingCache = new ConcurrentBigMap <ShortHash, ContentLocationEntry>();
                    }
                }

                _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCompletedCacheFlushes].Increment();
            }
        }
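The flush above follows a swap-then-drain pattern: the live map is detached under a write lock, and each entry of the detached snapshot is persisted in parallel by posting it to an ActionBlockSlim and then blocking on CompletionAsync. Below is a stripped-down sketch of just the drain step; the Persist call is a hypothetical stand-in for _database.Persist.
        private void DrainSnapshotSketch(
            IEnumerable<KeyValuePair<ShortHash, ContentLocationEntry>> snapshot,
            int degreeOfParallelism)
        {
            var actionBlock = new ActionBlockSlim<KeyValuePair<ShortHash, ContentLocationEntry>>(
                degreeOfParallelism,
                kv => Persist(kv.Key, kv.Value)); // hypothetical stand-in for _database.Persist

            foreach (var kv in snapshot)
            {
                actionBlock.Post(kv);
            }

            actionBlock.Complete();
            actionBlock.CompletionAsync().Wait(); // synchronous wait, as in the example above
        }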
Example #6
        public async Task CompletionTaskIsDoneWhenCompletedIsCalled()
        {
            var actionBlock = new ActionBlockSlim <int>(42, n => { });
            var task        = actionBlock.CompletionAsync();

            Assert.NotEqual(TaskStatus.RanToCompletion, task.Status);

            actionBlock.Complete();
            await task;

            Assert.Equal(TaskStatus.RanToCompletion, task.Status);
        }
Example #7
        public async Task CompletionTaskIsDoneWhenCompletedIsCalled(bool useChannelBasedImpl)
        {
            var actionBlock = ActionBlockSlim.Create <int>(42, n => { }, useChannelBasedImpl: useChannelBasedImpl);
            var task        = actionBlock.CompletionAsync();

            Assert.NotEqual(TaskStatus.RanToCompletion, task.Status);

            actionBlock.Complete();
            await task;

            Assert.Equal(TaskStatus.RanToCompletion, task.Status);
        }
Example #8
            public FingerprintStoreEventProcessor(int degreeOfParallelism)
            {
                Contract.Requires(degreeOfParallelism > 0);

                // To ensure that events coming from the same pip are processed in the order in which they arrived,
                // we use N action blocks, each with a degree of parallelism of 1. Thus, we potentially get parallelism across
                // different pips, but maintain sequential processing within a single pip.
                // This is necessary in particular when the runtime cache miss analyzer is enabled, because
                // we use cache-lookup fingerprints to perform cache miss analysis for strong-fingerprint misses.
                m_actionBlocks = new ActionBlockSlim <Action> [degreeOfParallelism];
                for (int i = 0; i < degreeOfParallelism; ++i)
                {
                    m_actionBlocks[i] = new ActionBlockSlim <Action>(1, a => a());
                }
            }
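The constructor above only builds the per-slot blocks; the excerpt does not show how events are routed to them. One plausible routing step that preserves the per-pip ordering described in the comment is to pick a slot by pip id; the Post method below is a hypothetical sketch, not taken from the source.
            // Hypothetical routing sketch: the same pip id always maps to the same
            // single-threaded block, so events for one pip stay in order while
            // different pips are processed in parallel.
            public void Post(uint pipId, Action processEvent)
            {
                int slot = (int)(pipId % (uint)m_actionBlocks.Length);
                m_actionBlocks[slot].Post(processEvent);
            }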
Example #9
        public async Task AllTheElementsAreFinished()
        {
            int count       = 0;
            var actionBlock = new ActionBlockSlim <int>(
                42,
                n => { Interlocked.Increment(ref count); Thread.Sleep(1); });

            var task = actionBlock.CompletionAsync();

            actionBlock.Post(1);
            actionBlock.Post(2);

            actionBlock.Complete();
            await task;

            Assert.Equal(2, count);
        }
Example #10
            public FingerprintStoreEventProcessor(int degreeOfParallelism)
            {
                Contract.Requires(degreeOfParallelism > 0);

                // To ensure that events coming from the same pip are processed in the order in which they arrived,
                // we use N action blocks, each with a degree of parallelism of 1. Thus, we potentially get parallelism across
                // different pips, but maintain sequential processing within a single pip.
                // This is necessary in particular when the runtime cache miss analyzer is enabled, because
                // we use cache-lookup fingerprints to perform cache miss analysis for strong-fingerprint misses.
                m_actionBlocks = new ActionBlockSlim <Action> [degreeOfParallelism];
                for (int i = 0; i < degreeOfParallelism; ++i)
                {
                    // Initially each action block is created with a parallelism of 0, so it starts in a paused state.
                    // Processing begins once the RuntimeCacheMissAnalyzer becomes available, i.e., when its initialization task completes.
                    m_actionBlocks[i] = ActionBlockSlim.Create <Action>(0, a => a());
                }
            }
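Because each block starts with a degree of parallelism of 0, nothing is processed until the concurrency is raised. Below is a plausible un-pausing step to run once the analyzer's initialization task has completed, using the IncreaseConcurrencyTo member shown in Example #1; the method name is hypothetical.
            // Hypothetical sketch: begin processing once initialization has finished.
            private void StartProcessing()
            {
                foreach (var block in m_actionBlocks)
                {
                    block.IncreaseConcurrencyTo(1); // a single worker keeps per-pip ordering
                }
            }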
Example #11
        public async Task AllTheElementsAreProcessedBy2Thread()
        {
            const int maxCount    = 420;
            int       count       = 0;
            var       actionBlock = new ActionBlockSlim <int>(
                2,
                n => { Interlocked.Increment(ref count); });

            for (int i = 0; i < maxCount; i++)
            {
                actionBlock.Post(i);
            }

            actionBlock.Complete();
            await actionBlock.CompletionAsync();

            Assert.Equal(maxCount, count);
        }
Example #12
        public async Task AllTheElementsAreFinished(bool useChannelBasedImpl)
        {
            int count       = 0;
            var actionBlock = ActionBlockSlim.Create <int>(
                42,
                n => { Interlocked.Increment(ref count); Thread.Sleep(1); },
                useChannelBasedImpl: useChannelBasedImpl);

            var task = actionBlock.CompletionAsync();

            actionBlock.Post(1);
            actionBlock.Post(2);

            actionBlock.Complete();
            await task;

            Assert.Equal(2, count);
        }
Example #13
        public async Task ExceptionIsThrownWhenTheBlockIsFull(bool useChannelBasedImpl)
        {
            var tcs         = new TaskCompletionSource <object>();
            var actionBlock = ActionBlockSlim.CreateWithAsyncAction <int>(1, n => tcs.Task, capacityLimit: 1, useChannelBasedImpl);

            actionBlock.Post(42);
            Assert.Equal(1, actionBlock.PendingWorkItems);

            Assert.Throws <ActionBlockIsFullException>(() => actionBlock.Post(1));
            Assert.Equal(1, actionBlock.PendingWorkItems);

            tcs.SetResult(null);
            await WaitUntilAsync(() => actionBlock.PendingWorkItems == 0, TimeSpan.FromMilliseconds(1)).WithTimeoutAsync(TimeSpan.FromSeconds(5));

            Assert.Equal(0, actionBlock.PendingWorkItems);

            // This should not fail!
            actionBlock.Post(1);
        }
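When capacityLimit is set, Post throws ActionBlockIsFullException rather than queueing without bound. A caller that prefers back-pressure over failure could catch the exception and retry; the helper below is a hypothetical sketch built only on the members exercised in the test above.
        // Hypothetical back-pressure sketch: retry a Post until the block has room again.
        private static async Task PostWithRetryAsync<T>(ActionBlockSlim<T> block, T item)
        {
            while (true)
            {
                try
                {
                    block.Post(item);
                    return;
                }
                catch (ActionBlockIsFullException)
                {
                    await Task.Delay(TimeSpan.FromMilliseconds(1)); // give pending items time to drain
                }
            }
        }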
Example #14
        public async Task AllTheElementsAreProcessedBy2Thread(bool useChannelBasedImpl)
        {
            const int maxCount    = 420;
            int       count       = 0;
            var       actionBlock = ActionBlockSlim.Create <int>(
                2,
                n => { Interlocked.Increment(ref count); },
                useChannelBasedImpl: useChannelBasedImpl);

            for (int i = 0; i < maxCount; i++)
            {
                actionBlock.Post(i);
            }

            actionBlock.Complete();
            await actionBlock.CompletionAsync();

            Assert.Equal(maxCount, count);
        }
Example #15
        /// <nodoc />
        public PipTableSerializationScheduler(int maxDegreeOfParallelism, bool debug, Action <Pip, MutablePipState> serializer)
        {
            Contract.Requires(maxDegreeOfParallelism >= -1);
            Contract.Requires(maxDegreeOfParallelism > 0 || debug);

            m_serializer = serializer;

            maxDegreeOfParallelism = maxDegreeOfParallelism == -1 ? Environment.ProcessorCount : maxDegreeOfParallelism;
            m_nonSerializedDebug   = maxDegreeOfParallelism == 0 && debug;

            m_serializationQueue = new ActionBlockSlim <QueueItem>(
                m_nonSerializedDebug ? 1 : maxDegreeOfParallelism,
                item => ProcessQueueItem(item));

            if (m_nonSerializedDebug)
            {
                m_serializationQueue.Complete();    // Don't allow more changes
                m_nonSerializablePips = new List <Pip>();
            }
        }
Example #16
        /// <summary>
        /// Creates an instance of <see cref="FrontEndEngineImplementation"/>.
        /// </summary>
        public FrontEndEngineImplementation(
            LoggingContext loggingContext,
            PathTable pathTable,
            IConfiguration configuration,
            IStartupConfiguration startupConfiguration,
            MountsTable mountsTable,
            InputTracker inputTracker,
            SnapshotCollector snapshotCollector,
            DirectoryTranslator directoryTranslator,
            Func <FileContentTable> getFileContentTable,
            int timerUpdatePeriod,
            bool isPartialReuse,
            IEnumerable <IFrontEnd> registeredFrontends)
        {
            Contract.Requires(loggingContext != null);
            Contract.Requires(pathTable != null);
            Contract.Requires(configuration != null);
            Contract.Requires(startupConfiguration != null);
            Contract.Requires(mountsTable != null);
            Contract.Requires(inputTracker != null);
            Contract.Requires(getFileContentTable != null);
            Contract.Requires(registeredFrontends != null);

            m_loggingContext                  = loggingContext;
            PathTable                         = pathTable;
            m_mountsTable                     = mountsTable;
            m_inputTracker                    = inputTracker;
            m_getFileContentTable             = getFileContentTable;
            m_isPartialReuse                  = isPartialReuse;
            m_frontendsEnvironmentRestriction = registeredFrontends.ToDictionary(frontend => frontend.Name, frontEnd => frontEnd.ShouldRestrictBuildParameters);
            m_snapshotCollector               = snapshotCollector;
            GetTimerUpdatePeriod              = timerUpdatePeriod;
            Layout = configuration.Layout;

            if (ShouldUseSpecCache(configuration))
            {
                m_specCache = new FileCombiner(
                    loggingContext,
                    Path.Combine(configuration.Layout.EngineCacheDirectory.ToString(PathTable), SpecCacheFileName),
                    FileCombiner.FileCombinerUsage.SpecFileCache,
                    configuration.FrontEnd.LogStatistics);
            }

            m_allBuildParameters = new ConcurrentDictionary <string, TrackedValue>(StringComparer.OrdinalIgnoreCase);

            foreach (var kvp in PopulateFromEnvironmentAndApplyOverrides(loggingContext, startupConfiguration.Properties).ToDictionary())
            {
                m_allBuildParameters.TryAdd(kvp.Key, new TrackedValue(kvp.Value, false));
            }

            m_localDiskContentStore = new LocalDiskContentStore(
                loggingContext,
                PathTable,
                m_getFileContentTable(),
                m_inputTracker.FileChangeTracker,
                directoryTranslator,
                vfsCasRoot: configuration.Cache.VfsCasRoot);

            m_localDiskContentStoreConcurrencyLimiter = new ActionBlockSlim <MaterializeFileRequest>(
                Environment.ProcessorCount,
                request =>
            {
                var requestCompletionSource = request.CompletionSource;

                try
                {
                    var materializeResult = m_localDiskContentStore.TryMaterializeAsync(
                        request.Cache,
                        request.FileRealizationModes,
                        request.Path,
                        request.ContentHash,
                        trackPath: request.TrackPath,
                        recordPathInFileContentTable: request.RecordPathInFileContentTable).GetAwaiter().GetResult();

                    requestCompletionSource.SetResult(materializeResult);
                }
                catch (TaskCanceledException)
                {
                    requestCompletionSource.SetCanceled();
                }
                catch (Exception e)
                {
                    requestCompletionSource.SetException(e);
                }
            });
        }
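The constructor above uses ActionBlockSlim as a concurrency limiter: each posted request carries its own TaskCompletionSource, which the worker delegate completes (or cancels, or faults) once materialization finishes. The caller side is not shown in the excerpt; below is a hypothetical sketch of how such a request might be awaited, reusing only the request fields read by the delegate above.
        // Hypothetical caller-side sketch: enqueue a request and await the result that the
        // worker delegate above sets on the request's CompletionSource.
        private async Task MaterializeThroughLimiterAsync(MaterializeFileRequest request)
        {
            m_localDiskContentStoreConcurrencyLimiter.Post(request);
            var materializeResult = await request.CompletionSource.Task; // completed, canceled, or faulted by the worker
            // ... use materializeResult ...
        }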
Example #17
        /// <summary>
        /// Needs to take the flushing lock. Called only from <see cref="FlushAsync(OperationContext)"/>. Refactored
        /// out for clarity.
        /// </summary>
        private CounterCollection <FlushableCacheCounters> PerformFlush(OperationContext context)
        {
            _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCacheFlushes].Increment();
            var counters = new CounterCollection <FlushableCacheCounters>();

            using (_database.Counters[ContentLocationDatabaseCounters.CacheFlush].Start())
            {
                using (_exchangeLock.AcquireWriteLock())
                {
                    _flushingCache = _cache;
                    _cache         = new ConcurrentBigMap <ShortHash, ContentLocationEntry>();
                }

                using (counters[FlushableCacheCounters.FlushingTime].Start())
                {
                    var threads = _configuration.FlushDegreeOfParallelism;
                    if (threads <= 0)
                    {
                        threads = Environment.ProcessorCount;
                    }

                    if (_configuration.FlushSingleTransaction)
                    {
                        if (_configuration.FlushDegreeOfParallelism == 1 || _flushingCache.Count <= _configuration.FlushTransactionSize)
                        {
                            _database.PersistBatch(context, _flushingCache);
                        }
                        else
                        {
                            var actionBlock = new ActionBlockSlim <IEnumerable <KeyValuePair <ShortHash, ContentLocationEntry> > >(threads, kvs =>
                            {
                                _database.PersistBatch(context, kvs);
                            });

                            foreach (var kvs in _flushingCache.GetPages(_configuration.FlushTransactionSize))
                            {
                                actionBlock.Post(kvs);
                            }

                            actionBlock.Complete();
                            actionBlock.CompletionAsync().Wait();
                        }
                    }
                    else
                    {
                        var actionBlock = new ActionBlockSlim <KeyValuePair <ShortHash, ContentLocationEntry> >(threads, kv =>
                        {
                            // Do not lock on GetLock here, as it will cause a deadlock with
                            // SetMachineExistenceAndUpdateDatabase. It is also correct not to take any locks, because
                            // no Store can happen while a flush is running.
                            _database.Persist(context, kv.Key, kv.Value);
                        });

                        foreach (var kv in _flushingCache)
                        {
                            actionBlock.Post(kv);
                        }

                        actionBlock.Complete();
                        actionBlock.CompletionAsync().Wait();
                    }
                }

                counters[FlushableCacheCounters.Persisted].Add(_flushingCache.Count);

                _database.Counters[ContentLocationDatabaseCounters.NumberOfPersistedEntries].Add(_flushingCache.Count);

                using (counters[FlushableCacheCounters.CleanupTime].Start())
                {
                    if (_configuration.FlushPreservePercentInMemory > 0)
                    {
                        int targetFlushingSize = (int)(_flushingCache.Count * _configuration.FlushPreservePercentInMemory);
                        int removeAmount       = _flushingCache.Count - targetFlushingSize;

                        foreach (var key in _flushingCache.Keys.Take(removeAmount))
                        {
                            _flushingCache.RemoveKey(key);
                        }
                    }
                    else
                    {
                        using (_exchangeLock.AcquireWriteLock())
                        {
                            _flushingCache = new ConcurrentBigMap <ShortHash, ContentLocationEntry>();
                        }
                    }
                }

                counters[FlushableCacheCounters.Leftover].Add(_flushingCache.Count);
                _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCompletedCacheFlushes].Increment();
            }

            counters[FlushableCacheCounters.Growth].Add(_cache.Count);
            return counters;
        }
Example #18
 public WorkerAnalyzer(FileConsumptionAnalyzer analyzer, string name)
 {
     m_analyzer        = analyzer;
     Name              = name;
     m_processingBlock = new ActionBlockSlim <ProcessFingerprintComputationEventData>(1, ProcessFingerprintComputedCore);
 }