Example #1
        /// <summary>
        /// Task that completes when no more serialization tasks are running in the background.
        /// </summary>
        public Task WhenDone()
        {
            Contract.Requires(!IsDisposed);

            m_serializationQueue.Complete();
            return m_serializationQueue.CompletionAsync();
        }
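WhenDone() above wraps ActionBlockSlim's two-step shutdown: Complete() stops the queue from accepting new items, and CompletionAsync() returns a task that finishes once everything already posted has been processed. A minimal sketch of that lifecycle, using only the members these examples demonstrate (the element type and callback body are illustrative):

        // Sketch: the drain pattern that WhenDone() wraps.
        var queue = new ActionBlockSlim<string>(4, item => Console.WriteLine(item));

        queue.Post("work-1");
        queue.Post("work-2");

        queue.Complete();              // No further items are accepted.
        await queue.CompletionAsync(); // Completes once all posted items are processed.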
Example #2
        public async Task IncreaseConcurrency()
        {
            int count = 0;

            var waitForFirstTwoItems = TaskSourceSlim.Create<object>();
            // Event that will hold the first two workers.
            var mre         = new ManualResetEventSlim(false);
            var actionBlock = new ActionBlockSlim<int>(
                2,
                n =>
            {
                var currentCount = Interlocked.Increment(ref count);
                if (currentCount == 2)
                {
                    // Notify the test that 2 items are processed.
                    waitForFirstTwoItems.SetResult(null);
                }

                if (currentCount <= 2)
                {
                    // This is the first or the second thread that should be blocked before we increase the number of threads.
                    mre.Wait(TimeSpan.FromSeconds(100));
                }

                Thread.Sleep(1);
            });

            // Schedule work
            actionBlock.Post(1);
            actionBlock.Post(2);

            await waitForFirstTwoItems.Task;

            // The first 2 threads should be blocked in the callback in the action block,
            // but the count should be incremented
            Assert.Equal(2, count);

            var task = actionBlock.CompletionAsync();

            // The task should not be completed yet!
            Assert.NotEqual(TaskStatus.RanToCompletion, task.Status);

            // This will cause another thread to spawn
            actionBlock.IncreaseConcurrencyTo(3);

            // Add more work
            actionBlock.Post(3);

            actionBlock.Complete();

            // Release the first 2 threads
            mre.Set();

            // Waiting for completion
            await task;

            // The new thread should run and increment the count
            Assert.Equal(3, count);
        }
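TaskSourceSlim and ActionBlockSlim are BuildXL utilities. The gating shape of this test can be expressed with standard TPL types; a minimal sketch, assuming TaskCompletionSource in place of TaskSourceSlim (the names and the worker body are illustrative):

        // Sketch: park workers inside the callback until the test releases them.
        var firstItemSeen = new TaskCompletionSource<object>();
        var gate          = new ManualResetEventSlim(false);

        void Work(int n)
        {
            firstItemSeen.TrySetResult(null);     // Tell the test a worker arrived.
            gate.Wait(TimeSpan.FromSeconds(100)); // Block until the test calls gate.Set().
        }

        // ... post items to a block that runs Work, then:
        await firstItemSeen.Task; // A worker is now parked in the callback.
        gate.Set();               // Release it and let processing finish.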
Example #3
        /// <summary>
        /// Needs to take the flushing lock. Called only from <see cref="FlushAsync(OperationContext)"/>. Refactored
        /// out for clarity.
        /// </summary>
        private void PerformFlush(OperationContext context)
        {
            _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCacheFlushes].Increment();

            using (_database.Counters[ContentLocationDatabaseCounters.CacheFlush].Start())
            {
                using (_exchangeLock.AcquireWriteLock())
                {
                    _flushingCache = _cache;
                    _cache         = new ConcurrentBigMap<ShortHash, ContentLocationEntry>();
                }

                if (_configuration.FlushSingleTransaction)
                {
                    _database.PersistBatch(context, _flushingCache);
                }
                else
                {
                    var actionBlock = new ActionBlockSlim<KeyValuePair<ShortHash, ContentLocationEntry>>(_configuration.FlushDegreeOfParallelism, kv =>
                    {
                        // Do not lock on GetLock here, as it will cause a deadlock with
                        // SetMachineExistenceAndUpdateDatabase. It is also correct not to take any locks, because
                        // no Store can happen while a flush is running.
                        _database.Persist(context, kv.Key, kv.Value);
                    });

                    foreach (var kv in _flushingCache)
                    {
                        actionBlock.Post(kv);
                    }

                    actionBlock.Complete();
                    actionBlock.CompletionAsync().Wait();
                }

                _database.Counters[ContentLocationDatabaseCounters.NumberOfPersistedEntries].Add(_flushingCache.Count);

                if (_configuration.FlushPreservePercentInMemory > 0)
                {
                    int targetFlushingSize = (int)(_flushingCache.Count * _configuration.FlushPreservePercentInMemory);
                    int removeAmount       = _flushingCache.Count - targetFlushingSize;

                    foreach (var key in _flushingCache.Keys.Take(removeAmount))
                    {
                        _flushingCache.RemoveKey(key);
                    }
                }
                else
                {
                    using (_exchangeLock.AcquireWriteLock())
                    {
                        _flushingCache = new ConcurrentBigMap<ShortHash, ContentLocationEntry>();
                    }
                }

                _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCompletedCacheFlushes].Increment();
            }
        }
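The fan-out/drain shape here (post every entry, then Complete() and block on CompletionAsync()) is the same contract the standard TPL Dataflow ActionBlock offers. For comparison, a minimal sketch with System.Threading.Tasks.Dataflow; the item source and Persist call are stand-ins for _flushingCache and _database.Persist:

        using System.Threading.Tasks.Dataflow;

        var block = new ActionBlock<KeyValuePair<int, string>>(
            kv => Persist(kv.Key, kv.Value), // Stand-in for _database.Persist.
            new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = 8 });

        foreach (var kv in entries)          // Stand-in for _flushingCache.
        {
            block.Post(kv);
        }

        block.Complete();
        block.Completion.Wait();             // Same synchronous drain as CompletionAsync().Wait().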
Example #4
        public async Task CompletionTaskIsDoneWhenCompletedIsCalled()
        {
            var actionBlock = new ActionBlockSlim<int>(42, n => { });
            var task        = actionBlock.CompletionAsync();

            Assert.NotEqual(TaskStatus.RanToCompletion, task.Status);

            actionBlock.Complete();
            await task;

            Assert.Equal(TaskStatus.RanToCompletion, task.Status);
        }
Example #5
        public async Task AllTheElementsAreFinished()
        {
            int count       = 0;
            var actionBlock = new ActionBlockSlim<int>(
                42,
                n => { Interlocked.Increment(ref count); Thread.Sleep(1); });

            var task = actionBlock.CompletionAsync();

            actionBlock.Post(1);
            actionBlock.Post(2);

            actionBlock.Complete();
            await task;

            Assert.Equal(2, count);
        }
Example #6
        public async Task AllTheElementsAreProcessedBy2Thread()
        {
            const int maxCount    = 420;
            int       count       = 0;
            var       actionBlock = new ActionBlockSlim<int>(
                2,
                n => { Interlocked.Increment(ref count); });

            for (int i = 0; i < maxCount; i++)
            {
                actionBlock.Post(i);
            }

            actionBlock.Complete();
            await actionBlock.CompletionAsync();

            Assert.Equal(maxCount, count);
        }
Example #7
        /// <nodoc />
        public PipTableSerializationScheduler(int maxDegreeOfParallelism, bool debug, Action<Pip, MutablePipState> serializer)
        {
            Contract.Requires(maxDegreeOfParallelism >= -1);
            Contract.Requires(maxDegreeOfParallelism > 0 || debug);

            m_serializer = serializer;

            maxDegreeOfParallelism = maxDegreeOfParallelism == -1 ? Environment.ProcessorCount : maxDegreeOfParallelism;
            m_nonSerializedDebug   = maxDegreeOfParallelism == 0 && debug;

            m_serializationQueue = new ActionBlockSlim<QueueItem>(
                m_nonSerializedDebug ? 1 : maxDegreeOfParallelism,
                item => ProcessQueueItem(item));

            if (m_nonSerializedDebug)
            {
                m_serializationQueue.Complete();    // Don't allow more changes
                m_nonSerializablePips = new List<Pip>();
            }
        }
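Combining this constructor with the WhenDone() method from Example #1: a caller builds the scheduler, lets it queue serialization work in the background, and drains it at the end. A hedged usage sketch (the serializer body is illustrative, and posting happens through the class's scheduling methods, which these examples do not show):

        // Sketch: -1 means use Environment.ProcessorCount workers.
        var scheduler = new PipTableSerializationScheduler(
            maxDegreeOfParallelism: -1,
            debug: false,
            serializer: (pip, mutableState) => { /* illustrative body */ });

        // ... schedule pips for serialization (methods not shown here) ...

        await scheduler.WhenDone(); // Completes the queue and waits for background work.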
Example #8
        /// <summary>
        /// Needs to take the flushing lock. Called only from <see cref="FlushAsync(OperationContext)"/>. Refactored
        /// out for clarity.
        /// </summary>
        private CounterCollection<FlushableCacheCounters> PerformFlush(OperationContext context)
        {
            _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCacheFlushes].Increment();
            var counters = new CounterCollection<FlushableCacheCounters>();

            using (_database.Counters[ContentLocationDatabaseCounters.CacheFlush].Start())
            {
                using (_exchangeLock.AcquireWriteLock())
                {
                    _flushingCache = _cache;
                    _cache         = new ConcurrentBigMap<ShortHash, ContentLocationEntry>();
                }

                using (counters[FlushableCacheCounters.FlushingTime].Start())
                {
                    var threads = _configuration.FlushDegreeOfParallelism;
                    if (threads <= 0)
                    {
                        threads = Environment.ProcessorCount;
                    }

                    if (_configuration.FlushSingleTransaction)
                    {
                        if (_configuration.FlushDegreeOfParallelism == 1 || _flushingCache.Count <= _configuration.FlushTransactionSize)
                        {
                            _database.PersistBatch(context, _flushingCache);
                        }
                        else
                        {
                            var actionBlock = new ActionBlockSlim<IEnumerable<KeyValuePair<ShortHash, ContentLocationEntry>>>(threads, kvs =>
                            {
                                _database.PersistBatch(context, kvs);
                            });

                            foreach (var kvs in _flushingCache.GetPages(_configuration.FlushTransactionSize))
                            {
                                actionBlock.Post(kvs);
                            }

                            actionBlock.Complete();
                            actionBlock.CompletionAsync().Wait();
                        }
                    }
                    else
                    {
                        var actionBlock = new ActionBlockSlim<KeyValuePair<ShortHash, ContentLocationEntry>>(threads, kv =>
                        {
                            // Do not lock on GetLock here, as it will cause a deadlock with
                            // SetMachineExistenceAndUpdateDatabase. It is also correct not to take any locks, because
                            // no Store can happen while a flush is running.
                            _database.Persist(context, kv.Key, kv.Value);
                        });

                        foreach (var kv in _flushingCache)
                        {
                            actionBlock.Post(kv);
                        }

                        actionBlock.Complete();
                        actionBlock.CompletionAsync().Wait();
                    }
                }

                counters[FlushableCacheCounters.Persisted].Add(_flushingCache.Count);

                _database.Counters[ContentLocationDatabaseCounters.NumberOfPersistedEntries].Add(_flushingCache.Count);

                using (counters[FlushableCacheCounters.CleanupTime].Start())
                {
                    if (_configuration.FlushPreservePercentInMemory > 0)
                    {
                        int targetFlushingSize = (int)(_flushingCache.Count * _configuration.FlushPreservePercentInMemory);
                        int removeAmount       = _flushingCache.Count - targetFlushingSize;

                        foreach (var key in _flushingCache.Keys.Take(removeAmount))
                        {
                            _flushingCache.RemoveKey(key);
                        }
                    }
                    else
                    {
                        using (_exchangeLock.AcquireWriteLock())
                        {
                            _flushingCache = new ConcurrentBigMap<ShortHash, ContentLocationEntry>();
                        }
                    }
                }

                counters[FlushableCacheCounters.Leftover].Add(_flushingCache.Count);
                _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCompletedCacheFlushes].Increment();
            }

            counters[FlushableCacheCounters.Growth].Add(_cache.Count);
            return counters;
        }
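GetPages batches _flushingCache into transaction-sized chunks; its implementation is BuildXL's and is not shown here. A minimal sketch of what such a paging helper might look like, assuming only the behavior implied by its use above (pages of at most pageSize entries):

        // Sketch of a GetPages-style helper; assumed behavior, not BuildXL's actual code.
        static IEnumerable<List<T>> GetPages<T>(IEnumerable<T> source, int pageSize)
        {
            var page = new List<T>(pageSize);
            foreach (var item in source)
            {
                page.Add(item);
                if (page.Count == pageSize)
                {
                    yield return page;
                    page = new List<T>(pageSize);
                }
            }

            if (page.Count > 0)
            {
                yield return page; // Flush the final, partially filled page.
            }
        }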
Example #9
        public override int Analyze()
        {
            Console.WriteLine($"Analyzing");

            m_block.Complete();
            m_block.CompletionAsync().GetAwaiter().GetResult();

            Console.WriteLine($"Writing Graph");

            foreach (var pip in PipGraph.RetrieveAllPips())
            {
                var serializedPip = pip;
                var nodeId        = pip.PipId.ToNodeId();

                bool addEdges = true;
                if (pip.PipType == PipType.Process)
                {
                    var entry = GetEntry(pip.PipId);
                    serializedPip = entry.Process;
                    addEdges      = !entry.AddedEdges;
                }

                if (addEdges && AddEdgesForPips)
                {
                    using (var scope = m_mutableGraph.AcquireExclusiveIncomingEdgeScope(nodeId))
                    {
                        foreach (var edge in DataflowGraph.GetIncomingEdges(nodeId))
                        {
                            scope.AddEdge(edge.OtherNode, edge.IsLight);
                        }
                    }
                }

                serializedPip.ResetPipIdForTesting();
                m_pipTable.Add(nodeId.Value, serializedPip);
            }

            m_mutableGraph.Seal();

            CachedGraph.Serializer.SerializeToFileAsync(
                GraphCacheFile.DirectedGraph,
                m_mutableGraph.Serialize,
                Path.Combine(OutputFilePath, nameof(GraphCacheFile.DirectedGraph)))
            .GetAwaiter().GetResult();

            CachedGraph.Serializer.SerializeToFileAsync(
                GraphCacheFile.PipTable,
                w => m_pipTable.Serialize(w, Environment.ProcessorCount),
                Path.Combine(OutputFilePath, nameof(GraphCacheFile.PipTable)))
            .GetAwaiter().GetResult();

            CachedGraph.Serializer.SerializeToFileAsync(
                GraphCacheFile.PipGraphId,
                PipGraph.SerializeGraphId,
                Path.Combine(OutputFilePath, nameof(GraphCacheFile.PipGraphId)))
            .GetAwaiter().GetResult();

            Console.WriteLine($"Simulating [Reading]");
            var simulator = new BuildSimulatorAnalyzer(Input);

            simulator.Increment = SimulatorIncrement ?? simulator.Increment;
            simulator.ExecutionData.DataflowGraph = m_mutableGraph;

            simulator.OutputDirectory = OutputFilePath;
            simulator.ReadExecutionLog();

            Console.WriteLine($"Simulating [Analyzing]");
            simulator.Analyze();

            Console.WriteLine($"Blocking Dependency Analysis");

            DisplayTable<DepColumn> depTable = new DisplayTable<DepColumn>(" , ");

            foreach (var pipId in PipTable.Keys)
            {
                var pipType = PipTable.GetPipType(pipId);
                if (pipType == PipType.Process)
                {
                    var entry = GetEntry(pipId);
                    (PipId node, ulong cost) maxConsumedDependency = default;
                    (PipId node, ulong cost) maxDependency         = default;

                    foreach (var dep in entry.PipDependencies)
                    {
                        var cost = simulator.ExecutionData.BottomUpAggregateCosts[dep.Key.ToNodeId()];
                        if (!maxDependency.node.IsValid || cost > maxDependency.cost)
                        {
                            maxDependency = (dep.Key, cost);
                        }

                        if (dep.Value != null && dep.Value.HasFlag(ContentFlag.Consumed))
                        {
                            if (!maxConsumedDependency.node.IsValid || cost > maxConsumedDependency.cost)
                            {
                                maxConsumedDependency = (dep.Key, cost);
                            }
                        }
                    }

                    depTable.NextRow();
                    depTable.Set(DepColumn.Id, $"{entry.SpecFileName}-{entry.Identifier}");
                    depTable.Set(DepColumn.MaxConsumedDependency, ToString(maxConsumedDependency.node));
                    depTable.Set(DepColumn.MaxConsumedDependencyChainCost, maxConsumedDependency.cost.ToMinutes());
                    depTable.Set(DepColumn.MaxDependency, ToString(maxDependency.node));
                    depTable.Set(DepColumn.MaxDependencyChainCost, maxDependency.cost.ToMinutes());
                }
                else if (pipType == PipType.SealDirectory &&
                         !PipTable.GetSealDirectoryKind(pipId).IsSourceSeal() &&
                         !IsSourceOnlySeal(pipId))
                {
                    var seal  = (SealDirectory)GetPip(pipId);
                    var entry = GetEntry(seal.Directory);
                    (PipId node, ulong cost) maxDependency = default;

                    foreach (var dep in DataflowGraph.GetIncomingEdges(pipId.ToNodeId()))
                    {
                        var cost = simulator.ExecutionData.BottomUpAggregateCosts[dep.OtherNode];
                        if (!maxDependency.node.IsValid || cost > maxDependency.cost)
                        {
                            maxDependency = (dep.OtherNode.ToPipId(), cost);
                        }
                    }

                    depTable.NextRow();
                    depTable.Set(DepColumn.Id, $"{entry.SpecFileName}-{entry.Identifier} ({entry.FileCount} files)");
                    depTable.Set(DepColumn.MaxDependency, ToString(maxDependency.node));
                    depTable.Set(DepColumn.MaxDependencyChainCost, maxDependency.cost.ToMinutes());
                    depTable.Set(DepColumn.Directory, seal.DirectoryRoot.ToString(PathTable));
                }
            }

            using (var blockAnalysisWriter = new StreamWriter(Path.Combine(OutputFilePath, "blockAnalysis.txt")))
            {
                depTable.Write(blockAnalysisWriter);
            }

            m_writer.Dispose();

            Console.WriteLine($"Analyzing complete");

            return 0;
        }
Example #10
        /// <summary>
        /// Complete processing of this WorkerAnalyzer; block on any remaining concurrent work's completion.
        /// </summary>
        public void Complete()
        {
            m_processingBlock.Complete();
        }
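The summary promises a block on remaining work, but the body shown only moves the block to its completed state; in the other examples the wait is a separate step. A hedged sketch of how a caller might pair the two, assuming m_processingBlock is an ActionBlockSlim like those above:

        // Sketch: complete, then synchronously drain, matching the
        // Complete()/CompletionAsync() pairing used in the other examples.
        m_processingBlock.Complete();
        m_processingBlock.CompletionAsync().GetAwaiter().GetResult();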