public async Task IncreaseConcurrency()
{
    int count = 0;

    var waitForFirstTwoItems = TaskSourceSlim.Create<object>();
    // Event that will hold first two workers.
    var mre = new ManualResetEventSlim(false);
    var actionBlock = new ActionBlockSlim<int>(
        2,
        n =>
        {
            var currentCount = Interlocked.Increment(ref count);
            if (currentCount == 2)
            {
                // Notify the test that 2 items are processed.
                waitForFirstTwoItems.SetResult(null);
            }

            if (currentCount <= 2)
            {
                // This is the first or the second thread that should be blocked before we increase the number of threads.
                mre.Wait(TimeSpan.FromSeconds(100));
            }

            Thread.Sleep(1);
        });

    // Schedule work
    actionBlock.Post(1);
    actionBlock.Post(2);

    await waitForFirstTwoItems.Task;

    // The first 2 threads should be blocked in the callback in the action block,
    // but the count should be incremented
    Assert.Equal(2, count);

    var task = actionBlock.CompletionAsync();

    // The task should not be completed yet!
    Assert.NotEqual(TaskStatus.RanToCompletion, task.Status);

    // This will cause another thread to spawn
    actionBlock.IncreaseConcurrencyTo(3);

    // Add more work
    actionBlock.Post(3);

    actionBlock.Complete();

    // Release the first 2 threads
    mre.Set();

    // Waiting for completion
    await task;

    // The new thread should run and increment the count
    Assert.Equal(3, count);
}
public async Task AllTheElementsAreFinished()
{
    int count = 0;
    var actionBlock = new ActionBlockSlim<int>(
        42,
        n =>
        {
            Interlocked.Increment(ref count);
            Thread.Sleep(1);
        });

    var task = actionBlock.CompletionAsync();
    actionBlock.Post(1);
    actionBlock.Post(2);

    actionBlock.Complete();
    await task;

    Assert.Equal(2, count);
}
/// <summary>
/// Needs to take the flushing lock. Called only from <see cref="FlushAsync(OperationContext)"/>. Refactored
/// out for clarity.
/// </summary>
private void PerformFlush(OperationContext context)
{
    _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCacheFlushes].Increment();

    using (_database.Counters[ContentLocationDatabaseCounters.CacheFlush].Start())
    {
        using (_exchangeLock.AcquireWriteLock())
        {
            _flushingCache = _cache;
            _cache = new ConcurrentBigMap<ShortHash, ContentLocationEntry>();
        }

        if (_configuration.FlushSingleTransaction)
        {
            _database.PersistBatch(context, _flushingCache);
        }
        else
        {
            var actionBlock = new ActionBlockSlim<KeyValuePair<ShortHash, ContentLocationEntry>>(
                _configuration.FlushDegreeOfParallelism,
                kv =>
                {
                    // Do not lock on GetLock here, as it will cause a deadlock with
                    // SetMachineExistenceAndUpdateDatabase. It is correct not to take any locks here either,
                    // because no Store can happen while the flush is running.
                    _database.Persist(context, kv.Key, kv.Value);
                });

            foreach (var kv in _flushingCache)
            {
                actionBlock.Post(kv);
            }

            actionBlock.Complete();
            actionBlock.CompletionAsync().Wait();
        }

        _database.Counters[ContentLocationDatabaseCounters.NumberOfPersistedEntries].Add(_flushingCache.Count);

        if (_configuration.FlushPreservePercentInMemory > 0)
        {
            int targetFlushingSize = (int)(_flushingCache.Count * _configuration.FlushPreservePercentInMemory);
            int removeAmount = _flushingCache.Count - targetFlushingSize;

            foreach (var key in _flushingCache.Keys.Take(removeAmount))
            {
                _flushingCache.RemoveKey(key);
            }
        }
        else
        {
            using (_exchangeLock.AcquireWriteLock())
            {
                _flushingCache = new ConcurrentBigMap<ShortHash, ContentLocationEntry>();
            }
        }

        _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCompletedCacheFlushes].Increment();
    }
}
/// <summary>
/// Adds <paramref name="pip"/> and <paramref name="mutable"/> for serialization.
/// </summary>
public void ScheduleSerialization(Pip pip, MutablePipState mutable)
{
    if (!m_nonSerializedDebug)
    {
        m_serializationQueue.Post(new QueueItem { Pip = pip, Mutable = mutable });
    }
    else
    {
        // Test code! The pips are not serialized. Instead we store them in a list to prevent GC from collecting them.
        lock (m_nonSerializablePips)
        {
            m_nonSerializablePips.Add(pip);
        }
    }
}
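The m_serializationQueue field posted to above is not shown. A minimal sketch of how such a queue could be set up, assuming a single background serializer and a hypothetical SerializePip helper (neither is confirmed by the snippet):

// Hypothetical sketch only: the queue used by ScheduleSerialization could be an
// ActionBlockSlim<QueueItem> whose callback performs the actual serialization.
// The constructor name, degree of parallelism (1), and SerializePip helper are assumptions.
private readonly ActionBlockSlim<QueueItem> m_serializationQueue;

public PipSerializationScheduler()
{
    m_serializationQueue = new ActionBlockSlim<QueueItem>(
        1,
        item => SerializePip(item.Pip, item.Mutable));
}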
public override void ProcessFingerprintComputed(ProcessFingerprintComputationEventData data)
{
    if (data.Kind == FingerprintComputationKind.Execution)
    {
        m_block.Post(() =>
        {
            using (var wrapper = m_pool.GetInstance())
            {
                wrapper.Instance.ProcessFingerprintComputed(data);

                if ((Interlocked.Increment(ref ProcessedPips) % 10) == 0)
                {
                    Console.WriteLine($"Processing {ProcessedPips}");
                }
            }
        });
    }
}
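Because a delegate is posted here, m_block is presumably an ActionBlockSlim<Action> whose callback simply invokes whatever was posted. A minimal sketch, assuming the field declaration and the degree of parallelism (neither is shown in the snippet):

// Hypothetical sketch: a block that runs the delegates posted above on background threads.
// The parallelism value is an assumption; only the Post(() => ...) call is from the snippet.
private readonly ActionBlockSlim<Action> m_block =
    new ActionBlockSlim<Action>(Environment.ProcessorCount, action => action());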
public async Task AllTheElementsAreProcessedBy2Thread()
{
    const int maxCount = 420;
    int count = 0;
    var actionBlock = new ActionBlockSlim<int>(
        2,
        n =>
        {
            Interlocked.Increment(ref count);
        });

    for (int i = 0; i < maxCount; i++)
    {
        actionBlock.Post(i);
    }

    actionBlock.Complete();
    await actionBlock.CompletionAsync();

    Assert.Equal(maxCount, count);
}
/// <summary>
/// This function is only used by NuGet package download, which will soon be replaced by no longer using 1-phase cache lookup.
/// </summary>
public override Task<Possible<ContentMaterializationResult, Failure>> TryMaterializeContentAsync(
    IArtifactContentCache cache,
    FileRealizationMode fileRealizationModes,
    AbsolutePath path,
    ContentHash contentHash,
    bool trackPath = true,
    bool recordPathInFileContentTable = true)
{
    var request = new MaterializeFileRequest(
        cache,
        fileRealizationModes,
        path,
        contentHash,
        trackPath,
        recordPathInFileContentTable);

    m_localDiskContentStoreConcurrencyLimiter.Post(request);

    return request.CompletionSource.Task;
}
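The method above only enqueues the request and hands back its CompletionSource.Task; the consumer side of m_localDiskContentStoreConcurrencyLimiter is not shown. A minimal sketch of how the limiter could complete those requests, assuming the request exposes its constructor arguments as properties and that TryMaterializeCore is a hypothetical helper:

// Hypothetical sketch (typically set up in the owning store's constructor): the limiter
// processes requests with bounded parallelism and completes each caller's task.
// The property names and the TryMaterializeCore helper are assumptions.
m_localDiskContentStoreConcurrencyLimiter = new ActionBlockSlim<MaterializeFileRequest>(
    degreeOfParallelism,
    request =>
    {
        try
        {
            var result = TryMaterializeCore(
                request.Cache,
                request.FileRealizationModes,
                request.Path,
                request.ContentHash,
                request.TrackPath,
                request.RecordPathInFileContentTable);
            request.CompletionSource.TrySetResult(result);
        }
        catch (Exception e)
        {
            request.CompletionSource.TrySetException(e);
        }
    });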
/// <summary>
/// Needs to take the flushing lock. Called only from <see cref="FlushAsync(OperationContext)"/>. Refactored
/// out for clarity.
/// </summary>
private CounterCollection<FlushableCacheCounters> PerformFlush(OperationContext context)
{
    _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCacheFlushes].Increment();
    var counters = new CounterCollection<FlushableCacheCounters>();

    using (_database.Counters[ContentLocationDatabaseCounters.CacheFlush].Start())
    {
        using (_exchangeLock.AcquireWriteLock())
        {
            _flushingCache = _cache;
            _cache = new ConcurrentBigMap<ShortHash, ContentLocationEntry>();
        }

        using (counters[FlushableCacheCounters.FlushingTime].Start())
        {
            var threads = _configuration.FlushDegreeOfParallelism;
            if (threads <= 0)
            {
                threads = Environment.ProcessorCount;
            }

            if (_configuration.FlushSingleTransaction)
            {
                if (_configuration.FlushDegreeOfParallelism == 1 || _flushingCache.Count <= _configuration.FlushTransactionSize)
                {
                    _database.PersistBatch(context, _flushingCache);
                }
                else
                {
                    var actionBlock = new ActionBlockSlim<IEnumerable<KeyValuePair<ShortHash, ContentLocationEntry>>>(
                        threads,
                        kvs =>
                        {
                            _database.PersistBatch(context, kvs);
                        });

                    foreach (var kvs in _flushingCache.GetPages(_configuration.FlushTransactionSize))
                    {
                        actionBlock.Post(kvs);
                    }

                    actionBlock.Complete();
                    actionBlock.CompletionAsync().Wait();
                }
            }
            else
            {
                var actionBlock = new ActionBlockSlim<KeyValuePair<ShortHash, ContentLocationEntry>>(
                    threads,
                    kv =>
                    {
                        // Do not lock on GetLock here, as it will cause a deadlock with
                        // SetMachineExistenceAndUpdateDatabase. It is correct not to take any locks here either,
                        // because no Store can happen while the flush is running.
                        _database.Persist(context, kv.Key, kv.Value);
                    });

                foreach (var kv in _flushingCache)
                {
                    actionBlock.Post(kv);
                }

                actionBlock.Complete();
                actionBlock.CompletionAsync().Wait();
            }
        }

        counters[FlushableCacheCounters.Persisted].Add(_flushingCache.Count);
        _database.Counters[ContentLocationDatabaseCounters.NumberOfPersistedEntries].Add(_flushingCache.Count);

        using (counters[FlushableCacheCounters.CleanupTime].Start())
        {
            if (_configuration.FlushPreservePercentInMemory > 0)
            {
                int targetFlushingSize = (int)(_flushingCache.Count * _configuration.FlushPreservePercentInMemory);
                int removeAmount = _flushingCache.Count - targetFlushingSize;

                foreach (var key in _flushingCache.Keys.Take(removeAmount))
                {
                    _flushingCache.RemoveKey(key);
                }
            }
            else
            {
                using (_exchangeLock.AcquireWriteLock())
                {
                    _flushingCache = new ConcurrentBigMap<ShortHash, ContentLocationEntry>();
                }
            }
        }

        counters[FlushableCacheCounters.Leftover].Add(_flushingCache.Count);
        _database.Counters[ContentLocationDatabaseCounters.TotalNumberOfCompletedCacheFlushes].Increment();
    }

    counters[FlushableCacheCounters.Growth].Add(_cache.Count);
    return counters;
}
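GetPages above splits the flushing cache into batches of FlushTransactionSize entries so that each batch is persisted as a single transaction. A minimal sketch of such a batching helper, written here as a generic extension method (hypothetical; not the actual ConcurrentBigMap implementation):

// Hypothetical sketch of a GetPages-style helper: yields the source in pages of at most
// pageSize items, so each page can be handed to PersistBatch as one transaction.
public static IEnumerable<IReadOnlyList<T>> GetPages<T>(this IEnumerable<T> source, int pageSize)
{
    var page = new List<T>(pageSize);
    foreach (var item in source)
    {
        page.Add(item);
        if (page.Count == pageSize)
        {
            yield return page;
            page = new List<T>(pageSize);
        }
    }

    if (page.Count > 0)
    {
        yield return page;
    }
}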
/// <summary>
/// Handles the incoming fingerprint computation (by queueing it for processing by a separate thread).
/// </summary>
public void ProcessFingerprintComputed(ProcessFingerprintComputationEventData data)
{
    m_processingBlock.Post(data);
}
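m_processingBlock is only posted to here. A minimal sketch of how it could be declared so that a separate thread drains the queue, assuming a single consumer and a hypothetical ProcessFingerprintComputedCore callback:

// Hypothetical sketch: a single-threaded block that drains queued fingerprint computations.
// The constructor name and the ProcessFingerprintComputedCore callback are placeholders.
private readonly ActionBlockSlim<ProcessFingerprintComputationEventData> m_processingBlock;

public FingerprintEventAnalyzer()
{
    m_processingBlock = new ActionBlockSlim<ProcessFingerprintComputationEventData>(
        1,
        data => ProcessFingerprintComputedCore(data));
}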