public void AddRead(long count) { Interlocked.Add(ref this.readBytes, count); }
public void UnlockShared()
{
    Debug.Assert((word & kSharedLockMaskInWord) != 0, "Trying to S unlock an unlocked record");
    Interlocked.Add(ref word, -kSharedLockIncrement);
}
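// UnlockShared above decrements a reader count packed into a lock word. A minimal
// self-contained sketch of that scheme, assuming a hypothetical bit layout (the real
// kSharedLockIncrement / kSharedLockMaskInWord values live elsewhere in the source);
// this is an illustration of the technique, not the original type.
using System.Diagnostics;
using System.Threading;

class SharedLockWord
{
    // Hypothetical layout: bits 0..15 of the word hold the shared-reader count.
    const long kSharedLockIncrement = 1L;
    const long kSharedLockMaskInWord = 0xFFFF;
    long word;

    // Optimistically add a reader; back out if the count field overflowed.
    public bool TryLockShared()
    {
        long newWord = Interlocked.Add(ref word, kSharedLockIncrement);
        if ((newWord & kSharedLockMaskInWord) == 0) // count wrapped past 0xFFFF
        {
            Interlocked.Add(ref word, -kSharedLockIncrement); // restore the word
            return false;
        }
        return true;
    }

    public void UnlockShared()
    {
        Debug.Assert((word & kSharedLockMaskInWord) != 0, "Trying to S unlock an unlocked record");
        Interlocked.Add(ref word, -kSharedLockIncrement);
    }
}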
private async Task SendAndReceiveFromQueueAdapter(IQueueAdapterFactory adapterFactory, IProviderConfiguration config)
{
    IQueueAdapter adapter = await adapterFactory.CreateAdapter();
    IQueueAdapterCache cache = adapterFactory.GetQueueAdapterCache();

    // Create receiver per queue
    IStreamQueueMapper mapper = adapterFactory.GetStreamQueueMapper();
    Dictionary<QueueId, IQueueAdapterReceiver> receivers = mapper.GetAllQueues().ToDictionary(queueId => queueId, adapter.CreateReceiver);
    Dictionary<QueueId, IQueueCache> caches = mapper.GetAllQueues().ToDictionary(queueId => queueId, cache.CreateQueueCache);

    await Task.WhenAll(receivers.Values.Select(receiver => receiver.Initialize(TimeSpan.FromSeconds(5))));

    // Test using 2 streams
    Guid streamId1 = Guid.NewGuid();
    Guid streamId2 = Guid.NewGuid();

    int receivedBatches = 0;
    var streamsPerQueue = new ConcurrentDictionary<QueueId, HashSet<IStreamIdentity>>();

    // Reader tasks (at most 2 active queues because there are only two streams)
    var work = new List<Task>();
    foreach (KeyValuePair<QueueId, IQueueAdapterReceiver> receiverKvp in receivers)
    {
        QueueId queueId = receiverKvp.Key;
        var receiver = receiverKvp.Value;
        var qCache = caches[queueId];
        Task task = Task.Factory.StartNew(() =>
        {
            // receivedBatches is written with Interlocked.Add below but read plainly
            // here; tolerable for a test exit condition, though Volatile.Read would
            // make it airtight.
            while (receivedBatches < NumBatches)
            {
                var messages = receiver.GetQueueMessagesAsync(SQSStorage.MAX_NUMBER_OF_MESSAGE_TO_PEAK).Result.ToArray();
                if (!messages.Any())
                {
                    continue;
                }
                foreach (var message in messages.Cast<SQSBatchContainer>())
                {
                    streamsPerQueue.AddOrUpdate(queueId,
                        id => new HashSet<IStreamIdentity> { new StreamIdentity(message.StreamGuid, message.StreamGuid.ToString()) },
                        (id, set) =>
                        {
                            set.Add(new StreamIdentity(message.StreamGuid, message.StreamGuid.ToString()));
                            return set;
                        });
                    output.WriteLine("Queue {0} received message on stream {1}", queueId, message.StreamGuid);
                    Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents<int>().Count());    // half the events were ints
                    Assert.Equal(NumMessagesPerBatch / 2, message.GetEvents<string>().Count()); // half the events were strings
                }
                Interlocked.Add(ref receivedBatches, messages.Length);
                qCache.AddToCache(messages);
            }
        });
        work.Add(task);
    }

    // Send events
    List<object> events = CreateEvents(NumMessagesPerBatch);
    work.Add(Task.Factory.StartNew(() => Enumerable.Range(0, NumBatches)
        .Select(i => i % 2 == 0 ? streamId1 : streamId2)
        .ToList()
        .ForEach(streamId => adapter.QueueMessageBatchAsync(streamId, streamId.ToString(), events.Take(NumMessagesPerBatch).ToArray(), null, RequestContext.Export()).Wait())));

    await Task.WhenAll(work);

    // Make sure we got back everything we sent
    Assert.Equal(NumBatches, receivedBatches);

    // Check that all the events are in the cache and that we can enumerate through them
    StreamSequenceToken firstInCache = new EventSequenceTokenV2(0);
    foreach (KeyValuePair<QueueId, HashSet<IStreamIdentity>> kvp in streamsPerQueue)
    {
        var receiver = receivers[kvp.Key];
        var qCache = caches[kvp.Key];

        foreach (IStreamIdentity streamGuid in kvp.Value)
        {
            // Read all messages in cache for this stream
            IQueueCacheCursor cursor = qCache.GetCacheCursor(streamGuid, firstInCache);
            int messageCount = 0;
            StreamSequenceToken tenthInCache = null;
            StreamSequenceToken lastToken = firstInCache;
            while (cursor.MoveNext())
            {
                Exception ex;
                messageCount++;
                IBatchContainer batch = cursor.GetCurrent(out ex);
                output.WriteLine("Token: {0}", batch.SequenceToken);
                Assert.True(batch.SequenceToken.CompareTo(lastToken) >= 0, $"order check for event {messageCount}");
                lastToken = batch.SequenceToken;
                if (messageCount == 10)
                {
                    tenthInCache = batch.SequenceToken;
                }
            }
            output.WriteLine("On Queue {0} we received a total of {1} messages on stream {2}", kvp.Key, messageCount, streamGuid);
            Assert.Equal(NumBatches / 2, messageCount);
            Assert.NotNull(tenthInCache);

            // Read all messages from the 10th onward
            cursor = qCache.GetCacheCursor(streamGuid, tenthInCache);
            messageCount = 0;
            while (cursor.MoveNext())
            {
                messageCount++;
            }
            output.WriteLine("On Queue {0} we received a total of {1} messages on stream {2}", kvp.Key, messageCount, streamGuid);
            const int expected = NumBatches / 2 - 10 + 1; // everything except the first 10, but including the 10th itself (hence + 1)
            Assert.Equal(expected, messageCount);
        }
    }
}
private static long AddToLastKnownFreeAddressSpace(long addend) { return(Interlocked.Add(ref s_hiddenLastKnownFreeAddressSpace, addend)); }
public override bool Execute(ProgramOptions programOptions, JobConfiguration jobConfiguration)
{
    Stopwatch stopWatch = new Stopwatch();
    stopWatch.Start();

    StepTiming stepTimingFunction = new StepTiming();
    stepTimingFunction.JobFileName = programOptions.OutputJobFilePath;
    stepTimingFunction.StepName = jobConfiguration.Status.ToString();
    stepTimingFunction.StepID = (int)jobConfiguration.Status;
    stepTimingFunction.StartTime = DateTime.Now;
    stepTimingFunction.NumEntities = jobConfiguration.Target.Count;

    this.DisplayJobStepStartingStatus(jobConfiguration);

    FilePathMap = new FilePathMap(programOptions, jobConfiguration);

    try
    {
        if (this.ShouldExecute(programOptions, jobConfiguration) == false)
        {
            return true;
        }

        // Process each Controller once
        int i = 0;
        var controllers = jobConfiguration.Target.GroupBy(t => t.Controller);
        foreach (var controllerGroup in controllers)
        {
            Stopwatch stopWatchTarget = new Stopwatch();
            stopWatchTarget.Start();

            JobTarget jobTarget = controllerGroup.ToList()[0];

            StepTiming stepTimingTarget = new StepTiming();
            stepTimingTarget.Controller = jobTarget.Controller;
            stepTimingTarget.ApplicationName = jobTarget.Application;
            stepTimingTarget.ApplicationID = jobTarget.ApplicationID;
            stepTimingTarget.JobFileName = programOptions.OutputJobFilePath;
            stepTimingTarget.StepName = jobConfiguration.Status.ToString();
            stepTimingTarget.StepID = (int)jobConfiguration.Status;
            stepTimingTarget.StartTime = DateTime.Now;
            stepTimingTarget.NumEntities = 1;

            try
            {
                this.DisplayJobTargetStartingStatus(jobConfiguration, jobTarget, i + 1);

                #region All Dashboards

                // Set up controller access
                string allDashboardsJSON = String.Empty;
                using (ControllerApi controllerApi = new ControllerApi(jobTarget.Controller, jobTarget.UserName, AESEncryptionHelper.Decrypt(jobTarget.UserPassword)))
                {
                    controllerApi.PrivateApiLogin();

                    loggerConsole.Info("All Dashboards");

                    allDashboardsJSON = controllerApi.GetControllerDashboards();
                    if (allDashboardsJSON != String.Empty)
                    {
                        FileIOHelper.SaveFileToPath(allDashboardsJSON, FilePathMap.ControllerDashboards(jobTarget));
                    }
                }

                #endregion

                #region Dashboards

                if (allDashboardsJSON != String.Empty)
                {
                    JArray allDashboardsArray = JArray.Parse(allDashboardsJSON);

                    loggerConsole.Info("Dashboards ({0} entities)", allDashboardsArray.Count);

                    int numDashboards = 0;

                    var listOfDashboardsChunks = allDashboardsArray.BreakListIntoChunks(DASHBOARDS_EXTRACT_NUMBER_OF_ENTITIES_TO_PROCESS_PER_THREAD);

                    ParallelOptions parallelOptions = new ParallelOptions();
                    if (programOptions.ProcessSequentially == true)
                    {
                        parallelOptions.MaxDegreeOfParallelism = 1;
                    }
                    else
                    {
                        parallelOptions.MaxDegreeOfParallelism = DASHBOARDS_EXTRACT_NUMBER_OF_THREADS;
                    }

                    Parallel.ForEach<List<JToken>, int>(
                        listOfDashboardsChunks,
                        parallelOptions,
                        () => 0,
                        (listOfDashboardsChunk, loop, subtotal) =>
                        {
                            // Set up controller access
                            using (ControllerApi controllerApiParallel = new ControllerApi(jobTarget.Controller, jobTarget.UserName, AESEncryptionHelper.Decrypt(jobTarget.UserPassword)))
                            {
                                controllerApiParallel.PrivateApiLogin();

                                foreach (JObject dashboardObject in listOfDashboardsChunk)
                                {
                                    if (File.Exists(FilePathMap.ControllerDashboard(jobTarget, dashboardObject["name"].ToString(), (long)dashboardObject["id"])) == false)
                                    {
                                        string dashboardJSON = controllerApiParallel.GetControllerDashboard((long)dashboardObject["id"]);
                                        if (dashboardJSON != String.Empty)
                                        {
                                            FileIOHelper.SaveFileToPath(dashboardJSON, FilePathMap.ControllerDashboard(jobTarget, dashboardObject["name"].ToString(), (long)dashboardObject["id"]));
                                        }
                                    }
                                }

                                // Carry earlier chunks forward in the thread-local subtotal:
                                // the localFinally delegate below runs once per partition, not
                                // once per chunk, so returning only this chunk's count would
                                // drop chunks processed earlier on the same thread.
                                return subtotal + listOfDashboardsChunk.Count;
                            }
                        },
                        (finalResult) =>
                        {
                            Interlocked.Add(ref numDashboards, finalResult);
                            Console.Write("[{0}].", numDashboards);
                        });

                    loggerConsole.Info("Completed {0} Dashboards", allDashboardsArray.Count);

                    stepTimingTarget.NumEntities = stepTimingTarget.NumEntities + allDashboardsArray.Count;
                }

                #endregion
            }
            catch (Exception ex)
            {
                logger.Warn(ex);
                loggerConsole.Warn(ex);

                return false;
            }
            finally
            {
                stopWatchTarget.Stop();

                this.DisplayJobTargetEndedStatus(jobConfiguration, jobTarget, i + 1, stopWatchTarget);

                stepTimingTarget.EndTime = DateTime.Now;
                stepTimingTarget.Duration = stopWatchTarget.Elapsed;
                stepTimingTarget.DurationMS = stopWatchTarget.ElapsedMilliseconds;

                List<StepTiming> stepTimings = new List<StepTiming>(1);
                stepTimings.Add(stepTimingTarget);
                FileIOHelper.WriteListToCSVFile(stepTimings, new StepTimingReportMap(), FilePathMap.StepTimingReportFilePath(), true);
            }

            i++;
        }

        return true;
    }
    catch (Exception ex)
    {
        logger.Error(ex);
        loggerConsole.Error(ex);

        return false;
    }
    finally
    {
        stopWatch.Stop();

        this.DisplayJobStepEndedStatus(jobConfiguration, stopWatch);

        stepTimingFunction.EndTime = DateTime.Now;
        stepTimingFunction.Duration = stopWatch.Elapsed;
        stepTimingFunction.DurationMS = stopWatch.ElapsedMilliseconds;

        List<StepTiming> stepTimings = new List<StepTiming>(1);
        stepTimings.Add(stepTimingFunction);
        FileIOHelper.WriteListToCSVFile(stepTimings, new StepTimingReportMap(), FilePathMap.StepTimingReportFilePath(), true);
    }
}
/// <summary>
/// Logs the receiving of a hello data packet in the statistics.
/// </summary>
/// <param name="totalLength">The total number of bytes received.</param>
/// <remarks>
/// This should be called before the received event is invoked so it is up to date for subscribers to that event.
/// </remarks>
internal void LogHelloReceive(int totalLength)
{
    Interlocked.Increment(ref helloMessagesReceived);
    Interlocked.Add(ref totalBytesReceived, totalLength);
}
public void IncrementActivationCount(int delta) { Interlocked.Add(ref activationCount, delta); }
public static long Add(ref this long value, long operand) => Interlocked.Add(ref value, operand);
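// The one-liner above is a C# 7.2+ "ref this" extension method on long. A hedged
// usage sketch; Counter and its total field are hypothetical names introduced here
// purely for illustration.
using System.Threading;

static class InterlockedExtensions
{
    // Repeated from above so this sketch stands alone: the receiver is passed
    // by reference, so the caller's variable is updated atomically.
    public static long Add(ref this long value, long operand) => Interlocked.Add(ref value, operand);
}

class Counter
{
    private long total; // hypothetical field, for illustration only

    // Shorthand for Interlocked.Add(ref total, n).
    public long Record(long n) => total.Add(n);
}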
internal int AddCall() { return(Interlocked.Add(ref _count, 1)); }
void DrainRegular()
{
    var a = actual;
    int missed = 1;

    for (;;)
    {
        long r = Volatile.Read(ref requested);
        long e = 0;

        while (e != r)
        {
            if (Volatile.Read(ref cancelled))
            {
                Clear();
                return;
            }

            bool d = Volatile.Read(ref done);
            T v;
            bool empty = Poll(out v);

            if (d && empty)
            {
                Exception ex = error;
                if (ex != null)
                {
                    a.OnError(ex);
                }
                else
                {
                    a.OnComplete();
                }
                return;
            }

            if (empty)
            {
                break;
            }

            a.OnNext(v);
            e++;
        }

        if (e == r)
        {
            if (Volatile.Read(ref cancelled))
            {
                Clear();
                return;
            }

            if (Volatile.Read(ref done) && IsEmpty())
            {
                Exception ex = error;
                if (ex != null)
                {
                    a.OnError(ex);
                }
                else
                {
                    a.OnComplete();
                }
                return;
            }
        }

        // Consume the emitted amount from the outstanding demand;
        // long.MaxValue means unbounded, so no bookkeeping is needed there.
        if (e != 0L && r != long.MaxValue)
        {
            Interlocked.Add(ref requested, -e);
        }

        missed = QueueDrainHelper.Leave(ref wip, missed);
        if (missed == 0)
        {
            break;
        }
    }
}
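// The requested bookkeeping above is the reactive-streams backpressure idiom:
// consumers add demand, the producer subtracts what it emits, and long.MaxValue
// stands for "unbounded". A minimal sketch of just the demand counter, assuming
// hypothetical names (DemandCounter is not a type from the source):
using System.Threading;

class DemandCounter
{
    private long requested;

    // Add demand, saturating at long.MaxValue so overflow reads as "unbounded".
    public void Request(long n)
    {
        long current = Volatile.Read(ref requested);
        while (current != long.MaxValue)
        {
            long next = current + n;
            if (next < 0) next = long.MaxValue; // overflowed -> unbounded

            long seen = Interlocked.CompareExchange(ref requested, next, current);
            if (seen == current) break; // our update won
            current = seen;             // lost the race; retry with the fresh value
        }
    }

    // Consume demand after emitting count items, mirroring the
    // Interlocked.Add(ref requested, -e) step in the drain loop above.
    public void Produced(long count)
    {
        if (Volatile.Read(ref requested) != long.MaxValue)
        {
            Interlocked.Add(ref requested, -count);
        }
    }

    public long Current => Volatile.Read(ref requested);
}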
private static int[][] GenerateOneStepFurther(IEnumerable<int[]> multipleTransitionFunctions, int newPosition, long targetCount, ref int maximumEstimatedIntegrityLevel, GeneratorHelper generatorHelper)
{
    var automataCandidates = multipleTransitionFunctions
        .Select(transitionFunctions => GenerateNewMutableClonesFromSmart(transitionFunctions, newPosition))
        .SelectMany(id => id)
        .ToArray();

    var individualVerticesHashes = new long[automataCandidates.Length][];
    var verticesIncludingNew = newPosition + 1;
    for (int automatonID = 0; automatonID < automataCandidates.Length; automatonID += 1)
    {
        individualVerticesHashes[automatonID] = new long[verticesIncludingNew];
    }

#if diagnostics
    Console.WriteLine($"Total number of automata: {automataCandidates.Length}");
#endif

    var hashToGroup = new Dictionary<long, SameHashCollisionGroup>();
    int estimatedIntegrityLevel = maximumEstimatedIntegrityLevel;

    void insertIdIntoHashbag(Dictionary<long, SameHashCollisionGroup> map, long hash, int id)
    {
        if (!map.ContainsKey(hash))
        {
            map.TryAdd(hash, new SameHashCollisionGroup()
            {
                automatonIDs = new ConcurrentBag<int>(),
                agreedIterations = estimatedIntegrityLevel
            });
        }
        map[hash].automatonIDs.Add(id);
    }

    List<long>[] SetupNewMailbox(int size)
    {
        var mailbox = new List<long>[size];
        for (int i = 0; i < mailbox.Length; i++)
        {
            mailbox[i] = new List<long>();
        }
        return mailbox;
    }

#if diagnostics
    int progress = 0;
    const int progressReportSize = 262143;
    var last = (automataCandidates.Length / progressReportSize) * progressReportSize;
    Console.WriteLine("Please wait...");
#endif

    for (int automatonID = 0; automatonID < automataCandidates.Length; automatonID++)
    {
        var temporary = new long[verticesIncludingNew];
        var mailbox = SetupNewMailbox(verticesIncludingNew);

#if diagnostics
        if ((automatonID & progressReportSize) == 0 && automatonID > 0)
        {
            Interlocked.Add(ref progress, progressReportSize);
            Console.WriteLine($"Preparing... {progress * 100d / automataCandidates.Length}%");
        }
#endif

        generatorHelper.InitializeIteration(automataCandidates[automatonID], individualVerticesHashes[automatonID], mailbox);
        var initialHash = generatorHelper.IterateAndSquashMultiple(automataCandidates[automatonID], individualVerticesHashes[automatonID], temporary, mailbox, estimatedIntegrityLevel);
        insertIdIntoHashbag(hashToGroup, initialHash, automatonID);
    }

    Dictionary<long, SameHashCollisionGroup> integrityLevelConformant = hashToGroup;
    int requiredIntegrityLevel = maximumEstimatedIntegrityLevel;

    while (targetCount > hashToGroup.Count)
    {
#if diagnostics
        Console.WriteLine($"Integrity level is too low: {requiredIntegrityLevel}");
#endif
        requiredIntegrityLevel += 1;
        integrityLevelConformant = new Dictionary<long, SameHashCollisionGroup>();

        while (hashToGroup.Count > 0 && targetCount > hashToGroup.Count)
        {
            // Pass along groups that fulfil the integrity level requirement
#if diagnostics
            Console.WriteLine($"LEFTOVER to recompute: {hashToGroup.Count}");
#endif
            var stack = new Stack<long>(hashToGroup.Keys);
            while (stack.Count > 0)
            {
                var key = stack.Pop();
                if (targetCount == hashToGroup.Count)
                {
                    break;
                }

                var temporary = new long[verticesIncludingNew];
                var mailbox = SetupNewMailbox(verticesIncludingNew);
                var collisionGroup = hashToGroup[key];
                var queuedHashes = new Queue<long>();
                var differentHashes = new HashSet<long>();

                // Recompute hashes
                foreach (var automatonID in collisionGroup.automatonIDs)
                {
                    var nextHash = generatorHelper.IterateAndSquashMultiple(automataCandidates[automatonID], individualVerticesHashes[automatonID], temporary, mailbox, requiredIntegrityLevel - collisionGroup.agreedIterations);
                    differentHashes.Add(nextHash);
                    queuedHashes.Enqueue(nextHash);
                }

                if (differentHashes.Count == 1)
                {
                    collisionGroup.agreedIterations = requiredIntegrityLevel;
                    hashToGroup.Remove(key, out collisionGroup);
                    integrityLevelConformant.TryAdd(key, collisionGroup);
                }
                else
                {
                    hashToGroup.Remove(key, out collisionGroup);
                    foreach (var automatonID in collisionGroup.automatonIDs)
                    {
                        insertIdIntoHashbag(hashToGroup, queuedHashes.Dequeue(), automatonID);
                    }
                    foreach (var item in differentHashes)
                    {
                        stack.Push(item);
                    }
                }
            }
        }

        foreach (var kvp in hashToGroup)
        {
            integrityLevelConformant.TryAdd(kvp.Key, kvp.Value);
        }
        hashToGroup = integrityLevelConformant;
    }

#if diagnostics
    Console.WriteLine($"Integrity level is enough: {requiredIntegrityLevel}");
#endif

    maximumEstimatedIntegrityLevel = requiredIntegrityLevel;

    // Return all unique automata
    return integrityLevelConformant.Select(kvp =>
    {
        kvp.Value.automatonIDs.TryPeek(out int representativeAutomatonID);
        return automataCandidates[representativeAutomatonID];
    }).ToArray();
}
public void Increment(long amount = 1) { AssertAttached(); Interlocked.Add(ref Value, amount); }
/// <summary>
/// Simulated crawl, truly asynchronous
/// </summary>
/// <param name="URL">Target address</param>
/// <param name="Path">Save path</param>
private void RealAsyncDownloadImages(string URL, string Path)
{
    using (var client = new HttpClient())
    {
        try
        {
            List<Task> tasks = new List<Task>();
            if (URL.Substring(0, 2) == @"//")
            {
                URL = "http:" + URL;
            }

            var content = client.GetStringAsync(URL).ConfigureAwait(false).GetAwaiter().GetResult();
            var html = new HtmlAgilityPack.HtmlDocument();
            html.LoadHtml(content);
            var imgsrcList = html.DocumentNode.SelectNodes("//img")
                .Select(m => m.Attributes["src"].Value)
                .Distinct()
                .ToList();

            Interlocked.Add(ref Total, imgsrcList.Count);

            Invoke((Action)(() =>
            {
                this.TotalLabel.Text = $@"{success + fail}/{Total}(success:{success},fail:{fail};)";
                this.outputText.AppendText($"{URL} preparing to download {imgsrcList.Count} images...\r\n");
            }));

            for (int i = 0; i < imgsrcList.Count; i++)
            {
                // Capture the incremented value directly; re-reading imagenum after
                // the increment would race with other concurrent download loops.
                var num = Interlocked.Increment(ref imagenum);
                var thisuri = imgsrcList[i];
                Invoke((Action)(() => this.outputText.AppendText($"{num}: starting download\r\n")));
                try
                {
                    if (imgsrcList[i].Contains(@"//"))
                    {
                        if (imgsrcList[i].Substring(0, 2) == @"//")
                        {
                            imgsrcList[i] = "http:" + imgsrcList[i];
                        }
                        var stream = client.GetStreamAsync(imgsrcList[i]).ContinueWith(p =>
                        {
                            try
                            {
                                var a = p.Result;
                                Image.FromStream(a).Save(Path + $@"\{Guid.NewGuid().ToString()}.jpg");
                                Interlocked.Increment(ref success);
                                // Continuations run on the thread pool, so marshal UI updates back.
                                Invoke((Action)(() => this.outputText.AppendText($"{num}: completed\r\n")));
                            }
                            catch (Exception ex)
                            {
                                Interlocked.Increment(ref fail);
                                Invoke((Action)(() => this.outputText.AppendText($"{num}: {thisuri} download failed : {ex.Message}\r\n")));
                            }
                        });
                        tasks.Add(stream);
                    }
                    else
                    {
                        var HostUrl = URL.Split('/')[2];
                        var stream = client.GetStreamAsync(@"http://" + HostUrl + imgsrcList[i]).ContinueWith(p =>
                        {
                            try
                            {
                                var a = p.Result;
                                Image.FromStream(a).Save(Path + $@"\{Guid.NewGuid().ToString()}.jpg");
                                Interlocked.Increment(ref success);
                                Invoke((Action)(() => this.outputText.AppendText($"{num}: completed\r\n")));
                            }
                            catch (Exception ex)
                            {
                                Interlocked.Increment(ref fail);
                                Invoke((Action)(() => this.outputText.AppendText($"{num}: {URL} : {thisuri} download failed : {ex.Message}\r\n")));
                            }
                        });
                        tasks.Add(stream);
                    }
                }
                catch (Exception ex)
                {
                    Interlocked.Increment(ref fail);
                    Invoke((Action)(() => this.outputText.AppendText($"{num}: {thisuri} download failed : {ex.Message}\r\n")));
                }
            }

            Task.WaitAll(tasks.ToArray());

            Invoke((Action)(() =>
            {
                this.outputText.AppendText($"{URL}: finished\r\n");
            }));
        }
        catch (Exception ex)
        {
            //this.outputText.AppendText(ex.Message + "\r\n");
        }
    }
}
private void Search(string file, SearchType searchType, string searchPattern, GrepSearchOption searchOptions, int codePage)
{
    try
    {
        ProcessedFile(this, new ProgressStatus(true, processedFilesCount, foundfilesCount, null, file));

        IGrepEngine engine = GrepEngineFactory.GetSearchEngine(file, SearchParams, FileFilter);

        Interlocked.Increment(ref processedFilesCount);

        Encoding encoding = Encoding.Default;
        if (codePage > -1)
        {
            encoding = Encoding.GetEncoding(codePage);
        }
        else if (!Utils.IsBinary(file) && !Utils.IsPdfFile(file))
        {
            encoding = Utils.GetFileEncoding(file);
        }

        if (Utils.CancelSearch)
        {
            if (cancellationTokenSource != null)
            {
                cancellationTokenSource.Cancel();
            }
            return;
        }

        List<GrepSearchResult> fileSearchResults = engine.Search(file, searchPattern, searchType, searchOptions, encoding);

        if (fileSearchResults != null && fileSearchResults.Count > 0)
        {
            AddSearchResults(fileSearchResults);
        }

        // Count successful results, guarding against a null result list.
        int hits = fileSearchResults?.Count(r => r.IsSuccess) ?? 0;
        Interlocked.Add(ref foundfilesCount, hits);

        ProcessedFile(this, new ProgressStatus(false, processedFilesCount, foundfilesCount, fileSearchResults, file));

        GrepEngineFactory.ReturnToPool(file, engine);
    }
    catch (Exception ex)
    {
        logger.Log<Exception>(LogLevel.Error, ex.Message, ex);
        AddSearchResult(new GrepSearchResult(file, searchPattern, ex.Message, false));
        if (ProcessedFile != null)
        {
            List<GrepSearchResult> _results = new List<GrepSearchResult>
            {
                new GrepSearchResult(file, searchPattern, ex.Message, false)
            };
            ProcessedFile(this, new ProgressStatus(false, processedFilesCount, foundfilesCount, _results, file));
        }
    }
    finally
    {
        if (searchOptions.HasFlag(GrepSearchOption.StopAfterFirstMatch) && searchResults.Count > 0)
        {
            if (cancellationTokenSource != null)
            {
                cancellationTokenSource.Cancel();
            }
        }
    }
}
/// <summary>
/// Logs the receiving of an acknowledgement data packet in the statistics.
/// </summary>
/// <param name="totalLength">The total number of bytes received.</param>
/// <remarks>
/// This should be called before the received event is invoked so it is up to date for subscribers to that event.
/// </remarks>
internal void LogAcknowledgementReceive(int totalLength)
{
    Interlocked.Increment(ref acknowledgementMessagesReceived);
    Interlocked.Add(ref totalBytesReceived, totalLength);
}
public void Tick(int count) { Interlocked.Add(ref this.tickCount, count); }
/// <summary>
/// Logs the receiving of a ping data packet in the statistics.
/// </summary>
/// <param name="totalLength">The total number of bytes received.</param>
/// <remarks>
/// This should be called before the received event is invoked so it is up to date for subscribers to that event.
/// </remarks>
internal void LogPingReceive(int totalLength)
{
    Interlocked.Increment(ref pingMessagesReceived);
    Interlocked.Add(ref totalBytesReceived, totalLength);
}
public void AccessRandomKeys()
{
    using (var conn = Create(allowAdmin: true))
    {
        var cluster = conn.GetDatabase();

        int slotMovedCount = 0;
        conn.HashSlotMoved += (s, a) =>
        {
            Log("{0} moved from {1} to {2}", a.HashSlot, Describe(a.OldEndPoint), Describe(a.NewEndPoint));
            Interlocked.Increment(ref slotMovedCount);
        };

        var pairs = new Dictionary<string, string>();
        const int COUNT = 500;
        int index = 0;

        var servers = conn.GetEndPoints().Select(x => conn.GetServer(x));
        foreach (var server in servers)
        {
            if (!server.IsSlave)
            {
                server.Ping();
                server.FlushAllDatabases();
            }
        }

        for (int i = 0; i < COUNT; i++)
        {
            var key = Guid.NewGuid().ToString();
            var value = Guid.NewGuid().ToString();
            pairs.Add(key, value);
            cluster.StringSet(key, value, flags: CommandFlags.FireAndForget);
        }

        var expected = new string[COUNT];
        var actual = new Task<RedisValue>[COUNT];
        index = 0;
        foreach (var pair in pairs)
        {
            expected[index] = pair.Value;
            actual[index] = cluster.StringGetAsync(pair.Key);
            index++;
        }
        cluster.WaitAll(actual);

        for (int i = 0; i < COUNT; i++)
        {
            Assert.Equal(expected[i], (string)actual[i].Result);
        }

        int total = 0;
        Parallel.ForEach(servers, server =>
        {
            if (!server.IsSlave)
            {
                int count = server.Keys(pageSize: 100).Count();
                Log("{0} has {1} keys", server.EndPoint, count);
                Interlocked.Add(ref total, count);
            }
        });

        foreach (var server in servers)
        {
            var counters = server.GetCounters();
            Log(counters.ToString());
        }

        int final = Interlocked.CompareExchange(ref total, 0, 0);
        Assert.Equal(COUNT, final);
        Assert.Equal(0, Interlocked.CompareExchange(ref slotMovedCount, 0, 0));
    }
}
public long Add(long len) { return(Interlocked.Add(ref counter, len)); }
// Fetch-and-add: atomically adds inc to val and returns the value val held
// before the add (Interlocked.Add returns the new value, so subtract inc).
// The previous expression, -val + Interlocked.Add(ref val, inc) - inc, read
// val non-atomically and evaluated to 0 in the absence of contention.
private static int increment(ref int val, int inc)
{
    return Interlocked.Add(ref val, inc) - inc;
}
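// The snippet above recovers the pre-add value from Interlocked.Add's return.
// Where no single Interlocked primitive fits, the usual fallback is a
// CompareExchange retry loop. A minimal sketch; AtomicOps and FetchAndApply
// are hypothetical names, not part of the source.
using System;
using System.Threading;

static class AtomicOps
{
    // General fetch-and-transform: Interlocked.Add covers plain addition, but
    // this pattern extends to min/max, saturating adds, and other transforms.
    // Returns the value observed immediately before the successful update.
    public static int FetchAndApply(ref int target, Func<int, int> transform)
    {
        int seen = Volatile.Read(ref target);
        while (true)
        {
            int desired = transform(seen);
            int actual = Interlocked.CompareExchange(ref target, desired, seen);
            if (actual == seen)
            {
                return seen; // our update won
            }
            seen = actual;   // target changed under us; retry with the fresh value
        }
    }
}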
private void ProcessDatagrams(object state)
{
    UdpClient listener = (UdpClient)state;

    while (true)
    {
        // Check if we already closed the server
        if (listener.Client == null)
        {
            return;
        }

        // WSAECONNRESET:
        // The virtual circuit was reset by the remote side executing a hard or abortive close.
        // The application should close the socket; it is no longer usable. On a UDP-datagram socket
        // this error indicates a previous send operation resulted in an ICMP Port Unreachable message.
        // Note the socket settings on creation of the server. They make us ignore these resets.
        IPEndPoint senderEndpoint = null;
        try
        {
            byte[] receiveBytes = listener.Receive(ref senderEndpoint);

            Interlocked.Exchange(ref ServerInfo.AvailableBytes, listener.Available);
            Interlocked.Increment(ref ServerInfo.NumberOfPacketsInPerSecond);
            Interlocked.Add(ref ServerInfo.TotalPacketSizeIn, receiveBytes.Length);

            if (receiveBytes.Length != 0)
            {
                _receiveThreadPool.QueueUserWorkItem(() =>
                {
                    try
                    {
                        if (!GreylistManager.IsWhitelisted(senderEndpoint.Address) && GreylistManager.IsBlacklisted(senderEndpoint.Address))
                        {
                            return;
                        }
                        if (GreylistManager.IsGreylisted(senderEndpoint.Address))
                        {
                            return;
                        }

                        ProcessMessage(receiveBytes, senderEndpoint);
                    }
                    catch (Exception e)
                    {
                        Log.Warn($"Process message error from: {senderEndpoint.Address}", e);
                    }
                });
            }
            else
            {
                Log.Warn("Unexpected end of transmission?");
                continue;
            }
        }
        catch (Exception e)
        {
            Log.Error("Unexpected end of transmission?", e);
            if (listener.Client != null)
            {
                continue;
            }
            return;
        }
    }
}
/// <summary>
/// Logs the sending of a reliable data packet in the statistics.
/// </summary>
/// <param name="dataLength">The number of bytes of data sent.</param>
/// <remarks>
/// This should be called after the data has been sent and should only be called for data that is sent successfully.
/// </remarks>
internal void LogReliableSend(int dataLength)
{
    Interlocked.Increment(ref reliableMessagesSent);
    Interlocked.Add(ref dataBytesSent, dataLength);
}
internal static long AddMemoryFailPointReservation(long size) { // Size can legitimately be negative - see Dispose. return(Interlocked.Add(ref s_memFailPointReservedMemory, (long)size)); }
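// The comment above notes that size can legitimately be negative: the same
// counter handles both reserving (positive addend) and releasing from Dispose
// (negated addend). A hedged sketch of that pairing; MemoryReservation and its
// members are hypothetical names, not the runtime's actual MemoryFailPoint type.
using System;
using System.Threading;

class MemoryReservation : IDisposable
{
    private static long s_reserved;
    private readonly long size;

    public MemoryReservation(long size)
    {
        this.size = size;
        Interlocked.Add(ref s_reserved, size);  // reserve
    }

    public void Dispose()
    {
        Interlocked.Add(ref s_reserved, -size); // release through the same Add path
    }

    // Interlocked.Read gives a torn-free read of the 64-bit counter on 32-bit hosts.
    public static long TotalReserved => Interlocked.Read(ref s_reserved);
}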
/// <summary>
/// Logs the sending of a fragmented data packet in the statistics.
/// </summary>
/// <param name="dataLength">The number of bytes of data sent.</param>
/// <remarks>
/// This should be called after the data has been sent and should only be called for data that is sent successfully.
/// </remarks>
internal void LogFragmentedSend(int dataLength)
{
    Interlocked.Increment(ref fragmentedMessagesSent);
    Interlocked.Add(ref dataBytesSent, dataLength);
}
void Drain()
{
    // Only the thread that moves wip from 0 to 1 may drain; other callers just
    // record that more work arrived and leave.
    if (Interlocked.Increment(ref wip) != 1)
    {
        return;
    }

    int missed = 1;
    var a = actual;
    var q = queue;
    var e = emitted;

    for (;;)
    {
        long r = Volatile.Read(ref requested);

        while (e != r)
        {
            if (Volatile.Read(ref cancelled))
            {
                lock (this)
                {
                    q.Clear();
                }
                return;
            }

            bool d = Volatile.Read(ref done);
            bool empty;
            T item;
            lock (this)
            {
                empty = !q.Poll(out item);
            }

            if (d && empty)
            {
                var ex = error;
                if (ex != null)
                {
                    a.OnError(ex);
                }
                else
                {
                    a.OnComplete();
                }
                return;
            }

            if (empty)
            {
                break;
            }

            a.OnNext(item);
            e++;
        }

        if (e == r)
        {
            if (Volatile.Read(ref cancelled))
            {
                lock (this)
                {
                    q.Clear();
                }
                return;
            }

            bool d = Volatile.Read(ref done);
            bool empty;
            lock (this)
            {
                empty = q.IsEmpty();
            }

            if (d && empty)
            {
                var ex = error;
                if (ex != null)
                {
                    a.OnError(ex);
                }
                else
                {
                    a.OnComplete();
                }
                return;
            }
        }

        int w = Volatile.Read(ref wip);
        if (w == missed)
        {
            emitted = e;
            // Subtract the signals we have handled; a non-zero result means new
            // work arrived meanwhile and we must loop instead of exiting.
            missed = Interlocked.Add(ref wip, -missed);
            if (missed == 0)
            {
                break;
            }
        }
        else
        {
            missed = w;
        }
    }
}
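// Both drain loops above rely on the same work-in-progress counter trick: the
// first caller to increment wip becomes the sole drainer, and the decrement at
// the end detects signals that arrived during the drain. A minimal
// self-contained sketch of just that exclusion pattern, with hypothetical
// names and none of the backpressure or terminal-event handling:
using System;
using System.Collections.Concurrent;
using System.Threading;

class SerializedProcessor<T>
{
    private readonly ConcurrentQueue<T> queue = new ConcurrentQueue<T>();
    private readonly Action<T> handler;
    private int wip; // work-in-progress counter

    public SerializedProcessor(Action<T> handler) => this.handler = handler;

    // Any thread may call Offer; items are handled one at a time, on exactly
    // one thread at a time, and no enqueue signal is ever lost.
    public void Offer(T item)
    {
        queue.Enqueue(item);

        // First incrementer becomes the drainer; everyone else just bumps the
        // counter so the drainer knows more work arrived.
        if (Interlocked.Increment(ref wip) != 1)
        {
            return;
        }

        int missed = 1;
        for (;;)
        {
            while (queue.TryDequeue(out T next))
            {
                handler(next);
            }
            // Subtract the signals we consumed; a non-zero result means Offer
            // ran concurrently, so loop and drain again.
            missed = Interlocked.Add(ref wip, -missed);
            if (missed == 0)
            {
                break;
            }
        }
    }
}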
/// <summary>
/// Logs the receiving of a reliable data packet in the statistics.
/// </summary>
/// <param name="dataLength">The number of bytes of data received.</param>
/// <param name="totalLength">The total number of bytes received.</param>
/// <remarks>
/// This should be called before the received event is invoked so it is up to date for subscribers to that event.
/// </remarks>
internal void LogReliableReceive(int dataLength, int totalLength)
{
    Interlocked.Increment(ref reliableMessagesReceived);
    Interlocked.Add(ref dataBytesReceived, dataLength);
    Interlocked.Add(ref totalBytesReceived, totalLength);
}
// Demonstrates:
//      ConcurrentQueue<T>.Enqueue()
//      ConcurrentQueue<T>.TryPeek()
//      ConcurrentQueue<T>.TryDequeue()
static void Main()
{
    // Construct a ConcurrentQueue<>.
    ConcurrentQueue<int> cq = new ConcurrentQueue<int>();

    // Populate the queue.
    for (int i = 0; i < 10000; i++)
    {
        cq.Enqueue(i);
    }

    // Peek at the first element.
    int result;
    if (!cq.TryPeek(out result))
    {
        Console.WriteLine("CQ: TryPeek failed when it should have succeeded");
    }
    else if (result != 0)
    {
        Console.WriteLine("CQ: Expected TryPeek result of 0, got {0}", result);
    }

    int outerSum = 0;

    // An action to consume the ConcurrentQueue: each worker accumulates a
    // local sum, then publishes it to the shared total with one Interlocked.Add.
    Action action = () =>
    {
        int localSum = 0;
        while (cq.TryDequeue(out int localValue))
        {
            localSum += localValue;
        }
        Interlocked.Add(ref outerSum, localSum);
    };

    /***** Method 1 to start 4 concurrent consuming actions *****/
    Parallel.Invoke(action, action, action, action);

    /***** Method 2 to start 4 consuming actions *****/
    // NOTE: as written this runs the actions sequentially, not concurrently,
    // because each task is waited on before the next one starts.
    //var task1 = Task.Factory.StartNew(action);
    //task1.Wait();
    //var task2 = Task.Factory.StartNew(action);
    //task2.Wait();
    //var task3 = Task.Factory.StartNew(action);
    //task3.Wait();
    //var task4 = Task.Factory.StartNew(action);
    //task4.Wait();

    /***** Method 3 to start 4 concurrent consuming actions *****/
    //Task task1 = new Task(action);
    //task1.Start();
    //Task task2 = new Task(action);
    //task2.Start();
    //Task task3 = new Task(action);
    //task3.Start();
    //Task task4 = new Task(action);
    //task4.Start();
    //task1.Wait();
    //task2.Wait();
    //task3.Wait();
    //task4.Wait();

    Console.WriteLine("outerSum = {0}, should be 49995000", outerSum);
}
/// <summary>
/// Logs the receiving of a fragmented data packet in the statistics.
/// </summary>
/// <param name="dataLength">The number of bytes of data received.</param>
/// <param name="totalLength">The total number of bytes received.</param>
/// <remarks>
/// This should be called before the received event is invoked so it is up to date for subscribers to that event.
/// </remarks>
internal void LogFragmentedReceive(int dataLength, int totalLength)
{
    Interlocked.Increment(ref fragmentedMessagesReceived);
    Interlocked.Add(ref dataBytesReceived, dataLength);
    Interlocked.Add(ref totalBytesReceived, totalLength);
}
public int AtomicAddAndGet(int delta) { return(Interlocked.Add(ref _value, delta)); }
public void AddWritten(long count) { Interlocked.Add(ref this.writtenBytes, count); }