public void UnwatchAllIndexes() { Interlocked.Decrement(ref watchAllIndexes); }
public void Decrement() => Interlocked.Decrement(ref _counter);
private void DecreaseRunningThreadCount() => Interlocked.Decrement(ref _currentThreadCount);
/// <summary>
/// Decrements the counter by 1
/// </summary>
public void Remove()
{
    if (Interlocked.Decrement(ref counter) == 0)
        TryCompleteAwaitingTask();
}
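The Remove() above pairs an Interlocked.Decrement with a "complete when the count hits zero" callback. A minimal, self-contained sketch of that pattern under assumed names (AsyncCounter, Add, WaitAsync are illustrative, not taken from the original source) could look like this:

using System.Threading;
using System.Threading.Tasks;

public sealed class AsyncCounter
{
    private int _counter;
    private readonly TaskCompletionSource<bool> _completion =
        new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);

    // Register one outstanding item before any matching Remove() can run.
    public void Add() => Interlocked.Increment(ref _counter);

    // Release one item; the Remove() that drops the count to zero completes the awaiting task.
    public void Remove()
    {
        if (Interlocked.Decrement(ref _counter) == 0)
            _completion.TrySetResult(true);
    }

    // Awaiters observe completion once every added item has been removed.
    public Task WaitAsync() => _completion.Task;
}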
private void Heartbeat()
{
    VerifyNetworkThread();

    double now = NetTime.Now;
    double delta = now - m_lastHeartbeat;

    int maxCHBpS = 1250 - m_connections.Count;
    if (maxCHBpS < 250)
        maxCHBpS = 250;

    if (delta > (1.0 / (double)maxCHBpS) || delta < 0.0) // max connection heartbeats/second max
    {
        m_frameCounter++;
        m_lastHeartbeat = now;

        // do handshake heartbeats
        if ((m_frameCounter % 3) == 0)
        {
            foreach (var kvp in m_handshakes)
            {
                NetConnection conn = kvp.Value as NetConnection;
#if DEBUG
                // sanity check: handshakes are keyed by connection id
                if (kvp.Key != conn.ConnectionId)
                    LogWarning("Sanity fail! Connection in handshake list under wrong key!");
#endif
                conn.UnconnectedHeartbeat(now);
                if (conn.m_status == NetConnectionStatus.Connected || conn.m_status == NetConnectionStatus.Disconnected)
                {
#if DEBUG
                    // sanity check
                    if (conn.m_status == NetConnectionStatus.Disconnected && m_handshakes.ContainsKey(conn.ConnectionId))
                    {
                        LogWarning("Sanity fail! Handshakes list contained disconnected connection!");
                        m_handshakes.Remove(conn.ConnectionId);
                    }
#endif
                    break; // collection has been modified
                }
            }
        }

#if DEBUG
        SendDelayedPackets();
#endif

        // update m_executeFlushSendQueue
        if (m_configuration.m_autoFlushSendQueue && m_needFlushSendQueue == true)
        {
            m_executeFlushSendQueue = true;
            m_needFlushSendQueue = false; // a race condition to this variable will simply result in a single superfluous call to FlushSendQueue()
        }

        // do connection heartbeats
        lock (m_connections)
        {
            for (int i = m_connections.Count - 1; i >= 0; i--)
            {
                var conn = m_connections[i];
                conn.Heartbeat(now, m_frameCounter);
                if (conn.m_status == NetConnectionStatus.Disconnected)
                {
                    //
                    // remove connection
                    //
                    m_connections.RemoveAt(i);
                    m_connectionLookup.Remove(conn.ConnectionId);
                }
            }
        }
        m_executeFlushSendQueue = false;

        // send unsent unconnected messages
        NetTuple<NetEndPoint, long, NetOutgoingMessage> unsent;
        while (m_unsentUnconnectedMessages.TryDequeue(out unsent))
        {
            long connectionId = unsent.Item2;
            NetOutgoingMessage om = unsent.Item3;

            int len = om.Encode(m_sendBuffer, 0, connectionId, 0);

            Interlocked.Decrement(ref om.m_recyclingCount);
            if (om.m_recyclingCount <= 0)
                Recycle(om);

            bool connReset;
            SendPacket(len, unsent.Item1, 1, out connReset);
        }
    }

    if (m_upnp != null)
        m_upnp.CheckForDiscoveryTimeout();

    //
    // read from socket
    //
    if (m_socket == null)
        return;

    if (!m_socket.Poll(1000, SelectMode.SelectRead)) // wait up to 1 ms for data to arrive
        return;

    //if (m_socket == null || m_socket.Available < 1)
    //    return;

    // update now
    now = NetTime.Now;

    do
    {
        int bytesReceived = 0;
        try
        {
            bytesReceived = m_socket.ReceiveFrom(m_receiveBuffer, 0, m_receiveBuffer.Length, SocketFlags.None, ref m_senderRemote);
        }
        catch (SocketException sx)
        {
            switch (sx.SocketErrorCode)
            {
                case SocketError.ConnectionReset:
                    // connection reset by peer, aka connection forcibly closed aka "ICMP port unreachable"
                    // we should shut down the connection; but m_senderRemote seemingly cannot be trusted, so which connection should we shut down?!
                    // So, what to do?
                    LogWarning("ConnectionReset");
                    return;

                case SocketError.NotConnected:
                    // socket is unbound; try to rebind it (happens on mobile when process goes to sleep)
                    BindSocket(true);
                    return;

                default:
                    LogWarning("Socket exception: " + sx.ToString());
                    return;
            }
        }

        if (bytesReceived < NetConstants.HeaderByteSize)
            return;

        //LogVerbose("Received " + bytesReceived + " bytes");

        var ipsender = (NetEndPoint)m_senderRemote;

        if (m_upnp != null && now < m_upnp.m_discoveryResponseDeadline && bytesReceived > 32)
        {
            // is this an UPnP response?
            string resp = System.Text.Encoding.UTF8.GetString(m_receiveBuffer, 0, bytesReceived);
            if (resp.Contains("upnp:rootdevice") || resp.Contains("UPnP/1.0"))
            {
                try
                {
                    resp = resp.Substring(resp.ToLower().IndexOf("location:") + 9);
                    resp = resp.Substring(0, resp.IndexOf("\r")).Trim();
                    m_upnp.ExtractServiceUrl(resp);
                    return;
                }
                catch (Exception ex)
                {
                    LogDebug("Failed to parse UPnP response: " + ex.ToString());

                    // don't try to parse this packet further
                    return;
                }
            }
        }

        //
        // parse packet into messages
        //
        int numMessages = 0;
        int numFragments = 0;
        int ptr = 0;
        while ((bytesReceived - ptr) >= NetConstants.HeaderByteSize)
        {
            // decode header
            //  8 bits - NetMessageType
            // 64 bits - Connection ID
            //  1 bit  - Fragment?
            // 15 bits - Sequence number
            // 16 bits - Payload length in bits
            numMessages++;

            NetMessageType tp = (NetMessageType)m_receiveBuffer[ptr++];

            long connectionId = BitConverter.ToInt64(m_receiveBuffer, ptr);
            ptr += 8;

            byte low = m_receiveBuffer[ptr++];
            byte high = m_receiveBuffer[ptr++];

            bool isFragment = ((low & 1) == 1);
            ushort sequenceNumber = (ushort)((low >> 1) | (((int)high) << 7));

            if (isFragment)
                numFragments++;

            ushort payloadBitLength = (ushort)(m_receiveBuffer[ptr++] | (m_receiveBuffer[ptr++] << 8));
            int payloadByteLength = NetUtility.BytesToHoldBits(payloadBitLength);
            if (bytesReceived - ptr < payloadByteLength)
            {
                LogWarning("Malformed packet; stated payload length " + payloadByteLength + ", remaining bytes " + (bytesReceived - ptr));
                return;
            }

            if (tp >= NetMessageType.Unused1 && tp <= NetMessageType.Unused29)
            {
                ThrowOrLog("Unexpected NetMessageType: " + tp);
                return;
            }

            NetConnection sender = null;
            m_connectionLookup.TryGetValue(connectionId, out sender);

            if (sender != null)
            {
                if (sender.RemoteEndPoint != ipsender)
                    sender.MutateEndPoint(ipsender);

                sender.m_statistics.PacketReceived(NetConstants.HeaderByteSize + payloadByteLength, 1, isFragment ? 1 : 0);
            }

            try
            {
                if (tp >= NetMessageType.LibraryError)
                {
                    if (sender != null)
                        sender.ReceivedLibraryMessage(tp, ptr, payloadByteLength);
                    else
                        ReceivedUnconnectedLibraryMessage(now, ipsender, connectionId, tp, ptr, payloadByteLength);
                }
                else
                {
                    if (sender == null && !m_configuration.IsMessageTypeEnabled(NetIncomingMessageType.UnconnectedData))
                        return; // dropping unconnected message since it's not enabled

                    NetIncomingMessage msg = CreateIncomingMessage(NetIncomingMessageType.Data, payloadByteLength);
                    msg.m_isFragment = isFragment;
                    msg.m_receiveTime = now;
                    msg.m_sequenceNumber = sequenceNumber;
                    msg.m_receivedMessageType = tp;
                    msg.m_senderConnection = sender;
                    msg.m_senderEndPoint = ipsender;
                    msg.m_bitLength = payloadBitLength;

                    Buffer.BlockCopy(m_receiveBuffer, ptr, msg.m_data, 0, payloadByteLength);

                    if (sender != null)
                    {
                        if (tp == NetMessageType.Unconnected)
                        {
                            // We're connected; but we can still send unconnected messages to this peer
                            msg.m_incomingMessageType = NetIncomingMessageType.UnconnectedData;
                            ReleaseMessage(msg);
                        }
                        else
                        {
                            // connected application (non-library) message
                            sender.ReceivedMessage(msg);
                        }
                    }
                    else
                    {
                        // at this point we know the message type is enabled
                        // unconnected application (non-library) message
                        msg.m_incomingMessageType = NetIncomingMessageType.UnconnectedData;
                        ReleaseMessage(msg);
                    }
                }
            }
            catch (Exception ex)
            {
                LogError("Packet parsing error: " + ex.Message + " from " + ipsender);
            }

            ptr += payloadByteLength;
        }

        m_statistics.PacketReceived(bytesReceived, numMessages, numFragments);
    } while (m_socket.Available > 0);
}
public OdbcAdo(CommonUtils util, string masterConnectionString, string[] slaveConnectionStrings)
    : base(DataType.Odbc)
{
    base._util = util;

    if (!string.IsNullOrEmpty(masterConnectionString))
        MasterPool = new OdbcConnectionPool("主库", masterConnectionString, null, null); // "主库" = master database pool

    if (slaveConnectionStrings != null)
    {
        foreach (var slaveConnectionString in slaveConnectionStrings)
        {
            // "从库{n}" = slave database pool n; the two callbacks track how many slaves are unavailable
            var slavePool = new OdbcConnectionPool(
                $"从库{SlavePools.Count + 1}",
                slaveConnectionString,
                () => Interlocked.Decrement(ref slaveUnavailables),
                () => Interlocked.Increment(ref slaveUnavailables));
            SlavePools.Add(slavePool);
        }
    }
}
/// <summary>
/// Helper method of GetEnumerator to separate out yield return statement, and prevent lazy evaluation.
/// </summary>
private IEnumerator<T> GetEnumerator(Segment head, Segment tail, int headLow, int tailHigh)
{
    try
    {
        SpinWait spin = new SpinWait();

        if (head == tail)
        {
            for (int i = headLow; i <= tailHigh; i++)
            {
                // If the position is reserved by an Enqueue operation, but the value is not written into,
                // spin until the value is available.
                spin.Reset();
                while (!head._state[i]._value)
                {
                    spin.SpinOnce();
                }
                yield return head._array[i];
            }
        }
        else
        {
            // iterate on head segment
            for (int i = headLow; i < SEGMENT_SIZE; i++)
            {
                // If the position is reserved by an Enqueue operation, but the value is not written into,
                // spin until the value is available.
                spin.Reset();
                while (!head._state[i]._value)
                {
                    spin.SpinOnce();
                }
                yield return head._array[i];
            }

            // iterate on middle segments
            Segment curr = head.Next;
            while (curr != tail)
            {
                for (int i = 0; i < SEGMENT_SIZE; i++)
                {
                    // If the position is reserved by an Enqueue operation, but the value is not written into,
                    // spin until the value is available.
                    spin.Reset();
                    while (!curr._state[i]._value)
                    {
                        spin.SpinOnce();
                    }
                    yield return curr._array[i];
                }
                curr = curr.Next;
            }

            // iterate on tail segment
            for (int i = 0; i <= tailHigh; i++)
            {
                // If the position is reserved by an Enqueue operation, but the value is not written into,
                // spin until the value is available.
                spin.Reset();
                while (!tail._state[i]._value)
                {
                    spin.SpinOnce();
                }
                yield return tail._array[i];
            }
        }
    }
    finally
    {
        // This Decrement must happen after the enumeration is over.
        Interlocked.Decrement(ref _numSnapshotTakers);
    }
}
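For context, a hedged sketch of the public GetEnumerator() that would pair with the helper above: the snapshot-taker count is incremented before the segment positions are captured, and the helper's finally block performs the matching decrement once enumeration finishes. GetHeadTailPositions(...) is assumed here to mirror the .NET reference source; the exact signature in the original may differ.

public IEnumerator<T> GetEnumerator()
{
    // Tell concurrent Dequeue operations that a snapshot is in progress so they
    // do not clear slots this enumerator may still need to read.
    Interlocked.Increment(ref _numSnapshotTakers);

    Segment head, tail;
    int headLow, tailHigh;
    GetHeadTailPositions(out head, out tail, out headLow, out tailHigh); // assumed helper

    // The private iterator above owns the matching Interlocked.Decrement in its finally block.
    return GetEnumerator(head, tail, headLow, tailHigh);
}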
public void UnwatchAllReplicationConflicts() { Interlocked.Decrement(ref watchAllReplicationConflicts); }
public void UnwatchTransformers() { Interlocked.Decrement(ref watchAllTransformers); }
public void UnwatchConfig() { Interlocked.Decrement(ref watchConfig); }
public void UnwatchAllDocuments() { Interlocked.Decrement(ref watchAllDocuments); }
public void UnwatchCancellations() { Interlocked.Decrement(ref watchCancellations); }
public void UnwatchSync() { Interlocked.Decrement(ref watchSync); }
public void UnwatchConflicts() { Interlocked.Decrement(ref watchConflicts); }
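The Unwatch* methods above all undo a matching increment on a subscription counter. A hedged sketch of the increment side and of the check a notification dispatcher might make (the field name comes from the snippets; the surrounding logic is illustrative, not the original implementation):

public void WatchAllDocuments()
{
    // Balanced by UnwatchAllDocuments() above.
    Interlocked.Increment(ref watchAllDocuments);
}

private bool ShouldSendDocumentNotifications()
{
    // Any positive value means at least one subscriber is still watching.
    return Interlocked.CompareExchange(ref watchAllDocuments, 0, 0) > 0;
}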
public void Unlock() { Interlocked.Decrement(ref busy); }
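A possible acquire-side counterpart for the Unlock() above, assuming busy is used as a simple non-reentrant gate; this is a sketch only, and the original type's locking discipline may differ:

public bool TryLock()
{
    if (Interlocked.Increment(ref busy) == 1)
        return true; // we raised it from 0 to 1: lock acquired

    Interlocked.Decrement(ref busy); // someone else holds it; undo our increment
    return false;
}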
public void SendNotification(INotification notification, SendNotificationCallbackDelegate callback)
{
    Interlocked.Increment(ref _trackedNotificationCount);

    var appleNotification = notification as AppleNotification;
    if (appleNotification == null)
        throw new ArgumentException("Notification was not an AppleNotification", "notification");

    Exception failure;
    byte[] notificationData;
    if (!TryGetNotificationData(appleNotification, out notificationData, out failure))
    {
        Interlocked.Decrement(ref _trackedNotificationCount);
        if (callback != null)
            callback(this, new SendNotificationResult(notification, false, failure));
        return;
    }

    try
    {
        EnsureConnected();

        lock (_sendLock)
        {
            PollConnection();

            Log.Debug("ApplePushChannel instance {0}: Sending notification {1}", _channelInstanceId, appleNotification.Identifier);

            _networkStream.Write(notificationData, 0, notificationData.Length);
            _networkStream.Flush();

            _sentNotifications.Add(new SentNotification(appleNotification) { Callback = callback });
        }
    }
    catch (Exception exception)
    {
        Disconnect();

        Log.Error("Exception during APNS Send with channel {2}: {0} -> {1}", appleNotification.Identifier, exception, _channelInstanceId);

        // If this failed, we probably had a networking error, so let's requeue the notification
        Interlocked.Decrement(ref _trackedNotificationCount);
        if (callback != null)
            callback(this, new SendNotificationResult(notification, true, exception));
    }
}
public int DecrementAndGet() { return Interlocked.Decrement(ref _val); }
private void HandleFailedNotification(int identifier, byte status)
{
    // Get the index of our failed notification (by identifier)
    var failedIndex = _sentNotifications.FindIndex(n => n.Identifier == identifier);
    if (failedIndex < 0)
        return;

    Log.Info("Failed Notification on channel {1}: {0}", identifier, _channelInstanceId);

    //Get all the notifications before the failed one and mark them as sent!
    if (failedIndex > 0)
    {
        var successful = _sentNotifications.GetRange(0, failedIndex);
        successful.ForEach(n =>
        {
            Interlocked.Decrement(ref _trackedNotificationCount);
            if (n.Callback != null)
                n.Callback(this, new SendNotificationResult(n.Notification));
        });

        _sentNotifications.RemoveRange(0, failedIndex);
    }

    //Get the failed notification itself
    var failedNotification = _sentNotifications[0];

    //Fail and remove the failed index from the list
    Interlocked.Decrement(ref _trackedNotificationCount);
    if (failedNotification.Callback != null)
        failedNotification.Callback(this, new SendNotificationResult(failedNotification.Notification, false,
            new NotificationFailureException(status, failedNotification.Notification)));

    _sentNotifications.RemoveAt(0);

    // Notifications sent after the failure must be re-sent
    _sentNotifications.Reverse();
    _sentNotifications.ForEach(n =>
    {
        Interlocked.Decrement(ref _trackedNotificationCount);
        if (failedNotification.Callback != null)
            failedNotification.Callback(this, new SendNotificationResult(n.Notification, true,
                new Exception("Sent after previously failed Notification.")) { CountsAsRequeue = false });
    });

    _sentNotifications.Clear();
}
// ReSharper disable once FunctionComplexityOverflow
// *** I do ^ because this function is the 'hottest' block of code in the program, and saving cycles during execution is ridiculously important here.
void RenderChunk(ChunkRef Chunk, ParallelLoopState LoopState)
{
    // *** Track how many chunks have been processed, for user feedback
    Interlocked.Increment(ref _ProcessedChunks);
    Interlocked.Increment(ref _ActiveRenderThreads);

#if !DEBUG && !FAST
    // *** In release mode, gracefully handle bad chunks. Explode in debug mode so I can track down the issue.
    try
    {
#endif
    int[][] DepthOpacities = _ColourPalette.DepthOpacities;

    // *** Cancellation logic for parallel processing
    if (LoopState != null && _Cancellation.IsCancellationRequested)
        LoopState.Stop();

    if (LoopState != null && LoopState.IsStopped)
    {
        Interlocked.Decrement(ref _ActiveRenderThreads);
        return;
    }

    // *** Hold off on rendering if the user needs to attend to an issue
    while (_PauseRendering > 0)
        Thread.Sleep(50);

    // *** Load the chunk from disk here
    AlphaBlockCollection Blocks = Chunk.Blocks;

    for (int X = 0; X < 16; X++)
    {
        for (int Z = 0; Z < 16; Z++)
        {
            // *** Start by finding the topmost block to render
            int EndY = _RenderStartY(Blocks, X, Z);
            int Y = EndY;

            if (Y < 0)
                continue; // *** No valid renderable blocks in this column, so continue with the next column

            // *** Drill into the column to determine how many blocks down to render
            int RenderVal = 255;
            while (RenderVal > 0)
            {
                RenderVal -= DepthOpacities[Blocks.GetID(X, Y, Z)][Blocks.GetData(X, Y, Z)];
                if (Y == 0) // *** If we've hit the bottom of the map, don't try and keep going.
                    break;  // *** It wouldn't end well.
                Y--;
            }

            Colour SetColour = Colour.Transparent; // *** What colour to set the current column's pixel to.

            // *** The Block-Metadata palette for this column's biome
            Colour[][] BiomePalette = _ColourPalette.FastPalette[Chunk.Biomes.GetBiome(X, Z)];

            for (; Y <= EndY; Y++) // *** Now render up from the lowest block to the starting block
            {
                // *** For each block we render, grab its palette entry.
                Colour Entry = BiomePalette[Blocks.GetID(X, Y, Z)][Blocks.GetData(X, Y, Z)];

                // *** If it has an associated entity colours list, then it needs special consideration to get its colour
                if ((Entry.Color & 0xFFFF0000U) == 0x00FF0000U) // *** Check for the flag value (0 Alpha, 255 Red - Blue and Green form the 0-65535 index)
                {
                    PaletteEntry Entry2 = _ColourPalette.GetPaletteEntry((int)(Entry.Color & 0x0000FFFFU))
                        .First(e => e.IsMatch(Blocks.GetData(X, Y, Z), Blocks.SafeGetTileEntity(X, Y, Z)));
                    if (Entry2 != null)
                        Entry = Entry2.Color;
                }

                if (Entry.A == 0)
                    continue; // *** If we're trying to render air, let's not.

                // *** Blend in our working colour to the column's pixel, after applying altitude and light-level blends.
                SetColour.Blend(Entry.Copy().LightLevel((uint)Math.Max(_Config.MinLightLevel, Blocks.GetBlockLight(X, Math.Min(Y + 1, 255), Z))).Altitude(Y));
            }

            Marshal.WriteInt32(
                _RenderTarget.Scan0
                    + (_Stride * (((Chunk.Z - _Config.SubregionChunks.Y) << 4) + Z))
                    + ((((Chunk.X - _Config.SubregionChunks.X) << 4) + X) << 2),
                (int)SetColour.FullAlpha().Color);
        }
    }

#if !DEBUG && !FAST
    // *** When not running in debug mode, chunks that fail to render should NOT crash everything.
    }
    catch (Exception Ex)
    {
        Interlocked.Increment(ref _PauseRendering);
        _CorruptChunks = true;

        RenderingErrorEventArgs E = new RenderingErrorEventArgs
        {
            ErrorException = Ex,
            IsFatal = false,
            UserErrorMessage = "A chunk failed to render",
            ErrorCode = ErrorBadChunk
        };

        if (RenderError != null)
            RenderError.Invoke(this, E);

        Interlocked.Decrement(ref _PauseRendering);
    }
#endif

    Interlocked.Decrement(ref _ActiveRenderThreads);
}
public int NextVariableId() { return Interlocked.Decrement(ref VariableIdCounter); }
/// <summary>
/// Release resource for a pip
/// </summary>
public void ReleaseResource()
{
    Interlocked.Decrement(ref m_numRunning); // Decrease the number of running tasks in the current queue.
    m_pipQueue.TriggerDispatcher();
}
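A hedged sketch of the acquire-side counterpart to ReleaseResource() above: a pip only starts when the running count stays within a concurrency limit, otherwise the increment is rolled back. TryAcquireResource and m_maxRunning are illustrative names, not taken from the original source.

public bool TryAcquireResource()
{
    if (Interlocked.Increment(ref m_numRunning) <= m_maxRunning)
        return true; // slot acquired; ReleaseResource() gives it back

    Interlocked.Decrement(ref m_numRunning); // over the limit: undo and report failure
    return false;
}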
public void NotifyParallelProcessEnded() { Interlocked.Decrement(ref spawnedProcessCount); }
private void DebugRefCountReleaseNativeOverlapped()
{
    // Note: Debug.Assert is [Conditional("DEBUG")], so this first decrement only executes in debug builds;
    // the method itself is intended for debug-only reference-count tracking.
    Debug.Assert(Interlocked.Decrement(ref _nativeOverlappedCounter) == 0, "NativeOverlapped released too many times.");
    Interlocked.Decrement(ref _nativeOverlappedUsed);
}
/// <summary>Decrements the reference count of a global scope</summary>
/// <param name="global">The global scope</param>
public static void ReleaseOne(byte global)
{
    Interlocked.Decrement(ref _cache[global]._count);
}
public long DecrementRefCount() { return Interlocked.Decrement(ref _dupes); }
/// <summary>Decrements the reference count of a global scope</summary>
/// <param name="scope">The global scope</param>
private static void ReleaseOne(ScopeGlobalCounter scope)
{
    Interlocked.Decrement(ref scope._count);
}
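The two ReleaseOne overloads above each balance an acquisition somewhere else; a minimal sketch of that matching increment, with AddOne as an assumed name:

private static void AddOne(ScopeGlobalCounter scope)
{
    // Each acquisition must be balanced by exactly one ReleaseOne call.
    Interlocked.Increment(ref scope._count);
}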
public StatsService(DiscordSocketClient client, CommandHandler cmdHandler, IBotCredentials creds, NadekoBot nadeko, IDataCache cache)
{
    _log = LogManager.GetCurrentClassLogger();
    _client = client;
    _creds = creds;
    _redis = cache.Redis;

    _started = DateTime.UtcNow;
    _client.MessageReceived += _ => Task.FromResult(Interlocked.Increment(ref _messageCounter));
    cmdHandler.CommandExecuted += (_, e) => Task.FromResult(Interlocked.Increment(ref _commandsRan));

    _client.ChannelCreated += (c) =>
    {
        var _ = Task.Run(() =>
        {
            if (c is ITextChannel)
                Interlocked.Increment(ref _textChannels);
            else if (c is IVoiceChannel)
                Interlocked.Increment(ref _voiceChannels);
        });
        return Task.CompletedTask;
    };

    _client.ChannelDestroyed += (c) =>
    {
        var _ = Task.Run(() =>
        {
            if (c is ITextChannel)
                Interlocked.Decrement(ref _textChannels);
            else if (c is IVoiceChannel)
                Interlocked.Decrement(ref _voiceChannels);
        });
        return Task.CompletedTask;
    };

    _client.GuildAvailable += (g) =>
    {
        var _ = Task.Run(() =>
        {
            var tc = g.Channels.Count(cx => cx is ITextChannel);
            var vc = g.Channels.Count - tc;
            Interlocked.Add(ref _textChannels, tc);
            Interlocked.Add(ref _voiceChannels, vc);
        });
        return Task.CompletedTask;
    };

    _client.JoinedGuild += (g) =>
    {
        var _ = Task.Run(() =>
        {
            var tc = g.Channels.Count(cx => cx is ITextChannel);
            var vc = g.Channels.Count - tc;
            Interlocked.Add(ref _textChannels, tc);
            Interlocked.Add(ref _voiceChannels, vc);
        });
        return Task.CompletedTask;
    };

    _client.GuildUnavailable += (g) =>
    {
        var _ = Task.Run(() =>
        {
            var tc = g.Channels.Count(cx => cx is ITextChannel);
            var vc = g.Channels.Count - tc;
            Interlocked.Add(ref _textChannels, -tc);
            Interlocked.Add(ref _voiceChannels, -vc);
        });
        return Task.CompletedTask;
    };

    _client.LeftGuild += (g) =>
    {
        var _ = Task.Run(() =>
        {
            var tc = g.Channels.Count(cx => cx is ITextChannel);
            var vc = g.Channels.Count - tc;
            Interlocked.Add(ref _textChannels, -tc);
            Interlocked.Add(ref _voiceChannels, -vc);
        });
        return Task.CompletedTask;
    };

    if (_client.ShardId == 0)
    {
        _carbonitexTimer = new Timer(async (state) =>
        {
            if (string.IsNullOrWhiteSpace(_creds.CarbonKey))
                return;
            try
            {
                using (var http = new HttpClient())
                {
                    using (var content = new FormUrlEncodedContent(new Dictionary<string, string>
                    {
                        { "servercount", nadeko.GuildCount.ToString() },
                        { "key", _creds.CarbonKey }
                    }))
                    {
                        content.Headers.Clear();
                        content.Headers.Add("Content-Type", "application/x-www-form-urlencoded");

                        await http.PostAsync("https://www.carbonitex.net/discord/data/botdata.php", content).ConfigureAwait(false);
                    }
                }
            }
            catch
            {
                // ignored
            }
        }, null, TimeSpan.FromHours(1), TimeSpan.FromHours(1));
    }

    _botlistTimer = new Timer(async (state) =>
    {
        if (string.IsNullOrWhiteSpace(_creds.BotListToken))
            return;
        try
        {
            using (var http = new HttpClient())
            {
                using (var content = new FormUrlEncodedContent(new Dictionary<string, string>
                {
                    { "shard_count", _creds.TotalShards.ToString() },
                    { "shard_id", client.ShardId.ToString() },
                    { "server_count", client.Guilds.Count().ToString() }
                }))
                {
                    content.Headers.Clear();
                    content.Headers.Add("Content-Type", "application/x-www-form-urlencoded");
                    http.DefaultRequestHeaders.Add("Authorization", _creds.BotListToken);

                    await http.PostAsync($"https://discordbots.org/api/bots/{client.CurrentUser.Id}/stats", content).ConfigureAwait(false);
                }
            }
        }
        catch (Exception ex)
        {
            _log.Error(ex);
            // ignored
        }
    }, null, TimeSpan.FromMinutes(5), TimeSpan.FromHours(1));

    var platform = "other";
    if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
        platform = "linux";
    else if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX))
        platform = "osx";
    else if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
        platform = "windows";

    _dataTimer = new Timer(async (state) =>
    {
        try
        {
            using (var http = new HttpClient())
            {
                using (var content = new FormUrlEncodedContent(new Dictionary<string, string>
                {
                    { "id", string.Concat(MD5.Create().ComputeHash(Encoding.ASCII.GetBytes(_creds.ClientId.ToString())).Select(x => x.ToString("X2"))) },
                    { "guildCount", nadeko.GuildCount.ToString() },
                    { "version", BotVersion },
                    { "platform", platform }
                }))
                {
                    content.Headers.Clear();
                    content.Headers.Add("Content-Type", "application/x-www-form-urlencoded");

                    await http.PostAsync("https://selfstats.nadekobot.me/", content).ConfigureAwait(false);
                }
            }
        }
        catch
        {
            // ignored
        }
    }, null, TimeSpan.FromSeconds(1), TimeSpan.FromHours(1));
}
/// <summary>
/// entry handler for the thread
/// </summary>
private void entryHandler()
{
    string itemID;
    ItemType item = null;
    string threadname = Thread.CurrentThread.Name;

    if (this.rampUp)
    {
        eBayApi.RampUp();
    }

    for (int i = 0; i < this.numCallsPerThread; i++)
    {
        if (this.stop)
        {
            break;
        }

        //add an item and log all processing message
        string message = String.Format("Thread: {0} ,Begin adding an item...", threadname);
        logMessage(message);
        itemID = eBayApi.AddItem(mMetrics);
        if (itemID != null && itemID != string.Empty)
        {
            message = String.Format("Thread: {0} ,Add item success: {1}", threadname, itemID.ToString());
        }
        else
        {
            message = String.Format("Thread: {0} ,Add item failure.", threadname);
        }
        logMessage(message);

        //get an item and log all processing message
        if (itemID != null && itemID != string.Empty)
        {
            item = eBayApi.GetItem(itemID, mMetrics);
            message = String.Format("Thread: {0} ,Get an item success: {1}", threadname, itemID.ToString());
        }
        else
        {
            message = String.Format("Thread: {0} ,Get an item failure.", threadname);
        }
        logMessage(message);

        //revise an item and log all processing message
        if (itemID != null && itemID != string.Empty)
        {
            eBayApi.ReviseItem(item, mMetrics);
            item = null;
            message = String.Format("Thread: {0} ,Revise an item success: {1}", threadname, itemID.ToString());
        }
        else
        {
            message = String.Format("Thread: {0} ,Revise an item failure.", threadname);
        }
        logMessage(message);

        string fmsg = String.Format("Thread: {0} finished.", threadname);
        logMessage(fmsg);

        if (Interlocked.Decrement(ref numCalls) == 0)
        {
            mMetrics.GenerateReport(logger);
            if (FetchCompleteEvent != null)
            {
                FetchCompleteEvent(this, new EventArgs());
            }
        }
    } //close for loop
} //close GetItemHandler method
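A hedged sketch of how a caller might arm the countdown used at the end of entryHandler(): numCalls is set to the total number of calls before any worker starts, so exactly one worker observes the decrement reaching zero and raises FetchCompleteEvent. StartWorkers and the thread naming are assumptions for illustration; only numCalls, numCallsPerThread and entryHandler come from the snippet.

private void StartWorkers(int threadCount)
{
    // Armed before any thread can decrement it.
    numCalls = threadCount * this.numCallsPerThread;

    for (int t = 0; t < threadCount; t++)
    {
        var worker = new Thread(entryHandler) { Name = "eBayWorker-" + t };
        worker.Start();
    }
}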
public StatsService(DiscordSocketClient client, CommandHandler cmdHandler, IBotCredentials creds, NadekoBot nadeko, IDataCache cache, IHttpClientFactory factory)
{
    _log = LogManager.GetCurrentClassLogger();
    _client = client;
    _creds = creds;
    _redis = cache.Redis;
    _httpFactory = factory;

    _started = DateTime.UtcNow;
    _client.MessageReceived += _ => Task.FromResult(Interlocked.Increment(ref _messageCounter));
    cmdHandler.CommandExecuted += (_, e) => Task.FromResult(Interlocked.Increment(ref _commandsRan));

    _client.ChannelCreated += (c) =>
    {
        var _ = Task.Run(() =>
        {
            if (c is ITextChannel)
                Interlocked.Increment(ref _textChannels);
            else if (c is IVoiceChannel)
                Interlocked.Increment(ref _voiceChannels);
        });
        return Task.CompletedTask;
    };

    _client.ChannelDestroyed += (c) =>
    {
        var _ = Task.Run(() =>
        {
            if (c is ITextChannel)
                Interlocked.Decrement(ref _textChannels);
            else if (c is IVoiceChannel)
                Interlocked.Decrement(ref _voiceChannels);
        });
        return Task.CompletedTask;
    };

    _client.GuildAvailable += (g) =>
    {
        var _ = Task.Run(() =>
        {
            var tc = g.Channels.Count(cx => cx is ITextChannel);
            var vc = g.Channels.Count - tc;
            Interlocked.Add(ref _textChannels, tc);
            Interlocked.Add(ref _voiceChannels, vc);
        });
        return Task.CompletedTask;
    };

    _client.JoinedGuild += (g) =>
    {
        var _ = Task.Run(() =>
        {
            var tc = g.Channels.Count(cx => cx is ITextChannel);
            var vc = g.Channels.Count - tc;
            Interlocked.Add(ref _textChannels, tc);
            Interlocked.Add(ref _voiceChannels, vc);
        });
        return Task.CompletedTask;
    };

    _client.GuildUnavailable += (g) =>
    {
        var _ = Task.Run(() =>
        {
            var tc = g.Channels.Count(cx => cx is ITextChannel);
            var vc = g.Channels.Count - tc;
            Interlocked.Add(ref _textChannels, -tc);
            Interlocked.Add(ref _voiceChannels, -vc);
        });
        return Task.CompletedTask;
    };

    _client.LeftGuild += (g) =>
    {
        var _ = Task.Run(() =>
        {
            var tc = g.Channels.Count(cx => cx is ITextChannel);
            var vc = g.Channels.Count - tc;
            Interlocked.Add(ref _textChannels, -tc);
            Interlocked.Add(ref _voiceChannels, -vc);
        });
        return Task.CompletedTask;
    };

    if (_client.ShardId == 0)
    {
        _carbonitexTimer = new Timer(async (state) =>
        {
            if (string.IsNullOrWhiteSpace(_creds.CarbonKey))
                return;
            try
            {
                using (var http = _httpFactory.CreateClient())
                {
                    using (var content = new FormUrlEncodedContent(new Dictionary<string, string>
                    {
                        { "servercount", nadeko.GuildCount.ToString() },
                        { "key", _creds.CarbonKey }
                    }))
                    {
                        content.Headers.Clear();
                        content.Headers.Add("Content-Type", "application/x-www-form-urlencoded");

                        using (await http.PostAsync(new Uri("https://www.carbonitex.net/discord/data/botdata.php"), content).ConfigureAwait(false)) { }
                    }
                }
            }
            catch
            {
                // ignored
            }
        }, null, TimeSpan.FromHours(1), TimeSpan.FromHours(1));
    }

    _botlistTimer = new Timer(async (state) =>
    {
        if (string.IsNullOrWhiteSpace(_creds.BotListToken))
            return;
        try
        {
            using (var http = _httpFactory.CreateClient())
            {
                using (var content = new FormUrlEncodedContent(new Dictionary<string, string>
                {
                    { "shard_count", _creds.TotalShards.ToString() },
                    { "shard_id", client.ShardId.ToString() },
                    { "server_count", client.Guilds.Count().ToString() }
                }))
                {
                    content.Headers.Clear();
                    content.Headers.Add("Content-Type", "application/x-www-form-urlencoded");
                    http.DefaultRequestHeaders.Add("Authorization", _creds.BotListToken);

                    using (await http.PostAsync(new Uri($"https://discordbots.org/api/bots/{client.CurrentUser.Id}/stats"), content).ConfigureAwait(false)) { }
                }
            }
        }
        catch (Exception ex)
        {
            _log.Error(ex);
            // ignored
        }
    }, null, TimeSpan.FromMinutes(5), TimeSpan.FromHours(1));
}
public FirebirdAdo(CommonUtils util, string masterConnectionString, string[] slaveConnectionStrings, Func<DbConnection> connectionFactory)
    : base(DataType.Firebird, masterConnectionString, slaveConnectionStrings)
{
    base._util = util;

    if (connectionFactory != null)
    {
        var pool = new FreeSql.Internal.CommonProvider.DbConnectionPool(DataType.Firebird, connectionFactory);
        MasterPool = pool;
        _CreateCommandConnection = pool.TestConnection;
        return;
    }

    if (!string.IsNullOrEmpty(masterConnectionString))
        MasterPool = new FirebirdConnectionPool("主库", masterConnectionString, null, null);

    if (slaveConnectionStrings != null)
    {
        foreach (var slaveConnectionString in slaveConnectionStrings)
        {
            var slavePool = new FirebirdConnectionPool(
                $"从库{SlavePools.Count + 1}",
                slaveConnectionString,
                () => Interlocked.Decrement(ref slaveUnavailables),
                () => Interlocked.Increment(ref slaveUnavailables));
            SlavePools.Add(slavePool);
        }
    }
}