public virtual void OnMatch(long newMatchIndex, LeaderContext leaderContext)
{
    lock (this)
    {
        bool progress = newMatchIndex > _matchIndex;
        if (progress)
        {
            _matchIndex = newMatchIndex;
        }
        else
        {
            _log.warn("%s: match index not progressing. This should be transient.", StatusAsString());
        }

        switch (_mode)
        {
            case Mode.Mismatch:
                if (SendNextBatchAfterMatch(leaderContext))
                {
                    _log.info("%s: caught up after mismatch, moving to PIPELINE mode", StatusAsString());
                    _mode = Mode.Pipeline;
                }
                else
                {
                    _log.info("%s: starting catch up after mismatch, moving to CATCHUP mode", StatusAsString());
                    _mode = Mode.Catchup;
                }
                break;

            case Mode.Catchup:
                if (_matchIndex >= _lastSentIndex)
                {
                    if (SendNextBatchAfterMatch(leaderContext))
                    {
                        _log.info("%s: caught up, moving to PIPELINE mode", StatusAsString());
                        _mode = Mode.Pipeline;
                    }
                }
                break;

            case Mode.Pipeline:
                if (_matchIndex == _lastSentIndex)
                {
                    AbortTimeout();
                }
                else if (progress)
                {
                    ScheduleTimeout(_retryTimeMillis);
                }
                break;

            default:
                throw new System.InvalidOperationException("Unknown mode: " + _mode);
        }

        _lastLeaderContext = leaderContext;
    }
}
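// The OnMatch handler above drives a three-mode shipping state machine
// (MISMATCH -> CATCHUP -> PIPELINE). Below is a minimal, self-contained sketch
// of those transitions; the names ShipperMode/NextOnMatch and the boolean
// parameters are illustrative assumptions, not the real RaftLogShipper API.
using System;

enum ShipperMode { Mismatch, Catchup, Pipeline }

static class ShipperModeSketch
{
    // caughtUp stands in for SendNextBatchAfterMatch(...) returning true;
    // matchedLastSent stands in for _matchIndex >= _lastSentIndex.
    internal static ShipperMode NextOnMatch(ShipperMode mode, bool caughtUp, bool matchedLastSent)
    {
        switch (mode)
        {
            case ShipperMode.Mismatch:
                return caughtUp ? ShipperMode.Pipeline : ShipperMode.Catchup;
            case ShipperMode.Catchup:
                return (matchedLastSent && caughtUp) ? ShipperMode.Pipeline : ShipperMode.Catchup;
            case ShipperMode.Pipeline:
                return ShipperMode.Pipeline; // stays pipelined; only timeouts change
            default:
                throw new InvalidOperationException("Unknown mode: " + mode);
        }
    }
}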
public override void Init()
{
    _logger.info("Initiating metrics...");
    if (_metricsBuilt && _reporter.Empty)
    {
        _logger.warn("Several metrics were enabled but no exporting option was configured to report values to. " +
                     "Disabling kernel metrics extension.");
        _life.clear();
    }

    if (!_reporter.Empty && !_metricsBuilt)
    {
        _logger.warn("An exporting option was configured to report values to, but no metrics were enabled. " +
                     "Disabling kernel metrics extension.");
        _life.clear();
    }

    _life.init();
}
public override void LeftCluster(InstanceId instanceId, URI member)
{
    string name = instanceId.InstanceNameFromURI(member);
    _log.warn("Demoting member " + name + " because it left the cluster");

    // Suggest reelection for all roles of this node
    _election.demote(instanceId);
}
public override void LeftCluster(InstanceId instanceId, URI member)
{
    if (_heartbeatContext.isFailedBasedOnSuspicions(instanceId))
    {
        _log.warn("Instance " + instanceId + " (" + member + ") has left the cluster " +
                  "but is still treated as failed by HeartbeatContext");
        _heartbeatContext.serverLeftCluster(instanceId);
    }
}
public override void ChannelRead(ChannelHandlerContext ctx, object msg)
{
    if (_protocol.isExpecting(CatchupClientProtocol.State.MessageType))
    {
        sbyte byteValue = ((ByteBuf) msg).readByte();
        ResponseMessageType responseMessageType = from(byteValue);
        switch (responseMessageType.innerEnumValue)
        {
            case ResponseMessageType.InnerEnum.STORE_ID:
                _protocol.expect(CatchupClientProtocol.State.StoreId);
                break;
            case ResponseMessageType.InnerEnum.TX:
                _protocol.expect(CatchupClientProtocol.State.TxPullResponse);
                break;
            case ResponseMessageType.InnerEnum.FILE:
                _protocol.expect(CatchupClientProtocol.State.FileHeader);
                break;
            case ResponseMessageType.InnerEnum.STORE_COPY_FINISHED:
                _protocol.expect(CatchupClientProtocol.State.StoreCopyFinished);
                break;
            case ResponseMessageType.InnerEnum.CORE_SNAPSHOT:
                _protocol.expect(CatchupClientProtocol.State.CoreSnapshot);
                break;
            case ResponseMessageType.InnerEnum.TX_STREAM_FINISHED:
                _protocol.expect(CatchupClientProtocol.State.TxStreamFinished);
                break;
            case ResponseMessageType.InnerEnum.PREPARE_STORE_COPY_RESPONSE:
                _protocol.expect(CatchupClientProtocol.State.PrepareStoreCopyResponse);
                break;
            case ResponseMessageType.InnerEnum.INDEX_SNAPSHOT_RESPONSE:
                _protocol.expect(CatchupClientProtocol.State.IndexSnapshotResponse);
                break;
            default:
                _log.warn("No handler found for message type %s (%d)", responseMessageType.name(), byteValue);
                break;
        }

        ReferenceCountUtil.release(msg);
    }
    else
    {
        ctx.fireChannelRead(msg);
    }
}
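// Both catchup handlers in this listing follow the same tag-dispatch pattern:
// the first byte of a frame selects the decoder state for the payload that
// follows. A hedged, self-contained sketch of that pattern; the tag values and
// state names here are assumptions, not the actual catchup wire protocol.
using System.Collections.Generic;

enum DecoderState { MessageType, StoreId, TxPullResponse, FileHeader }

static class TagDispatchSketch
{
    private static readonly Dictionary<byte, DecoderState> Table = new Dictionary<byte, DecoderState>
    {
        { 1, DecoderState.StoreId },
        { 2, DecoderState.TxPullResponse },
        { 3, DecoderState.FileHeader }
    };

    // Returns the next decoder state for a tag byte, or null when no handler is
    // registered -- the case the handlers above log a warning for.
    internal static DecoderState? NextState(byte tag)
    {
        return Table.TryGetValue(tag, out DecoderState state) ? state : (DecoderState?) null;
    }
}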
public override void Close()
{
    long end = DateTimeHelper.CurrentUnixTimeMillis();
    long duration = end - _start;
    if (_outcome == null)
    {
        _log.debug(format("Finished: %s in %d ms", _tag, duration));
    }
    else
    {
        _log.warn(format("%s: %s in %d ms", _outcome, _tag, duration));
    }
}
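// Close() above implements a measure-on-close timing pattern: record a start
// timestamp, then log the elapsed time at debug on success or at warn when an
// outcome (a failure description) was recorded. A minimal sketch of the same
// idea as an IDisposable; TimedTag and its log delegate are hypothetical names.
using System;
using System.Diagnostics;

sealed class TimedTag : IDisposable
{
    private readonly string _tag;
    private readonly Action<string> _log;
    private readonly Stopwatch _watch = Stopwatch.StartNew();

    internal string Outcome { get; set; } // null means the operation succeeded

    internal TimedTag(string tag, Action<string> log)
    {
        _tag = tag;
        _log = log;
    }

    public void Dispose()
    {
        _watch.Stop();
        _log(Outcome == null
                ? $"Finished: {_tag} in {_watch.ElapsedMilliseconds} ms"
                : $"{Outcome}: {_tag} in {_watch.ElapsedMilliseconds} ms");
    }
}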
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: private void waitForUpToDateStore() throws InterruptedException, java.util.concurrent.ExecutionException
//JAVA TO C# CONVERTER NOTE: Members cannot have the same name as their enclosing type:
private void WaitForUpToDateStoreConflict()
{
    bool upToDate = false;
    do
    {
        try
        {
            upToDate = _catchupProcess.upToDateFuture().get(1, MINUTES);
        }
        catch (TimeoutException)
        {
            _log.warn("Waiting for up-to-date store. State: " + _catchupProcess.describeState());
        }
    } while (!upToDate);
}
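// The wait loop above retries indefinitely and uses the one-minute timeout only
// as a logging heartbeat while the store catches up. The same shape in plain
// C# Tasks; the Task<bool>-returning delegate is a stand-in for upToDateFuture().
using System;
using System.Threading.Tasks;

static class StoreWaitSketch
{
    internal static void WaitForUpToDateStore(Func<Task<bool>> upToDateFuture, Action<string> warn)
    {
        bool upToDate = false;
        do
        {
            Task<bool> future = upToDateFuture();
            if (future.Wait(TimeSpan.FromMinutes(1)))
            {
                upToDate = future.Result;
            }
            else
            {
                warn("Waiting for up-to-date store."); // timed out; log and retry
            }
        } while (!upToDate);
    }
}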
//JAVA TO C# CONVERTER WARNING: Method 'throws' clauses are not available in C#:
//ORIGINAL LINE: protected void channelRead0(io.netty.channel.ChannelHandlerContext ctx, final TxPullRequest msg) throws Exception
//JAVA TO C# CONVERTER WARNING: 'final' parameters are ignored unless the option to convert to C# 7.2 'in' parameters is selected:
protected internal override void ChannelRead0(ChannelHandlerContext ctx, TxPullRequest msg)
{
    _monitor.increment();

    if (msg.PreviousTxId() <= 0)
    {
        _log.error("Illegal tx pull request");
        EndInteraction(ctx, E_INVALID_REQUEST, -1);
        return;
    }

    StoreId localStoreId = _storeIdSupplier.get();
    StoreId expectedStoreId = msg.ExpectedStoreId();

    long firstTxId = msg.PreviousTxId() + 1;

    /*
     * This is the minimum transaction id we must send to consider our streaming operation successful. The kernel can
     * concurrently prune even future transactions while iterating and the cursor will silently fail on iteration, so
     * we need to add our own protection for this reason and also as a generally important sanity check for the
     * fulfillment of the consistent recovery contract which requires us to stream transactions at least as far as the
     * time when the file copy operation completed.
     */
    long txIdPromise = _transactionIdStore.LastCommittedTransactionId;
    IOCursor<CommittedTransactionRepresentation> txCursor = GetCursor(txIdPromise, ctx, firstTxId, localStoreId, expectedStoreId);

    if (txCursor != null)
    {
        ChunkedTransactionStream txStream = new ChunkedTransactionStream(_log, localStoreId, firstTxId, txIdPromise, txCursor, _protocol);
        // chunked transaction stream ends the interaction internally and closes the cursor
        ctx.writeAndFlush(txStream).addListener(f =>
        {
            if (_log.DebugEnabled || !f.Success)
            {
                string message = format("Streamed transactions [%d--%d] to %s", firstTxId, txStream.LastTxId(), ctx.channel().remoteAddress());
                if (f.Success)
                {
                    _log.debug(message);
                }
                else
                {
                    _log.warn(message, f.cause());
                }
            }
        });
    }
}
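// The txIdPromise comment above encodes a completion contract: a stream only
// counts as successful if it reached at least the last committed transaction id
// observed when the request arrived, because concurrent pruning can make the
// cursor end early and silently. A tiny sketch of that post-condition check;
// the names are illustrative, not the ChunkedTransactionStream API.
static class TxStreamContractSketch
{
    // lastStreamedTxId is the highest tx id actually written to the channel.
    internal static bool FulfilledPromise(long lastStreamedTxId, long txIdPromise)
    {
        return lastStreamedTxId >= txIdPromise;
    }
}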
public override void ChannelRead(ChannelHandlerContext ctx, object msg)
{
    if (_protocol.isExpecting(CatchupServerProtocol.State.MessageType))
    {
        RequestMessageType requestMessageType = RequestMessageType.from(((ByteBuf) msg).readByte());

        if (requestMessageType.Equals(RequestMessageType.TxPullRequest))
        {
            _protocol.expect(CatchupServerProtocol.State.TxPull);
        }
        else if (requestMessageType.Equals(RequestMessageType.StoreId))
        {
            _protocol.expect(CatchupServerProtocol.State.GetStoreId);
        }
        else if (requestMessageType.Equals(RequestMessageType.CoreSnapshot))
        {
            _protocol.expect(CatchupServerProtocol.State.GetCoreSnapshot);
        }
        else if (requestMessageType.Equals(RequestMessageType.PrepareStoreCopy))
        {
            _protocol.expect(CatchupServerProtocol.State.PrepareStoreCopy);
        }
        else if (requestMessageType.Equals(RequestMessageType.StoreFile))
        {
            _protocol.expect(CatchupServerProtocol.State.GetStoreFile);
        }
        else if (requestMessageType.Equals(RequestMessageType.IndexSnapshot))
        {
            _protocol.expect(CatchupServerProtocol.State.GetIndexSnapshot);
        }
        else
        {
            _log.warn("No handler found for message type %s", requestMessageType);
        }

        ReferenceCountUtil.release(msg);
    }
    else
    {
        ctx.fireChannelRead(msg);
    }
}
public override void Send(MemberId to, RaftMessages_RaftMessage message, bool block)
{
    Optional<ClusterId> clusterId = _clusterIdentity.get();
    if (!clusterId.Present)
    {
        _log.warn("Attempting to send a message before being bound to a cluster");
        return;
    }

    Optional<CoreServerInfo> coreServerInfo = _coreTopologyService.localCoreServers().find(to);
    if (coreServerInfo.Present)
    {
        _outbound.send(coreServerInfo.get().RaftServer,
                RaftMessages_ClusterIdAwareMessage.of(clusterId.get(), message), block);
    }
    else
    {
        _unknownAddressMonitor.logAttemptToSendToMemberWithNoKnownAddress(to);
    }
}
private void TryConnect()
{
    lock (this)
    {
        if (_disposed)
        {
            return;
        }
        else if (_fChannel != null && !_fChannel.Done)
        {
            return;
        }

        _fChannel = _bootstrap.connect(_destination.socketAddress());
        _channel = _fChannel.channel();

        _fChannel.addListener((ChannelFuture f) =>
        {
            if (!f.Success)
            {
                long millis = _connectionBackoff.Millis;
                _cappedLogger.warn("Failed to connect to: " + _destination.socketAddress() + ". Retrying in " + millis + " ms");
                f.channel().eventLoop().schedule(this.tryConnect, millis, MILLISECONDS);
                _connectionBackoff.increment();
            }
            else
            {
                _log.info("Connected: " + f.channel());
                f.channel().closeFuture().addListener(closed =>
                {
                    _log.warn(string.Format("Lost connection to: {0} ({1})", _destination, _channel.remoteAddress()));
                    _connectionBackoff = _connectionBackoffStrategy.newTimeout();
                    f.channel().eventLoop().schedule(this.tryConnect, 0, MILLISECONDS);
                });
            }
        });
    }
}
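// The reconnect loop above leans on a backoff object that grows after each
// failure and is replaced with a fresh timeout on success. A minimal sketch of
// a capped exponential backoff with that shape; the class and member names are
// assumptions, not the real timeout strategy used by the code above.
using System;

sealed class CappedExponentialBackoff
{
    private readonly long _initialMillis;
    private readonly long _maxMillis;
    private long _currentMillis;

    internal CappedExponentialBackoff(long initialMillis, long maxMillis)
    {
        _initialMillis = initialMillis;
        _maxMillis = maxMillis;
        _currentMillis = initialMillis;
    }

    internal long Millis => _currentMillis; // delay before the next attempt

    // Called after a failed connection attempt: double the delay, up to the cap.
    internal void Increment() => _currentMillis = Math.Min(_currentMillis * 2, _maxMillis);

    // Equivalent to asking the strategy for a fresh timeout after a success.
    internal void Reset() => _currentMillis = _initialMillis;
}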
public override void Failed(InstanceId server)
{
    // Suggest reelection for all roles of this node
    _log.warn("Instance " + server + " is being demoted since it failed");
    _election.demote(server);
}
public override void Start()
{
    _vmPauseMonitor = new VmPauseMonitor(
            _config.get(GraphDatabaseSettings.vm_pause_monitor_measurement_duration),
            _config.get(GraphDatabaseSettings.vm_pause_monitor_stall_alert_threshold),
            _log, _jobScheduler,
            vmPauseInfo => _log.warn("Detected VM stop-the-world pause: %s", vmPauseInfo));
    _vmPauseMonitor.start();
}
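// The idea underlying a VM pause monitor is sleep-overshoot measurement: sleep
// for a fixed interval and treat any large overshoot as a stop-the-world pause.
// A self-contained sketch of that technique; the method and parameter names are
// illustrative, and the real monitor above is driven via the job scheduler.
using System;
using System.Diagnostics;
using System.Threading;

static class PauseMonitorSketch
{
    internal static void Run(TimeSpan interval, TimeSpan alertThreshold, Action<TimeSpan> onPause)
    {
        var watch = new Stopwatch();
        while (true)
        {
            watch.Restart();
            Thread.Sleep(interval);
            TimeSpan overshoot = watch.Elapsed - interval;
            if (overshoot > alertThreshold)
            {
                onPause(overshoot); // e.g. pause => log.warn("Detected VM stop-the-world pause: %s", pause)
            }
        }
    }
}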
public override void TransactionalLogRecordReadFailure(long[] transactionOffsets, int transactionIndex, long logVersion)
{
    _log.warn(transactionIndex > 0
            ? format("Failed to read transaction log version %d. Last valid transaction start offset is: %d.",
                    logVersion, transactionOffsets[transactionIndex - 1])
            : format("Failed to read first transaction of log version %d.", logVersion));
}