public IDisposable Initialize(DatabaseSmugglerOptions options, SmugglerResult result, out long buildVersion)
{
    _currentTypeIndex = 0;

    if (options.OperateOnTypes.HasFlag(DatabaseItemType.Documents) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.RevisionDocuments) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Tombstones) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Conflicts) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Counters))
    {
        _returnContext = _database.DocumentsStorage.ContextPool.AllocateOperationContext(out _context);
        _disposeTransaction = _context.OpenReadTransaction();
        LastEtag = DocumentsStorage.ReadLastEtag(_disposeTransaction.InnerTransaction);
    }

    if (options.OperateOnTypes.HasFlag(DatabaseItemType.CompareExchange) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Identities))
    {
        _returnServerContext = _database.ServerStore.ContextPool.AllocateOperationContext(out _serverContext);
        _disposeServerTransaction = _serverContext.OpenReadTransaction();
    }

    buildVersion = ServerVersion.Build;

    return new DisposableAction(() =>
    {
        _disposeServerTransaction?.Dispose();
        _returnServerContext?.Dispose();

        _disposeTransaction?.Dispose();
        _returnContext?.Dispose();
    });
}
internal void Materialize()
{
    if (_includedKeys == null || _includedKeys.Count == 0)
        return;

    if (_serverContext == null)
    {
        if (_throwWhenServerContextIsAllocated)
            throw new InvalidOperationException("Cannot allocate new server context during materialization of compare exchange includes.");

        _releaseContext = _database.ServerStore.ContextPool.AllocateOperationContext(out _serverContext);
        _serverContext.OpenReadTransaction();
    }

    foreach (var includedKey in _includedKeys)
    {
        if (string.IsNullOrEmpty(includedKey))
            continue;

        var value = _database.ServerStore.Cluster.GetCompareExchangeValue(_serverContext, CompareExchangeKey.GetStorageKey(_database.Name, includedKey));

        if (Results == null)
            Results = new Dictionary<string, CompareExchangeValue<BlittableJsonReaderObject>>(StringComparer.OrdinalIgnoreCase);

        Results.Add(includedKey, new CompareExchangeValue<BlittableJsonReaderObject>(includedKey, value.Index, value.Value));
    }
}
public IDisposable OpenReadTransaction()
{
    var documentsTx = Documents.OpenReadTransaction();

    RavenTransaction serverTx = null;
    if (Server != null)
        serverTx = Server.OpenReadTransaction();

    return new DisposeTransactions(documentsTx, serverTx);
}
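
A minimal caller-side sketch of how a combined read-transaction helper like the one above might be consumed. Only the Documents/Server properties and OpenReadTransaction() come from the snippet; queryContext and ReadFromBothStores are hypothetical names used purely for illustration.

// Illustrative sketch only: queryContext is assumed to be an instance of the type
// defining OpenReadTransaction() above; ReadFromBothStores is a hypothetical helper.
using (queryContext.OpenReadTransaction())
{
    // The documents transaction and, when Server != null, the server-wide transaction
    // stay open for this block and are disposed together via DisposeTransactions.
    ReadFromBothStores(queryContext.Documents, queryContext.Server);
}
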
private CertificateDefinition ReadCertificateFromCluster(TransactionOperationContext ctx, string key)
{
    using (ctx.OpenReadTransaction())
    {
        var certificate = ServerStore.Cluster.Read(ctx, key);
        if (certificate == null)
            return null;

        return JsonDeserializationServer.CertificateDefinition(certificate);
    }
}
private bool CanHandleLogDivergence(TransactionOperationContext context, LogLengthNegotiation negotiation, ref long midpointIndex, ref long midpointTerm, ref long minIndex, ref long maxIndex)
{
    if (_engine.Log.IsInfoEnabled)
    {
        _engine.Log.Info(
            $"{ToString()}: Our appended entries has been diverged, same index with different terms. " +
            $"My index/term {midpointIndex}/{midpointTerm}, while yours is {negotiation.PrevLogIndex}/{negotiation.PrevLogTerm}.");
    }

    using (context.OpenReadTransaction())
    {
        while (midpointTerm > negotiation.PrevLogTerm && midpointIndex > 0)
        {
            // try to find any log in the previous term
            midpointIndex--;
            midpointTerm = _engine.GetTermFor(context, midpointIndex) ?? 0;
        }

        if (midpointTerm == 0 || midpointIndex == 0)
            return false;

        // start the binary search again with those boundaries
        minIndex = Math.Min(midpointIndex, _engine.GetFirstEntryIndex(context));
        maxIndex = midpointIndex;

        midpointIndex = (minIndex + maxIndex) / 2;
        midpointTerm = _engine.GetTermForKnownExisting(context, midpointIndex);

        if (maxIndex < minIndex)
        {
            if (_engine.Log.IsInfoEnabled)
            {
                _engine.Log.Info($"{ToString()}: Got minIndex: {minIndex} bigger than maxIndex: {maxIndex} will request the entire snapshot. " +
                                 $"midpointIndex: {midpointIndex}, midpointTerm: {midpointTerm}.");
            }

            Debug.Assert(false, "This is a safeguard against any potential bug here, so in worst case we request the entire snapshot");
            return false;
        }

        if (minIndex == maxIndex && midpointTerm != negotiation.PrevLogTerm)
            return false;
    }

    return true;
}
public override void VerifyCanExecuteCommand(ServerStore store, TransactionOperationContext context, bool isClusterAdmin)
{
    using (context.OpenReadTransaction())
    {
        var read = store.Cluster.Read(context, Name);
        if (read == null)
            return;

        var definition = JsonDeserializationServer.CertificateDefinition(read);
        if (definition.SecurityClearance != SecurityClearance.ClusterAdmin)
            return;
    }

    AssertClusterAdmin(isClusterAdmin);
}
public Task<SmugglerInitializeResult> InitializeAsync(DatabaseSmugglerOptionsServerSide options, SmugglerResult result)
{
    _currentTypeIndex = 0;

    if (options.OperateOnTypes.HasFlag(DatabaseItemType.Documents) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.RevisionDocuments) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Tombstones) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Conflicts) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.CounterGroups) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.TimeSeries))
    {
        _returnContext = _database.DocumentsStorage.ContextPool.AllocateOperationContext(out _context);
        _disposeTransaction = _context.OpenReadTransaction();

        LastEtag = DocumentsStorage.ReadLastEtag(_disposeTransaction.InnerTransaction);
        LastDatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(_disposeTransaction.InnerTransaction);
    }

    if (options.OperateOnTypes.HasFlag(DatabaseItemType.CompareExchange) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Identities) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.CompareExchangeTombstones) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Subscriptions) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.ReplicationHubCertificates))
    {
        _returnServerContext = _database.ServerStore.ContextPool.AllocateOperationContext(out _serverContext);
        _disposeServerTransaction = _serverContext.OpenReadTransaction();

        using (var rawRecord = _database.ServerStore.Cluster.ReadRawDatabaseRecord(_serverContext, _database.Name))
        {
            LastRaftIndex = rawRecord.EtagForBackup;
        }
    }

    var disposable = new DisposableAction(() =>
    {
        _disposeServerTransaction?.Dispose();
        _returnServerContext?.Dispose();

        _disposeTransaction?.Dispose();
        _returnContext?.Dispose();
    });

    return Task.FromResult(new SmugglerInitializeResult(disposable, ServerVersion.Build));
}
private (List<Slice>, string) ExtractIdsFromQuery(IndexQueryServerSide query, DocumentsOperationContext context)
{
    if (string.IsNullOrWhiteSpace(query.Query))
        return (null, null);

    if (query.Metadata.Query.Where == null)
        return (null, null);

    if (query.Metadata.IndexFieldNames.Contains(QueryFieldName.DocumentId) == false)
        return (null, null);

    IDisposable releaseServerContext = null;
    IDisposable closeServerTransaction = null;
    TransactionOperationContext serverContext = null;

    try
    {
        if (query.Metadata.HasCmpXchg)
        {
            releaseServerContext = context.DocumentDatabase.ServerStore.ContextPool.AllocateOperationContext(out serverContext);
            closeServerTransaction = serverContext.OpenReadTransaction();
        }

        using (closeServerTransaction)
        {
            var idsRetriever = new RetrieveDocumentIdsVisitor(serverContext, context, query.Metadata, _context.Allocator);

            idsRetriever.Visit(query.Metadata.Query.Where, query.QueryParameters);

            return (idsRetriever.Ids?.OrderBy(x => x, SliceComparer.Instance).ToList(), idsRetriever.StartsWith);
        }
    }
    finally
    {
        releaseServerContext?.Dispose();
    }
}
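
The conditional server-context dance at the heart of the method above (and repeated in the MoreLikeThis and GetLuceneQuery examples further down) reduces to the following sketch; BuildResultThatMayReadCompareExchange is a hypothetical stand-in for calls such as QueryBuilder.BuildQuery.

// Sketch of the allocate-only-when-needed pattern used above; names marked as
// hypothetical are placeholders, not real RavenDB APIs.
IDisposable releaseServerContext = null;
IDisposable closeServerTransaction = null;
TransactionOperationContext serverContext = null;
try
{
    // Pay for a server-store context and read transaction only when the query
    // actually references compare-exchange (cmpxchg) values.
    if (query.Metadata.HasCmpXchg)
    {
        releaseServerContext = context.DocumentDatabase.ServerStore.ContextPool.AllocateOperationContext(out serverContext);
        closeServerTransaction = serverContext.OpenReadTransaction();
    }

    using (closeServerTransaction) // using on a null IDisposable is a legal no-op
        return BuildResultThatMayReadCompareExchange(serverContext, context); // hypothetical helper
}
finally
{
    releaseServerContext?.Dispose();
}
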
public IDisposable Initialize(DatabaseSmugglerOptions options, SmugglerResult result, out long buildVersion)
{
    _currentTypeIndex = 0;

    _returnContext = _database.DocumentsStorage.ContextPool.AllocateOperationContext(out _context);
    _disposeTransaction = _context.OpenReadTransaction();

    _returnServerContext = _database.ServerStore.ContextPool.AllocateOperationContext(out _serverContext);
    _disposeServerTransaction = _serverContext.OpenReadTransaction();

    buildVersion = ServerVersion.Build;

    return new DisposableAction(() =>
    {
        _disposeServerTransaction.Dispose();
        _returnServerContext.Dispose();

        _disposeTransaction.Dispose();
        _returnContext.Dispose();
    });
}
protected async Task WaitForIndexToBeApplied(TransactionOperationContext context, long index)
{
    DatabaseTopology dbTopology;
    using (context.OpenReadTransaction())
    {
        dbTopology = ServerStore.Cluster.ReadDatabaseTopology(context, Database.Name);
    }

    if (dbTopology.RelevantFor(ServerStore.NodeTag))
    {
        var db = await ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(Database.Name);
        await db.RachisLogIndexNotifications.WaitForIndexNotification(index, ServerStore.Engine.OperationTimeout);
    }
    else
    {
        await ServerStore.Cluster.WaitForIndexNotification(index);
    }
}
public IDisposable Initialize(DatabaseSmugglerOptions options, SmugglerResult result, out long buildVersion)
{
    _currentTypeIndex = 0;

    if (options.OperateOnTypes.HasFlag(DatabaseItemType.Documents) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.RevisionDocuments) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Tombstones) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Conflicts) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.CounterGroups))
    {
        _returnContext = _database.DocumentsStorage.ContextPool.AllocateOperationContext(out _context);
        _disposeTransaction = _context.OpenReadTransaction();

        LastEtag = DocumentsStorage.ReadLastEtag(_disposeTransaction.InnerTransaction);
        LastDatabaseChangeVector = DocumentsStorage.GetDatabaseChangeVector(_disposeTransaction.InnerTransaction);
    }

    if (options.OperateOnTypes.HasFlag(DatabaseItemType.CompareExchange) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.Identities) ||
        options.OperateOnTypes.HasFlag(DatabaseItemType.CompareExchangeTombstones))
    {
        _returnServerContext = _database.ServerStore.ContextPool.AllocateOperationContext(out _serverContext);
        _disposeServerTransaction = _serverContext.OpenReadTransaction();

        using (var rawRecord = _database.ServerStore.Cluster.ReadRawDatabaseRecord(_serverContext, _database.Name))
        {
            LastRaftIndex = rawRecord.GetEtagForBackup();
        }
    }

    buildVersion = ServerVersion.Build;

    return new DisposableAction(() =>
    {
        _disposeServerTransaction?.Dispose();
        _returnServerContext?.Dispose();

        _disposeTransaction?.Dispose();
        _returnContext?.Dispose();
    });
}
private async Task UpdateDatabaseRecord(TransactionOperationContext context, Action<DatabaseRecord> action, string raftRequestId)
{
    if (context == null)
        throw new ArgumentNullException(nameof(context));

    if (action == null)
        throw new ArgumentNullException(nameof(action));

    using (context.OpenReadTransaction())
    {
        var record = ServerStore.Cluster.ReadDatabase(context, Database.Name, out long index);

        action(record);

        var result = await ServerStore.WriteDatabaseRecordAsync(Database.Name, record, index, raftRequestId);
        await Database.RachisLogIndexNotifications.WaitForIndexNotification(result.Index, ServerStore.Engine.OperationTimeout);
    }
}
public IEnumerable<Document> MoreLikeThis(
    IndexQueryServerSide query,
    IQueryResultRetriever retriever,
    DocumentsOperationContext context,
    CancellationToken token)
{
    IDisposable releaseServerContext = null;
    IDisposable closeServerTransaction = null;
    TransactionOperationContext serverContext = null;

    MoreLikeThisQuery moreLikeThisQuery;

    try
    {
        if (query.Metadata.HasCmpXchg)
        {
            releaseServerContext = context.DocumentDatabase.ServerStore.ContextPool.AllocateOperationContext(out serverContext);
            closeServerTransaction = serverContext.OpenReadTransaction();
        }

        using (closeServerTransaction)
            moreLikeThisQuery = QueryBuilder.BuildMoreLikeThisQuery(serverContext, context, query.Metadata, query.Metadata.Query.Where, query.QueryParameters, _analyzer, _queryBuilderFactories);
    }
    finally
    {
        releaseServerContext?.Dispose();
    }

    var options = moreLikeThisQuery.Options != null
        ? JsonDeserializationServer.MoreLikeThisOptions(moreLikeThisQuery.Options)
        : MoreLikeThisOptions.Default;

    HashSet<string> stopWords = null;
    if (string.IsNullOrWhiteSpace(options.StopWordsDocumentId) == false)
    {
        var stopWordsDoc = context.DocumentDatabase.DocumentsStorage.Get(context, options.StopWordsDocumentId);
        if (stopWordsDoc == null)
            throw new InvalidOperationException($"Stop words document {options.StopWordsDocumentId} could not be found");

        if (stopWordsDoc.Data.TryGet(nameof(MoreLikeThisStopWords.StopWords), out BlittableJsonReaderArray value) && value != null)
        {
            stopWords = new HashSet<string>(StringComparer.OrdinalIgnoreCase);

            for (var i = 0; i < value.Length; i++)
                stopWords.Add(value.GetStringByIndex(i));
        }
    }

    var ir = _searcher.IndexReader;
    var mlt = new RavenMoreLikeThis(ir, options, _state);

    int? baseDocId = null;

    if (moreLikeThisQuery.BaseDocument == null)
    {
        var td = _searcher.Search(moreLikeThisQuery.BaseDocumentQuery, 1, _state); // get the current Lucene docid for the given RavenDB doc ID

        if (td.ScoreDocs.Length == 0)
            throw new InvalidOperationException("Given filtering expression did not yield any documents that could be used as a base of comparison");

        baseDocId = td.ScoreDocs[0].Doc;
    }

    if (stopWords != null)
        mlt.SetStopWords(stopWords);

    string[] fieldNames;
    if (options.Fields != null && options.Fields.Length > 0)
        fieldNames = options.Fields;
    else
        fieldNames = ir.GetFieldNames(IndexReader.FieldOption.INDEXED)
            .Where(x => x != Constants.Documents.Indexing.Fields.DocumentIdFieldName && x != Constants.Documents.Indexing.Fields.ReduceKeyHashFieldName)
            .ToArray();

    mlt.SetFieldNames(fieldNames);
    mlt.Analyzer = _analyzer;

    var pageSize = GetPageSize(_searcher, query.PageSize);

    Query mltQuery;
    if (baseDocId.HasValue)
    {
        mltQuery = mlt.Like(baseDocId.Value);
    }
    else
    {
        using (var blittableJson = ParseJsonStringIntoBlittable(moreLikeThisQuery.BaseDocument, context))
            mltQuery = mlt.Like(blittableJson);
    }

    var tsdc = TopScoreDocCollector.Create(pageSize, true);

    if (moreLikeThisQuery.FilterQuery != null && moreLikeThisQuery.FilterQuery is MatchAllDocsQuery == false)
    {
        mltQuery = new BooleanQuery
        {
            { mltQuery, Occur.MUST },
            { moreLikeThisQuery.FilterQuery, Occur.MUST }
        };
    }

    _searcher.Search(mltQuery, tsdc, _state);
    var hits = tsdc.TopDocs().ScoreDocs;

    var ids = new HashSet<string>(StringComparer.OrdinalIgnoreCase);

    foreach (var hit in hits)
    {
        if (hit.Doc == baseDocId)
            continue;

        var doc = _searcher.Doc(hit.Doc, _state);
        var id = doc.Get(Constants.Documents.Indexing.Fields.DocumentIdFieldName, _state) ??
                 doc.Get(Constants.Documents.Indexing.Fields.ReduceKeyHashFieldName, _state);

        if (id == null)
            continue;

        if (ids.Add(id) == false)
            continue;

        yield return retriever.Get(doc, hit.Score, _state);
    }
}
private void ReplicateToDestination()
{
    try
    {
        var connectionInfo = GetTcpInfo();

        using (_tcpClient = new TcpClient())
        {
            ConnectSocket(connectionInfo, _tcpClient);

            using (_database.DocumentsStorage.ContextPool.AllocateOperationContext(out _documentsContext))
            using (_database.ConfigurationStorage.ContextPool.AllocateOperationContext(out _configurationContext))
            using (var stream = _tcpClient.GetStream())
            {
                var documentSender = new ReplicationDocumentSender(stream, this, _log);
                var indexAndTransformerSender = new ReplicationIndexTransformerSender(stream, this, _log);

                using (_writer = new BlittableJsonTextWriter(_documentsContext, stream))
                using (_parser = _documentsContext.ParseMultiFrom(stream))
                {
                    //send initial connection information
                    _documentsContext.Write(_writer, new DynamicJsonValue
                    {
                        [nameof(TcpConnectionHeaderMessage.DatabaseName)] = _destination.Database,
                        [nameof(TcpConnectionHeaderMessage.Operation)] = TcpConnectionHeaderMessage.OperationTypes.Replication.ToString(),
                    });

                    //start request/response for fetching last etag
                    _documentsContext.Write(_writer, new DynamicJsonValue
                    {
                        ["Type"] = "GetLastEtag",
                        ["SourceDatabaseId"] = _database.DbId.ToString(),
                        ["SourceDatabaseName"] = _database.Name,
                        ["SourceUrl"] = _database.Configuration.Core.ServerUrl,
                        ["MachineName"] = Environment.MachineName,
                    });
                    _writer.Flush();

                    //handle initial response to last etag and related state
                    try
                    {
                        using (_configurationContext.OpenReadTransaction())
                        using (_documentsContext.OpenReadTransaction())
                        {
                            var response = HandleServerResponse();
                            if (response.Item1 == ReplicationMessageReply.ReplyType.Error)
                            {
                                if (response.Item2.Contains("DatabaseDoesNotExistsException"))
                                    throw new DatabaseDoesNotExistsException();

                                throw new InvalidOperationException(response.Item2);
                            }
                        }
                    }
                    catch (DatabaseDoesNotExistsException e)
                    {
                        var msg = $"Failed to parse initial server replication response, because there is no database named {_database.Name} on the other end. " +
                                  "In order for the replication to work, a database with the same name needs to be created at the destination";
                        if (_log.IsInfoEnabled)
                            _log.Info(msg, e);

                        using (var txw = _configurationContext.OpenWriteTransaction())
                        {
                            _database.Alerts.AddAlert(new Alert
                            {
                                Key = FromToString,
                                Type = AlertType.Replication,
                                Message = msg,
                                CreatedAt = DateTime.UtcNow,
                                Severity = AlertSeverity.Warning
                            }, _configurationContext, txw);

                            txw.Commit();
                        }

                        throw;
                    }
                    catch (Exception e)
                    {
                        var msg = $"Failed to parse initial server response. This is definitely not supposed to happen. Exception thrown: {e}";
                        if (_log.IsInfoEnabled)
                            _log.Info(msg, e);

                        using (var txw = _configurationContext.OpenWriteTransaction())
                        {
                            _database.Alerts.AddAlert(new Alert
                            {
                                Key = FromToString,
                                Type = AlertType.Replication,
                                Message = msg,
                                CreatedAt = DateTime.UtcNow,
                                Severity = AlertSeverity.Error
                            }, _configurationContext, txw);

                            txw.Commit();
                        }

                        throw;
                    }

                    while (_cts.IsCancellationRequested == false)
                    {
                        _documentsContext.ResetAndRenew();

                        long currentEtag;
                        Debug.Assert(_database.IndexMetadataPersistence.IsInitialized);

                        using (_configurationContext.OpenReadTransaction())
                            currentEtag = _database.IndexMetadataPersistence.ReadLastEtag(_configurationContext.Transaction.InnerTransaction);

                        if (_destination.SkipIndexReplication == false && currentEtag != indexAndTransformerSender.LastEtag)
                            indexAndTransformerSender.ExecuteReplicationOnce();

                        var sp = Stopwatch.StartNew();
                        while (documentSender.ExecuteReplicationOnce())
                        {
                            if (sp.ElapsedMilliseconds > 60 * 1000)
                            {
                                _waitForChanges.Set();
                                break;
                            }
                        }

                        //if this returns false, this means either timeout or canceled token is activated
                        while (WaitForChanges(_minimalHeartbeatInterval, _cts.Token) == false)
                        {
                            _configurationContext.ResetAndRenew();
                            _documentsContext.ResetAndRenew();

                            using (_documentsContext.OpenReadTransaction())
                            using (_configurationContext.OpenReadTransaction())
                            {
                                SendHeartbeat();
                            }
                        }
                        _waitForChanges.Reset();
                    }
                }
            }
        }
    }
    catch (OperationCanceledException)
    {
        if (_log.IsInfoEnabled)
            _log.Info($"Operation canceled on replication thread ({FromToString}). Stopped the thread.");
    }
    catch (IOException e)
    {
        if (_log.IsInfoEnabled)
        {
            if (e.InnerException is SocketException)
                _log.Info($"SocketException was thrown from the connection to remote node ({FromToString}). This might mean that the remote node is done or there is a network issue.", e);
            else
                _log.Info($"IOException was thrown from the connection to remote node ({FromToString}).", e);
        }
    }
    catch (Exception e)
    {
        if (_log.IsInfoEnabled)
            _log.Info($"Unexpected exception occurred on replication thread ({FromToString}). Replication stopped (will be retried later).", e);

        Failed?.Invoke(this, e);
    }
}
private void NegotiateMatchEntryWithLeaderAndApplyEntries(TransactionOperationContext context, RemoteConnection connection, LogLengthNegotiation negotiation)
{
    long minIndex;
    long maxIndex;
    long midpointTerm;
    long midpointIndex;

    using (context.OpenReadTransaction())
    {
        minIndex = _engine.GetFirstEntryIndex(context);

        if (minIndex == 0) // no entries at all
        {
            connection.Send(context, new LogLengthNegotiationResponse
            {
                Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
                Message = "No entries at all here, give me everything from the start",
                CurrentTerm = _term,
                LastLogIndex = 0,
            });

            return; // leader will know where to start from here
        }

        maxIndex = Math.Min(
            _engine.GetLastEntryIndex(context), // max
            negotiation.PrevLogIndex
        );

        midpointIndex = (maxIndex + minIndex) / 2;
        midpointTerm = _engine.GetTermForKnownExisting(context, midpointIndex);
    }

    while ((midpointTerm == negotiation.PrevLogTerm && midpointIndex == negotiation.PrevLogIndex) == false)
    {
        _engine.Timeout.Defer(_connection.Source);
        _engine.ValidateTerm(_term);

        if (midpointIndex == negotiation.PrevLogIndex && midpointTerm != negotiation.PrevLogTerm)
        {
            // our appended entries has been diverged, same index with different terms.
            var msg = $"Our appended entries has been diverged, same index with different terms. " +
                      $"My index/term {midpointIndex}/{midpointTerm}, while yours is {negotiation.PrevLogIndex}/{negotiation.PrevLogTerm}.";
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info($"{ToString()}: {msg}");

            connection.Send(context, new LogLengthNegotiationResponse
            {
                Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
                Message = msg,
                CurrentTerm = _term,
                LastLogIndex = 0
            });

            return;
        }

        connection.Send(context, new LogLengthNegotiationResponse
        {
            Status = LogLengthNegotiationResponse.ResponseStatus.Negotiation,
            Message = $"Term/Index mismatch from leader, need to figure out at what point the logs match, range: {maxIndex} - {minIndex} | {midpointIndex} in term {midpointTerm}",
            CurrentTerm = _term,
            MaxIndex = maxIndex,
            MinIndex = minIndex,
            MidpointIndex = midpointIndex,
            MidpointTerm = midpointTerm
        });

        negotiation = connection.Read<LogLengthNegotiation>(context);

        _engine.Timeout.Defer(_connection.Source);

        if (negotiation.Truncated)
        {
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info($"{ToString()}: Got a truncated response from the leader will request all entries");

            connection.Send(context, new LogLengthNegotiationResponse
            {
                Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
                Message = "We have entries that are already truncated at the leader, will ask for full snapshot",
                CurrentTerm = _term,
                LastLogIndex = 0
            });

            return;
        }

        using (context.OpenReadTransaction())
        {
            if (_engine.GetTermFor(context, negotiation.PrevLogIndex) == negotiation.PrevLogTerm)
                minIndex = Math.Min(midpointIndex + 1, maxIndex);
            else
                maxIndex = Math.Max(midpointIndex - 1, minIndex);
        }

        midpointIndex = (maxIndex + minIndex) / 2;

        using (context.OpenReadTransaction())
            midpointTerm = _engine.GetTermForKnownExisting(context, midpointIndex);
    }

    if (_engine.Log.IsInfoEnabled)
        _engine.Log.Info($"{ToString()}: agreed upon last matched index = {midpointIndex} on term = {midpointTerm}");

    connection.Send(context, new LogLengthNegotiationResponse
    {
        Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
        Message = $"Found a log index / term match at {midpointIndex} with term {midpointTerm}",
        CurrentTerm = _term,
        LastLogIndex = midpointIndex,
    });
}
private void NegotiateWithLeader(TransactionOperationContext context, LogLengthNegotiation negotiation)
{
    _debugRecorder.Start();

    // only the leader can send append entries, so if we accepted it, it's the leader
    if (_engine.Log.IsInfoEnabled)
        _engine.Log.Info($"{ToString()}: Got a negotiation request for term {negotiation.Term} where our term is {_term}");

    if (negotiation.Term != _term)
    {
        // Our leader is no longer a valid one
        var msg = $"The term was changed after leader validation from {_term} to {negotiation.Term}, so you are no longer a valid leader";

        _connection.Send(context, new LogLengthNegotiationResponse
        {
            Status = LogLengthNegotiationResponse.ResponseStatus.Rejected,
            Message = msg,
            CurrentTerm = _term,
            LastLogIndex = negotiation.PrevLogIndex
        });

        throw new InvalidOperationException($"We close this follower because: {msg}");
    }

    long prevTerm;
    using (context.OpenReadTransaction())
    {
        prevTerm = _engine.GetTermFor(context, negotiation.PrevLogIndex) ?? 0;
    }

    if (prevTerm != negotiation.PrevLogTerm)
    {
        if (_engine.Log.IsInfoEnabled)
            _engine.Log.Info($"{ToString()}: Got a negotiation request with PrevLogTerm={negotiation.PrevLogTerm} while our PrevLogTerm={prevTerm}" +
                             " will negotiate to find next matched index");

        // we now have a mismatch with the log position, and need to negotiate it with
        // the leader
        NegotiateMatchEntryWithLeaderAndApplyEntries(context, _connection, negotiation);
    }
    else
    {
        if (_engine.Log.IsInfoEnabled)
            _engine.Log.Info($"{ToString()}: Got a negotiation request with identical PrevLogTerm will continue to steady state");

        // this (or the negotiation above) completes the negotiation process
        _connection.Send(context, new LogLengthNegotiationResponse
        {
            Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
            Message = $"Found a log index / term match at {negotiation.PrevLogIndex} with term {prevTerm}",
            CurrentTerm = _term,
            LastLogIndex = negotiation.PrevLogIndex
        });
    }

    _debugRecorder.Record("Matching Negotiation is over, waiting for snapshot");
    _engine.Timeout.Defer(_connection.Source);

    // at this point, the leader will send us a snapshot message
    // in most cases, it is an empty snapshot, then start regular append entries
    // the reason we send this is to simplify the # of states in the protocol
    var snapshot = _connection.ReadInstallSnapshot(context);
    _debugRecorder.Record("Snapshot received");

    using (context.OpenWriteTransaction())
    {
        var lastTerm = _engine.GetTermFor(context, snapshot.LastIncludedIndex);
        var lastCommitIndex = _engine.GetLastEntryIndex(context);

        if (snapshot.LastIncludedTerm == lastTerm && snapshot.LastIncludedIndex < lastCommitIndex)
        {
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info(
                    $"{ToString()}: Got installed snapshot with last index={snapshot.LastIncludedIndex} while our lastCommitIndex={lastCommitIndex}, will just ignore it");

            //This is okay to ignore because we will just get the committed entries again and skip them
            ReadInstallSnapshotAndIgnoreContent(context);
        }
        else if (InstallSnapshot(context))
        {
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info(
                    $"{ToString()}: Installed snapshot with last index={snapshot.LastIncludedIndex} with LastIncludedTerm={snapshot.LastIncludedTerm} ");

            _engine.SetLastCommitIndex(context, snapshot.LastIncludedIndex, snapshot.LastIncludedTerm);
            _engine.ClearLogEntriesAndSetLastTruncate(context, snapshot.LastIncludedIndex, snapshot.LastIncludedTerm);
        }
        else
        {
            var lastEntryIndex = _engine.GetLastEntryIndex(context);
            if (lastEntryIndex < snapshot.LastIncludedIndex)
            {
                var message = $"The snapshot installation had failed because the last included index {snapshot.LastIncludedIndex} in term {snapshot.LastIncludedTerm} doesn't match the last entry {lastEntryIndex}";
                if (_engine.Log.IsInfoEnabled)
                    _engine.Log.Info($"{ToString()}: {message}");

                throw new InvalidOperationException(message);
            }
        }

        // snapshot always has the latest topology
        if (snapshot.Topology == null)
        {
            const string message = "Expected to get topology on snapshot";
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info($"{ToString()}: {message}");

            throw new InvalidOperationException(message);
        }

        using (var topologyJson = context.ReadObject(snapshot.Topology, "topology"))
        {
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info($"{ToString()}: topology on install snapshot: {topologyJson}");

            var topology = JsonDeserializationRachis<ClusterTopology>.Deserialize(topologyJson);

            RachisConsensus.SetTopology(_engine, context, topology);
        }

        context.Transaction.Commit();
    }

    _engine.Timeout.Defer(_connection.Source);
    _debugRecorder.Record("Invoking StateMachine.SnapshotInstalled");

    // notify the state machine, we do this in an async manner, and start
    // the operator in a separate thread to avoid timeouts while this is
    // going on
    var task = Task.Run(() => _engine.SnapshotInstalledAsync(snapshot.LastIncludedIndex));

    var sp = Stopwatch.StartNew();
    var timeToWait = (int)(_engine.ElectionTimeout.TotalMilliseconds / 4);

    while (task.Wait(timeToWait) == false)
    {
        // this may take a while, so we let the other side know that
        // we are still processing, and we reset our own timer while
        // this is happening
        MaybeNotifyLeaderThatWeAreStillAlive(context, sp);
    }

    _debugRecorder.Record("Done with StateMachine.SnapshotInstalled");

    // we might have moved from passive node, so we need to start the timeout clock
    _engine.Timeout.Start(_engine.SwitchToCandidateStateOnTimeout);

    _debugRecorder.Record("Snapshot installed");

    //Here we send the LastIncludedIndex as our matched index even for the case where our lastCommitIndex is greater
    //So we could validate that the entries sent by the leader are indeed the same as the ones we have.
    _connection.Send(context, new InstallSnapshotResponse
    {
        Done = true,
        CurrentTerm = _term,
        LastLogIndex = snapshot.LastIncludedIndex
    });

    _engine.Timeout.Defer(_connection.Source);
}
protected Query GetLuceneQuery(DocumentsOperationContext context, QueryMetadata metadata, QueryExpression whereExpression, BlittableJsonReaderObject parameters, Analyzer analyzer, QueryBuilderFactories factories)
{
    Query documentQuery;

    if (string.IsNullOrEmpty(metadata.QueryText))
    {
        if (_logger.IsInfoEnabled)
            _logger.Info($"Issuing query on index {_indexName} for all documents");

        documentQuery = new MatchAllDocsQuery();
    }
    else
    {
        if (_logger.IsInfoEnabled)
            _logger.Info($"Issuing query on index {_indexName} for: {metadata.Query}");

        // RavenPerFieldAnalyzerWrapper searchAnalyzer = null;
        try
        {
            //_persistence._a
            //searchAnalyzer = parent.CreateAnalyzer(new LowerCaseKeywordAnalyzer(), toDispose, true);
            //searchAnalyzer = parent.AnalyzerGenerators.Aggregate(searchAnalyzer, (currentAnalyzer, generator) =>
            //{
            //    Analyzer newAnalyzer = generator.GenerateAnalyzerForQuerying(parent.PublicName, query.Query, currentAnalyzer);
            //    if (newAnalyzer != currentAnalyzer)
            //    {
            //        DisposeAnalyzerAndFriends(toDispose, currentAnalyzer);
            //    }
            //    return parent.CreateAnalyzer(newAnalyzer, toDispose, true);
            //});

            IDisposable releaseServerContext = null;
            IDisposable closeServerTransaction = null;
            TransactionOperationContext serverContext = null;

            try
            {
                if (metadata.HasCmpXchg)
                {
                    releaseServerContext = context.DocumentDatabase.ServerStore.ContextPool.AllocateOperationContext(out serverContext);
                    closeServerTransaction = serverContext.OpenReadTransaction();
                }

                using (closeServerTransaction)
                    documentQuery = QueryBuilder.BuildQuery(serverContext, context, metadata, whereExpression, _index.Definition, parameters, analyzer, factories);
            }
            finally
            {
                releaseServerContext?.Dispose();
            }
        }
        finally
        {
            //DisposeAnalyzerAndFriends(toDispose, searchAnalyzer);
        }
    }

    //var afterTriggers = ApplyIndexTriggers(documentQuery);
    return documentQuery;
}
private void NegotiateWithLeader(TransactionOperationContext context, LogLengthNegotiation negotiation)
{
    _debugRecorder.Start();

    // only the leader can send append entries, so if we accepted it, it's the leader
    if (_engine.Log.IsInfoEnabled)
        _engine.Log.Info($"{ToString()}: Got a negotiation request for term {negotiation.Term} where our term is {_term}.");

    if (negotiation.Term != _term)
    {
        // Our leader is no longer a valid one
        var msg = $"The term was changed after leader validation from {_term} to {negotiation.Term}, so you are no longer a valid leader";

        _connection.Send(context, new LogLengthNegotiationResponse
        {
            Status = LogLengthNegotiationResponse.ResponseStatus.Rejected,
            Message = msg,
            CurrentTerm = _term,
            LastLogIndex = negotiation.PrevLogIndex
        });

        throw new InvalidOperationException($"We close this follower because: {msg}");
    }

    long prevTerm;
    bool requestSnapshot;
    bool requestFullSnapshot = false;

    using (context.OpenReadTransaction())
    {
        prevTerm = _engine.GetTermFor(context, negotiation.PrevLogIndex) ?? 0;
        requestSnapshot = _engine.GetSnapshotRequest(context);
    }

    if (requestSnapshot)
    {
        if (_engine.Log.IsInfoEnabled)
            _engine.Log.Info($"{ToString()}: Request snapshot by the admin");

        _connection.Send(context, new LogLengthNegotiationResponse
        {
            Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
            Message = "Request snapshot by the admin",
            CurrentTerm = _term,
            LastLogIndex = 0
        });

        requestFullSnapshot = true;
    }
    else if (prevTerm != negotiation.PrevLogTerm)
    {
        if (_engine.Log.IsInfoEnabled)
            _engine.Log.Info($"{ToString()}: Got a negotiation request with PrevLogTerm={negotiation.PrevLogTerm} while our PrevLogTerm={prevTerm}" +
                             " will negotiate to find next matched index");

        // we now have a mismatch with the log position, and need to negotiate it with
        // the leader
        requestFullSnapshot = NegotiateMatchEntryWithLeaderAndApplyEntries(context, _connection, negotiation);
    }
    else
    {
        if (_engine.Log.IsInfoEnabled)
            _engine.Log.Info($"{ToString()}: Got a negotiation request with identical PrevLogTerm will continue to steady state");

        // this (or the negotiation above) completes the negotiation process
        _connection.Send(context, new LogLengthNegotiationResponse
        {
            Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
            Message = $"Found a log index / term match at {negotiation.PrevLogIndex} with term {prevTerm}",
            CurrentTerm = _term,
            LastLogIndex = negotiation.PrevLogIndex
        });
    }

    _debugRecorder.Record("Matching Negotiation is over, waiting for snapshot");
    _engine.Timeout.Defer(_connection.Source);

    // at this point, the leader will send us a snapshot message
    // in most cases, it is an empty snapshot, then start regular append entries
    // the reason we send this is to simplify the # of states in the protocol
    var snapshot = _connection.ReadInstallSnapshot(context);
    _debugRecorder.Record("Start receiving snapshot");

    // reading the snapshot from network and committing it to the disk might take a long time.
    using (var cts = new CancellationTokenSource())
    {
        KeepAliveAndExecuteAction(() => ReadAndCommitSnapshot(context, snapshot, cts.Token), cts, "ReadAndCommitSnapshot");
    }

    _debugRecorder.Record("Snapshot was received and committed");

    // notify the state machine, we do this in an async manner, and start
    // the operator in a separate thread to avoid timeouts while this is
    // going on
    using (var cts = new CancellationTokenSource())
    {
        KeepAliveAndExecuteAction(() =>
        {
            _engine.SnapshotInstalled(snapshot.LastIncludedIndex, requestFullSnapshot, cts.Token);
        }, cts, "SnapshotInstalledAsync");
    }

    _debugRecorder.Record("Done with StateMachine.SnapshotInstalled");
    _debugRecorder.Record("Snapshot installed");

    //Here we send the LastIncludedIndex as our matched index even for the case where our lastCommitIndex is greater
    //So we could validate that the entries sent by the leader are indeed the same as the ones we have.
    _connection.Send(context, new InstallSnapshotResponse
    {
        Done = true,
        CurrentTerm = _term,
        LastLogIndex = snapshot.LastIncludedIndex
    });

    _engine.Timeout.Defer(_connection.Source);
}
private void NegotiateWithLeader(TransactionOperationContext context, LogLengthNegotiation negotiation)
{
    // only the leader can send append entries, so if we accepted it, it's the leader
    if (_engine.Log.IsInfoEnabled)
        _engine.Log.Info($"Follower {_engine.Tag}: Got a negotiation request for term {negotiation.Term} where our term is {_engine.CurrentTerm}");

    if (negotiation.Term > _engine.CurrentTerm)
        _engine.FoundAboutHigherTerm(negotiation.Term);

    long prevTerm;
    using (context.OpenReadTransaction())
    {
        prevTerm = _engine.GetTermFor(context, negotiation.PrevLogIndex) ?? 0;
    }

    if (prevTerm != negotiation.PrevLogTerm)
    {
        if (_engine.Log.IsInfoEnabled)
            _engine.Log.Info($"Follower {_engine.Tag}: Got a negotiation request with PrevLogTerm={negotiation.PrevLogTerm} while our PrevLogTerm={prevTerm}" +
                             " will negotiate to find next matched index");

        // we now have a mismatch with the log position, and need to negotiate it with
        // the leader
        NegotiateMatchEntryWithLeaderAndApplyEntries(context, _connection, negotiation);
    }
    else
    {
        if (_engine.Log.IsInfoEnabled)
            _engine.Log.Info($"Follower {_engine.Tag}: Got a negotiation request with identical PrevLogTerm will continue to steady state");

        // this (or the negotiation above) completes the negotiation process
        _connection.Send(context, new LogLengthNegotiationResponse
        {
            Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
            Message = $"Found a log index / term match at {negotiation.PrevLogIndex} with term {prevTerm}",
            CurrentTerm = _engine.CurrentTerm,
            LastLogIndex = negotiation.PrevLogIndex
        });
    }

    _engine.Timeout.Defer(_connection.Source);

    // at this point, the leader will send us a snapshot message
    // in most cases, it is an empty snapshot, then start regular append entries
    // the reason we send this is to simplify the # of states in the protocol
    var snapshot = _connection.ReadInstallSnapshot(context);

    using (context.OpenWriteTransaction())
    {
        var lastCommitIndex = _engine.GetLastCommitIndex(context);
        if (snapshot.LastIncludedIndex < lastCommitIndex)
        {
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info(
                    $"Follower {_engine.Tag}: Got installed snapshot with last index={snapshot.LastIncludedIndex} while our lastCommitIndex={lastCommitIndex}, will just ignore it");

            //This is okay to ignore because we will just get the committed entries again and skip them
            ReadInstallSnapshotAndIgnoreContent(context);
        }
        else if (InstallSnapshot(context))
        {
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info(
                    $"Follower {_engine.Tag}: Installed snapshot with last index={snapshot.LastIncludedIndex} with LastIncludedTerm={snapshot.LastIncludedTerm} ");

            _engine.SetLastCommitIndex(context, snapshot.LastIncludedIndex, snapshot.LastIncludedTerm);
            _engine.ClearLogEntriesAndSetLastTruncate(context, snapshot.LastIncludedIndex, snapshot.LastIncludedTerm);
        }
        else
        {
            var lastEntryIndex = _engine.GetLastEntryIndex(context);
            if (lastEntryIndex < snapshot.LastIncludedIndex)
            {
                var message = $"The snapshot installation had failed because the last included index {snapshot.LastIncludedIndex} in term {snapshot.LastIncludedTerm} doesn't match the last entry {lastEntryIndex}";
                if (_engine.Log.IsInfoEnabled)
                    _engine.Log.Info($"Follower {_engine.Tag}: {message}");

                throw new InvalidOperationException(message);
            }
        }

        // snapshot always has the latest topology
        if (snapshot.Topology == null)
        {
            const string message = "Expected to get topology on snapshot";
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info($"Follower {_engine.Tag}: {message}");

            throw new InvalidOperationException(message);
        }

        using (var topologyJson = context.ReadObject(snapshot.Topology, "topology"))
        {
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info($"Follower {_engine.Tag}: topology on install snapshot: {topologyJson}");

            var topology = JsonDeserializationRachis<ClusterTopology>.Deserialize(topologyJson);

            RachisConsensus.SetTopology(_engine, context, topology);
        }

        context.Transaction.Commit();
    }

    //Here we send the LastIncludedIndex as our matched index even for the case where our lastCommitIndex is greater
    //So we could validate that the entries sent by the leader are indeed the same as the ones we have.
    _connection.Send(context, new InstallSnapshotResponse
    {
        Done = true,
        CurrentTerm = _engine.CurrentTerm,
        LastLogIndex = snapshot.LastIncludedIndex
    });

    _engine.Timeout.Defer(_connection.Source);

    // notify the state machine
    _engine.SnapshotInstalled(context, snapshot.LastIncludedIndex);

    _engine.Timeout.Defer(_connection.Source);
}
private void NegotiateMatchEntryWithLeaderAndApplyEntries(TransactionOperationContext context, RemoteConnection connection, LogLengthNegotiation negotiation)
{
    long minIndex;
    long maxIndex;
    long midpointTerm;
    long midpointIndex;

    using (context.OpenReadTransaction())
    {
        minIndex = _engine.GetFirstEntryIndex(context);

        if (minIndex == 0) // no entries at all
        {
            connection.Send(context, new LogLengthNegotiationResponse
            {
                Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
                Message = "No entries at all here, give me everything from the start",
                CurrentTerm = _engine.CurrentTerm,
                LastLogIndex = 0
            });

            return; // leader will know where to start from here
        }

        maxIndex = Math.Min(
            _engine.GetLastEntryIndex(context), // max
            negotiation.PrevLogIndex
        );

        midpointIndex = (maxIndex + minIndex) / 2;
        midpointTerm = _engine.GetTermForKnownExisting(context, midpointIndex);
    }

    while (minIndex < maxIndex)
    {
        _engine.Timeout.Defer(_connection.Source);

        // TODO: cancellation
        //_cancellationTokenSource.Token.ThrowIfCancellationRequested();

        connection.Send(context, new LogLengthNegotiationResponse
        {
            Status = LogLengthNegotiationResponse.ResponseStatus.Negotiation,
            Message = $"Term/Index mismatch from leader, need to figure out at what point the logs match, range: {maxIndex} - {minIndex} | {midpointIndex} in term {midpointTerm}",
            CurrentTerm = _engine.CurrentTerm,
            MaxIndex = maxIndex,
            MinIndex = minIndex,
            MidpointIndex = midpointIndex,
            MidpointTerm = midpointTerm
        });

        var response = connection.Read<LogLengthNegotiation>(context);

        _engine.Timeout.Defer(_connection.Source);

        if (response.Truncated)
        {
            if (_engine.Log.IsInfoEnabled)
                _engine.Log.Info($"Follower {_engine.Tag}: Got a truncated response from the leader will request all entries");

            connection.Send(context, new LogLengthNegotiationResponse
            {
                Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
                Message = "We have entries that are already truncated at the leader, will ask for full snapshot",
                CurrentTerm = _engine.CurrentTerm,
                LastLogIndex = 0
            });

            return;
        }

        using (context.OpenReadTransaction())
        {
            if (_engine.GetTermFor(context, response.PrevLogIndex) == response.PrevLogTerm)
                minIndex = midpointIndex + 1;
            else
                maxIndex = midpointIndex - 1;
        }

        midpointIndex = (maxIndex + minIndex) / 2;

        using (context.OpenReadTransaction())
            midpointTerm = _engine.GetTermForKnownExisting(context, midpointIndex);
    }

    if (_engine.Log.IsInfoEnabled)
        _engine.Log.Info($"Follower {_engine.Tag}: agreed upon last matched index = {midpointIndex} on term = {_engine.CurrentTerm}");

    connection.Send(context, new LogLengthNegotiationResponse
    {
        Status = LogLengthNegotiationResponse.ResponseStatus.Acceptable,
        Message = $"Found a log index / term match at {midpointIndex} with term {midpointTerm}",
        CurrentTerm = _engine.CurrentTerm,
        LastLogIndex = midpointIndex
    });
}
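
Stripped of the Rachis plumbing (network round-trips, truncation handling, admin-requested snapshots, re-reading terms under fresh read transactions), the loop in both NegotiateMatchEntryWithLeaderAndApplyEntries variants above is a binary search for the highest index at which follower and leader agree on the term. The sketch below shows only that invariant under the assumption that termsMatch is monotone (true up to the divergence point, false afterwards); it is not the production loop.

// Textbook form of the search the negotiation above performs; termsMatch is a
// hypothetical predicate that hides the network round-trip done by the real code.
static long FindLastMatchedIndex(long minIndex, long maxIndex, Func<long, bool> termsMatch)
{
    // Precondition (mirrors the real negotiation): both logs agree at minIndex.
    while (minIndex < maxIndex)
    {
        var midpointIndex = (minIndex + maxIndex + 1) / 2; // round up so the range always shrinks

        if (termsMatch(midpointIndex))
            minIndex = midpointIndex;     // still matching: the answer is at or above the midpoint
        else
            maxIndex = midpointIndex - 1; // diverged: the answer is strictly below the midpoint
    }

    return minIndex; // last index where both logs are known to agree
}
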