/// <summary>
/// Applies a put-subscription command against the cluster state machine: resolves the
/// subscription name (generating one from the id when none was supplied), validates its
/// length, and persists the resulting <see cref="SubscriptionState"/> into <paramref name="items"/>.
/// When no explicit name was given and the generated name collides with a different
/// subscription, a numeric suffix is appended and the write is retried.
/// </summary>
/// <param name="context">Transaction context whose allocator backs the key slices.</param>
/// <param name="items">Cluster items table the subscription state is written to.</param>
/// <param name="index">Raft index of this command; also used as the subscription id when none was provided.</param>
/// <param name="record">Database record (unused here beyond the signature contract).</param>
/// <param name="state">Current Rachis state (unused here beyond the signature contract).</param>
/// <param name="result">Always set to null; this command produces no result object.</param>
public override unsafe void Execute(TransactionOperationContext context, Table items, long index, RawDatabaseRecord record, RachisState state, out object result)
{
    long i = 1; // suffix counter for name-collision retries
    var originalName = SubscriptionName; // remember whether the caller supplied an explicit name
    var tryToSetName = true;
    result = null;
    var subscriptionId = SubscriptionId ?? index;
    SubscriptionName = string.IsNullOrEmpty(SubscriptionName) ? subscriptionId.ToString() : SubscriptionName;
    var baseName = SubscriptionName;
    if (SubscriptionName.Length > DocumentIdWorker.MaxIdSize)
    {
        throw new SubscriptionNameException($"Subscription Name is too long, must be at most {DocumentIdWorker.MaxIdSize} bytes");
    }
    while (tryToSetName)
    {
        var subscriptionItemName = SubscriptionState.GenerateSubscriptionItemKeyName(DatabaseName, SubscriptionName);
        using (Slice.From(context.Allocator, subscriptionItemName, out Slice valueName))
        using (Slice.From(context.Allocator, subscriptionItemName.ToLowerInvariant(), out Slice valueNameLowered))
        {
            if (items.ReadByKey(valueNameLowered, out TableValueReader tvr))
            {
                // An item with this name already exists — read its stored state.
                var ptr = tvr.Read(2, out int size);
                var doc = new BlittableJsonReaderObject(ptr, size, context);
                var existingSubscriptionState = JsonDeserializationClient.SubscriptionState(doc);
                if (SubscriptionId != existingSubscriptionState.SubscriptionId)
                {
                    if (string.IsNullOrEmpty(originalName))
                    {
                        // Auto-generated name collided with a different subscription:
                        // append ".<n>" and retry the loop with the new name.
                        SubscriptionName = $"{baseName}.{i}";
                        i++;
                        continue;
                    }
                    // Explicit name collides with a different subscription — hard failure.
                    throw new RachisApplyException("A subscription could not be modified because the name '" + subscriptionItemName + "' is already in use in a subscription with different Id.");
                }
                if (string.IsNullOrEmpty(InitialChangeVector) == false && InitialChangeVector == nameof(Constants.Documents.SubscriptionChangeVectorSpecialStates.DoNotChange))
                {
                    // "DoNotChange" sentinel: keep the existing subscription's progress point.
                    InitialChangeVector = existingSubscriptionState.ChangeVectorForNextBatchStartingPoint;
                }
                else
                {
                    AssertValidChangeVector();
                }
            }
            else
            {
                AssertValidChangeVector();
            }
            // Serialize the (possibly updated) state and write it under both the
            // original-cased and lowercased keys.
            using (var receivedSubscriptionState = context.ReadObject(new SubscriptionState
            {
                Query = Query,
                ChangeVectorForNextBatchStartingPoint = InitialChangeVector,
                SubscriptionId = subscriptionId,
                SubscriptionName = SubscriptionName,
                LastBatchAckTime = null,
                Disabled = Disabled,
                MentorNode = MentorNode,
                LastClientConnectionTime = null
            }.ToJson(), SubscriptionName))
            {
                ClusterStateMachine.UpdateValue(index, items, valueNameLowered, valueName, receivedSubscriptionState);
            }
            tryToSetName = false;
        }
    }
}
/// <summary>
/// Materializes the server reply into a <c>SubscriptionState</c> and exposes it via <c>Result</c>.
/// </summary>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
/// <param name="fromCache">Whether the response was served from the local cache; not used here.</param>
public override void SetResponse(BlittableJsonReaderObject response, bool fromCache)
    => Result = JsonDeserializationClient.SubscriptionState(response);
/// <summary>
/// Deserializes the reply into a <c>GetTimeSeriesStatisticsResult</c> and stores it in <c>Result</c>.
/// </summary>
/// <param name="context">JSON operation context; not used by this deserialization.</param>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
/// <param name="fromCache">Whether the response came from the local cache; not used here.</param>
public override void SetResponse(JsonOperationContext context, BlittableJsonReaderObject response, bool fromCache)
    => Result = JsonDeserializationClient.GetTimeSeriesStatisticsResult(response);
/// <summary>
/// Accepts one TCP client from <paramref name="listener"/> on a background task, immediately
/// re-arms the listener for the next connection, then runs the handshake for this one:
/// optional TLS upgrade, bounded header parsing, protocol-version matching, and
/// authorization — finally dispatching the connection to server-wide or per-database handlers.
/// Failures are logged and, when possible, reported back to the client.
/// </summary>
/// <param name="listener">The listener to accept the next incoming connection from.</param>
private void ListenToNewTcpConnection(TcpListener listener)
{
    Task.Run(async () =>
    {
        TcpClient tcpClient;
        try
        {
            tcpClient = await listener.AcceptTcpClientAsync();
        }
        catch (ObjectDisposedException)
        {
            // shutting down
            return;
        }
        catch (Exception e)
        {
            if (_tcpLogger.IsInfoEnabled)
            {
                _tcpLogger.Info("Failed to accept new tcp connection", e);
            }
            return;
        }
        // Queue the next accept right away so a slow handshake on this
        // connection does not block other incoming connections.
        ListenToNewTcpConnection(listener);
        try
        {
            tcpClient.NoDelay = true;
            tcpClient.ReceiveBufferSize = 32 * 1024;
            tcpClient.SendBufferSize = 4096;
            Stream stream = tcpClient.GetStream();
            stream = await AuthenticateAsServerIfSslNeeded(stream);
            var tcp = new TcpConnectionOptions
            {
                ContextPool = _tcpContextPool,
                Stream = stream,
                TcpClient = tcpClient,
                PinnedBuffer = JsonOperationContext.ManagedPinnedBuffer.LongLivedInstance()
            };
            try
            {
                TcpConnectionHeaderMessage header;
                using (_tcpContextPool.AllocateOperationContext(out JsonOperationContext context))
                {
                    using (var headerJson = await context.ParseToMemoryAsync(
                        stream,
                        "tcp-header",
                        BlittableJsonDocumentBuilder.UsageMode.None,
                        tcp.PinnedBuffer,
                        ServerStore.ServerShutdown,
                        // we don't want to allow external (and anonymous) users to send us unlimited data
                        // a maximum of 2 KB for the header is big enough to include any valid header that
                        // we can currently think of
                        maxSize: 1024 * 2
                    ))
                    {
                        header = JsonDeserializationClient.TcpConnectionHeaderMessage(headerJson);
                        if (Logger.IsInfoEnabled)
                        {
                            Logger.Info(
                                $"New {header.Operation} TCP connection to {header.DatabaseName ?? "the cluster node"} from {tcpClient.Client.RemoteEndPoint}");
                        }
                    }
                    if (MatchingOperationVersion(header, out var error) == false)
                    {
                        RespondToTcpConnection(stream, context, error, TcpConnectionStatus.TcpVersionMismatch);
                        if (Logger.IsInfoEnabled)
                        {
                            Logger.Info(
                                $"New {header.Operation} TCP connection to {header.DatabaseName ?? "the cluster node"} from {tcpClient.Client.RemoteEndPoint} failed because:" +
                                $" {error}");
                        }
                        return; //we will not accept not matching versions
                    }
                    bool authSuccessful = TryAuthorize(Configuration, tcp.Stream, header, out var err);
                    // BUGFIX: previously this passed the stale version-mismatch `error`
                    // (null once the version matched) instead of the authorization
                    // failure `err`, so rejected clients never received the reason.
                    RespondToTcpConnection(stream, context, err, authSuccessful ? TcpConnectionStatus.Ok : TcpConnectionStatus.AuthorizationFailed);
                    if (authSuccessful == false)
                    {
                        if (Logger.IsInfoEnabled)
                        {
                            Logger.Info(
                                $"New {header.Operation} TCP connection to {header.DatabaseName ?? "the cluster node"} from {tcpClient.Client.RemoteEndPoint}" +
                                $" is not authorized to access {header.DatabaseName ?? "the cluster node"} because {err}");
                        }
                        return; // cannot proceed
                    }
                }
                if (await DispatchServerWideTcpConnection(tcp, header))
                {
                    tcp = null; //do not keep reference -> tcp will be disposed by server-wide connection handlers
                    return;
                }
                await DispatchDatabaseTcpConnection(tcp, header);
            }
            catch (Exception e)
            {
                if (_tcpLogger.IsInfoEnabled)
                {
                    _tcpLogger.Info("Failed to process TCP connection run", e);
                }
                SendErrorIfPossible(tcp, e);
            }
        }
        catch (Exception e)
        {
            if (_tcpLogger.IsInfoEnabled)
            {
                _tcpLogger.Info("Failure when processing tcp connection", e);
            }
        }
    });
}
/// <summary>
/// Applies a record-batch subscription command: validates that this node still owns the
/// subscription task and that its change-vector progress matches what the batch recorded,
/// optionally advances the subscription's change vector, then writes per-document and
/// per-revision batch entries (keyed by this command's Raft <paramref name="index"/>)
/// into the subscription state table, removing entries for deleted documents.
/// </summary>
/// <param name="context">Cluster transaction context; its inner transaction hosts the subscription state table.</param>
/// <param name="items">Cluster items table holding the serialized subscription state.</param>
/// <param name="index">Raft index of this command; stored (byte-swapped) as the batch id.</param>
/// <param name="record">Database record used for topology / deletion-in-progress checks.</param>
/// <param name="state">Current Rachis state (not read in this body).</param>
/// <param name="result">Always set to null; this command produces no result object.</param>
public override unsafe void Execute(ClusterOperationContext context, Table items, long index, RawDatabaseRecord record, RachisState state, out object result)
{
    result = null;
    var shouldUpdateChangeVector = true;
    var subscriptionName = SubscriptionName;
    if (string.IsNullOrEmpty(subscriptionName))
    {
        subscriptionName = SubscriptionId.ToString();
    }
    //insert all docs to voron table. If exists, then batchId will be replaced
    var subscriptionStateTable = context.Transaction.InnerTransaction.OpenTable(ClusterStateMachine.SubscriptionStateSchema, ClusterStateMachine.SubscriptionState);
    var itemKey = SubscriptionState.GenerateSubscriptionItemKeyName(DatabaseName, subscriptionName);
    using (Slice.From(context.Allocator, itemKey.ToLowerInvariant(), out Slice valueNameLowered))
    using (Slice.From(context.Allocator, itemKey, out Slice valueName))
    {
        if (items.ReadByKey(valueNameLowered, out var tvr) == false)
        {
            throw new RachisApplyException($"Cannot find subscription {subscriptionName} @ {DatabaseName}");
        }
        var ptr = tvr.Read(2, out int size);
        var existingValue = new BlittableJsonReaderObject(ptr, size, context);
        if (existingValue == null)
        {
            throw new SubscriptionDoesNotExistException($"Subscription with name '{subscriptionName}' does not exist in database '{DatabaseName}'");
        }
        var subscriptionState = JsonDeserializationClient.SubscriptionState(existingValue);
        var topology = record.Topology;
        // Determine which node currently owns this subscription task; fail if it is not us.
        var lastResponsibleNode = AcknowledgeSubscriptionBatchCommand.GetLastResponsibleNode(HasHighlyAvailableTasks, topology, NodeTag);
        var appropriateNode = topology.WhoseTaskIsIt(RachisState.Follower, subscriptionState, lastResponsibleNode);
        if (appropriateNode == null && record.DeletionInProgress.ContainsKey(NodeTag))
        {
            throw new DatabaseDoesNotExistException($"Stopping subscription '{subscriptionName}' on node {NodeTag}, because database '{DatabaseName}' is being deleted.");
        }
        if (appropriateNode != NodeTag)
        {
            throw new SubscriptionDoesNotBelongToNodeException(
                $"Cannot apply {nameof(AcknowledgeSubscriptionBatchCommand)} for subscription '{subscriptionName}' with id '{SubscriptionId}', on database '{DatabaseName}', on node '{NodeTag}'," +
                $" because the subscription task belongs to '{appropriateNode ?? "N/A"}'.")
            { AppropriateNode = appropriateNode };
        }
        if (CurrentChangeVector == nameof(Constants.Documents.SubscriptionChangeVectorSpecialStates.DoNotChange))
        {
            // "DoNotChange" sentinel: skip the change-vector merge below.
            // NOTE(review): the ReadObject return value is discarded here — looks like
            // a no-op read; confirm whether it is needed for a side effect.
            context.ReadObject(existingValue, subscriptionName);
            shouldUpdateChangeVector = false;
        }
        if (subscriptionState.ChangeVectorForNextBatchStartingPoint != PreviouslyRecordedChangeVector)
        {
            // Optimistic-concurrency check: the stored progress must match what this
            // batch observed when it was recorded.
            throw new SubscriptionChangeVectorUpdateConcurrencyException($"Can't record subscription with name '{subscriptionName}' due to inconsistency in change vector progress. Probably there was an admin intervention that changed the change vector value. Stored value: {subscriptionState.ChangeVectorForNextBatchStartingPoint}, received value: {PreviouslyRecordedChangeVector}");
        }
        if (shouldUpdateChangeVector)
        {
            // Advance the subscription's progress point and persist the updated state.
            subscriptionState.ChangeVectorForNextBatchStartingPoint = ChangeVectorUtils.MergeVectors(CurrentChangeVector, subscriptionState.ChangeVectorForNextBatchStartingPoint);
            subscriptionState.NodeTag = NodeTag;
            using (var obj = context.ReadObject(subscriptionState.ToJson(), "subscription"))
            {
                ClusterStateMachine.UpdateValue(index, items, valueNameLowered, valueName, obj);
            }
        }
    }
    // Remove batch entries for documents deleted since the batch was sent.
    foreach (var deletedId in Deleted)
    {
        using (SubscriptionConnectionsState.GetDatabaseAndSubscriptionAndDocumentKey(context, DatabaseName, SubscriptionId, deletedId, out var key))
        {
            using var _ = Slice.External(context.Allocator, key, out var keySlice);
            subscriptionStateTable.DeleteByKey(keySlice);
        }
    }
    // Record (or overwrite) an entry per document in this batch.
    foreach (var documentRecord in Documents)
    {
        using (SubscriptionConnectionsState.GetDatabaseAndSubscriptionAndDocumentKey(context, DatabaseName, SubscriptionId, documentRecord.DocumentId, out var key))
        using (subscriptionStateTable.Allocate(out var tvb))
        {
            using var _ = Slice.External(context.Allocator, key, out var keySlice);
            using var __ = Slice.From(context.Allocator, documentRecord.ChangeVector, out var changeVectorSlice);
            tvb.Add(keySlice);
            tvb.Add(changeVectorSlice);
            tvb.Add(Bits.SwapBytes(index)); // batch id
            subscriptionStateTable.Set(tvb);
        }
    }
    // Record (or overwrite) an entry per revision in this batch.
    foreach (var revisionRecord in Revisions)
    {
        using (SubscriptionConnectionsState.GetDatabaseAndSubscriptionAndRevisionKey(context, DatabaseName, SubscriptionId, revisionRecord.Current, out var key))
        using (subscriptionStateTable.Allocate(out var tvb))
        {
            using var _ = Slice.External(context.Allocator, key, out var keySlice);
            using var __ = Slice.From(context.Allocator, revisionRecord.Previous ?? string.Empty, out var changeVectorSlice);
            tvb.Add(keySlice);
            tvb.Add(changeVectorSlice); //prev change vector
            tvb.Add(Bits.SwapBytes(index)); // batch id
            subscriptionStateTable.Set(tvb);
        }
    }
}
/// <summary>
/// Converts the raw server reply into a <c>PutIndexResult</c>, assigning it to <c>Result</c>.
/// </summary>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
public override void SetResponse(BlittableJsonReaderObject response)
    => Result = JsonDeserializationClient.PutIndexResult(response);
/// <summary>
/// Converts the raw server reply into a <c>CreateSubscriptionResult</c>, assigning it to <c>Result</c>.
/// </summary>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
public override void SetResponse(BlittableJsonReaderObject response)
    => Result = JsonDeserializationClient.CreateSubscriptionResult(response);
/// <summary>
/// Deserializes the reply into a <c>CreateSubscriptionResult</c> and stores it in <c>Result</c>.
/// </summary>
/// <param name="context">JSON operation context; not used by this deserialization.</param>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
/// <param name="fromCache">Whether the response came from the local cache; not used here.</param>
public override void SetResponse(JsonOperationContext context, BlittableJsonReaderObject response, bool fromCache)
    => Result = JsonDeserializationClient.CreateSubscriptionResult(response);
/// <summary>
/// Accepts one TCP client from <paramref name="listener"/> on a background task and
/// re-arms the listener, then parses the connection header, loads the target database
/// (with a timeout), and hands the connection to the handler for the requested operation
/// (bulk insert, subscription, or replication). Ownership of <c>tcp</c> is transferred to
/// the handler by nulling the local; anything still owned on exit is disposed in finally.
/// </summary>
/// <param name="listener">The listener to accept the next incoming connection from.</param>
private void ListenToNewTcpConnection(TcpListener listener)
{
    Task.Run(async () =>
    {
        TcpClient tcpClient;
        try
        {
            tcpClient = await listener.AcceptTcpClientAsync();
        }
        catch (ObjectDisposedException)
        {
            // shutting down
            return;
        }
        catch (Exception e)
        {
            if (_tcpLogger.IsInfoEnabled)
            {
                _tcpLogger.Info("Failed to accept new tcp connection", e);
            }
            return;
        }
        // Queue the next accept right away so this connection's handshake
        // does not block other incoming connections.
        ListenToNewTcpConnection(listener);
        TcpConnectionOptions tcp = null;
        try
        {
            tcpClient.NoDelay = true;
            tcpClient.ReceiveBufferSize = 32 * 1024;
            tcpClient.SendBufferSize = 4096;
            var stream = tcpClient.GetStream();
            // Everything added to DisposeOnConnectionClose is torn down with the connection.
            tcp = new TcpConnectionOptions()
            {
                Stream = stream,
                TcpClient = tcpClient,
                DisposeOnConnectionClose =
                {
                    stream,
                    tcpClient
                }
            };
            tcp.DisposeOnConnectionClose.Add(
                _tcpContextPool.AllocateOperationContext(out tcp.Context)
            );
            tcp.MultiDocumentParser = tcp.Context.ParseMultiFrom(stream);
            try
            {
                TcpConnectionHeaderMessage header;
                using (var headerJson = await tcp.MultiDocumentParser.ParseToMemoryAsync())
                {
                    header = JsonDeserializationClient.TcpConnectionHeaderMessage(headerJson);
                    if (_logger.IsInfoEnabled)
                    {
                        _logger.Info($"New {header.Operation} TCP connection to {header.DatabaseName} from {tcpClient.Client.RemoteEndPoint}");
                    }
                }
                tcp.Operation = header.Operation;
                var databaseLoadingTask = ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(header.DatabaseName);
                if (databaseLoadingTask == null)
                {
                    ThrowNoSuchDatabase(header);
                    return;// never hit
                }
                // Bound the wait for database load; throw if it does not finish in time.
                var databaseLoadTimeout = ServerStore.DatabasesLandlord.DatabaseLoadTimeout;
                if (databaseLoadingTask.IsCompleted == false)
                {
                    var resultingTask = await Task.WhenAny(databaseLoadingTask, Task.Delay(databaseLoadTimeout));
                    if (resultingTask != databaseLoadingTask)
                    {
                        ThrowTimeoutOnDatbaseLoad(header);
                    }
                }
                tcp.DocumentDatabase = await databaseLoadingTask;
                tcp.DocumentDatabase.RunningTcpConnections.Add(tcp);
                // Dispatch to the handler for the requested operation; the handler
                // takes over the connection's lifetime from here.
                switch (header.Operation)
                {
                    case TcpConnectionHeaderMessage.OperationTypes.BulkInsert:
                        BulkInsertConnection.Run(tcp);
                        break;
                    case TcpConnectionHeaderMessage.OperationTypes.Subscription:
                        SubscriptionConnection.SendSubscriptionDocuments(tcp);
                        break;
                    case TcpConnectionHeaderMessage.OperationTypes.Replication:
                        var documentReplicationLoader = tcp.DocumentDatabase.DocumentReplicationLoader;
                        documentReplicationLoader.AcceptIncomingConnection(tcp);
                        break;
                    default:
                        throw new InvalidOperationException("Unknown operation for tcp " + header.Operation);
                }
                // Ownership transferred to the handler — prevent the finally from disposing it.
                tcp = null;
            }
            catch (Exception e)
            {
                if (_tcpLogger.IsInfoEnabled)
                {
                    _tcpLogger.Info("Failed to process TCP connection run", e);
                }
                if (tcp != null)
                {
                    // Best-effort: report the failure back to the client as JSON.
                    using (var errorWriter = new BlittableJsonTextWriter(tcp.Context, tcp.Stream))
                    {
                        tcp.Context.Write(errorWriter, new DynamicJsonValue
                        {
                            ["Type"] = "Error",
                            ["Exception"] = e.ToString()
                        });
                    }
                }
            }
        }
        catch (Exception e)
        {
            if (_tcpLogger.IsInfoEnabled)
            {
                _tcpLogger.Info("Failure when processing tcp connection", e);
            }
        }
        finally
        {
            // Disposes the connection only if no handler took ownership above.
            tcp?.Dispose();
        }
    });
}
/// <summary>
/// Deserializes the reply as a <c>PutIndexesResponse</c> and exposes its <c>Results</c> via <c>Result</c>.
/// </summary>
/// <param name="context">JSON operation context; not used by this deserialization.</param>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
/// <param name="fromCache">Whether the response came from the local cache; not used here.</param>
public override void SetResponse(JsonOperationContext context, BlittableJsonReaderObject response, bool fromCache)
    => Result = JsonDeserializationClient.PutIndexesResponse(response).Results;
/// <summary>
/// HTTP handler that returns details for a single ongoing task (replication, backup,
/// SQL/Raven ETL, or subscription), identified by the "key"/"type" (and for
/// subscriptions "taskName") query-string parameters. Writes the task info as JSON,
/// or sets 404 when the task is not found.
/// </summary>
/// <returns>A completed task; the response is written synchronously.</returns>
public Task GetOngoingTaskInfo()
{
    if (ResourceNameValidator.IsValidResourceName(Database.Name, ServerStore.Configuration.Core.DataDirectory.FullPath, out string errorMessage) == false)
    {
        throw new BadRequestException(errorMessage);
    }
    var key = GetLongQueryString("key");
    var typeStr = GetQueryStringValueAndAssertIfSingleAndNotEmpty("type");
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        using (context.OpenReadTransaction())
        {
            var clusterTopology = ServerStore.GetClusterTopology(context);
            var record = ServerStore.Cluster.ReadDatabase(context, Database.Name);
            var dbTopology = record?.Topology;
            if (Enum.TryParse<OngoingTaskType>(typeStr, true, out var type) == false)
            {
                // BUGFIX: the message previously interpolated the parsed enum `type`,
                // which is the default value when parsing fails — report the raw input instead.
                throw new ArgumentException($"Unknown task type: {typeStr}", "type");
            }
            string tag;
            switch (type)
            {
                case OngoingTaskType.Replication:
                    var watcher = record?.ExternalReplication.Find(x => x.TaskId == key);
                    if (watcher == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }
                    tag = dbTopology?.WhoseTaskIsIt(watcher, ServerStore.IsPassive());
                    var replicationTaskInfo = new OngoingTaskReplication
                    {
                        TaskId = watcher.TaskId,
                        TaskName = watcher.Name,
                        MentorNode = watcher.MentorNode,
                        ResponsibleNode = new NodeId { NodeTag = tag, NodeUrl = clusterTopology.GetUrlFromTag(tag) },
                        DestinationDatabase = watcher.Database,
                        TaskState = watcher.Disabled ? OngoingTaskState.Disabled : OngoingTaskState.Enabled,
                        DestinationUrl = watcher.Url
                    };
                    WriteResult(context, replicationTaskInfo);
                    break;
                case OngoingTaskType.Backup:
                    var backupConfiguration = record?.PeriodicBackups?.Find(x => x.TaskId == key);
                    if (backupConfiguration == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }
                    tag = dbTopology?.WhoseTaskIsIt(backupConfiguration, ServerStore.IsPassive());
                    var backupDestinations = GetBackupDestinations(backupConfiguration);
                    var backupStatus = Database.PeriodicBackupRunner.GetBackupStatus(key);
                    var nextBackup = Database.PeriodicBackupRunner.GetNextBackupDetails(record, backupConfiguration, backupStatus);
                    var backupTaskInfo = new OngoingTaskBackup
                    {
                        TaskId = backupConfiguration.TaskId,
                        BackupType = backupConfiguration.BackupType,
                        TaskName = backupConfiguration.Name,
                        TaskState = backupConfiguration.Disabled ? OngoingTaskState.Disabled : OngoingTaskState.Enabled,
                        ResponsibleNode = new NodeId { NodeTag = tag, NodeUrl = clusterTopology.GetUrlFromTag(tag) },
                        BackupDestinations = backupDestinations,
                        LastFullBackup = backupStatus.LastFullBackup,
                        LastIncrementalBackup = backupStatus.LastIncrementalBackup,
                        NextBackup = nextBackup
                    };
                    WriteResult(context, backupTaskInfo);
                    break;
                case OngoingTaskType.SqlEtl:
                    var sqlEtl = record?.SqlEtls?.Find(x => x.TaskId == key);
                    if (sqlEtl == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }
                    WriteResult(context, new OngoingTaskSqlEtlDetails()
                    {
                        TaskId = sqlEtl.TaskId,
                        TaskName = sqlEtl.Name,
                        Configuration = sqlEtl,
                        TaskState = GetEtlTaskState(sqlEtl)
                    });
                    break;
                case OngoingTaskType.RavenEtl:
                    var ravenEtl = record?.RavenEtls?.Find(x => x.TaskId == key);
                    if (ravenEtl == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }
                    WriteResult(context, new OngoingTaskRavenEtlDetails()
                    {
                        TaskId = ravenEtl.TaskId,
                        TaskName = ravenEtl.Name,
                        Configuration = ravenEtl,
                        TaskState = GetEtlTaskState(ravenEtl)
                    });
                    break;
                case OngoingTaskType.Subscription:
                    var nameKey = GetQueryStringValueAndAssertIfSingleAndNotEmpty("taskName");
                    var itemKey = SubscriptionState.GenerateSubscriptionItemKeyName(record.DatabaseName, nameKey);
                    var doc = ServerStore.Cluster.Read(context, itemKey);
                    if (doc == null)
                    {
                        HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                        break;
                    }
                    var subscriptionState = JsonDeserializationClient.SubscriptionState(doc);
                    tag = dbTopology?.WhoseTaskIsIt(subscriptionState, ServerStore.IsPassive());
                    // BUGFIX: the trailing "Todo" comment previously swallowed the rest of the
                    // statement, leaving `TaskConnectionStatus = ...` dangling outside the
                    // initializer and commenting out WriteResult/break (a syntax error).
                    // The commented-out assignment is preserved below to keep the original intent.
                    var subscriptionStateInfo = new SubscriptionStateWithNodeDetails
                    {
                        Query = subscriptionState.Query,
                        ChangeVectorForNextBatchStartingPoint = subscriptionState.ChangeVectorForNextBatchStartingPoint,
                        SubscriptionId = subscriptionState.SubscriptionId,
                        SubscriptionName = subscriptionState.SubscriptionName,
                        LastTimeServerMadeProgressWithDocuments = subscriptionState.LastTimeServerMadeProgressWithDocuments,
                        Disabled = subscriptionState.Disabled,
                        LastClientConnectionTime = subscriptionState.LastClientConnectionTime,
                        MentorNode = subscriptionState.MentorNode,
                        ResponsibleNode = new NodeId { NodeTag = tag, NodeUrl = clusterTopology.GetUrlFromTag(tag) }
                        // Todo: here we'll need to talk with the running node?
                        // TaskConnectionStatus = subscriptionState.Disabled ? OngoingTaskConnectionStatus.NotActive : OngoingTaskConnectionStatus.Active,
                    };
                    WriteResult(context, subscriptionStateInfo.ToJson());
                    break;
                default:
                    HttpContext.Response.StatusCode = (int)HttpStatusCode.NotFound;
                    break;
            }
        }
    }
    return Task.CompletedTask;
}
/// <summary>
/// Materializes the server reply into a <c>DeleteDatabaseResult</c> and exposes it via <c>Result</c>.
/// </summary>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
/// <param name="fromCache">Whether the response was served from the local cache; not used here.</param>
public override void SetResponse(BlittableJsonReaderObject response, bool fromCache)
    => Result = JsonDeserializationClient.DeleteDatabaseResult(response);
/// <summary>
/// Deserializes the reply into an <c>AttachmentDetails</c> instance and stores it in <c>Result</c>.
/// </summary>
/// <param name="context">JSON operation context; not used by this deserialization.</param>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
/// <param name="fromCache">Whether the response came from the local cache; not used here.</param>
public override void SetResponse(JsonOperationContext context, BlittableJsonReaderObject response, bool fromCache)
    => Result = JsonDeserializationClient.AttachmentDetails(response);
/// <summary>
/// Converts the raw server reply into a <c>GetStatisticsResult</c>, assigning it to <c>Result</c>.
/// </summary>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
public override void SetResponse(BlittableJsonReaderObject response)
    => Result = JsonDeserializationClient.GetStatisticsResult(response);
/// <summary>
/// Polls the remote operation identified by <paramref name="operationId"/> (up to 15
/// attempts, 1s apart while in progress), extracts the smuggler result once it completes,
/// and persists an <c>ImportInfo</c> (last etag + 1, server url, database name) as the
/// last-operation state. Returns silently when the state cannot be read; throws when the
/// operation was canceled or faulted.
/// </summary>
/// <param name="operationId">Id of the remote import/export operation to read state for.</param>
private async Task SaveLastState(long operationId)
{
    var retries = 0;
    using (Parameters.Database.ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        while (retries++ < 15)
        {
            var operationState = await GetOperationState(Options.DatabaseName, operationId, context);
            if (operationState == null)
            {
                return;
            }
            if (operationState.TryGet("Status", out OperationStatus operationStatus) == false)
            {
                return;
            }
            if (operationStatus == OperationStatus.InProgress)
            {
                // Still running — wait and poll again (counts as one retry).
                await Task.Delay(1000, Parameters.CancelToken.Token);
                continue;
            }
            if (operationStatus == OperationStatus.Canceled || operationStatus == OperationStatus.Faulted)
            {
                throw new InvalidOperationException("Couldn't get last operation state because the " +
                    $"operation state is {operationStatus.ToString()} " +
                    "although the operation was completed successfully");
            }
            if (operationState.TryGet("Result", out BlittableJsonReaderObject smugglerResultBlittable) == false)
            {
                return;
            }
            var smugglerResult = JsonDeserializationClient.SmugglerResult(smugglerResultBlittable);
            if (smugglerResult == null)
            {
                return;
            }
            if ((_buildVersion >= 40000 && _buildVersion < 41000) || _buildVersion == 40)
            {
                // prevent NRE, counter were added in 4.1
                smugglerResult.Counters = new SmugglerProgressBase.CountsWithLastEtag();
            }
            if ((_buildVersion >= 40000 && _buildVersion < 50000) || (_buildVersion >= 40 && _buildVersion < 50))
            {
                // prevent NRE, time series were added in 5.0
                smugglerResult.TimeSeries = new SmugglerProgressBase.CountsWithLastEtag();
            }
            // Record where the next incremental import should resume from.
            var importInfo = new ImportInfo
            {
                LastEtag = smugglerResult.GetLastEtag() + 1,
                ServerUrl = Options.ServerUrl,
                DatabaseName = Options.DatabaseName
            };
            var importInfoBlittable = DocumentConventions.DefaultForServer.Serialization.DefaultConverter.ToBlittable(importInfo, context);
            await SaveLastOperationState(importInfoBlittable);
            return;
        }
    }
}
/// <summary>
/// Deserializes the reply into a <c>PutServerWideBackupConfigurationResponse</c> and stores it in <c>Result</c>.
/// </summary>
/// <param name="context">JSON operation context; not used by this deserialization.</param>
/// <param name="response">Raw blittable JSON payload returned by the server.</param>
/// <param name="fromCache">Whether the response came from the local cache; not used here.</param>
public override void SetResponse(JsonOperationContext context, BlittableJsonReaderObject response, bool fromCache)
    => Result = JsonDeserializationClient.PutServerWideBackupConfigurationResponse(response);
/// <summary>
/// Schema-update step that de-duplicates subscription ids across all databases:
/// scans every database's subscription items, and when a <c>SubscriptionId</c> has
/// already been seen, reassigns the duplicate a fresh id below the minimum seen so far
/// and rewrites the subscription item. Work is committed in batches of 1024 fixed items,
/// with the cross-commit <c>skip</c> counter used to resume the scan after each commit.
/// </summary>
/// <param name="step">Update step exposing the write transaction and commit/renew hooks.</param>
/// <returns>Always true.</returns>
public bool Update(UpdateStep step)
{
    var ids = new HashSet<long>(); // subscription ids seen so far (across all databases)
    var minimal = long.MaxValue;   // smallest id seen; duplicates get ids below this
    const string dbKey = "db/";
    var continueAfterCommit = true;
    var skip = 0; // how many subscription items were already processed before the last commit
    while (continueAfterCommit)
    {
        continueAfterCommit = false;
        var fixedItems = 0;
        var items = step.WriteTx.OpenTable(ClusterStateMachine.ItemsSchema, ClusterStateMachine.Items);
        using (Slice.From(step.WriteTx.Allocator, dbKey, out Slice loweredPrefix))
        {
            foreach (var result in items.SeekByPrimaryKeyPrefix(loweredPrefix, Slices.Empty, 0))
            {
                // Strip the "db/" prefix to get the database name.
                var databaseName = ClusterStateMachine.GetCurrentItemKey(result.Value).Substring(3);
                using (Slice.From(step.WriteTx.Allocator, dbKey + databaseName.ToLowerInvariant(), out var key))
                {
                    // Skip entries whose database record no longer exists.
                    if (items.VerifyKeyExists(key) == false)
                    {
                        continue;
                    }
                }
                using (Slice.From(step.WriteTx.Allocator, SubscriptionState.SubscriptionPrefix(databaseName), out var startWith))
                using (var ctx = JsonOperationContext.ShortTermSingleUse())
                {
                    // Resume the subscription scan `skip` items in, so items handled
                    // before a previous commit are not reprocessed.
                    foreach (var holder in items.SeekByPrimaryKeyPrefix(startWith, Slices.Empty, skip))
                    {
                        skip++;
                        var reader = holder.Value.Reader;
                        var ptr = reader.Read(2, out int size);
                        using (var doc = new BlittableJsonReaderObject(ptr, size, ctx))
                        {
                            if (doc.TryGet(nameof(SubscriptionState.SubscriptionId), out long id) == false)
                            {
                                continue;
                            }
                            if (minimal > id)
                            {
                                minimal = id;
                            }
                            if (ids.Add(id))
                            {
                                // First time we see this id — nothing to fix.
                                continue;
                            }
                            // Duplicate id: assign a new unique id below the current minimum
                            // and rewrite the subscription item in place.
                            minimal--;
                            ids.Add(minimal);
                            var subscriptionState = JsonDeserializationClient.SubscriptionState(doc);
                            subscriptionState.SubscriptionId = minimal;
                            var subscriptionItemName = SubscriptionState.GenerateSubscriptionItemKeyName(databaseName, subscriptionState.SubscriptionName);
                            using (Slice.From(step.WriteTx.Allocator, subscriptionItemName, out Slice valueName))
                            using (Slice.From(step.WriteTx.Allocator, subscriptionItemName.ToLowerInvariant(), out Slice valueNameLowered))
                            using (var receivedSubscriptionState = ctx.ReadObject(subscriptionState.ToJson(), subscriptionState.SubscriptionName))
                            {
                                ClusterStateMachine.UpdateValue(0, items, valueNameLowered, valueName, receivedSubscriptionState);
                            }
                        }
                        fixedItems++;
                        if (fixedItems < 1024)
                        {
                            continue;
                        }
                        // Batch limit reached — break out to commit and start a fresh pass.
                        continueAfterCommit = true;
                        break;
                    }
                }
                if (continueAfterCommit)
                {
                    break;
                }
            }
        }
        if (continueAfterCommit)
        {
            // Persist this batch and renew the transaction before resuming.
            step.Commit(null);
            step.RenewTransactions();
        }
    }
    return (true);
}
/// <summary>
/// HTTP handler that validates backup-destination credentials: reads the connection
/// settings from the request body, instantiates the matching cloud/FTP client for the
/// "type" query-string value, and attempts a test connection. Writes a
/// <c>NodeConnectionTestResult</c>-shaped JSON response with Success and, on failure,
/// the error text.
/// </summary>
public async Task TestPeriodicBackupCredentials()
{
    var type = GetQueryStringValueAndAssertIfSingleAndNotEmpty("type");
    if (Enum.TryParse(type, out PeriodicBackupConnectionType connectionType) == false)
    {
        throw new ArgumentException($"Unknown backup connection: {type}");
    }
    using (ServerStore.ContextPool.AllocateOperationContext(out TransactionOperationContext context))
    {
        DynamicJsonValue result;
        try
        {
            var connectionInfo = await context.ReadForMemoryAsync(RequestBodyStream(), "test-connection");
            switch (connectionType)
            {
                case PeriodicBackupConnectionType.S3:
                    var s3Settings = JsonDeserializationClient.S3Settings(connectionInfo);
                    using (var awsClient = new RavenAwsS3Client(s3Settings, cancellationToken: ServerStore.ServerShutdown))
                    {
                        awsClient.TestConnection();
                    }
                    break;
                case PeriodicBackupConnectionType.Glacier:
                    var glacierSettings = JsonDeserializationClient.GlacierSettings(connectionInfo);
                    using (var glacierClient = new RavenAwsGlacierClient(glacierSettings, cancellationToken: ServerStore.ServerShutdown))
                    {
                        glacierClient.TestConnection();
                    }
                    break;
                case PeriodicBackupConnectionType.Azure:
                    var azureSettings = JsonDeserializationClient.AzureSettings(connectionInfo);
                    using (var azureClient = new RavenAzureClient(azureSettings, cancellationToken: ServerStore.ServerShutdown))
                    {
                        azureClient.TestConnection();
                    }
                    break;
                case PeriodicBackupConnectionType.GoogleCloud:
                    var googleCloudSettings = JsonDeserializationClient.GoogleCloudSettings(connectionInfo);
                    using (var googleCloudClient = new RavenGoogleCloudClient(googleCloudSettings, cancellationToken: ServerStore.ServerShutdown))
                    {
                        // Note: only the Google Cloud client exposes an async test here.
                        await googleCloudClient.TestConnection();
                    }
                    break;
                case PeriodicBackupConnectionType.FTP:
                    var ftpSettings = JsonDeserializationClient.FtpSettings(connectionInfo);
                    using (var ftpClient = new RavenFtpClient(ftpSettings))
                    {
                        ftpClient.TestConnection();
                    }
                    break;
                case PeriodicBackupConnectionType.Local:
                case PeriodicBackupConnectionType.None:
                default:
                    // Local/None have no remote credentials to test.
                    throw new ArgumentOutOfRangeException();
            }
            result = new DynamicJsonValue
            {
                [nameof(NodeConnectionTestResult.Success)] = true,
            };
        }
        catch (Exception e)
        {
            // Any failure (including the ArgumentOutOfRangeException above) is reported
            // to the caller as Success = false rather than an HTTP error.
            result = new DynamicJsonValue
            {
                [nameof(NodeConnectionTestResult.Success)] = false,
                [nameof(NodeConnectionTestResult.Error)] = e.ToString()
            };
        }
        await using (var writer = new AsyncBlittableJsonTextWriter(context, ResponseBodyStream()))
        {
            context.Write(writer, result);
        }
    }
}
/// <summary>
/// Performs API-key authentication over a WebSocket: connects to <paramref name="url"/>
/// (converted to its WebSocket path), receives the server's challenge, sends the computed
/// challenge response, and returns the OAuth token from the server's reply. Server-side
/// errors are surfaced as <c>InvalidOperationException</c> or <c>InvalidApiKeyException</c>.
/// </summary>
/// <param name="url">Server URL; converted to a WebSocket path for the handshake.</param>
/// <param name="apiKey">API key used to compute the challenge response.</param>
/// <param name="context">JSON context used for sending/receiving handshake messages.</param>
/// <returns>The current OAuth token issued by the server.</returns>
public async Task <string> AuthenticateAsync(string url, string apiKey, JsonOperationContext context)
{
    var uri = new Uri(url.ToWebSocketPath());
    using (var webSocket = new RavenClientWebSocket())
    {
        try
        {
            if (Logger.IsInfoEnabled)
            {
                Logger.Info($"Trying to connect using WebSocket to {uri} for authentication");
            }
            try
            {
                await webSocket.ConnectAsync(uri, _disposedToken.Token);
            }
            catch (WebSocketException webSocketException)
            {
                throw new InvalidOperationException($"Cannot connect using WebSocket to {uri} for authentication", webSocketException);
            }
            // Step 1: receive the server's authentication challenge.
            // NOTE(review): "authtication" is a typo in the runtime error message;
            // left untouched here since changing it alters observable behavior.
            AuthenticatorChallenge authenticatorChallenge;
            using (var result = await Recieve(webSocket, context))
            {
                if (result == null)
                {
                    throw new InvalidDataException("Got null authtication challenge");
                }
                authenticatorChallenge = JsonDeserializationClient.AuthenticatorChallenge(result);
            }
            // Step 2: answer the challenge using the API key.
            var challenge = ComputeChallenge(authenticatorChallenge, apiKey);
            await Send(webSocket, context, "ChallengeResponse", challenge);
            // Step 3: read the server verdict — either an error or the issued token.
            string currentToken;
            using (var reader = await Recieve(webSocket, context))
            {
                string error;
                if (reader.TryGet("Error", out error))
                {
                    string exceptionType;
                    if (reader.TryGet("ExceptionType", out exceptionType) == false || exceptionType == "InvalidOperationException")
                    {
                        throw new InvalidOperationException("Server returned error: " + error);
                    }
                    if (exceptionType == "InvalidApiKeyException")
                    {
                        throw new InvalidApiKeyException(error);
                    }
                }
                string currentOauthToken;
                if (reader.TryGet("CurrentToken", out currentOauthToken) == false || currentOauthToken == null)
                {
                    throw new InvalidOperationException("Missing 'CurrentToken' in response message");
                }
                currentToken = currentOauthToken;
            }
            // Best-effort close; a failure here does not invalidate the obtained token.
            try
            {
                await webSocket.CloseAsync(WebSocketCloseStatus.NormalClosure, "Close from client", _disposedToken.Token);
            }
            catch (Exception ex)
            {
                if (Logger.IsInfoEnabled)
                {
                    Logger.Info("Failed to close the client", ex);
                }
            }
            return (currentToken);
        }
        catch (Exception ex)
        {
            if (Logger.IsInfoEnabled)
            {
                Logger.Info($"Failed to DoOAuthRequest to {url} with {apiKey}", ex);
            }
            throw;
        }
    }
}