/// <summary>
/// Deletes every point of the requested type/key whose timestamp falls in
/// [Start, End], removes the matching rollup data, and publishes a
/// DeleteInRange notification after the write transaction commits.
/// Returns 400 for a missing/incomplete body, 204 on success.
/// </summary>
public async Task<HttpResponseMessage> DeleteRange()
{
    var request = await ReadJsonObjectAsync<TimeSeriesDeleteRange>().ConfigureAwait(false);

    // Reject malformed requests before opening a writer.
    var missingFields = request == null
        || string.IsNullOrEmpty(request.Type)
        || string.IsNullOrEmpty(request.Key);
    if (missingFields)
    {
        return GetEmptyMessage(HttpStatusCode.BadRequest);
    }

    if (request.Start > request.End)
    {
        throw new InvalidOperationException("start cannot be greater than end");
    }

    using (var writer = TimeSeries.CreateWriter())
    {
        writer.DeleteRange(request.Type, request.Key, request.Start.UtcTicks, request.End.UtcTicks);
        writer.DeleteRangeInRollups(request.Type, request.Key, request.Start, request.End);
        writer.Commit();

        TimeSeries.MetricsTimeSeries.Deletes.Mark();
        TimeSeries.Publisher.RaiseNotification(new KeyChangeNotification
        {
            Type = request.Type,
            Key = request.Key,
            Action = TimeSeriesChangeAction.DeleteInRange,
            Start = request.Start,
            End = request.End,
        });

        return new HttpResponseMessage(HttpStatusCode.NoContent);
    }
}
/// <summary>
/// Appends a single multi-value point to a series and publishes an Append
/// notification after the transaction commits. Returns 400 for a
/// missing/incomplete body, 201 when the writer reports a new point was
/// appended, 200 otherwise.
/// </summary>
public async Task<HttpResponseMessage> AppendPoint()
{
    var point = await ReadJsonObjectAsync<TimeSeriesFullPoint>().ConfigureAwait(false);

    // A point needs a type, a key, and at least one value to be meaningful.
    var invalid = point == null
        || string.IsNullOrEmpty(point.Type)
        || string.IsNullOrEmpty(point.Key)
        || point.Values == null
        || point.Values.Length == 0;
    if (invalid)
    {
        return GetEmptyMessage(HttpStatusCode.BadRequest);
    }

    using (var writer = TimeSeries.CreateWriter())
    {
        var appendedNewPoint = writer.Append(point.Type, point.Key, point.At, point.Values);
        writer.Commit();

        TimeSeries.MetricsTimeSeries.ClientRequests.Mark();
        TimeSeries.Publisher.RaiseNotification(new KeyChangeNotification
        {
            Type = point.Type,
            Key = point.Key,
            Action = TimeSeriesChangeAction.Append,
            At = point.At,
            Values = point.Values,
        });

        return GetEmptyMessage(appendedNewPoint ? HttpStatusCode.Created : HttpStatusCode.OK);
    }
}
/// <summary>
/// Replaces the stored replication configuration with the document supplied
/// in the request body. Returns 400 if the body is missing or unparsable,
/// 200 on success.
/// </summary>
public async Task<HttpResponseMessage> ReplicationsSave()
{
    var newReplicationDocument = await ReadJsonObjectAsync<TimeSeriesReplicationDocument>().ConfigureAwait(false);

    // Fix: a null body used to be handed straight to the writer. Every other
    // endpoint in this controller validates its input and answers 400 — do
    // the same here instead of failing inside the write transaction.
    if (newReplicationDocument == null)
    {
        return GetEmptyMessage(HttpStatusCode.BadRequest);
    }

    using (var writer = TimeSeries.CreateWriter())
    {
        writer.UpdateReplications(newReplicationDocument);
        writer.Commit();
        return GetEmptyMessage();
    }
}
/// <summary>
/// Removes an entire time-series type. Returns 400 when no type name is
/// given, 204 on success.
/// </summary>
public HttpResponseMessage DeleteType(string type)
{
    if (string.IsNullOrEmpty(type))
    {
        return GetEmptyMessage(HttpStatusCode.BadRequest);
    }

    using (var writer = TimeSeries.CreateWriter())
    {
        writer.DeleteType(type);
        writer.Commit();
    }

    TimeSeries.MetricsTimeSeries.ClientRequests.Mark();
    return new HttpResponseMessage(HttpStatusCode.NoContent);
}
/// <summary>
/// Creates (registers) a time-series type with its field layout. Returns
/// 400 when the payload is missing, unnamed, or has no fields; 201 on
/// success.
/// </summary>
public HttpResponseMessage PutType(TimeSeriesType type)
{
    // Fix: also guard against a null argument — model binding can deliver
    // null, and the original check dereferenced type.Type before verifying
    // the object itself, causing a NullReferenceException (500) instead of 400.
    if (type == null || string.IsNullOrEmpty(type.Type) || type.Fields == null || type.Fields.Length < 1)
    {
        return GetEmptyMessage(HttpStatusCode.BadRequest);
    }

    using (var writer = TimeSeries.CreateWriter())
    {
        writer.CreateType(type.Type, type.Fields);
        writer.Commit();
    }

    TimeSeries.MetricsTimeSeries.ClientRequests.Mark();
    return new HttpResponseMessage(HttpStatusCode.Created);
}
/// <summary>
/// Deletes a batch of individual points (and their rollup entries) inside a
/// single write transaction, raising one Delete notification per point.
/// Returns 400 for an empty body, otherwise the count of points the writer
/// reported as actually deleted.
/// </summary>
public async Task<HttpResponseMessage> DeletePoints()
{
    var points = await ReadJsonObjectAsync<TimeSeriesPointId[]>().ConfigureAwait(false);
    if (points == null || points.Length == 0)
    {
        return GetEmptyMessage(HttpStatusCode.BadRequest);
    }

    var deleted = 0;
    using (var writer = TimeSeries.CreateWriter())
    {
        foreach (var point in points)
        {
            // Each point must be fully identified; an invalid entry aborts
            // the whole (uncommitted) batch.
            if (string.IsNullOrEmpty(point.Type))
            {
                throw new InvalidOperationException("Point type cannot be empty");
            }
            if (string.IsNullOrEmpty(point.Key))
            {
                throw new InvalidOperationException("Point key cannot be empty");
            }

            if (writer.DeletePoint(point))
            {
                deleted++;
            }
            writer.DeletePointInRollups(point);

            TimeSeries.MetricsTimeSeries.Deletes.Mark();
            TimeSeries.Publisher.RaiseNotification(new KeyChangeNotification
            {
                Type = point.Type,
                Key = point.Key,
                Action = TimeSeriesChangeAction.Delete,
            });
        }

        writer.Commit();
        return GetMessageWithObject(deleted);
    }
}
/// <summary>
/// Incoming replication endpoint: applies a batch of replication log items
/// from a sibling node inside one write transaction. Returns 400 when the
/// body cannot be parsed (or carries no log list), 200 on success.
/// </summary>
public async Task<HttpResponseMessage> Post()
{
    TimeSeries.MetricsTimeSeries.IncomingReplications.Mark();

    ReplicationMessage replicationMessage;
    try
    {
        replicationMessage = await ReadJsonObjectAsync<ReplicationMessage>().ConfigureAwait(false);
    }
    catch (Exception e)
    {
        return Request.CreateResponse(HttpStatusCode.BadRequest, e.Message);
    }

    // Fix: a null message or null Logs list previously reached the foreach
    // and failed with a NullReferenceException (500); answer 400 instead.
    if (replicationMessage == null || replicationMessage.Logs == null)
    {
        return GetEmptyMessage(HttpStatusCode.BadRequest);
    }

    using (var writer = TimeSeries.CreateWriter())
    {
        // Note: the original also accumulated Math.Max(logItem.Etag, ...)
        // into a local that was never read; that dead code was removed.
        foreach (var logItem in replicationMessage.Logs)
        {
            writer.PostReplicationLogItem(logItem);
        }
        writer.Commit();
        return GetEmptyMessage();
    }
}
/// <summary>
/// Deletes every point stored under the given type/key (and its rollups),
/// publishes a Delete notification, and returns the number of points the
/// writer removed. Returns 400 when type or key is missing.
/// </summary>
public HttpResponseMessage DeleteKey(string type, string key)
{
    if (string.IsNullOrEmpty(type) || string.IsNullOrEmpty(key))
    {
        return GetEmptyMessage(HttpStatusCode.BadRequest);
    }

    using (var writer = TimeSeries.CreateWriter())
    {
        var removed = writer.DeleteKey(type, key);
        writer.DeleteKeyInRollups(type, key);
        writer.Commit();

        TimeSeries.MetricsTimeSeries.Deletes.Mark();
        TimeSeries.Publisher.RaiseNotification(new KeyChangeNotification
        {
            Type = type,
            Key = key,
            Action = TimeSeriesChangeAction.Delete,
        });

        return GetMessageWithObject(removed);
    }
}
/// <summary>
/// Bulk-append endpoint: streams batches of changes from the request body,
/// writes each batch in its own transaction on a background task, registers
/// that task with the system database's task tracker, and answers 202 with
/// the tracker-assigned operation id.
/// </summary>
public async Task <HttpResponseMessage> TimeSeriesBatch()
{
    if (string.IsNullOrEmpty(GetQueryStringValue("no-op")) == false)
    {
        // this is a no-op request which is there just to force the client HTTP layer to handle the authentication
        // only used for legacy clients
        return(GetEmptyMessage());
    }
    if ("generate-single-use-auth-token".Equals(GetQueryStringValue("op"), StringComparison.InvariantCultureIgnoreCase))
    {
        // using windows auth with anonymous access = none sometimes generate a 401 even though we made two requests
        // instead of relying on windows auth, which require request buffering, we generate a one time token and return it.
        // we KNOW that the user have access to this db for writing, since they got here, so there is no issue in generating
        // a single use token for them.
        var authorizer = (MixedModeRequestAuthorizer)Configuration.Properties[typeof(MixedModeRequestAuthorizer)];
        var token = authorizer.GenerateSingleUseAuthToken(TimeSeriesName, User);
        return(GetMessageWithObject(new { Token = token }));
    }
    if (HttpContext.Current != null)
    {
        // Long-running upload: extend the classic ASP.NET script timeout.
        HttpContext.Current.Server.ScriptTimeout = 60 * 60 * 6; // six hours should do it, I think.
    }
    var sp = Stopwatch.StartNew();
    // Shared mutable state: written by the background task below, read by the
    // task tracker that this status object is registered with.
    var status = new BatchStatus { IsTimedOut = false };
    var timeoutTokenSource = new CancellationTokenSource();
    var timeSeriesChanges = 0;
    var operationId = ExtractOperationId();
    var inputStream = await InnerRequest.Content.ReadAsStreamAsync().ConfigureAwait(false);
    // Runs the whole batch on a thread-pool task; cancellation is driven by
    // timeoutTokenSource (also armed via TimeoutAfter inside the task).
    var task = Task.Factory.StartNew(() =>
    {
        var timeout = timeoutTokenSource.TimeoutAfter(TimeSpan.FromSeconds(360)); //TODO : make this configurable
        // The callback accumulates the change count as batches are consumed.
        var changeBatches = YieldChangeBatches(inputStream, timeout, countOfChanges => timeSeriesChanges += countOfChanges);
        try
        {
            foreach (var changeBatch in changeBatches)
            {
                // One writer (one transaction) per batch, bracketed by
                // Started/Ended bulk-operation notifications.
                using (var writer = TimeSeries.CreateWriter())
                {
                    TimeSeries.Publisher.RaiseNotification(new BulkOperationNotification { Type = BatchType.Started, OperationId = operationId });
                    foreach (var change in changeBatch)
                    {
                        writer.Append(change.Type, change.Key, change.At, change.Values);
                    }
                    writer.Commit();
                    TimeSeries.Publisher.RaiseNotification(new BulkOperationNotification { Type = BatchType.Ended, OperationId = operationId });
                }
            }
        }
        catch (OperationCanceledException)
        {
            // happens on timeout
            TimeSeries.Publisher.RaiseNotification(new BulkOperationNotification { Type = BatchType.Error, OperationId = operationId, Message = "Operation cancelled, likely because of a batch timeout" });
            status.IsTimedOut = true;
            status.Faulted = true;
            throw; // rethrow so the tracked task is observed as faulted
        }
        catch (Exception e)
        {
            var errorMessage = e.SimplifyException().Message;
            TimeSeries.Publisher.RaiseNotification(new BulkOperationNotification { Type = BatchType.Error, OperationId = operationId, Message = errorMessage });
            status.Faulted = true;
            status.State = RavenJObject.FromObject(new { Error = errorMessage });
            throw;
        }
        finally
        {
            status.Completed = true;
            status.TimeSeries = timeSeriesChanges;
        }
    }, timeoutTokenSource.Token);
    //TODO: do not forget to add task Id
    // NOTE(review): timeSeriesChanges is captured here before the task has
    // necessarily processed anything, so this trace line likely reports 0 —
    // confirm whether it should run after task completion instead.
    AddRequestTraceInfo(log => log.AppendFormat("\tTimeSeries batch operation received {0:#,#;;0} changes in {1}", timeSeriesChanges, sp.Elapsed));
    long id;
    DatabasesLandlord.SystemDatabase.Tasks.AddTask(task, status, new TaskActions.PendingTaskDescription { StartTime = SystemTime.UtcNow, TaskType = TaskActions.PendingTaskType.TimeSeriesBatchOperation, Payload = operationId.ToString() }, out id, timeoutTokenSource);
    // Blocks until the batch task finishes or the timeout token cancels the wait.
    task.Wait(timeoutTokenSource.Token);
    return(GetMessageWithObject(new { OperationId = id }, HttpStatusCode.Accepted));
}