protected override long ExecuteCmd(DocumentsOperationContext context)
{
    // Queue a rollup entry for every time series name that currently matches the
    // source policy (_from), so the rollup background work will re-process them
    // under the destination policy (_to). Returns the number of entries marked.
    var tss = context.DocumentDatabase.DocumentsStorage.TimeSeriesStorage;

    RollupSchema.Create(context.Transaction.InnerTransaction, TimeSeriesRollupTable, 16);
    var rollupTable = context.Transaction.InnerTransaction.OpenTable(RollupSchema, TimeSeriesRollupTable);

    foreach (var key in tss.Stats.GetTimeSeriesNameByPolicy(context, _collection, _from.Name, _skip, BatchSize))
    {
        using (rollupTable.Allocate(out var tvb))
        using (DocumentIdWorker.GetStringPreserveCase(context, _collection.Name, out var collectionSlice))
        using (Slice.From(context.Allocator, _to.Name, ByteStringType.Immutable, out var policyToApply))
        using (Slice.From(context.Allocator, string.Empty, ByteStringType.Immutable, out var changeVectorSlice))
        {
            tvb.Add(key);
            tvb.Add(collectionSlice);
            // Big-endian swap so the fixed-size index iterates rollups in time order.
            tvb.Add(Bits.SwapBytes(NextRollup(DateTime.MinValue, _to)));
            tvb.Add(policyToApply);
            tvb.Add(0L);
            tvb.Add(changeVectorSlice);
            rollupTable.Set(tvb);
        }

        Marked++;
    }

    return Marked;
}
private void CreateTombstone(DocumentsOperationContext context, Slice keySlice, long revisionEtag, CollectionName collectionName, string changeVector, long lastModifiedTicks)
{
    // Writes a revision tombstone so that replication and cleanup can observe
    // the deletion of the revision identified by keySlice.
    var etag = _documentsStorage.GenerateNextEtag();
    var tombstoneTable = context.Transaction.InnerTransaction.OpenTable(TombstonesSchema, RevisionsTombstonesSlice);

    // Revisions (and revisions tombstones) are immutable, we can safely ignore this.
    if (tombstoneTable.VerifyKeyExists(keySlice))
        return;

    using (DocumentIdWorker.GetStringPreserveCase(context, collectionName.Name, out Slice collectionSlice))
    using (Slice.From(context.Allocator, changeVector, out var changeVectorSlice))
    using (tombstoneTable.Allocate(out TableValueBuilder tvb))
    {
        tvb.Add(keySlice.Content.Ptr, keySlice.Size);
        tvb.Add(Bits.SwapBytes(etag));         // stored big-endian for ordered etag iteration
        tvb.Add(Bits.SwapBytes(revisionEtag));
        tvb.Add(context.GetTransactionMarker());
        tvb.Add((byte)Tombstone.TombstoneType.Revision);
        tvb.Add(collectionSlice);
        tvb.Add((int)DocumentFlags.None);
        tvb.Add(changeVectorSlice.Content.Ptr, changeVectorSlice.Size);
        tvb.Add(lastModifiedTicks);
        tombstoneTable.Set(tvb);
    }
}
public void Delete(DocumentsOperationContext context, string id, Slice lowerId, CollectionName collectionName, string changeVector, long lastModifiedTicks, NonPersistentDocumentFlags nonPersistentFlags, DocumentFlags flags)
{
    using (DocumentIdWorker.GetStringPreserveCase(context, id, out Slice idPtr))
    {
        // A delete-revision is a stub document carrying only the collection metadata.
        var metadata = new DynamicJsonValue
        {
            [Constants.Documents.Metadata.Key] = new DynamicJsonValue
            {
                [Constants.Documents.Metadata.Collection] = collectionName.Name
            }
        };
        var deleteRevisionDocument = context.ReadObject(metadata, "RevisionsBin");

        Delete(context, lowerId, idPtr, id, collectionName, deleteRevisionDocument, changeVector, lastModifiedTicks, nonPersistentFlags, flags);
    }
}
public void Initialize()
{
    // Builds the slices used to address this time series. The internal scopes own
    // the backing memory; the external scopes are views into TimeSeriesKeyBuffer,
    // so allocation order here matters.
    _internalScopesToDispose.Add(DocumentIdWorker.GetSliceFromId(_context, DocId, out DocumentKeyPrefix, SpecialChars.RecordSeparator)); // documentId/
    _internalScopesToDispose.Add(DocumentIdWorker.GetLower(_context.Allocator, Name, out LowerTimeSeriesName));

    var keySize = DocumentKeyPrefix.Size
                  + LowerTimeSeriesName.Size
                  + 1            /* separator */
                  + sizeof(long) /* segment start */;
    _internalScopesToDispose.Add(_context.Allocator.Allocate(keySize, out TimeSeriesKeyBuffer));

    _externalScopesToDispose.Add(CreateTimeSeriesKeyPrefixSlice(_context, TimeSeriesKeyBuffer, DocumentKeyPrefix, LowerTimeSeriesName, out TimeSeriesPrefixSlice)); // documentId/timeseries/
    _externalScopesToDispose.Add(Slice.External(_context.Allocator, TimeSeriesKeyBuffer, 0, DocumentKeyPrefix.Size + LowerTimeSeriesName.Size, out StatsKey));       // documentId/timeseries

    if (Collection != null)
        _internalScopesToDispose.Add(DocumentIdWorker.GetStringPreserveCase(_context, Collection, out CollectionSlice));

    _internalScopesToDispose.Add(Slice.From(_context.Allocator, Name, out NameSlice));
}
// Schema migration step: rewrites the collection name stored in the collections,
// tombstones and conflicts tables (re-encoded via GetStringPreserveCase) by copying
// every row from the read transaction into the write transaction.
// NOTE: the write.Add(...) order in each section must exactly match the table schema.
public bool Update(UpdateStep step)
{
    // Update collections
    using (step.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        var readTable = step.ReadTx.OpenTable(CollectionsSchema, CollectionsSlice);
        if (readTable != null)
        {
            var writeTable = step.WriteTx.OpenTable(CollectionsSchema, CollectionsSlice);
            foreach (var read in readTable.SeekByPrimaryKey(Slices.BeforeAllKeys, 0))
            {
                using (TableValueReaderUtil.CloneTableValueReader(context, read))
                {
                    var collection = TableValueToString(context, (int)CollectionsTable.Name, ref read.Reader);
                    using (DocumentIdWorker.GetStringPreserveCase(context, collection, out Slice collectionSlice))
                    using (writeTable.Allocate(out TableValueBuilder write))
                    {
                        write.Add(collectionSlice);
                        // Delete the old row by its original primary key before inserting
                        // the re-encoded one (the key encoding may have changed).
                        var pk = read.Reader.Read((int)CollectionsTable.Name, out int size);
                        using (Slice.External(context.Allocator, pk, size, out var pkSlice))
                        {
                            writeTable.DeleteByKey(pkSlice);
                        }
                        writeTable.Insert(write);
                    }
                }
            }
        }
    }

    // Update tombstones' collection value
    using (step.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        foreach (var collection in step.DocumentsStorage.GetTombstoneCollections(step.ReadTx))
        {
            string tableName;
            if (collection == AttachmentsTombstones || collection == RevisionsTombstones)
            {
                // These two share fixed table names rather than per-collection ones.
                tableName = collection;
            }
            else
            {
                var collectionName = new CollectionName(collection);
                tableName = collectionName.GetTableName(CollectionTableType.Tombstones);
            }
            var readTable = step.ReadTx.OpenTable(TombstonesSchema, tableName);
            if (readTable == null)
            {
                continue;
            }
            var writeTable = step.WriteTx.OpenTable(TombstonesSchema, tableName);
            // We seek by an index instead of the PK because
            // we need to ensure that we aren't accessing an IsGlobal key
            foreach (var read in readTable.SeekForwardFrom(TombstonesSchema.FixedSizeIndexes[CollectionEtagsSlice], 0, 0))
            {
                // We copy the memory of the read so AssertNoReferenceToOldData won't throw.
                // This is done instead of moving AssertNoReferenceToOldData to assert later
                // after we allocate the new write memory.
                using (TableValueReaderUtil.CloneTableValueReader(context, read))
                {
                    var type = *(Tombstone.TombstoneType*)read.Reader.Read((int)TombstoneTable.Type, out _);
                    var oldCollection = TableValueToString(context, (int)TombstoneTable.Collection, ref read.Reader);
                    using (DocumentIdWorker.GetStringPreserveCase(context, oldCollection, out Slice collectionSlice))
                    using (writeTable.Allocate(out TableValueBuilder write))
                    {
                        write.Add(read.Reader.Read((int)TombstoneTable.LowerId, out int size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.Etag, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.DeletedEtag, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.TransactionMarker, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.Type, out size), size);
                        if (type == Tombstone.TombstoneType.Attachment)
                        {
                            // Attachment tombstones keep their original collection value as-is.
                            write.Add(read.Reader.Read((int)TombstoneTable.Collection, out size), size);
                        }
                        else
                        {
                            write.Add(collectionSlice);
                        }
                        write.Add(read.Reader.Read((int)TombstoneTable.Flags, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.ChangeVector, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.LastModified, out size), size);
                        writeTable.Set(write);
                    }
                }
            }
        }
    }

    // Update conflicts' collection value
    using (step.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        var readTable = step.ReadTx.OpenTable(ConflictsSchema, ConflictsSlice);
        if (readTable != null)
        {
            var writeTable = step.WriteTx.OpenTable(ConflictsSchema, ConflictsSlice);
            foreach (var read in readTable.SeekByPrimaryKey(Slices.BeforeAllKeys, 0))
            {
                using (TableValueReaderUtil.CloneTableValueReader(context, read))
                {
                    var oldCollection = TableValueToString(context, (int)ConflictsTable.Collection, ref read.Reader);
                    using (DocumentIdWorker.GetStringPreserveCase(context, oldCollection, out Slice collectionSlice))
                    using (writeTable.Allocate(out TableValueBuilder write))
                    {
                        write.Add(read.Reader.Read((int)ConflictsTable.LowerId, out int size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.RecordSeparator, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.ChangeVector, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.Id, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.Data, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.Etag, out size), size);
                        write.Add(collectionSlice);
                        write.Add(read.Reader.Read((int)ConflictsTable.LastModified, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.Flags, out size), size);
                        writeTable.Set(write);
                    }
                }
            }
        }
    }

    return(true);
}
// Migration helper: rewrites a document's counters into the new grouped format,
// splitting them into groups of at most maxNumberOfCountersPerGroup counters each.
// Returns the document's collection, or null when the document no longer exists.
private CollectionName PutCounters(UpdateStep step, DocumentsOperationContext context, HashSet<string> dbIds, Dictionary<string, List<CounterDetail>> allCountersBatch, string docId)
{
    // Resolve the owning document's collection; counters of a missing document are dropped.
    string collection = null;
    using (DocumentIdWorker.GetSliceFromId(context, docId, out Slice lowerId))
    {
        var docsTable = new Table(DocsSchema, step.ReadTx);
        if (docsTable.ReadByKey(lowerId, out var tvr))
        {
            using (var doc = new BlittableJsonReaderObject(tvr.Read((int)DocumentsTable.Data, out int size), size, context))
            {
                collection = CollectionName.GetCollectionName(doc);
            }
        }
        else
        {
            // document does not exist
            return(null);
        }
    }
    var collectionName = new CollectionName(collection);

    using (DocumentIdWorker.GetSliceFromId(context, docId, out Slice documentKeyPrefix, separator: SpecialChars.RecordSeparator))
    {
        // Counters per group scale inversely with the number of dbIds so each
        // group's blittable stays roughly bounded in size. // rough estimate
        var maxNumberOfCountersPerGroup = Math.Max(32, 2048 / (dbIds.Count * 32 + 1));
        var orderedKeys = allCountersBatch.OrderBy(x => x.Key).ToList();
        var listOfDbIds = dbIds.ToList();

        // ceil(orderedKeys.Count / maxNumberOfCountersPerGroup) iterations.
        for (int i = 0; i < orderedKeys.Count / maxNumberOfCountersPerGroup + (orderedKeys.Count % maxNumberOfCountersPerGroup == 0 ? 0 : 1); i++)
        {
            var currentBatch = allCountersBatch.Skip(maxNumberOfCountersPerGroup * i).Take(maxNumberOfCountersPerGroup);
            using (var data = WriteNewCountersDocument(context, listOfDbIds, currentBatch))
            {
                var etag = step.DocumentsStorage.GenerateNextEtag();
                var changeVector = ChangeVectorUtils.NewChangeVector(
                    step.DocumentsStorage.DocumentDatabase.ServerStore.NodeTag, etag, _dbId);

                var table = step.DocumentsStorage.CountersStorage.GetCountersTable(step.WriteTx, collectionName);
                data.TryGet(CountersStorage.Values, out BlittableJsonReaderObject values);
                // First counter name in the group; used to build the group's key.
                BlittableJsonReaderObject.PropertyDetails prop = default;
                values.GetPropertyByIndex(0, ref prop);

                using (table.Allocate(out TableValueBuilder tvb))
                {
                    using (Slice.From(context.Allocator, changeVector, out var cv))
                    using (DocumentIdWorker.GetStringPreserveCase(context, collectionName.Name, out Slice collectionSlice))
                    using (context.Allocator.Allocate(documentKeyPrefix.Size + prop.Name.Size, out var counterKeyBuffer))
                    using (Slice.From(context.Allocator, prop.Name, out var nameSlice))
                    using (CreateCounterKeySlice(context, counterKeyBuffer, documentKeyPrefix, nameSlice, out var counterKeySlice))
                    {
                        if (i == 0)
                        {
                            // NOTE(review): the first group is keyed by the document prefix
                            // alone; presumably so a prefix lookup finds the leading group -
                            // confirm against CountersStorage's key scheme.
                            tvb.Add(documentKeyPrefix);
                        }
                        else
                        {
                            tvb.Add(counterKeySlice);
                        }

                        tvb.Add(Bits.SwapBytes(etag)); // big-endian for ordered etag scans
                        tvb.Add(cv);
                        tvb.Add(data.BasePointer, data.Size);
                        tvb.Add(collectionSlice);
                        tvb.Add(context.GetTransactionMarker());

                        table.Set(tvb);
                    }
                }
            }
        }
    }
    return(collectionName);
}
// Migration helper: applies a counter tombstone by marking the named counter as
// deleted (replacing its raw blob with a delete change vector) inside the counter
// group document, then rewriting the group row with a new etag/change vector.
private void DeleteCounter(UpdateStep step, LazyStringValue tombstoneKey, DocumentsOperationContext context)
{
    var (docId, counterName) = ExtractDocIdAndNameFromCounterTombstone(context, tombstoneKey);
    using (docId)
    using (counterName)
    using (DocumentIdWorker.GetLowerIdSliceAndStorageKey(context, docId, out Slice lowerId, out _))
    {
        // NOTE(review): if the document is missing, collection stays null and
        // new CollectionName(null) is constructed below - confirm that callers
        // guarantee the document exists (or that CollectionName tolerates null).
        string collection = null;
        var docsTable = new Table(DocsSchema, step.ReadTx);
        if (docsTable.ReadByKey(lowerId, out var tvr))
        {
            using (var doc = new BlittableJsonReaderObject(tvr.Read((int)DocumentsTable.Data, out int size), size, context))
            {
                collection = CollectionName.GetCollectionName(doc);
            }
        }
        var collectionName = new CollectionName(collection);
        var table = step.DocumentsStorage.CountersStorage.GetCountersTable(step.WriteTx, collectionName);

        if (table.ReadByKey(lowerId, out var existing) == false)
        {
            // No counter group for this document - nothing to delete.
            return;
        }

        // (int)CountersTable.Data = 3
        var data = new BlittableJsonReaderObject(existing.Read(3, out int oldSize), oldSize, context);

        // A counter stored as a LazyStringValue holds a delete change vector,
        // i.e. it was already deleted.
        if (data.TryGet(CountersStorage.Values, out BlittableJsonReaderObject counters) == false ||
            counters.TryGetMember(counterName, out object counterToDelete) == false ||
            counterToDelete is LazyStringValue) // already deleted
        {
            return;
        }

        var deleteCv = step.DocumentsStorage.CountersStorage.GenerateDeleteChangeVectorFromRawBlob(data, counterToDelete as BlittableJsonReaderObject.RawBlob);
        counters.Modifications = new DynamicJsonValue(counters)
        {
            [counterName] = deleteCv
        };

        // Materialize the modified blittable; the old instance is disposed here.
        using (var old = data)
        {
            data = context.ReadObject(data, null, BlittableJsonDocumentBuilder.UsageMode.ToDisk);
        }

        var newEtag = step.DocumentsStorage.GenerateNextEtag();
        var newChangeVector = ChangeVectorUtils.NewChangeVector(step.DocumentsStorage.DocumentDatabase.ServerStore.NodeTag, newEtag, _dbId);
        using (data)
        using (Slice.From(context.Allocator, newChangeVector, out var cv))
        using (DocumentIdWorker.GetStringPreserveCase(context, collectionName.Name, out Slice collectionSlice))
        using (table.Allocate(out TableValueBuilder tvb))
        {
            // Column order must match the counters table schema.
            tvb.Add(lowerId);
            tvb.Add(Bits.SwapBytes(newEtag)); // big-endian for ordered etag scans
            tvb.Add(cv);
            tvb.Add(data.BasePointer, data.Size);
            tvb.Add(collectionSlice);
            tvb.Add(context.GetTransactionMarker());

            table.Set(tvb);
        }
    }
}