private static unsafe void UpdateDocumentCounters(UpdateStep step, DocumentsOperationContext context, string docId, CollectionName collection)
{
    using (DocumentIdWorker.GetSliceFromId(context, docId, out Slice lowerDocId))
    {
        var table = step.WriteTx.OpenTable(DocumentsStorage.DocsSchema, collection.GetTableName(CollectionTableType.Documents));
        if (table.ReadByKey(lowerDocId, out var tvr) == false)
        {
            return; // document doesn't exist
        }

        var tableId = tvr.Id;
        var counterNames = step.DocumentsStorage.CountersStorage.GetCountersForDocument(context, step.WriteTx, docId).ToList();
        var doc = step.DocumentsStorage.TableValueToDocument(context, ref tvr, skipValidationInDebug: true);
        if (doc.TryGetMetadata(out var metadata) == false)
        {
            if (counterNames.Count > 0)
            {
                doc.Flags |= DocumentFlags.HasCounters;

                var dvj = new DynamicJsonValue
                {
                    [Constants.Documents.Metadata.Counters] = counterNames
                };
                doc.Data.Modifications = new DynamicJsonValue(doc.Data)
                {
                    [Constants.Documents.Metadata.Key] = dvj
                };
            }
            else
            {
                doc.Flags &= ~DocumentFlags.HasCounters;
            }
        }
public Table EnsureRevisionTableCreated(Transaction tx, CollectionName collection)
{
    var tableName = collection.GetTableName(CollectionTableType.Revisions);

    if (_tableCreated.Contains(collection.Name) == false)
    {
        // RavenDB-11705: It is possible that this will revert if the transaction
        // aborts, so we must record this only after the transaction has been committed.
        // Note that calling the Create() method multiple times is a no-op.
        RevisionsSchema.Create(tx, tableName, 16);

        tx.LowLevelTransaction.OnDispose += _ =>
        {
            if (tx.LowLevelTransaction.Committed == false)
            {
                return;
            }

            // Not sure if we can _rely_ on the tx write lock here, so let's be safe and create
            // a new instance, just in case.
            _tableCreated = new HashSet<string>(_tableCreated, StringComparer.OrdinalIgnoreCase)
            {
                collection.Name
            };
        };
    }

    return tx.OpenTable(RevisionsSchema, tableName);
}
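The snippet above registers an OnDispose callback so that the collection name is cached only once the transaction has actually committed; an aborted transaction would roll back the table creation, and caching the name eagerly would then suppress the Create() call forever. Below is a minimal, self-contained sketch of that "record only after commit" pattern. FakeTransaction and TableCreationCache are hypothetical stand-ins for illustration, not RavenDB/Voron types.

using System;
using System.Collections.Generic;

public class FakeTransaction
{
    public bool Committed { get; private set; }
    public event Action OnDispose;

    public void Commit() => Committed = true;

    public void Dispose() => OnDispose?.Invoke();
}

public class TableCreationCache
{
    // Treated as immutable: replaced wholesale instead of mutated in place,
    // mirroring the "create a new instance, just in case" comment above.
    private HashSet<string> _created = new HashSet<string>(StringComparer.OrdinalIgnoreCase);

    public void EnsureCreated(FakeTransaction tx, string name)
    {
        if (_created.Contains(name))
            return;

        // ... create the table inside the transaction here ...

        tx.OnDispose += () =>
        {
            if (tx.Committed == false)
                return; // creation was rolled back, so don't remember it

            _created = new HashSet<string>(_created, StringComparer.OrdinalIgnoreCase) { name };
        };
    }
}

Replacing the whole set rather than mutating it also means any concurrent reader keeps iterating over a stable snapshot, which is the design choice the original comment hints at.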
public bool Update(UpdateStep step)
{
    // When revisions are enabled and we delete a document, we store it as a revision with the 'DeleteRevision' flag.
    // This flag was used to find the deleted revisions. Now we also store resolved conflicts as revisions, so a
    // deleted revision may carry additional flags such as 'Conflicted' or 'Resolved'. This change requires us to
    // change the index definition and the logic of how we find the deleted revisions: if the revision is deleted,
    // it is stored with 'DeletedEtag' set to its etag value; otherwise (if the revision is a document) it is stored
    // with 'DeletedEtag' set to 0.
    step.DocumentsStorage.RevisionsStorage = new RevisionsStorage(step.DocumentsStorage.DocumentDatabase, step.WriteTx);

    using (step.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        step.WriteTx.DeleteTree("RevisionsFlagsAndEtag"); // remove the old index
        step.WriteTx.CreateTree(DeleteRevisionEtagSlice);

        foreach (var collection in step.DocumentsStorage.RevisionsStorage.GetCollections(step.ReadTx))
        {
            var collectionName = new CollectionName(collection);
            var tableName = collectionName.GetTableName(CollectionTableType.Revisions);
            var readTable = step.ReadTx.OpenTable(RevisionsSchema, tableName);
            if (readTable == null)
            {
                continue;
            }

            var writeTable = step.DocumentsStorage.RevisionsStorage.EnsureRevisionTableCreated(step.WriteTx, collectionName);
            foreach (var read in readTable.SeekForwardFrom(RevisionsSchema.FixedSizeIndexes[CollectionRevisionsEtagsSlice], 0, 0))
            {
                using (TableValueReaderUtil.CloneTableValueReader(context, read))
                using (writeTable.Allocate(out TableValueBuilder write))
                {
                    var flags = TableValueToFlags((int)Columns.Flags, ref read.Reader);

                    write.Add(read.Reader.Read((int)Columns.ChangeVector, out int size), size);
                    write.Add(read.Reader.Read((int)Columns.LowerId, out size), size);
                    write.Add(read.Reader.Read((int)Columns.RecordSeparator, out size), size);
                    write.Add(read.Reader.Read((int)Columns.Etag, out size), size);
                    write.Add(read.Reader.Read((int)Columns.Id, out size), size);
                    write.Add(read.Reader.Read((int)Columns.Document, out size), size);
                    write.Add((int)flags);

                    if ((flags & DocumentFlags.DeleteRevision) == DocumentFlags.DeleteRevision)
                    {
                        write.Add(read.Reader.Read((int)Columns.Etag, out size), size); // set the DeletedEtag
                    }
                    else
                    {
                        write.Add(NotDeletedRevisionMarker);
                    }

                    write.Add(read.Reader.Read((int)Columns.LastModified, out size), size);
                    write.Add(read.Reader.Read((int)Columns.TransactionMarker, out size), size);

                    writeTable.Set(write, true);
                }
            }
        }
    }

    return true;
}
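Per the comment above, the new layout distinguishes deleted revisions from live ones purely by the DeletedEtag column: a delete revision stores its own etag there, a live revision stores 0. The following is a minimal sketch of that convention; the DocumentFlags enum is a stripped-down stand-in (its bit values are hypothetical), and the marker value of 0 is taken from the comment, not from the real NotDeletedRevisionMarker definition.

using System;

[Flags]
public enum DocumentFlags
{
    None = 0,
    DeleteRevision = 1 << 7, // hypothetical bit value; the real enum defines many more flags
}

public static class RevisionDeletedEtag
{
    public const long NotDeletedRevisionMarker = 0; // per the comment: live revisions store 0

    public static long For(DocumentFlags flags, long revisionEtag)
    {
        return (flags & DocumentFlags.DeleteRevision) == DocumentFlags.DeleteRevision
            ? revisionEtag              // delete revision: keyed by its own etag in the new index
            : NotDeletedRevisionMarker; // live revision
    }
}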
private static void UpdateSchemaForDocumentsAndRevisions(UpdateStep step)
{
    using var _ = step.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context);

    var collections = step.WriteTx.OpenTable(DocumentsStorage.CollectionsSchema, DocumentsStorage.CollectionsSlice);
    foreach (var tvr in collections.SeekByPrimaryKey(Slices.BeforeAllKeys, 0))
    {
        var collection = DocumentsStorage.TableValueToId(context, (int)DocumentsStorage.CollectionsTable.Name, ref tvr.Reader);
        var collectionName = new CollectionName(collection);

        var tableTree = step.WriteTx.CreateTree(collectionName.GetTableName(CollectionTableType.Documents), RootObjectType.Table);
        DocumentsStorage.DocsSchema.SerializeSchemaIntoTableTree(tableTree);

        var revisionsTree = step.WriteTx.ReadTree(collectionName.GetTableName(CollectionTableType.Revisions), RootObjectType.Table);
        if (revisionsTree != null)
        {
            RevisionsStorage.RevisionsSchema.SerializeSchemaIntoTableTree(revisionsTree);
        }
    }
}
public Table EnsureRevisionTableCreated(Transaction tx, CollectionName collection)
{
    var tableName = collection.GetTableName(CollectionTableType.Revisions);

    if (_tableCreated.Add(collection.Name))
    {
        RevisionsSchema.Create(tx, tableName, 16);
    }

    return tx.OpenTable(RevisionsSchema, tableName);
}
private Table GetOrCreateTable(Transaction tx, CollectionName collection)
{
    var tableName = collection.GetTableName(CollectionTableType.TimeSeriesStats); // TODO: cache the collection and pass Slice

    if (tx.IsWriteTransaction)
    {
        TimeSeriesStatsSchema.Create(tx, tableName, 16);
    }

    return tx.OpenTable(TimeSeriesStatsSchema, tableName);
}
public bool Update(UpdateStep step)
{
    step.DocumentsStorage.RevisionsStorage = new RevisionsStorage(step.DocumentsStorage.DocumentDatabase, step.WriteTx);

    // Update revisions
    using (step.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        foreach (var collection in step.DocumentsStorage.RevisionsStorage.GetCollections(step.ReadTx))
        {
            var collectionName = new CollectionName(collection);
            var tableName = collectionName.GetTableName(CollectionTableType.Revisions);
            var readTable = step.ReadTx.OpenTable(RevisionsSchema, tableName);
            if (readTable == null)
            {
                continue;
            }

            var writeTable = step.DocumentsStorage.RevisionsStorage.EnsureRevisionTableCreated(step.WriteTx, collectionName);
            foreach (var read in readTable.SeekForwardFrom(RevisionsSchema.FixedSizeIndexes[CollectionRevisionsEtagsSlice], 0, 0))
            {
                using (TableValueReaderUtil.CloneTableValueReader(context, read))
                using (writeTable.Allocate(out TableValueBuilder write))
                {
                    var flags = TableValueToFlags((int)Columns.Flags, ref read.Reader);
                    var lastModified = TableValueToDateTime((int)Columns.LastModified, ref read.Reader);

                    write.Add(read.Reader.Read((int)Columns.ChangeVector, out int size), size);
                    write.Add(read.Reader.Read((int)Columns.LowerId, out size), size);
                    write.Add(read.Reader.Read((int)Columns.RecordSeparator, out size), size);
                    write.Add(read.Reader.Read((int)Columns.Etag, out size), size);
                    write.Add(read.Reader.Read((int)Columns.Id, out size), size);
                    write.Add(read.Reader.Read((int)Columns.Document, out size), size);
                    write.Add((int)flags);
                    write.Add(read.Reader.Read((int)Columns.DeletedEtag, out size), size);
                    write.Add(lastModified.Ticks);
                    write.Add(read.Reader.Read((int)Columns.TransactionMarker, out size), size);

                    if ((flags & DocumentFlags.Resolved) == DocumentFlags.Resolved)
                    {
                        write.Add((int)DocumentFlags.Resolved);
                    }
                    else
                    {
                        write.Add(0);
                    }

                    write.Add(Bits.SwapBytes(lastModified.Ticks));

                    writeTable.Set(write, true);
                }
            }
        }
    }

    return true;
}
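The extra column written at the end, Bits.SwapBytes(lastModified.Ticks), presumably stores the ticks big-endian so that a byte-wise ordered index sorts by time; treat that motivation as an assumption, since the snippet does not spell it out. The small self-contained illustration below shows why byte-swapping achieves that, using BinaryPrimitives rather than the Sparrow Bits helper.

using System;
using System.Buffers.Binary;

public static class BigEndianOrderingDemo
{
    public static void Main()
    {
        var earlier = (ulong)new DateTime(2020, 1, 1).Ticks;
        var later = (ulong)new DateTime(2021, 1, 1).Ticks;

        // The little-endian in-memory layout does not compare byte-by-byte in numeric order,
        // but the big-endian (byte-swapped) form does, which is what a lexicographically
        // compared fixed-size key needs.
        Span<byte> a = stackalloc byte[sizeof(ulong)];
        Span<byte> b = stackalloc byte[sizeof(ulong)];
        BinaryPrimitives.WriteUInt64BigEndian(a, earlier);
        BinaryPrimitives.WriteUInt64BigEndian(b, later);

        Console.WriteLine(a.SequenceCompareTo(b) < 0); // True: byte order matches time order
    }
}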
public bool Update(UpdateStep step)
{
    // Update collections
    using (step.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        var readTable = step.ReadTx.OpenTable(CollectionsSchema, CollectionsSlice);
        if (readTable != null)
        {
            var writeTable = step.WriteTx.OpenTable(CollectionsSchema, CollectionsSlice);
            foreach (var read in readTable.SeekByPrimaryKey(Slices.BeforeAllKeys, 0))
            {
                using (TableValueReaderUtil.CloneTableValueReader(context, read))
                {
                    var collection = TableValueToString(context, (int)CollectionsTable.Name, ref read.Reader);
                    using (DocumentIdWorker.GetStringPreserveCase(context, collection, out Slice collectionSlice))
                    using (writeTable.Allocate(out TableValueBuilder write))
                    {
                        write.Add(collectionSlice);

                        var pk = read.Reader.Read((int)CollectionsTable.Name, out int size);
                        using (Slice.External(context.Allocator, pk, size, out var pkSlice))
                        {
                            writeTable.DeleteByKey(pkSlice);
                        }

                        writeTable.Insert(write);
                    }
                }
            }
        }
    }

    // Update tombstones' collection value
    using (step.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        foreach (var collection in step.DocumentsStorage.GetTombstoneCollections(step.ReadTx))
        {
            string tableName;
            if (collection == AttachmentsTombstones || collection == RevisionsTombstones)
            {
                tableName = collection;
            }
            else
            {
                var collectionName = new CollectionName(collection);
                tableName = collectionName.GetTableName(CollectionTableType.Tombstones);
            }

            var readTable = step.ReadTx.OpenTable(TombstonesSchema, tableName);
            if (readTable == null)
            {
                continue;
            }

            var writeTable = step.WriteTx.OpenTable(TombstonesSchema, tableName);

            // We seek by an index instead of the PK because
            // we need to ensure that we aren't accessing an IsGlobal key.
            foreach (var read in readTable.SeekForwardFrom(TombstonesSchema.FixedSizeIndexes[CollectionEtagsSlice], 0, 0))
            {
                // We copy the memory of the read so AssertNoReferenceToOldData won't throw.
                // This is done instead of moving AssertNoReferenceToOldData to assert later,
                // after we allocate the new write memory.
                using (TableValueReaderUtil.CloneTableValueReader(context, read))
                {
                    var type = *(Tombstone.TombstoneType*)read.Reader.Read((int)TombstoneTable.Type, out _);
                    var oldCollection = TableValueToString(context, (int)TombstoneTable.Collection, ref read.Reader);
                    using (DocumentIdWorker.GetStringPreserveCase(context, oldCollection, out Slice collectionSlice))
                    using (writeTable.Allocate(out TableValueBuilder write))
                    {
                        write.Add(read.Reader.Read((int)TombstoneTable.LowerId, out int size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.Etag, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.DeletedEtag, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.TransactionMarker, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.Type, out size), size);

                        if (type == Tombstone.TombstoneType.Attachment)
                        {
                            write.Add(read.Reader.Read((int)TombstoneTable.Collection, out size), size);
                        }
                        else
                        {
                            write.Add(collectionSlice);
                        }

                        write.Add(read.Reader.Read((int)TombstoneTable.Flags, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.ChangeVector, out size), size);
                        write.Add(read.Reader.Read((int)TombstoneTable.LastModified, out size), size);

                        writeTable.Set(write);
                    }
                }
            }
        }
    }

    // Update conflicts' collection value
    using (step.DocumentsStorage.ContextPool.AllocateOperationContext(out DocumentsOperationContext context))
    {
        var readTable = step.ReadTx.OpenTable(ConflictsSchema, ConflictsSlice);
        if (readTable != null)
        {
            var writeTable = step.WriteTx.OpenTable(ConflictsSchema, ConflictsSlice);
            foreach (var read in readTable.SeekByPrimaryKey(Slices.BeforeAllKeys, 0))
            {
                using (TableValueReaderUtil.CloneTableValueReader(context, read))
                {
                    var oldCollection = TableValueToString(context, (int)ConflictsTable.Collection, ref read.Reader);
                    using (DocumentIdWorker.GetStringPreserveCase(context, oldCollection, out Slice collectionSlice))
                    using (writeTable.Allocate(out TableValueBuilder write))
                    {
                        write.Add(read.Reader.Read((int)ConflictsTable.LowerId, out int size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.RecordSeparator, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.ChangeVector, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.Id, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.Data, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.Etag, out size), size);
                        write.Add(collectionSlice);
                        write.Add(read.Reader.Read((int)ConflictsTable.LastModified, out size), size);
                        write.Add(read.Reader.Read((int)ConflictsTable.Flags, out size), size);

                        writeTable.Set(write);
                    }
                }
            }
        }
    }

    return true;
}