public int InsertPage(byte[] buffer, int size)
{
    var hashKey = new HashKey(buffer, size);
    var key = (Slice)ConvertToKey(hashKey);

    var pageByKey = storage.Pages.GetIndex(Tables.Pages.Indices.ByKey);
    var pageData = storage.Pages.GetIndex(Tables.Pages.Indices.Data);

    var result = pageByKey.Read(Snapshot, key, writeBatch.Value);
    if (result != null)
    {
        // The page already exists: bump its usage count and reuse it.
        var id = (Slice)result.Reader.ToStringValue();

        ushort version;
        var page = LoadJson(storage.Pages, id, writeBatch.Value, out version);
        if (page == null)
            throw new InvalidOperationException(string.Format("Could not find page '{0}'. Probably data is corrupted.", id));

        var usageCount = page.Value<int>("usage_count");
        page["usage_count"] = usageCount + 1;

        storage.Pages.Add(writeBatch.Value, id, page, version);

        return page.Value<int>("id");
    }

    var newPageId = IdGenerator.GetNextIdForTable(storage.Pages);
    var newPageKeyString = CreateKey(newPageId);
    var newPageKey = (Slice)newPageKeyString;

    var newPage = new RavenJObject
    {
        { "id", newPageId },
        { "page_strong_hash", hashKey.Strong },
        { "page_weak_hash", hashKey.Weak },
        { "usage_count", 1 }
    };

    storage.Pages.Add(writeBatch.Value, newPageKey, newPage, 0);

    var dataStream = CreateStream();

    // Fold the codec list into a chain of nested streams, then push the page
    // bytes through the whole chain with a single write.
    using (var finalDataStream = fileCodecs.Aggregate((Stream)new UndisposableStream(dataStream),
        (current, codec) => codec.EncodePage(current)))
    {
        finalDataStream.Write(buffer, 0, size);
        finalDataStream.Flush();
    }

    dataStream.Position = 0;
    pageData.Add(writeBatch.Value, newPageKey, dataStream, 0);

    pageByKey.Add(writeBatch.Value, key, newPageKeyString);

    return newPageId;
}
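// The Aggregate call above folds the codec list into one nested stream: each
// codec wraps the stream produced by the one before it, so a single Write
// pushes the page bytes through every codec in order, and the same fold order
// works for reading because data flows the opposite way through the chain.
// A minimal, runnable sketch of that idea follows; IFileCodec, GZipFileCodec,
// and CodecChainDemo are hypothetical stand-ins, not RavenDB's codec types.
using System;
using System.IO;
using System.IO.Compression;
using System.Linq;
using System.Text;

public interface IFileCodec
{
    Stream EncodePage(Stream output); // wrap a stream for writing
    Stream DecodePage(Stream input);  // wrap a stream for reading
}

public class GZipFileCodec : IFileCodec
{
    public Stream EncodePage(Stream output) => new GZipStream(output, CompressionMode.Compress, leaveOpen: true);
    public Stream DecodePage(Stream input) => new GZipStream(input, CompressionMode.Decompress, leaveOpen: true);
}

public static class CodecChainDemo
{
    public static void Main()
    {
        var codecs = new IFileCodec[] { new GZipFileCodec() };
        var backing = new MemoryStream();

        // Encode: fold the codecs into a chain of writers, write once, flush.
        using (var encoder = codecs.Aggregate((Stream)backing, (current, codec) => codec.EncodePage(current)))
        {
            var page = Encoding.UTF8.GetBytes("raw page bytes");
            encoder.Write(page, 0, page.Length);
            encoder.Flush();
        }

        // Decode: fold the same list into a chain of readers over the stored bytes.
        backing.Position = 0;
        using (var decoder = codecs.Aggregate((Stream)backing, (current, codec) => codec.DecodePage(current)))
        using (var reader = new StreamReader(decoder, Encoding.UTF8))
        {
            Console.WriteLine(reader.ReadToEnd()); // prints: raw page bytes
        }
    }
}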
private RavenJObject ReadDocument(Tuple<MemoryStream, RavenJObject, int> stream, JsonDocumentMetadata metadata)
{
    if (stream.Item2 != null)
        return stream.Item2;

    RavenJObject result;
    Stream docDataStream = stream.Item1;
    if (documentCodecs.Any())
    {
        var metadataCopy = (RavenJObject)metadata.Metadata.CloneToken();
        using (docDataStream = documentCodecs
            .Aggregate(docDataStream, (dataStream, codec) => codec.Decode(metadata.Key, metadataCopy, dataStream)))
            result = docDataStream.ToJObject();
    }
    else
    {
        result = docDataStream.ToJObject();
    }

    Debug.Assert(metadata.Etag != null);
    documentCacher.SetCachedDocument(metadata.Key, metadata.Etag.Value, result, metadata.Metadata, stream.Item3);

    return result;
}
private JObject ReadDocument(Tuple<MemoryStream, JObject> stream, JsonDocumentMetadata metadata)
{
    if (stream.Item2 != null)
        return stream.Item2;

    var memoryStream = stream.Item1;
    if (documentCodecs.Count() > 0)
    {
        byte[] buffer = memoryStream.GetBuffer();
        var metadataCopy = new JObject(metadata.Metadata);
        var dataBuffer = new byte[memoryStream.Length - memoryStream.Position];
        Buffer.BlockCopy(buffer, (int)memoryStream.Position, dataBuffer, 0, dataBuffer.Length);
        // Each Decode returns a new array rather than mutating its input, so
        // the result of the fold must be captured to actually use the decoded bytes.
        dataBuffer = documentCodecs.Aggregate(dataBuffer, (bytes, codec) => codec.Value.Decode(metadata.Key, metadataCopy, bytes));
        memoryStream = new MemoryStream(dataBuffer);
    }
    var result = memoryStream.ToJObject();
    storage.SetCachedDocument(metadata.Key, metadata.Etag, Tuple.Create(new JObject(metadata.Metadata), new JObject(result)));
    return result;
}
private RavenJObject ReadDocument(Tuple<MemoryStream, RavenJObject> stream, JsonDocumentMetadata metadata)
{
    if (stream.Item2 != null)
        return stream.Item2;

    var memoryStream = stream.Item1;
    if (documentCodecs.Count() > 0)
    {
        byte[] buffer = memoryStream.GetBuffer();
        var metadataCopy = (RavenJObject)metadata.Metadata.CloneToken();
        var dataBuffer = new byte[memoryStream.Length - memoryStream.Position];
        Buffer.BlockCopy(buffer, (int)memoryStream.Position, dataBuffer, 0, dataBuffer.Length);
        // As above: capture the fold's result, since Decode returns a new array.
        dataBuffer = documentCodecs.Aggregate(dataBuffer, (bytes, codec) => codec.Value.Decode(metadata.Key, metadataCopy, bytes));
        memoryStream = new MemoryStream(dataBuffer);
    }
    var result = memoryStream.ToJObject();
    Debug.Assert(metadata.Etag != null);
    documentCacher.SetCachedDocument(metadata.Key, metadata.Etag.Value, result, metadata.Metadata);
    return result;
}
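// The two ReadDocument overloads above use the byte[] flavour of the codec
// chain, where each Decode call returns a fresh array. A small self-contained
// sketch of that shape; IDocumentCodec and XorCodec are illustrative
// stand-ins, not RavenDB's types.
using System;
using System.Linq;
using System.Text;

public interface IDocumentCodec
{
    byte[] Encode(string key, byte[] data);
    byte[] Decode(string key, byte[] data);
}

public class XorCodec : IDocumentCodec
{
    // XOR is its own inverse, which keeps the demo short.
    public byte[] Encode(string key, byte[] data) => data.Select(b => (byte)(b ^ 0x5A)).ToArray();
    public byte[] Decode(string key, byte[] data) => Encode(key, data);
}

public static class ByteCodecDemo
{
    public static void Main()
    {
        var codecs = new IDocumentCodec[] { new XorCodec() };

        var stored = codecs.Aggregate(Encoding.UTF8.GetBytes("{\"a\":1}"),
            (bytes, codec) => codec.Encode("docs/1", bytes));

        // Capture the result of the fold: calling Aggregate purely for side
        // effects does nothing, because no array is mutated in place.
        var decoded = codecs.Aggregate(stored, (bytes, codec) => codec.Decode("docs/1", bytes));

        Console.WriteLine(Encoding.UTF8.GetString(decoded)); // prints: {"a":1}
    }
}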
public int InsertPage(byte[] buffer, int size)
{
    var key = new HashKey(buffer, size);

    Api.JetSetCurrentIndex(session, Pages, "by_keys");

    Api.MakeKey(session, Pages, key.Weak, MakeKeyGrbit.NewKey);
    Api.MakeKey(session, Pages, key.Strong, MakeKeyGrbit.None);

    if (Api.TrySeek(session, Pages, SeekGrbit.SeekEQ))
    {
        // The page already exists: bump its usage count with an escrow update.
        Api.EscrowUpdate(session, Pages, tableColumnsCache.PagesColumns["usage_count"], 1);
        return Api.RetrieveColumnAsInt32(session, Pages, tableColumnsCache.PagesColumns["id"]).Value;
    }

    var bookMarkBuffer = new byte[bookmarkMost];
    var actualSize = 0;
    using (var update = new Update(session, Pages, JET_prep.Insert))
    {
        Api.SetColumn(session, Pages, tableColumnsCache.PagesColumns["page_strong_hash"], key.Strong);
        Api.SetColumn(session, Pages, tableColumnsCache.PagesColumns["page_weak_hash"], key.Weak);

        using (var columnStream = new ColumnStream(session, Pages, tableColumnsCache.PagesColumns["data"]))
        {
            using (Stream stream = new BufferedStream(columnStream))
            using (var finalStream = fileCodecs.Aggregate(stream, (current, codec) => codec.EncodePage(current)))
            {
                finalStream.Write(buffer, 0, size);
                finalStream.Flush();
            }
        }

        try
        {
            update.Save(bookMarkBuffer, bookMarkBuffer.Length, out actualSize);
        }
        catch (EsentKeyDuplicateException)
        {
            // another thread is inserting the same page concurrently
            throw new ConcurrencyException("The same file page is being created");
        }
    }

    Api.JetGotoBookmark(session, Pages, bookMarkBuffer, actualSize);

    return Api.RetrieveColumnAsInt32(session, Pages, tableColumnsCache.PagesColumns["id"]).Value;
}
private Query ApplyIndexTriggers(Query luceneQuery)
{
    luceneQuery = indexQueryTriggers.Aggregate(luceneQuery,
        (current, indexQueryTrigger) => indexQueryTrigger.Value.ProcessQuery(parent.name, current, indexQuery));
    return luceneQuery;
}
private bool WriteDocumentData(string key, string normalizedKey, Etag etag, RavenJObject data, RavenJObject metadata, InvokeSource source, out Etag newEtag, out Etag existingEtag, out DateTime savedAt)
{
    var normalizedKeySlice = (Slice)normalizedKey;
    var keyByEtagDocumentIndex = tableStorage.Documents.GetIndex(Tables.Documents.Indices.KeyByEtag);

    ushort? existingVersion;
    var isUpdate = tableStorage.Documents.Contains(Snapshot, normalizedKeySlice, writeBatch.Value, out existingVersion);
    existingEtag = null;

    if (isUpdate)
    {
        existingEtag = EnsureDocumentEtagMatch(normalizedKey, etag, "PUT");
        keyByEtagDocumentIndex.Delete(writeBatch.Value, existingEtag);
    }
    else if (etag != null && etag != Etag.Empty)
    {
        throw new ConcurrencyException("PUT attempted on document '" + key + "' using a non current etag (document deleted)")
        {
            ExpectedETag = etag
        };
    }

    var dataStream = CreateStream();

    using (var finalDataStream = documentCodecs.Aggregate((Stream)new UndisposableStream(dataStream),
        (current, codec) => codec.Encode(normalizedKey, data, metadata, current)))
    {
        data.WriteTo(finalDataStream);
        finalDataStream.Flush();
    }

    dataStream.Position = 0;
    tableStorage.Documents.Add(writeBatch.Value, normalizedKeySlice, dataStream, existingVersion ?? 0);

    newEtag = uuidGenerator.CreateSequentialUuid(UuidType.Documents);

    var keepLastModified = source == InvokeSource.FromConflictAtReplication && metadata.ContainsKey(Constants.LastModified);
    savedAt = keepLastModified ? metadata.Value<DateTime>(Constants.LastModified) : SystemTime.UtcNow;

    var isUpdated = PutDocumentMetadataInternal(key, normalizedKeySlice, metadata, newEtag, savedAt);

    keyByEtagDocumentIndex.Add(writeBatch.Value, newEtag, normalizedKey);

    return isUpdated;
}
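// WriteDocumentData above implements optimistic concurrency: on an update the
// caller's etag must match the stored one, and on an insert a concrete etag
// means the caller expected a document that is no longer there. A hedged
// sketch of just that decision logic, with EtagStore as a hypothetical
// stand-in using Guid in place of RavenDB's Etag type:
using System;
using System.Collections.Generic;

public class ConcurrencyException : Exception
{
    public ConcurrencyException(string message) : base(message) { }
    public Guid ExpectedETag { get; set; }
}

public class EtagStore
{
    private readonly Dictionary<string, Guid> etags = new Dictionary<string, Guid>();

    public Guid Put(string key, Guid? expectedEtag)
    {
        Guid current;
        var isUpdate = etags.TryGetValue(key, out current);

        if (isUpdate)
        {
            // Update path: a stale etag means someone else wrote in between.
            if (expectedEtag != null && expectedEtag.Value != current)
                throw new ConcurrencyException("PUT attempted on document '" + key + "' using a non current etag")
                {
                    ExpectedETag = expectedEtag.Value
                };
        }
        else if (expectedEtag != null && expectedEtag.Value != Guid.Empty)
        {
            // Insert path: a non-empty etag implies the caller thought the
            // document existed, so it must have been deleted concurrently.
            throw new ConcurrencyException("PUT attempted on document '" + key + "' using a non current etag (document deleted)")
            {
                ExpectedETag = expectedEtag.Value
            };
        }

        var newEtag = Guid.NewGuid(); // the real code uses sequential uuids
        etags[key] = newEtag;
        return newEtag;
    }
}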
public Guid AddDocumentInTransaction(string key, Guid? etag, RavenJObject data, RavenJObject metadata, TransactionInformation transactionInformation)
{
    var readResult = storage.Documents.Read(new RavenJObject { { "key", key } });
    if (readResult != null) // update
    {
        StorageHelper.AssertNotModifiedByAnotherTransaction(storage, this, key, readResult, transactionInformation);
        AssertValidEtag(key, readResult, storage.DocumentsModifiedByTransactions.Read(new RavenJObject { { "key", key } }), etag, "DELETE");

        var ravenJObject = (RavenJObject)readResult.Key.CloneToken();
        ravenJObject["txId"] = transactionInformation.Id.ToByteArray();
        if (storage.Documents.UpdateKey(ravenJObject) == false)
        {
            throw new ConcurrencyException("PUT attempted on document '" + key + "' that is currently being modified by another transaction")
            {
                Key = key
            };
        }
    }
    else
    {
        readResult = storage.DocumentsModifiedByTransactions.Read(new RavenJObject { { "key", key } });
        StorageHelper.AssertNotModifiedByAnotherTransaction(storage, this, key, readResult, transactionInformation);
    }

    storage.Transactions.UpdateKey(new RavenJObject
    {
        { "txId", transactionInformation.Id.ToByteArray() },
        { "timeout", SystemTime.UtcNow.Add(transactionInformation.Timeout) }
    });

    var ms = new MemoryStream();

    metadata.WriteTo(ms);

    using (var stream = documentCodecs.Aggregate<Lazy<AbstractDocumentCodec>, Stream>(ms,
        (memoryStream, codec) => codec.Value.Encode(key, data, metadata, memoryStream)))
    {
        data.WriteTo(stream);
        stream.Flush();
    }

    var newEtag = generator.CreateSequentialUuid(UuidType.DocumentTransactions);
    storage.DocumentsModifiedByTransactions.Put(new RavenJObject
    {
        { "key", key },
        { "etag", newEtag.ToByteArray() },
        { "modified", SystemTime.UtcNow },
        { "txId", transactionInformation.Id.ToByteArray() }
    }, ms.ToArray());

    return newEtag;
}
private bool WriteDocumentData(string key, Etag etag, RavenJObject data, RavenJObject metadata, out Etag newEtag, out Etag existingEtag, out DateTime savedAt)
{
    var keyByEtagDocumentIndex = tableStorage.Documents.GetIndex(Tables.Documents.Indices.KeyByEtag);

    var loweredKey = CreateKey(key);

    ushort? existingVersion;
    var isUpdate = tableStorage.Documents.Contains(Snapshot, loweredKey, writeBatch.Value, out existingVersion);
    existingEtag = null;

    if (isUpdate)
    {
        existingEtag = EnsureDocumentEtagMatch(loweredKey, etag, "PUT");
        keyByEtagDocumentIndex.Delete(writeBatch.Value, existingEtag);
    }
    else if (etag != null && etag != Etag.Empty)
    {
        throw new ConcurrencyException("PUT attempted on document '" + key + "' using a non current etag (document deleted)")
        {
            ExpectedETag = etag
        };
    }

    var dataStream = CreateStream();

    using (var finalDataStream = documentCodecs.Aggregate((Stream)new UndisposableStream(dataStream),
        (current, codec) => codec.Encode(loweredKey, data, metadata, current)))
    {
        data.WriteTo(finalDataStream);
        finalDataStream.Flush();
    }

    dataStream.Position = 0;
    tableStorage.Documents.Add(writeBatch.Value, loweredKey, dataStream, existingVersion);

    newEtag = uuidGenerator.CreateSequentialUuid(UuidType.Documents);
    savedAt = SystemTime.UtcNow;

    var isUpdated = PutDocumentMetadataInternal(key, metadata, newEtag, savedAt);

    keyByEtagDocumentIndex.Add(writeBatch.Value, newEtag, loweredKey);

    return isUpdated;
}
public void PutMappedResult(int view, string docId, string reduceKey, RavenJObject data)
{
    var mappedResultsByViewAndDocumentId = tableStorage.MappedResults.GetIndex(Tables.MappedResults.Indices.ByViewAndDocumentId);
    var mappedResultsByView = tableStorage.MappedResults.GetIndex(Tables.MappedResults.Indices.ByView);
    var mappedResultsByViewAndReduceKey = tableStorage.MappedResults.GetIndex(Tables.MappedResults.Indices.ByViewAndReduceKey);
    var mappedResultsByViewAndReduceKeyAndSourceBucket = tableStorage.MappedResults.GetIndex(Tables.MappedResults.Indices.ByViewAndReduceKeyAndSourceBucket);
    var mappedResultsData = tableStorage.MappedResults.GetIndex(Tables.MappedResults.Indices.Data);

    var ms = CreateStream();
    using (var stream = documentCodecs.Aggregate((Stream)new UndisposableStream(ms),
        (ds, codec) => codec.Value.Encode(reduceKey, data, null, ds)))
    {
        data.WriteTo(stream);
        stream.Flush();
    }

    var id = generator.CreateSequentialUuid(UuidType.MappedResults);
    var idAsString = id.ToString();
    var bucket = IndexingUtil.MapBucket(docId);

    var reduceKeyHash = HashKey(reduceKey);

    tableStorage.MappedResults.Add(
        writeBatch.Value,
        idAsString,
        new RavenJObject
        {
            { "view", view },
            { "reduceKey", reduceKey },
            { "docId", docId },
            { "etag", id.ToByteArray() },
            { "bucket", bucket },
            { "timestamp", SystemTime.UtcNow }
        }, 0);

    ms.Position = 0;
    mappedResultsData.Add(writeBatch.Value, idAsString, ms, 0);

    mappedResultsByViewAndDocumentId.MultiAdd(writeBatch.Value, CreateKey(view, docId), idAsString);
    mappedResultsByView.MultiAdd(writeBatch.Value, CreateKey(view), idAsString);
    mappedResultsByViewAndReduceKey.MultiAdd(writeBatch.Value, CreateKey(view, reduceKey, reduceKeyHash), idAsString);
    mappedResultsByViewAndReduceKeyAndSourceBucket.MultiAdd(writeBatch.Value, CreateKey(view, reduceKey, reduceKeyHash, bucket), idAsString);
}
public Guid AddDocumentInTransaction(string key, Guid? etag, RavenJObject data, RavenJObject metadata, TransactionInformation transactionInformation)
{
    var readResult = storage.Documents.Read(new RavenJObject { { "key", key } });
    if (readResult != null) // update
    {
        StorageHelper.AssertNotModifiedByAnotherTransaction(storage, this, key, readResult, transactionInformation);
        AssertValidEtag(key, readResult, storage.DocumentsModifiedByTransactions.Read(new RavenJObject { { "key", key } }), etag);

        ((RavenJObject)readResult.Key)["txId"] = transactionInformation.Id.ToByteArray();
        if (storage.Documents.UpdateKey(readResult.Key) == false)
            throw new ConcurrencyException("PUT attempted on document '" + key +
                "' that is currently being modified by another transaction");
    }
    else
    {
        readResult = storage.DocumentsModifiedByTransactions.Read(new RavenJObject { { "key", key } });
        StorageHelper.AssertNotModifiedByAnotherTransaction(storage, this, key, readResult, transactionInformation);
    }

    storage.Transactions.UpdateKey(new RavenJObject
    {
        { "txId", transactionInformation.Id.ToByteArray() },
        { "timeout", SystemTime.UtcNow.Add(transactionInformation.Timeout) }
    });

    var ms = new MemoryStream();
    metadata.WriteTo(ms);
    var dataBytes = documentCodecs.Aggregate(data.ToBytes(), (bytes, codec) => codec.Encode(key, data, metadata, bytes));
    ms.Write(dataBytes, 0, dataBytes.Length);

    var newEtag = generator.CreateSequentialUuid();
    storage.DocumentsModifiedByTransactions.Put(new RavenJObject
    {
        { "key", key },
        { "etag", newEtag.ToByteArray() },
        { "modified", SystemTime.UtcNow },
        { "txId", transactionInformation.Id.ToByteArray() }
    }, ms.ToArray());

    return newEtag;
}
public void PutMappedResult(string view, string docId, string reduceKey, RavenJObject data)
{
    var ms = new MemoryStream();
    using (var stream = documentCodecs.Aggregate((Stream)ms, (ds, codec) => codec.Value.Encode(reduceKey, data, null, ds)))
    {
        data.WriteTo(stream);
    }
    var byteArray = generator.CreateSequentialUuid().ToByteArray();
    var key = new RavenJObject
    {
        { "view", view },
        { "reduceKey", reduceKey },
        { "docId", docId },
        { "etag", byteArray },
        { "bucket", IndexingUtil.MapBucket(docId) },
        { "timestamp", SystemTime.UtcNow }
    };
    storage.MappedResults.Put(key, ms.ToArray());
}
public void CompleteTransaction(Guid txId, Action<DocumentInTransactionData> perDocumentModified)
{
    Api.JetSetCurrentIndex(session, Transactions, "by_tx_id");
    Api.MakeKey(session, Transactions, txId, MakeKeyGrbit.NewKey);
    if (Api.TrySeek(session, Transactions, SeekGrbit.SeekEQ))
        Api.JetDelete(session, Transactions);

    Api.JetSetCurrentIndex(session, DocumentsModifiedByTransactions, "by_tx");
    Api.MakeKey(session, DocumentsModifiedByTransactions, txId, MakeKeyGrbit.NewKey);
    if (Api.TrySeek(session, DocumentsModifiedByTransactions, SeekGrbit.SeekEQ) == false)
        return;
    Api.MakeKey(session, DocumentsModifiedByTransactions, txId, MakeKeyGrbit.NewKey);
    Api.JetSetIndexRange(session, DocumentsModifiedByTransactions, SetIndexRangeGrbit.RangeInclusive | SetIndexRangeGrbit.RangeUpperLimit);

    var bookmarksToDelete = new List<byte[]>();
    var documentsInTransaction = new List<DocumentInTransactionData>();
    do
    {
        // esent index ranges are approximate, and we need to check them ourselves as well
        if (Api.RetrieveColumnAsGuid(session, DocumentsModifiedByTransactions,
                tableColumnsCache.DocumentsModifiedByTransactionsColumns["locked_by_transaction"]) != txId)
            continue;

        var metadata = Api.RetrieveColumn(session, DocumentsModifiedByTransactions,
            tableColumnsCache.DocumentsModifiedByTransactionsColumns["metadata"]);
        var key = Api.RetrieveColumnAsString(session, DocumentsModifiedByTransactions,
            tableColumnsCache.DocumentsModifiedByTransactionsColumns["key"], Encoding.Unicode);

        RavenJObject dataAsJson;
        var metadataAsJson = metadata.ToJObject();
        using (Stream stream = new BufferedStream(new ColumnStream(session, DocumentsModifiedByTransactions,
            tableColumnsCache.DocumentsModifiedByTransactionsColumns["data"])))
        {
            using (var data = documentCodecs.Aggregate(stream, (dataStream, codec) => codec.Decode(key, metadataAsJson, dataStream)))
                dataAsJson = data.ToJObject();
        }

        documentsInTransaction.Add(new DocumentInTransactionData
        {
            Data = dataAsJson,
            Delete = Api.RetrieveColumnAsBoolean(session, DocumentsModifiedByTransactions,
                tableColumnsCache.DocumentsModifiedByTransactionsColumns["delete_document"]).Value,
            Etag = Api.RetrieveColumn(session, DocumentsModifiedByTransactions,
                tableColumnsCache.DocumentsModifiedByTransactionsColumns["etag"]).TransfromToGuidWithProperSorting(),
            Key = key,
            Metadata = metadata.ToJObject(),
        });
        bookmarksToDelete.Add(Api.GetBookmark(session, DocumentsModifiedByTransactions));
    } while (Api.TryMoveNext(session, DocumentsModifiedByTransactions));

    foreach (var bookmark in bookmarksToDelete)
    {
        Api.JetGotoBookmark(session, DocumentsModifiedByTransactions, bookmark, bookmark.Length);
        Api.JetDelete(session, DocumentsModifiedByTransactions);
    }

    foreach (var documentInTransactionData in documentsInTransaction)
    {
        perDocumentModified(documentInTransactionData);
    }
}
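// CompleteTransaction deletes rows in two phases: it records a bookmark for
// every matching row while the cursor walks the index range, and only deletes
// after the scan finishes, so the traversal never sees its own mutations.
// The same shape over a plain list, as an illustrative sketch (no RavenDB or
// Esent types involved):
using System;
using System.Collections.Generic;

public static class TwoPhaseDeleteDemo
{
    public static void Main()
    {
        var txId = Guid.NewGuid();
        var rows = new List<(Guid LockedBy, string Key)>
        {
            (txId, "docs/1"),
            (Guid.NewGuid(), "docs/2"), // locked by another transaction, must survive
            (txId, "docs/3"),
        };

        // Phase 1: scan and remember positions (the "bookmarks"), but do not mutate.
        var toDelete = new List<int>();
        for (var i = 0; i < rows.Count; i++)
            if (rows[i].LockedBy == txId)
                toDelete.Add(i);

        // Phase 2: delete after the scan, back to front so the indices stay valid.
        for (var i = toDelete.Count - 1; i >= 0; i--)
            rows.RemoveAt(toDelete[i]);

        foreach (var row in rows)
            Console.WriteLine(row.Key); // prints: docs/2
    }
}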