/// <summary>
/// Persists a new signature record together with its binary payload and
/// registers it under the by-name lookup index.
/// </summary>
/// <param name="name">Logical signature name used for the by-name index entry.</param>
/// <param name="level">Signature level stored on the metadata record.</param>
/// <param name="action">Callback that writes the signature bytes into the supplied stream.</param>
public void AddSignature(string name, int level, Action<Stream> action)
{
    var dataIndex = storage.Signatures.GetIndex(Tables.Signatures.Indices.Data);
    var byNameIndex = storage.Signatures.GetIndex(Tables.Signatures.Indices.ByName);

    var signatureId = IdGenerator.GetNextIdForTable(storage.Signatures);
    var signatureKey = CreateKey(signatureId);

    var metadata = new RavenJObject
    {
        { "id", signatureId },
        { "name", name },
        { "level", level },
        { "created_at", DateTime.UtcNow }
    };

    // Let the caller produce the payload, then rewind so the whole stream is stored.
    var payload = CreateStream();
    action(payload);
    payload.Position = 0;

    storage.Signatures.Add(writeBatch.Value, signatureKey, metadata, 0);
    dataIndex.Add(writeBatch.Value, signatureKey, payload, 0);
    byNameIndex.MultiAdd(writeBatch.Value, CreateKey(name), signatureKey);
}
/// <summary>
/// Stores a page of data, de-duplicating by content hash: when a page with the
/// same hash already exists its usage count is incremented and its id returned;
/// otherwise a new page record and its raw bytes are written.
/// </summary>
/// <param name="buffer">Buffer holding the page bytes.</param>
/// <param name="size">Number of meaningful bytes in <paramref name="buffer"/>.</param>
/// <returns>The id of the existing or newly created page.</returns>
public int InsertPage(byte[] buffer, int size)
{
    var hashKey = new HashKey(buffer, size);
    var contentKey = ConvertToKey(hashKey);

    var byKeyIndex = storage.Pages.GetIndex(Tables.Pages.Indices.ByKey);
    var dataIndex = storage.Pages.GetIndex(Tables.Pages.Indices.Data);

    var existing = byKeyIndex.Read(Snapshot, contentKey, writeBatch.Value);
    if (existing != null)
    {
        // A page with identical content already exists — bump its usage count.
        var existingId = existing.Reader.ToStringValue();
        ushort version;
        var page = LoadJson(storage.Pages, existingId, writeBatch.Value, out version);
        if (page == null)
        {
            throw new InvalidOperationException(string.Format("Could not find page '{0}'. Probably data is corrupted.", existingId));
        }

        page["usage_count"] = page.Value<int>("usage_count") + 1;
        storage.Pages.Add(writeBatch.Value, existingId, page, version);

        return page.Value<int>("id");
    }

    // First time we see this content — write the metadata record, the raw
    // bytes, and the hash-to-id mapping.
    var newId = IdGenerator.GetNextIdForTable(storage.Pages);
    var newKey = CreateKey(newId);

    var newPage = new RavenJObject
    {
        { "id", newId },
        { "page_strong_hash", hashKey.Strong },
        { "page_weak_hash", hashKey.Weak },
        { "usage_count", 0 }
    };

    storage.Pages.Add(writeBatch.Value, newKey, newPage, 0);
    dataIndex.Add(writeBatch.Value, newKey, buffer, 0);
    byKeyIndex.Add(writeBatch.Value, contentKey, newKey);

    return newId;
}
/// <summary>
/// Records that a page belongs to a file at a given position, updating the
/// file's uploaded/total size bookkeeping and the per-file usage indexes.
/// </summary>
/// <param name="filename">Name of the file the page belongs to.</param>
/// <param name="pageId">Id of the page being associated.</param>
/// <param name="pagePositionInFile">Position of the page within the file.</param>
/// <param name="pageSize">Size in bytes of the page.</param>
/// <exception cref="InvalidDataException">
/// Thrown when the upload would exceed a known (non-negative) allocated total size.
/// </exception>
public void AssociatePage(string filename, int pageId, int pagePositionInFile, int pageSize)
{
    var byFileName = storage.Usage.GetIndex(Tables.Usage.Indices.ByFileName);
    var byFileNameAndPosition = storage.Usage.GetIndex(Tables.Usage.Indices.ByFileNameAndPosition);

    var fileKey = CreateKey(filename);
    ushort version;
    var file = LoadFileByKey(fileKey, out version);

    var totalSize = file.Value<long?>("total_size");
    var uploadedSize = file.Value<int>("uploaded_size");

    // A known, non-negative total size caps how much may be uploaded.
    if (totalSize != null && totalSize >= 0 && uploadedSize + pageSize > totalSize)
    {
        throw new InvalidDataException("Try to upload more data than the file was allocated for (" + totalSize + ") and new size would be: " + (uploadedSize + pageSize));
    }

    file["uploaded_size"] = uploadedSize + pageSize;

    // using chunked encoding, we don't know what the size is
    // we use negative values here for keeping track of the unknown size
    if (totalSize == null || totalSize < 0)
    {
        file["total_size"] = (totalSize ?? 0) - pageSize;
    }

    storage.Files.Add(writeBatch.Value, fileKey, file, version);

    var usageId = IdGenerator.GetNextIdForTable(storage.Usage);
    var usageKey = CreateKey(usageId);

    var usage = new RavenJObject
    {
        { "id", usageId },
        { "name", filename },
        { "file_pos", pagePositionInFile },
        { "page_id", pageId },
        { "page_size", pageSize }
    };

    storage.Usage.Add(writeBatch.Value, usageKey, usage, 0);
    byFileName.MultiAdd(writeBatch.Value, CreateKey(filename), usageKey);
    byFileNameAndPosition.Add(writeBatch.Value, CreateKey(filename, pagePositionInFile), usageKey, 0);
}