public void SynchronizingFileStream_should_write_to_storage_by_64kB_pages()
{
    using (var stream = SynchronizingFileStream.CreatingOrOpeningAndWritting(
        transactionalStorage,
        new MockIndexStorage(),
        new StorageOperationsTask(transactionalStorage, new MockIndexStorage(), new EmptyNotificationsPublisher()),
        "file",
        EmptyETagMetadata))
    {
        var data = new byte[StorageConstants.MaxPageSize];
        new Random().NextBytes(data);

        // Write 65537 bytes in total: two 32kB chunks plus one trailing byte.
        stream.Write(data, 0, 32768);
        // NOTE(review): offset 32767 overlaps the previous write's source range by one
        // byte — presumably 32768 was intended; the page-size assertions below are
        // unaffected either way. TODO confirm.
        stream.Write(data, 32767, 32768);
        stream.Write(data, 0, 1);

        stream.PreventUploadComplete = false;
    }

    FileAndPages fileAndPages = null;
    transactionalStorage.Batch(accessor => fileAndPages = accessor.GetFile("file", 0, 10));

    // 65537 bytes -> one full 64kB page followed by a single 1-byte page.
    Assert.Equal(2, fileAndPages.Pages.Count);
    Assert.Equal(StorageConstants.MaxPageSize, fileAndPages.Pages[0].Size);
    Assert.Equal(1, fileAndPages.Pages[1].Size);
}
/// <summary>
/// Returns the metadata of a locally stored file, or <c>null</c> when the file
/// does not exist in storage.
/// </summary>
/// <param name="fileName">Name of the file whose metadata is requested.</param>
/// <returns>The file's metadata, or <c>null</c> if the file was not found.</returns>
private RavenJObject GetLocalMetadata(string fileName)
{
    RavenJObject result = null;

    try
    {
        // Request 0 pages: we only need the file header / metadata.
        storage.Batch(accessor => { result = accessor.GetFile(fileName, 0, 0).Metadata; });
    }
    catch (FileNotFoundException)
    {
        return null;
    }

    // Fix: removed a second, redundant GetFile round-trip whose result was assigned
    // to a local (fileAndPages) that was never read.

    return result;
}
/// <summary>
/// HEAD handler: returns the file's metadata as response headers, or 404 when the
/// file is missing or carries the delete marker.
/// </summary>
public HttpResponseMessage Head(string name)
{
    name = RavenFileNameHelper.RavenPath(name);

    FileAndPages fileAndPages = null;
    try
    {
        // Metadata only — no pages are loaded.
        Storage.Batch(accessor => fileAndPages = accessor.GetFile(name, 0, 0));
    }
    catch (FileNotFoundException)
    {
        log.Debug("Cannot get metadata of a file '{0}' because file was not found", name);
        return new HttpResponseMessage(HttpStatusCode.NotFound);
    }

    // A file flagged with the delete marker is treated as nonexistent.
    if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
    {
        log.Debug("Cannot get metadata of a file '{0}' because file was deleted", name);
        return new HttpResponseMessage(HttpStatusCode.NotFound);
    }

    var response = GetEmptyMessage();
    AddHeaders(response, fileAndPages.Metadata);
    return response;
}
/// <summary>
/// GET handler: streams the file's content with its metadata as headers.
/// Responds 404 when the file is missing or marked as deleted.
/// </summary>
public HttpResponseMessage Get(string name)
{
    name = RavenFileNameHelper.RavenPath(name);

    FileAndPages fileAndPages = null;
    try
    {
        Storage.Batch(accessor => fileAndPages = accessor.GetFile(name, 0, 0));
    }
    catch (FileNotFoundException)
    {
        log.Debug("File '{0}' was not found", name);
        throw new HttpResponseException(HttpStatusCode.NotFound);
    }

    // Files carrying the delete marker must not be served.
    if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
    {
        log.Debug("File '{0}' is not accessible to get (Raven-Delete-Marker set)", name);
        throw new HttpResponseException(HttpStatusCode.NotFound);
    }

    var readingStream = StorageStream.Reading(Storage, name);
    var result = StreamResult(name, readingStream);
    AddHeaders(result, fileAndPages.Metadata);

    return result.WithNoCache();
}
/// <summary>
/// Executes the rename work item against the destination: reads the file's current
/// metadata from local storage and forwards the rename request.
/// </summary>
public override Task<SynchronizationReport> PerformAsync(RavenFileSystemClient.SynchronizationClient destination)
{
    FileAndPages current = null;
    Storage.Batch(accessor => current = accessor.GetFile(FileName, 0, 0));

    return destination.RenameAsync(FileName, rename, current.Metadata, ServerInfo);
}
/// <summary>
/// Re-reads the file's metadata from storage into <c>FileMetadata</c>.
/// No-op when no storage is attached.
/// </summary>
public void RefreshMetadata()
{
    if (Storage == null)
        return;

    FileAndPages current = null;
    Storage.Batch(accessor => current = accessor.GetFile(FileName, 0, 0));
    FileMetadata = current.Metadata;
}
// Loads a file's header (name, sizes, metadata) and, when pagesToLoad > 0, up to
// pagesToLoad page descriptors starting at page position 'start'.
// Throws FileNotFoundException when the file record does not exist.
// (Voron storage implementation.)
public FileAndPages GetFile(string filename, int start, int pagesToLoad)
{
    var key = CreateKey(filename);

    ushort version;
    var file = LoadJson(storage.Files, key, writeBatch.Value, out version);
    if (file == null)
    {
        throw new FileNotFoundException("Could not find file: " + filename);
    }

    var f = ConvertToFile(file);
    var fileInformation = new FileAndPages
    {
        TotalSize = f.TotalSize,
        Name = f.Name,
        Metadata = f.Metadata,
        UploadedSize = f.UploadedSize,
        Start = start
    };

    if (pagesToLoad > 0)
    {
        // Pages are found through the (file name, position) secondary index.
        var usageByFileNameAndPosition = storage.Usage.GetIndex(Tables.Usage.Indices.ByFileNameAndPosition);

        using (var iterator = usageByFileNameAndPosition.Iterate(Snapshot, writeBatch.Value))
        {
            // Seek to the first usage entry at or after (filename, start).
            if (iterator.Seek(CreateKey(filename, start)))
            {
                do
                {
                    var id = iterator.CreateReaderForCurrent().ToStringValue();
                    var usage = LoadJson(storage.Usage, id, writeBatch.Value, out version);

                    var name = usage.Value<string>("name");
                    // The seek may land on entries belonging to a lexicographically
                    // later file; once the name no longer matches we are past the end.
                    if (name.Equals(filename, StringComparison.InvariantCultureIgnoreCase) == false)
                    {
                        break;
                    }

                    fileInformation.Pages.Add(new PageInformation
                    {
                        Id = usage.Value<int>("page_id"),
                        Size = usage.Value<int>("page_size")
                    });
                    // MoveNext must be evaluated first (side effect advances the iterator);
                    // stop once pagesToLoad descriptors have been collected.
                } while (iterator.MoveNext() && fileInformation.Pages.Count < pagesToLoad);
            }
        }
    }

    return fileInformation;
}
/// <summary>
/// Reads the metadata of <paramref name="fileName"/> from storage.
/// Returns an empty <see cref="RavenJObject"/> when the file does not exist.
/// </summary>
private RavenJObject GetMetadata(string fileName)
{
    try
    {
        FileAndPages found = null;
        storage.Batch(accessor => found = accessor.GetFile(fileName, 0, 0));
        return found.Metadata;
    }
    catch (FileNotFoundException)
    {
        // Missing file -> empty metadata rather than propagating the exception.
        return new RavenJObject();
    }
}
// Loads a file's header (total/uploaded size, metadata) and, when pagesToLoad > 0,
// up to pagesToLoad page descriptors starting at page position 'start'.
// Throws FileNotFoundException when no record exists. (Esent storage implementation.)
public FileAndPages GetFile(string filename, int start, int pagesToLoad)
{
    Api.JetSetCurrentIndex(session, Files, "by_name");
    Api.MakeKey(session, Files, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
    if (Api.TrySeek(session, Files, SeekGrbit.SeekEQ) == false)
    {
        throw new FileNotFoundException("Could not find file: " + filename);
    }

    var fileInformation = new FileAndPages
    {
        TotalSize = GetTotalSize(),
        UploadedSize = BitConverter.ToInt64(Api.RetrieveColumn(session, Files, tableColumnsCache.FilesColumns["uploaded_size"]), 0),
        Metadata = RetrieveMetadata(),
        Name = filename,
        Start = start
    };

    if (pagesToLoad > 0)
    {
        // Range-scan the usage table starting at (filename, start), inclusive,
        // bounded above by the filename key.
        Api.JetSetCurrentIndex(session, Usage, "by_name_and_pos");
        Api.MakeKey(session, Usage, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
        Api.MakeKey(session, Usage, start, MakeKeyGrbit.None);
        if (Api.TrySeek(session, Usage, SeekGrbit.SeekGE))
        {
            Api.MakeKey(session, Usage, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
            Api.JetSetIndexRange(session, Usage, SetIndexRangeGrbit.RangeInclusive);
            do
            {
                var name = Api.RetrieveColumnAsString(session, Usage, tableColumnsCache.UsageColumns["name"]);
                // NOTE(review): case-sensitive ordinal comparison with 'continue',
                // whereas the Voron implementation compares case-insensitively and
                // 'break's on mismatch — confirm which behavior is intended.
                if (name != filename)
                {
                    continue;
                }

                fileInformation.Pages.Add(new PageInformation
                {
                    Size = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["page_size"]).Value,
                    Id = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["page_id"]).Value
                });
                // TryMoveNext must run first (advances the cursor); stop once
                // pagesToLoad descriptors have been collected.
            } while (Api.TryMoveNext(session, Usage) && fileInformation.Pages.Count < pagesToLoad);
        }
    }

    return fileInformation;
}
/// <summary>
/// Verifies that pages whose content did not change keep their original ids after
/// re-synchronization, while the single modified page gets a freshly inserted id.
/// </summary>
/// <remarks>
/// Fix: the test was declared <c>async void</c>, so the test runner could not await
/// it and any failed assertion would surface as an unobserved exception instead of a
/// test failure. Async tests must return <c>Task</c>.
/// </remarks>
public async Task Should_reuse_pages_where_nothing_has_changed()
{
    var file = SyncTestUtils.PreparePagesStream(3);
    file.Position = 0;

    var sourceContent = new MemoryStream();
    file.CopyTo(sourceContent);
    sourceContent.Position = StorageConstants.MaxPageSize + 1;
    sourceContent.Write(new byte[] { 0, 0, 0, 0 }, 0, 4); // change content of the 2nd page
    var destinationContent = file;

    sourceContent.Position = 0;
    await source.UploadAsync("test", sourceContent);
    destinationContent.Position = 0;
    await destination.UploadAsync("test", destinationContent);

    var contentUpdate = new ContentUpdateWorkItem("test", "http://localhost:12345", sourceRfs.Storage, sourceRfs.SigGenerator);

    sourceContent.Position = 0;
    // force to upload entire file, we just want to check which pages will be reused
    await contentUpdate.UploadToAsync(destination.Synchronization);
    await destination.Synchronization.ResolveConflictAsync("test", ConflictResolutionStrategy.RemoteVersion);
    await contentUpdate.UploadToAsync(destination.Synchronization);

    FileAndPages fileAndPages = null;
    destinationRfs.Storage.Batch(accessor => fileAndPages = accessor.GetFile("test", 0, 256));

    Assert.Equal(3, fileAndPages.Pages.Count);
    Assert.Equal(1, fileAndPages.Pages[0].Id); // reused page
    Assert.Equal(4, fileAndPages.Pages[1].Id); // new page -> id == 4
    Assert.Equal(3, fileAndPages.Pages[2].Id); // reused page

    sourceContent.Position = 0;
    var metadata = await destination.GetMetadataForAsync("test");
    Assert.Equal(sourceContent.GetMD5Hash(), metadata.Value<string>("Content-MD5"));
}
/// <summary>
/// Base work-item constructor: captures the file's current metadata from storage and
/// prepares the server identification plus conflict-handling helpers.
/// </summary>
protected SynchronizationWorkItem(string fileName, string sourceServerUrl, ITransactionalStorage storage)
{
    Storage = storage;
    FileName = fileName;

    // Snapshot the file's metadata at work-item creation time (0 pages requested).
    FileAndPages header = null;
    Storage.Batch(accessor => header = accessor.GetFile(fileName, 0, 0));
    FileMetadata = header.Metadata;

    ServerInfo = new ServerInfo
    {
        Id = Storage.Id,
        FileSystemUrl = sourceServerUrl
    };

    conflictDetector = new ConflictDetector();
    conflictResolver = new ConflictResolver();
}
/// <summary>
/// Builds a <see cref="DataInfo"/> descriptor (name, length, last-modified time)
/// for a locally stored file, or returns <c>null</c> when the file does not exist.
/// </summary>
private DataInfo GetLocalFileDataInfo(string fileName)
{
    FileAndPages fileAndPages = null;
    try
    {
        Storage.Batch(accessor => fileAndPages = accessor.GetFile(fileName, 0, 0));
    }
    catch (FileNotFoundException)
    {
        return null;
    }

    var lastModifiedUtc = Convert.ToDateTime(fileAndPages.Metadata.Value<string>("Last-Modified")).ToUniversalTime();

    return new DataInfo
    {
        CreatedAt = lastModifiedUtc,
        Length = fileAndPages.TotalSize ?? 0,
        Name = fileAndPages.Name
    };
}
/// <summary>
/// Verifies that when only the first page of a two-page file changes, the second page
/// keeps its original id (is reused) while the first gets a newly inserted id.
/// </summary>
/// <remarks>
/// Fix: replaced blocking calls (<c>.Wait()</c> / <c>.Result</c>) on async operations
/// with <c>await</c> in an <c>async Task</c> test — blocking on tasks risks deadlocks
/// and thread-pool starvation, and wraps failures in AggregateException.
/// </remarks>
public async Task Should_reuse_second_page_if_only_first_one_changed()
{
    var file = SyncTestUtils.PreparePagesStream(2);
    file.Position = 0;

    var sourceContent = new MemoryStream();
    file.CopyTo(sourceContent);
    sourceContent.Position = 0;
    sourceContent.Write(new byte[] { 0, 0, 0, 0 }, 0, 4); // change content of the 1st page
    var destinationContent = file;

    sourceContent.Position = 0;
    await source.UploadAsync("test", sourceContent);
    destinationContent.Position = 0;
    await destination.UploadAsync("test", destinationContent);

    var contentUpdate = new ContentUpdateWorkItem("test", "http://localhost:12345", sourceRfs.Storage, sourceRfs.SigGenerator);

    sourceContent.Position = 0;
    // force to upload entire file, we just want to check which pages will be reused
    await contentUpdate.UploadToAsync(destination.Synchronization);
    await destination.Synchronization.ResolveConflictAsync("test", ConflictResolutionStrategy.RemoteVersion);
    await contentUpdate.UploadToAsync(destination.Synchronization);

    FileAndPages fileAndPages = null;
    destinationRfs.Storage.Batch(accessor => fileAndPages = accessor.GetFile("test", 0, 256));

    Assert.Equal(2, fileAndPages.Pages.Count);
    Assert.Equal(3, fileAndPages.Pages[0].Id); // new page -> id == 3
    Assert.Equal(2, fileAndPages.Pages[1].Id); // reused page -> id still == 2

    sourceContent.Position = 0;
    var metadata = await destination.GetMetadataForAsync("test");
    Assert.Equal(sourceContent.GetMD5Hash(), metadata["Content-MD5"]);
}
// Positions the internal "page frame" (the window of up to PagesBatchSize pages
// held in fileAndPages, whose first byte sits at file offset currentPageFrameOffset)
// so that it covers the byte at 'offset', then records 'offset' as the current position.
private void MovePageFrame(long offset)
{
    // Clamp so that seeking past EOF lands at the end of the file.
    offset = Math.Min(Length, offset);

    // Seeking backwards (or on first use) restarts the frame from the file's beginning.
    if (offset < currentPageFrameOffset || fileAndPages == null)
    {
        TransactionalStorage.Batch(accessor => fileAndPages = accessor.GetFile(Name, 0, PagesBatchSize));
        currentPageFrameOffset = 0;
    }
    // Slide the frame forward one page batch at a time until it covers 'offset'.
    while (currentPageFrameOffset + CurrentPageFrameSize - 1 < offset)
    {
        // The current frame size must be captured before the frame is replaced.
        var lastPageFrameSize = CurrentPageFrameSize;
        var nextPageIndex = fileAndPages.Start + fileAndPages.Pages.Count;
        TransactionalStorage.Batch(accessor => fileAndPages = accessor.GetFile(Name, nextPageIndex, PagesBatchSize));
        if (fileAndPages.Pages.Count < 1)
        {
            // No pages past this point: reset Start and stop advancing.
            fileAndPages.Start = 0;
            break;
        }
        currentPageFrameOffset += lastPageFrameSize;
    }
    currentOffset = offset;
}
/// <summary>
/// Stores two files (each built from two 3-byte pages) and verifies that reading
/// back the first file returns exactly its own two pages, ids 1 and 2 — not pages
/// belonging to the file whose name merely starts with the same prefix.
/// </summary>
public void Should_work()
{
    var filename = "test";
    var greaterFileName = filename + ".bin";

    // Create the first file from two associated pages.
    transactionalStorage.Batch(accessor =>
    {
        accessor.PutFile(filename, 6, metadataWithEtag);

        var pageId = accessor.InsertPage(new byte[] { 1, 2, 3 }, 3);
        accessor.AssociatePage(filename, pageId, 0, 3);
        pageId = accessor.InsertPage(new byte[] { 4, 5, 6 }, 3);
        accessor.AssociatePage(filename, pageId, 3, 3);

        accessor.CompleteFileUpload(filename);
    });

    // Create a second file whose name extends the first one's.
    transactionalStorage.Batch(accessor =>
    {
        accessor.PutFile(greaterFileName, 6, metadataWithEtag);

        var pageId = accessor.InsertPage(new byte[] { 11, 22, 33 }, 3);
        accessor.AssociatePage(greaterFileName, pageId, 0, 3);
        pageId = accessor.InsertPage(new byte[] { 44, 55, 66 }, 3);
        accessor.AssociatePage(greaterFileName, pageId, 3, 3);

        accessor.CompleteFileUpload(greaterFileName);
    });

    FileAndPages fileAndPages = null;
    transactionalStorage.Batch(accessor => fileAndPages = accessor.GetFile(filename, 0, 32));

    Assert.Equal(2, fileAndPages.Pages.Count);
    Assert.Equal(1, fileAndPages.Pages[0].Id);
    Assert.Equal(2, fileAndPages.Pages[1].Id);
}
/// <summary>
/// Verifies that when new content is appended to a file, all pre-existing pages are
/// reused (retain their original ids) after synchronization.
/// </summary>
/// <param name="numberOfPages">Number of pages in the original (pre-append) file.</param>
/// <remarks>
/// Fix: replaced blocking calls (<c>.Wait()</c> / <c>.Result</c>) on async operations
/// with <c>await</c> in an <c>async Task</c> test — blocking on tasks risks deadlocks
/// and thread-pool starvation, and wraps failures in AggregateException.
/// </remarks>
public async Task Should_reuse_pages_when_data_appended(int numberOfPages)
{
    var file = SyncTestUtils.PreparePagesStream(numberOfPages);
    // add new pages at the end
    var sourceContent = new CombinedStream(file, SyncTestUtils.PreparePagesStream(numberOfPages));
    var destinationContent = file;

    sourceContent.Position = 0;
    await source.UploadAsync("test", sourceContent);
    destinationContent.Position = 0;
    await destination.UploadAsync("test", destinationContent);

    var contentUpdate = new ContentUpdateWorkItem("test", "http://localhost:12345", sourceRfs.Storage, sourceRfs.SigGenerator);

    // force to upload entire file, we just want to check which pages will be reused
    await contentUpdate.UploadToAsync(destination.Synchronization);
    await destination.Synchronization.ResolveConflictAsync("test", ConflictResolutionStrategy.RemoteVersion);
    await contentUpdate.UploadToAsync(destination.Synchronization);

    FileAndPages fileAndPages = null;
    destinationRfs.Storage.Batch(accessor => fileAndPages = accessor.GetFile("test", 0, 2 * numberOfPages));

    Assert.Equal(2 * numberOfPages, fileAndPages.Pages.Count);

    for (var i = 0; i < numberOfPages; i++)
    {
        // if page ids are in the original order it means that they were used the existing pages
        Assert.Equal(i + 1, fileAndPages.Pages[i].Id);
    }

    sourceContent.Position = 0;
    var metadata = await destination.GetMetadataForAsync("test");
    Assert.Equal(sourceContent.GetMD5Hash(), metadata["Content-MD5"]);
}
/// <summary>
/// Returns the RDC signature manifest for the given (URL-escaped) file name,
/// or 404 when the file does not exist.
/// </summary>
public async Task<HttpResponseMessage> Manifest(string id)
{
    var filename = Uri.UnescapeDataString(id);

    FileAndPages fileAndPages = null;
    try
    {
        Storage.Batch(accessor => fileAndPages = accessor.GetFile(filename, 0, 0));
    }
    catch (FileNotFoundException)
    {
        Log.Debug("Signature manifest for a file '{0}' was not found", filename);
        return Request.CreateResponse(HttpStatusCode.NotFound);
    }

    long? fileLength = fileAndPages.TotalSize;

    using (var signatureRepository = new StorageSignatureRepository(Storage, filename))
    {
        var rdcManager = new LocalRdcManager(signatureRepository, Storage, SigGenerator);

        var dataInfo = new DataInfo
        {
            Name = filename,
            CreatedAt = Convert.ToDateTime(fileAndPages.Metadata.Value<string>("Last-Modified")).ToUniversalTime()
        };
        var signatureManifest = await rdcManager.GetSignatureManifestAsync(dataInfo);
        signatureManifest.FileLength = fileLength ?? 0;

        Log.Debug("Signature manifest for a file '{0}' was downloaded. Signatures count was {1}", filename, signatureManifest.Signatures.Count);

        return this.GetMessageWithObject(signatureManifest, HttpStatusCode.OK).WithNoCache();
    }
}
// Loads a file's header (name, sizes, metadata) and, when pagesToLoad > 0, up to
// pagesToLoad page descriptors starting at page position 'start'.
// Throws FileNotFoundException when the file record does not exist.
// (Voron storage implementation.)
public FileAndPages GetFile(string filename, int start, int pagesToLoad)
{
    var key = CreateKey(filename);

    ushort version;
    var file = LoadJson(storage.Files, key, writeBatch.Value, out version);
    if (file == null)
        throw new FileNotFoundException("Could not find file: " + filename);

    var f = ConvertToFile(file);
    var fileInformation = new FileAndPages
    {
        TotalSize = f.TotalSize,
        Name = f.Name,
        Metadata = f.Metadata,
        UploadedSize = f.UploadedSize,
        Start = start
    };

    if (pagesToLoad > 0)
    {
        // Pages are found through the (file name, position) secondary index.
        var usageByFileNameAndPosition = storage.Usage.GetIndex(Tables.Usage.Indices.ByFileNameAndPosition);

        using (var iterator = usageByFileNameAndPosition.Iterate(Snapshot, writeBatch.Value))
        {
            // Seek to the first usage entry at or after (filename, start).
            if (iterator.Seek(CreateKey(filename, start)))
            {
                do
                {
                    var id = iterator.CreateReaderForCurrent().ToStringValue();
                    var usage = LoadJson(storage.Usage, id, writeBatch.Value, out version);

                    var name = usage.Value<string>("name");
                    // The seek may land on entries belonging to a lexicographically
                    // later file; once the name no longer matches we are past the end.
                    if (name.Equals(filename, StringComparison.InvariantCultureIgnoreCase) == false)
                        break;

                    fileInformation.Pages.Add(new PageInformation
                    {
                        Id = usage.Value<int>("page_id"),
                        Size = usage.Value<int>("page_size")
                    });
                    // MoveNext must be evaluated first (side effect advances the iterator);
                    // stop once pagesToLoad descriptors have been collected.
                } while (iterator.MoveNext() && fileInformation.Pages.Count < pagesToLoad);
            }
        }
    }

    return fileInformation;
}
// Loads a file's header (total/uploaded size, metadata) and, when pagesToLoad > 0,
// up to pagesToLoad page descriptors starting at page position 'start'.
// Throws FileNotFoundException when no record exists. (Esent storage implementation.)
public FileAndPages GetFile(string filename, int start, int pagesToLoad)
{
    Api.JetSetCurrentIndex(session, Files, "by_name");
    Api.MakeKey(session, Files, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
    if (Api.TrySeek(session, Files, SeekGrbit.SeekEQ) == false)
        throw new FileNotFoundException("Could not find file: " + filename);

    var fileInformation = new FileAndPages
    {
        TotalSize = GetTotalSize(),
        UploadedSize = BitConverter.ToInt64(Api.RetrieveColumn(session, Files, tableColumnsCache.FilesColumns["uploaded_size"]), 0),
        Metadata = RetrieveMetadata(),
        Name = filename,
        Start = start
    };

    if (pagesToLoad > 0)
    {
        // Range-scan the usage table starting at (filename, start), inclusive,
        // bounded above by the filename key.
        Api.JetSetCurrentIndex(session, Usage, "by_name_and_pos");
        Api.MakeKey(session, Usage, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
        Api.MakeKey(session, Usage, start, MakeKeyGrbit.None);
        if (Api.TrySeek(session, Usage, SeekGrbit.SeekGE))
        {
            Api.MakeKey(session, Usage, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
            Api.JetSetIndexRange(session, Usage, SetIndexRangeGrbit.RangeInclusive);
            do
            {
                var name = Api.RetrieveColumnAsString(session, Usage, tableColumnsCache.UsageColumns["name"]);
                // NOTE(review): case-sensitive ordinal comparison with 'continue',
                // whereas the Voron implementation compares case-insensitively and
                // 'break's on mismatch — confirm which behavior is intended.
                if (name != filename)
                    continue;

                fileInformation.Pages.Add(new PageInformation
                {
                    Size = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["page_size"]).Value,
                    Id = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["page_id"]).Value
                });
                // TryMoveNext must run first (advances the cursor); stop once
                // pagesToLoad descriptors have been collected.
            } while (Api.TryMoveNext(session, Usage) && fileInformation.Pages.Count < pagesToLoad);
        }
    }

    return fileInformation;
}