Ejemplo n.º 1
0
        public void SynchronizingFileStream_should_write_to_storage_by_64kB_pages()
        {
            // Arrange: a synchronizing stream backed by the transactional storage under test.
            using (var stream = SynchronizingFileStream.CreatingOrOpeningAndWriting(
                       transactionalStorage, new MockIndexStorage(),
                       new StorageOperationsTask(transactionalStorage, new MockIndexStorage(), new EmptyNotificationsPublisher()),
                       "file", EmptyETagMetadata))
            {
                var data = new byte[StorageConstants.MaxPageSize];
                new Random().NextBytes(data);

                // Write 64kB + 1 byte in three chunks; the stream should repage this
                // into exactly one full 64kB page plus a single trailing byte.
                stream.Write(data, 0, 32768);
                stream.Write(data, 32767, 32768);
                stream.Write(data, 0, 1);

                stream.PreventUploadComplete = false;
            }

            FileAndPagesInformation fileAndPages = null;
            transactionalStorage.Batch(accessor => fileAndPages = accessor.GetFile("file", 0, 10));

            // Assert: one full page and a 1-byte remainder page.
            Assert.Equal(2, fileAndPages.Pages.Count);
            Assert.Equal(StorageConstants.MaxPageSize, fileAndPages.Pages[0].Size);
            Assert.Equal(1, fileAndPages.Pages[1].Size);
        }
Ejemplo n.º 2
0
        /// <summary>
        /// Reads the metadata of a locally stored file, or returns null when the file
        /// does not exist in storage.
        /// </summary>
        /// <param name="fileName">Storage name of the file to look up.</param>
        /// <returns>The file's metadata, or null if the file was not found.</returns>
        private RavenJObject GetLocalMetadata(string fileName)
        {
            // The original code issued a second, discarded GetFile call here (marked with a
            // TODO questioning its need). GetFile only reads, so the redundant call is removed.
            try
            {
                RavenJObject result = null;
                storage.Batch(accessor => { result = accessor.GetFile(fileName, 0, 0).Metadata; });
                return result;
            }
            catch (FileNotFoundException)
            {
                // Missing file is an expected outcome, signalled to callers by null.
                return null;
            }
        }
Ejemplo n.º 3
0
        /// <summary>
        /// Streams a stored file back to the client, exposing its metadata as response headers.
        /// Responds 404 when the file is missing or carries a delete marker.
        /// </summary>
        public HttpResponseMessage Get(string name)
        {
            name = RavenFileNameHelper.RavenPath(name);

            FileAndPagesInformation fileAndPages = null;
            try
            {
                Storage.Batch(accessor => fileAndPages = accessor.GetFile(name, 0, 0));
            }
            catch (FileNotFoundException)
            {
                log.Debug("File '{0}' was not found", name);
                throw new HttpResponseException(HttpStatusCode.NotFound);
            }

            // A delete marker means the file is logically gone even though its record still exists.
            if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
            {
                log.Debug("File '{0}' is not accessible to get (Raven-Delete-Marker set)", name);
                throw new HttpResponseException(HttpStatusCode.NotFound);
            }

            var contentStream = StorageStream.Reading(Storage, name);
            var response = StreamResult(name, contentStream);

            // The etag travels as a response header rather than inside the metadata body.
            var etag = new Etag(fileAndPages.Metadata.Value<string>(Constants.MetadataEtagField));
            fileAndPages.Metadata.Remove(Constants.MetadataEtagField);
            WriteHeaders(fileAndPages.Metadata, etag, response);

            return response.WithNoCache();
        }
Ejemplo n.º 4
0
        /// <summary>
        /// Streams a stored file back to the client under its display name, exposing its
        /// metadata as response headers. Responds 404 when the file is delete-marked.
        /// </summary>
        public HttpResponseMessage Get(string name)
        {
            name = FileHeader.Canonize(name);

            FileAndPagesInformation fileAndPages = null;
            Storage.Batch(accessor => fileAndPages = accessor.GetFile(name, 0, 0));

            // A delete marker means the file is logically gone even though its record still exists.
            if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
            {
                if (log.IsDebugEnabled)
                {
                    log.Debug("File '{0}' is not accessible to get (Raven-Delete-Marker set)", name);
                }
                throw new HttpResponseException(HttpStatusCode.NotFound);
            }

            var contentStream = StorageStream.Reading(Storage, name);

            // The download filename may differ from the storage key.
            var filename = GetFileName(name, fileAndPages.Metadata);
            var response = StreamResult(filename, contentStream);

            // The etag travels as a response header rather than inside the metadata body.
            var etag = new Etag(fileAndPages.Metadata.Value<string>(Constants.MetadataEtagField));
            fileAndPages.Metadata.Remove(Constants.MetadataEtagField);
            WriteHeaders(fileAndPages.Metadata, etag, response);

            if (log.IsDebugEnabled)
            {
                log.Debug("File '{0}' with etag {1} is being retrieved.", name, etag);
            }

            return response.WithNoCache();
        }
Ejemplo n.º 5
0
        /// <summary>
        /// Returns the RDC signature manifest for a file so a remote node can compute a diff.
        /// </summary>
        public async Task<HttpResponseMessage> Manifest(string id)
        {
            var canonicalFilename = FileHeader.Canonize(id);

            FileAndPagesInformation fileAndPages = null;
            Storage.Batch(accessor => fileAndPages = accessor.GetFile(canonicalFilename, 0, 0));

            using (var signatureRepository = new StorageSignatureRepository(Storage, canonicalFilename))
            {
                var rdcManager = new LocalRdcManager(signatureRepository, Storage, SigGenerator);

                var dataInfo = new DataInfo
                {
                    Name         = canonicalFilename,
                    LastModified = fileAndPages.Metadata.Value<DateTime>(Constants.RavenLastModified).ToUniversalTime()
                };

                var signatureManifest = await rdcManager.GetSignatureManifestAsync(dataInfo);

                // TotalSize may be null for an incomplete upload; report 0 in that case.
                signatureManifest.FileLength = fileAndPages.TotalSize ?? 0;

                Log.Debug("Signature manifest for a file '{0}' was downloaded. Signatures count was {1}", id, signatureManifest.Signatures.Count);

                return GetMessageWithObject(signatureManifest)
                       .WithNoCache();
            }
        }
Ejemplo n.º 6
0
        /// <summary>
        /// Returns only the headers (metadata plus etag) for a stored file.
        /// Throws FileNotFoundException when the file is delete-marked.
        /// </summary>
        public HttpResponseMessage Head(string name)
        {
            name = FileHeader.Canonize(name);

            FileAndPagesInformation fileAndPages = null;
            Storage.Batch(accessor => fileAndPages = accessor.GetFile(name, 0, 0));

            // A delete marker means the file is logically gone even though its record still exists.
            if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
            {
                if (log.IsDebugEnabled)
                {
                    log.Debug("Cannot get metadata of a file '{0}' because file was deleted", name);
                }
                throw new FileNotFoundException();
            }

            var response = GetEmptyMessage();

            // The etag is emitted as a header, not as part of the metadata body.
            var etag = new Etag(fileAndPages.Metadata.Value<string>(Constants.MetadataEtagField));
            fileAndPages.Metadata.Remove(Constants.MetadataEtagField);
            WriteHeaders(fileAndPages.Metadata, etag, response);

            return response;
        }
Ejemplo n.º 7
0
        /// <summary>
        /// Returns only the headers (metadata plus etag) for a stored file.
        /// Responds 404 when the file is missing or carries a delete marker.
        /// </summary>
        public HttpResponseMessage Head(string name)
        {
            name = RavenFileNameHelper.RavenPath(name);

            FileAndPagesInformation fileAndPages = null;
            try
            {
                Storage.Batch(accessor => fileAndPages = accessor.GetFile(name, 0, 0));
            }
            catch (FileNotFoundException)
            {
                log.Debug("Cannot get metadata of a file '{0}' because file was not found", name);
                return new HttpResponseMessage(HttpStatusCode.NotFound);
            }

            // A delete-marked file is treated the same as a missing one.
            if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
            {
                log.Debug("Cannot get metadata of a file '{0}' because file was deleted", name);
                return new HttpResponseMessage(HttpStatusCode.NotFound);
            }

            var response = GetEmptyMessage();

            // The etag is emitted as a header, not as part of the metadata body.
            var etag = new Etag(fileAndPages.Metadata.Value<string>(Constants.MetadataEtagField));
            fileAndPages.Metadata.Remove(Constants.MetadataEtagField);
            WriteHeaders(fileAndPages.Metadata, etag, response);

            return response;
        }
Ejemplo n.º 8
0
        /// <summary>
        /// Asks the destination to rename the file, passing the file's current local metadata.
        /// </summary>
        public override Task<SynchronizationReport> PerformAsync(IAsyncFilesSynchronizationCommands destination)
        {
            FileAndPagesInformation info = null;
            Storage.Batch(accessor => info = accessor.GetFile(FileName, 0, 0));

            return destination.RenameAsync(FileName, rename, info.Metadata, ServerInfo);
        }
Ejemplo n.º 9
0
        /// <summary>
        /// Sends a delete request for the file to the synchronization destination,
        /// passing the file's current local metadata.
        /// </summary>
        public override Task<SynchronizationReport> PerformAsync(ISynchronizationServerClient synchronizationServerClient)
        {
            FileAndPagesInformation info = null;
            Storage.Batch(accessor => info = accessor.GetFile(FileName, 0, 0));

            return synchronizationServerClient.DeleteAsync(FileName, info.Metadata, FileSystemInfo);
        }
Ejemplo n.º 10
0
        /// <summary>
        /// Streams the named files to the client over <paramref name="gzip2"/>.
        /// For each file it writes the uploaded size followed by the raw content;
        /// a delete-marked file is signalled with a size of -1 and no content.
        /// </summary>
        /// <param name="gzip2">Destination stream (wrapped in a BufferedStream for writing).</param>
        /// <param name="fileNames">Names of the files to export.</param>
        private void StreamExportToClient(Stream gzip2, string[] fileNames)
        {
            using (var gzip = new BufferedStream(gzip2))
            {
                var binaryWriter = new BinaryWriter(gzip);

                // Reusable transfer buffers sized to one storage page each.
                var buffer     = new byte[StorageConstants.MaxPageSize];
                var pageBuffer = new byte[StorageConstants.MaxPageSize];

                foreach (var name in fileNames)
                {
                    var cannonizedName = FileHeader.Canonize(name);

                    // The original wrapped these calls in `catch (Exception ex) { throw; }`,
                    // a no-op that only produced an unused-variable warning; removed.
                    FileAndPagesInformation fileAndPages = null;
                    Storage.Batch(accessor => fileAndPages = accessor.GetFile(cannonizedName, 0, 0));

                    // A delete marker means the file is logically gone; signal that with -1.
                    if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
                    {
                        if (log.IsDebugEnabled)
                        {
                            log.Debug("File '{0}' is not accessible to get (Raven-Delete-Marker set)", name);
                        }

                        binaryWriter.Write(-1);
                        continue;
                    }

                    var fileSize = fileAndPages.UploadedSize;
                    binaryWriter.Write(fileSize);

                    // Copy the file content page by page until the stream is exhausted.
                    var readingStream = StorageStream.Reading(Storage, cannonizedName);
                    var bytesRead     = 0;
                    do
                    {
                        bytesRead = readingStream.ReadUsingExternalTempBuffer(buffer, 0, buffer.Length, pageBuffer);
                        gzip.Write(buffer, 0, bytesRead);
                    } while (bytesRead > 0);
                }
            }
        }
Ejemplo n.º 11
0
 /// <summary>
 /// Re-reads the file's metadata from storage into FileMetadata.
 /// No-op when no storage is attached.
 /// </summary>
 public void RefreshMetadata()
 {
     if (Storage == null)
     {
         return;
     }

     FileAndPagesInformation fileAndPages = null;
     Storage.Batch(accessor => fileAndPages = accessor.GetFile(FileName, 0, 0));
     FileMetadata = fileAndPages.Metadata;
 }
Ejemplo n.º 12
0
        /// <summary>
        /// Loads a file record plus up to <paramref name="pagesToLoad"/> of its page entries,
        /// starting at page index <paramref name="start"/> (Voron-backed implementation).
        /// </summary>
        /// <exception cref="FileNotFoundException">No record exists for <paramref name="filename"/>.</exception>
        public FileAndPagesInformation GetFile(string filename, int start, int pagesToLoad)
        {
            var key = (Slice)CreateKey(filename);

            ushort version;
            var    file = LoadJson(storage.Files, key, writeBatch.Value, out version);

            if (file == null)
            {
                throw new FileNotFoundException("Could not find file: " + filename);
            }

            var f = ConvertToFile(file);
            var fileInformation = new FileAndPagesInformation
            {
                TotalSize    = f.TotalSize,
                Name         = f.FullPath,
                Metadata     = f.Metadata,
                UploadedSize = f.UploadedSize,
                Start        = start
            };

            if (pagesToLoad > 0)
            {
                // Page usages live in a secondary index keyed by (file name, position),
                // so seeking to (filename, start) lands on the first wanted page entry.
                var usageByFileNameAndPosition = storage.Usage.GetIndex(Tables.Usage.Indices.ByFileNameAndPosition);

                using (var iterator = usageByFileNameAndPosition.Iterate(Snapshot, writeBatch.Value))
                {
                    if (iterator.Seek((Slice)CreateKey(filename, start)))
                    {
                        do
                        {
                            var id    = (Slice)iterator.CreateReaderForCurrent().ToStringValue();
                            var usage = LoadJson(storage.Usage, id, writeBatch.Value, out version);

                            // The iterator may run past this file's entries into the next
                            // file's key range; stop at the first name mismatch.
                            var name = usage.Value <string>("name");
                            if (name.Equals(filename, StringComparison.InvariantCultureIgnoreCase) == false)
                            {
                                break;
                            }

                            fileInformation.Pages.Add(new PageInformation
                            {
                                Id             = usage.Value <int>("page_id"),
                                Size           = usage.Value <int>("page_size"),
                                PositionInFile = usage.Value <int>("file_pos")
                            });
                        }while (iterator.MoveNext() && fileInformation.Pages.Count < pagesToLoad);
                    }
                }
            }

            return(fileInformation);
        }
Ejemplo n.º 13
0
 /// <summary>
 /// Reads the metadata of a stored file; a missing file yields an empty
 /// RavenJObject rather than an error.
 /// </summary>
 private RavenJObject GetMetadata(string fileName)
 {
     try
     {
         FileAndPagesInformation info = null;
         storage.Batch(accessor => info = accessor.GetFile(fileName, 0, 0));
         return info.Metadata;
     }
     catch (FileNotFoundException)
     {
         return new RavenJObject();
     }
 }
Ejemplo n.º 14
0
        /// <summary>
        /// Loads a file record plus up to <paramref name="pagesToLoad"/> of its page entries,
        /// starting at page index <paramref name="start"/> (Esent-backed implementation).
        /// </summary>
        /// <exception cref="FileNotFoundException">No record exists for <paramref name="filename"/>.</exception>
        public FileAndPagesInformation GetFile(string filename, int start, int pagesToLoad)
        {
            // Position the Files cursor on the requested file via the by-name index.
            Api.JetSetCurrentIndex(session, Files, "by_name");
            Api.MakeKey(session, Files, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
            if (Api.TrySeek(session, Files, SeekGrbit.SeekEQ) == false)
            {
                throw new FileNotFoundException("Could not find file: " + filename);
            }

            var fileInformation = new FileAndPagesInformation
            {
                TotalSize    = GetTotalSize(),
                UploadedSize = BitConverter.ToInt64(Api.RetrieveColumn(session, Files, tableColumnsCache.FilesColumns["uploaded_size"]), 0),
                Metadata     = RetrieveMetadata(),
                Name         = Api.RetrieveColumnAsString(session, Files, tableColumnsCache.FilesColumns["name"]),
                Start        = start
            };

            if (pagesToLoad > 0)
            {
                // Seek into the Usage table at (filename, start) and constrain the cursor
                // to an inclusive index range so iteration stays within this file's entries.
                Api.JetSetCurrentIndex(session, Usage, "by_name_and_pos");
                Api.MakeKey(session, Usage, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
                Api.MakeKey(session, Usage, start, MakeKeyGrbit.None);
                if (Api.TrySeek(session, Usage, SeekGrbit.SeekGE))
                {
                    Api.MakeKey(session, Usage, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
                    Api.JetSetIndexRange(session, Usage, SetIndexRangeGrbit.RangeInclusive);

                    do
                    {
                        // NOTE(review): a mismatched name is skipped with `continue` here,
                        // whereas the Voron counterpart breaks — presumably safe because the
                        // index range already bounds the scan; confirm if behavior matters.
                        var name = Api.RetrieveColumnAsString(session, Usage, tableColumnsCache.UsageColumns["name"]);
                        if (name.Equals(filename, StringComparison.InvariantCultureIgnoreCase) == false)
                        {
                            continue;
                        }

                        fileInformation.Pages.Add(new PageInformation
                        {
                            Size           = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["page_size"]).Value,
                            Id             = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["page_id"]).Value,
                            PositionInFile = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["file_pos"]).Value
                        });
                    } while (Api.TryMoveNext(session, Usage) && fileInformation.Pages.Count < pagesToLoad);
                }
            }

            return(fileInformation);
        }
Ejemplo n.º 15
0
        /// <summary>
        /// Verifies that after content synchronization only the modified 2nd page gets a
        /// new page id, while unchanged pages are reused.
        /// Changed from 'async void' to 'async Task' so the test runner can observe
        /// failures — exceptions in an 'async void' method are unobservable.
        /// </summary>
        public async Task Should_reuse_pages_where_nothing_has_changed()
        {
            string filename = FileHeader.Canonize("test");

            var file = SyncTestUtils.PreparePagesStream(3);

            file.Position = 0;

            var sourceContent = new MemoryStream();

            file.CopyTo(sourceContent);
            sourceContent.Position = StorageConstants.MaxPageSize + 1;
            sourceContent.Write(new byte[] { 0, 0, 0, 0 }, 0, 4);           // change content of the 2nd page

            var destinationContent = file;

            sourceContent.Position = 0;
            await source.UploadAsync(filename, sourceContent);

            destinationContent.Position = 0;
            await destination.UploadAsync(filename, destinationContent);

            var contentUpdate = new ContentUpdateWorkItem(filename, "http://localhost:12345", sourceRfs.Storage, sourceRfs.SigGenerator);

            sourceContent.Position = 0;
            // force to upload entire file, we just want to check which pages will be reused
            await contentUpdate.UploadToAsync(destination.Synchronization);

            await destination.Synchronization.ResolveConflictAsync(filename, ConflictResolutionStrategy.RemoteVersion);

            await contentUpdate.UploadToAsync(destination.Synchronization);

            FileAndPagesInformation fileAndPages = null;

            destinationRfs.Storage.Batch(accessor => fileAndPages = accessor.GetFile(filename, 0, 256));

            Assert.Equal(3, fileAndPages.Pages.Count);
            Assert.Equal(1, fileAndPages.Pages[0].Id);             // reused page
            Assert.Equal(4, fileAndPages.Pages[1].Id);             // new page -> id == 4
            Assert.Equal(3, fileAndPages.Pages[2].Id);             // reused page

            sourceContent.Position = 0;

            var metadata = await destination.GetMetadataForAsync(filename);

            Assert.Equal(sourceContent.GetMD5Hash(), metadata.Value<string>("Content-MD5"));
        }
Ejemplo n.º 16
0
        /// <summary>
        /// Base initialization for a synchronization work item: captures the file's
        /// current metadata and the identity of the source server.
        /// </summary>
        protected SynchronizationWorkItem(string fileName, string sourceServerUrl, ITransactionalStorage storage)
        {
            Storage = storage;
            FileName = fileName;

            // Snapshot the file's metadata as it is when the work item is created.
            FileAndPagesInformation fileAndPages = null;
            Storage.Batch(accessor => fileAndPages = accessor.GetFile(fileName, 0, 0));
            FileMetadata = fileAndPages.Metadata;

            ServerInfo = new ServerInfo
            {
                Id = Storage.Id,
                FileSystemUrl = sourceServerUrl
            };

            conflictDetector = new ConflictDetector();
            conflictResolver = new ConflictResolver(null, null);
        }
Ejemplo n.º 17
0
        /// <summary>
        /// Verifies that when only the first page differs between source and destination,
        /// synchronization allocates a new id for it while reusing the untouched second page.
        /// </summary>
        public void Should_reuse_second_page_if_only_first_one_changed()
        {
            string filename = FileHeader.Canonize("test");

            // Build two identical 2-page streams, then corrupt the head of the source copy.
            var file = SyncTestUtils.PreparePagesStream(2);
            file.Position = 0;

            var sourceContent = new MemoryStream();
            file.CopyTo(sourceContent);
            sourceContent.Position = 0;
            sourceContent.Write(new byte[] { 0, 0, 0, 0 }, 0, 4);           // change content of the 1st page

            var destinationContent = file;

            sourceContent.Position = 0;
            source.UploadAsync(filename, sourceContent).Wait();
            destinationContent.Position = 0;
            destination.UploadAsync(filename, destinationContent).Wait();

            var contentUpdate = new ContentUpdateWorkItem(filename, "http://localhost:12345", sourceRfs.Storage,
                                                          sourceRfs.SigGenerator);

            sourceContent.Position = 0;
            // force to upload entire file, we just want to check which pages will be reused
            contentUpdate.UploadToAsync(destination.Synchronization).Wait();
            destination.Synchronization.ResolveConflictAsync(filename, ConflictResolutionStrategy.RemoteVersion).Wait();
            contentUpdate.UploadToAsync(destination.Synchronization).Wait();

            FileAndPagesInformation fileAndPages = null;
            destinationRfs.Storage.Batch(accessor => fileAndPages = accessor.GetFile(filename, 0, 256));

            // First page replaced, second page kept.
            Assert.Equal(2, fileAndPages.Pages.Count);
            Assert.Equal(3, fileAndPages.Pages[0].Id);             // new page -> id == 3
            Assert.Equal(2, fileAndPages.Pages[1].Id);             // reused page -> id still == 2

            sourceContent.Position = 0;
            Assert.Equal(sourceContent.GetMD5Hash(), destination.GetMetadataForAsync(filename).Result["Content-MD5"]);
        }
Ejemplo n.º 18
0
        /// <summary>
        /// Builds a DataInfo snapshot (name, length, last-modified) for a local file,
        /// or returns null when the file does not exist.
        /// </summary>
        private DataInfo GetLocalFileDataInfo(string fileName)
        {
            FileAndPagesInformation fileAndPages = null;
            try
            {
                Storage.Batch(accessor => fileAndPages = accessor.GetFile(fileName, 0, 0));
            }
            catch (FileNotFoundException)
            {
                return null;
            }

            return new DataInfo
            {
                LastModified = fileAndPages.Metadata.Value<DateTime>(Constants.LastModified).ToUniversalTime(),
                Length = fileAndPages.TotalSize ?? 0,
                Name = fileAndPages.Name
            };
        }
Ejemplo n.º 19
0
        /// <summary>
        /// Verifies that when new pages are only appended at the end, all original pages
        /// are reused during synchronization (their ids keep the original order).
        /// Changed from 'async void' to 'async Task' so the test runner can observe
        /// failures — exceptions in an 'async void' method are unobservable.
        /// </summary>
        public async Task Should_reuse_pages_when_data_appended(int numberOfPages)
        {
            string filename = FileHeader.Canonize("test");

            var file = SyncTestUtils.PreparePagesStream(numberOfPages);

            var sourceContent = new CombinedStream(file, SyncTestUtils.PreparePagesStream(numberOfPages));
            // add new pages at the end
            var destinationContent = file;

            sourceContent.Position = 0;
            await source.UploadAsync(filename, sourceContent);

            destinationContent.Position = 0;
            await destination.UploadAsync(filename, destinationContent);

            var contentUpdate = new ContentUpdateWorkItem(filename, "http://localhost:12345", sourceRfs.Storage, sourceRfs.SigGenerator);

            // force to upload entire file, we just want to check which pages will be reused
            await contentUpdate.UploadToAsync(destination.Synchronization);

            await destination.Synchronization.ResolveConflictAsync(filename, ConflictResolutionStrategy.RemoteVersion);

            await contentUpdate.UploadToAsync(destination.Synchronization);


            FileAndPagesInformation fileAndPages = null;

            destinationRfs.Storage.Batch(accessor => fileAndPages = accessor.GetFile(filename, 0, 2 * numberOfPages));

            Assert.Equal(2 * numberOfPages, fileAndPages.Pages.Count);

            for (var i = 0; i < numberOfPages; i++)
            {
                Assert.Equal(i + 1, fileAndPages.Pages[i].Id);
                // if page ids are in the original order it means that they were used the existing pages
            }

            sourceContent.Position = 0;
            Assert.Equal(sourceContent.GetMD5Hash(), destination.GetMetadataForAsync(filename).Result["Content-MD5"]);
        }
Ejemplo n.º 20
0
        public void StorageStream_should_write_to_storage_by_64kB_pages()
        {
            // Write 64kB + 1 byte in three chunks; the stream should repage this
            // into one full 64kB page plus a single trailing byte.
            using (var stream = StorageStream.CreatingNewAndWritting(fs, "file", new RavenJObject()))
            {
                var data = new byte[StorageConstants.MaxPageSize];
                new Random().NextBytes(data);

                stream.Write(data, 0, 32768);
                stream.Write(data, 32767, 32768);
                stream.Write(data, 0, 1);
            }

            FileAndPagesInformation fileAndPages = null;
            transactionalStorage.Batch(accessor => fileAndPages = accessor.GetFile("file", 0, 10));

            // One full page and a 1-byte remainder page.
            Assert.Equal(2, fileAndPages.Pages.Count);
            Assert.Equal(StorageConstants.MaxPageSize, fileAndPages.Pages[0].Size);
            Assert.Equal(1, fileAndPages.Pages[1].Size);
        }
Ejemplo n.º 21
0
 /// <summary>
 /// Advances the cached page frame (fileAndPages) so that it covers <paramref name="offset"/>,
 /// reloading batches of page information from storage as needed, and records the
 /// resulting position in currentOffset.
 /// </summary>
 private void MovePageFrame(long offset)
 {
     // Never seek past the end of the file.
     offset = Math.Min(Length, offset);
     // Seeking backwards (or starting cold) restarts from the first page batch.
     if (offset < currentPageFrameOffset || fileAndPages == null)
     {
         storage.Batch(accessor => fileAndPages = accessor.GetFile(Name, 0, PagesBatchSize));
         currentPageFrameOffset = 0;
     }
     // Walk forward one batch at a time until the frame contains the requested offset.
     while (currentPageFrameOffset + CurrentPageFrameSize - 1 < offset)
     {
         var lastPageFrameSize = CurrentPageFrameSize;
         var nextPageIndex     = fileAndPages.Start + fileAndPages.Pages.Count;
         storage.Batch(accessor => fileAndPages = accessor.GetFile(Name, nextPageIndex, PagesBatchSize));
         if (fileAndPages.Pages.Count < 1)
         {
             // Ran out of pages. NOTE(review): Start is reset to 0 here — presumably so
             // that later frame-size computations treat the empty frame consistently;
             // confirm against CurrentPageFrameSize's definition.
             fileAndPages.Start = 0;
             break;
         }
         currentPageFrameOffset += lastPageFrameSize;
     }
     currentOffset = offset;
 }
Ejemplo n.º 22
0
        /// <summary>
        /// Verifies that page lookup for a file name is not polluted by another file whose
        /// name shares the same prefix ("test" vs "test.bin").
        /// </summary>
        public void Should_work()
        {
            var filename        = "test";
            var greaterFileName = filename + ".bin";             // append something

            // Store "test" as two 3-byte pages.
            transactionalStorage.Batch(accessor =>
            {
                accessor.PutFile(filename, 6, new RavenJObject());

                accessor.AssociatePage(filename, accessor.InsertPage(new byte[] { 1, 2, 3 }, 3), 0, 3);
                accessor.AssociatePage(filename, accessor.InsertPage(new byte[] { 4, 5, 6 }, 3), 3, 3);

                accessor.CompleteFileUpload(filename);
            });

            // Store the longer, prefix-sharing name the same way.
            transactionalStorage.Batch(accessor =>
            {
                accessor.PutFile(greaterFileName, 6, new RavenJObject());

                accessor.AssociatePage(greaterFileName, accessor.InsertPage(new byte[] { 11, 22, 33 }, 3), 0, 3);
                accessor.AssociatePage(greaterFileName, accessor.InsertPage(new byte[] { 44, 55, 66 }, 3), 3, 3);

                accessor.CompleteFileUpload(greaterFileName);
            });

            FileAndPagesInformation fileAndPages = null;
            transactionalStorage.Batch(accessor => fileAndPages = accessor.GetFile(filename, 0, 32));

            // Only the two pages of "test" should come back, not those of "test.bin".
            Assert.Equal(2, fileAndPages.Pages.Count);
            Assert.Equal(1, fileAndPages.Pages[0].Id);
            Assert.Equal(2, fileAndPages.Pages[1].Id);
        }
Ejemplo n.º 23
0
        /// <summary>
        /// Returns the RDC signature manifest for a file so a remote node can compute a diff.
        /// Responds 404 when the file does not exist.
        /// </summary>
        public async Task<HttpResponseMessage> Manifest(string id)
        {
            var filename = Uri.UnescapeDataString(id);

            FileAndPagesInformation fileAndPages = null;
            try
            {
                Storage.Batch(accessor => fileAndPages = accessor.GetFile(filename, 0, 0));
            }
            catch (FileNotFoundException)
            {
                Log.Debug("Signature manifest for a file '{0}' was not found", filename);
                return Request.CreateResponse(HttpStatusCode.NotFound);
            }

            using (var signatureRepository = new StorageSignatureRepository(Storage, filename))
            {
                var rdcManager = new LocalRdcManager(signatureRepository, Storage, SigGenerator);

                var dataInfo = new DataInfo
                {
                    Name      = filename,
                    CreatedAt = Convert.ToDateTime(fileAndPages.Metadata.Value<string>(Constants.LastModified))
                                .ToUniversalTime()
                };

                var signatureManifest = await rdcManager.GetSignatureManifestAsync(dataInfo);

                // TotalSize may be null for an incomplete upload; report 0 in that case.
                signatureManifest.FileLength = fileAndPages.TotalSize ?? 0;

                Log.Debug("Signature manifest for a file '{0}' was downloaded. Signatures count was {1}", filename, signatureManifest.Signatures.Count);

                return this.GetMessageWithObject(signatureManifest, HttpStatusCode.OK)
                       .WithNoCache();
            }
        }
Ejemplo n.º 24
0
        public void StorageStream_should_write_to_storage_by_64kB_pages()
        {
            // Arrange: a storage stream over the transactional storage under test.
            using (var stream = StorageStream.CreatingNewAndWritting(
                       transactionalStorage, new MockIndexStorage(CreateIndexConfiguration()),
                       new StorageOperationsTask(transactionalStorage, new OrderedPartCollection <AbstractFileDeleteTrigger>(), new MockIndexStorage(CreateIndexConfiguration()), new EmptyNotificationsPublisher()),
                       "file", EmptyETagMetadata))
            {
                var data = new byte[StorageConstants.MaxPageSize];
                new Random().NextBytes(data);

                // Write 64kB + 1 byte in three chunks; the stream should repage this
                // into one full 64kB page plus a single trailing byte.
                stream.Write(data, 0, 32768);
                stream.Write(data, 32767, 32768);
                stream.Write(data, 0, 1);
            }

            FileAndPagesInformation fileAndPages = null;
            transactionalStorage.Batch(accessor => fileAndPages = accessor.GetFile("file", 0, 10));

            // One full page and a 1-byte remainder page.
            Assert.Equal(2, fileAndPages.Pages.Count);
            Assert.Equal(StorageConstants.MaxPageSize, fileAndPages.Pages[0].Size);
            Assert.Equal(1, fileAndPages.Pages[1].Size);
        }
Ejemplo n.º 25
0
        /// <summary>
        /// Loads a file record plus up to <paramref name="pagesToLoad"/> of its page entries,
        /// starting at page index <paramref name="start"/> (Voron-backed implementation).
        /// </summary>
        /// <exception cref="FileNotFoundException">No record exists for <paramref name="filename"/>.</exception>
        public FileAndPagesInformation GetFile(string filename, int start, int pagesToLoad)
        {
            var key = (Slice)CreateKey(filename);

            ushort version;
            var file = LoadJson(storage.Files, key, writeBatch.Value, out version);
            if (file == null)
                throw new FileNotFoundException("Could not find file: " + filename);

            var f = ConvertToFile(file);
            var fileInformation = new FileAndPagesInformation
                                  {
                                      TotalSize = f.TotalSize,
                                      Name = f.FullPath,
                                      Metadata = f.Metadata,
                                      UploadedSize = f.UploadedSize,
                                      Start = start
                                  };

            if (pagesToLoad > 0)
            {
                // Page usages live in a secondary index keyed by (file name, position),
                // so seeking to (filename, start) lands on the first wanted page entry.
                var usageByFileNameAndPosition = storage.Usage.GetIndex(Tables.Usage.Indices.ByFileNameAndPosition);

                using (var iterator = usageByFileNameAndPosition.Iterate(Snapshot, writeBatch.Value))
                {
                    if (iterator.Seek((Slice)CreateKey(filename, start)))
                    {
                        do
                        {
                            var id = (Slice)iterator.CreateReaderForCurrent().ToStringValue();
                            var usage = LoadJson(storage.Usage, id, writeBatch.Value, out version);

                            // The iterator may run past this file's entries into the next
                            // file's key range; stop at the first name mismatch.
                            var name = usage.Value<string>("name");
                            if (name.Equals(filename, StringComparison.InvariantCultureIgnoreCase) == false)
                                break;

                            fileInformation.Pages.Add(new PageInformation
                                                      {
                                                          Id = usage.Value<int>("page_id"),
                                                          Size = usage.Value<int>("page_size"),
                                                          PositionInFile = usage.Value<int>("file_pos")
                                                      });
                        }
                        while (iterator.MoveNext() && fileInformation.Pages.Count < pagesToLoad);
                    }
                }
            }

            return fileInformation;
        }
Ejemplo n.º 26
0
		/// <summary>
		/// Loads a file record plus up to <paramref name="pagesToLoad"/> of its page entries,
		/// starting at page index <paramref name="start"/> (Esent-backed implementation).
		/// </summary>
		/// <exception cref="FileNotFoundException">No record exists for <paramref name="filename"/>.</exception>
		public FileAndPagesInformation GetFile(string filename, int start, int pagesToLoad)
		{
			// Position the Files cursor on the requested file via the by-name index.
			Api.JetSetCurrentIndex(session, Files, "by_name");
			Api.MakeKey(session, Files, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
			if (Api.TrySeek(session, Files, SeekGrbit.SeekEQ) == false)
				throw new FileNotFoundException("Could not find file: " + filename);

			var fileInformation = new FileAndPagesInformation
				                      {
					                      TotalSize = GetTotalSize(),
					                      UploadedSize = BitConverter.ToInt64(Api.RetrieveColumn(session, Files, tableColumnsCache.FilesColumns["uploaded_size"]), 0),
					                      Metadata = RetrieveMetadata(),
					                      Name = Api.RetrieveColumnAsString(session, Files, tableColumnsCache.FilesColumns["name"]),
					                      Start = start
				                      };

			if (pagesToLoad > 0)
			{
				// Seek into the Usage table at (filename, start) and constrain the cursor
				// to an inclusive index range so iteration stays within this file's entries.
				Api.JetSetCurrentIndex(session, Usage, "by_name_and_pos");
				Api.MakeKey(session, Usage, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
				Api.MakeKey(session, Usage, start, MakeKeyGrbit.None);
				if (Api.TrySeek(session, Usage, SeekGrbit.SeekGE))
				{
					Api.MakeKey(session, Usage, filename, Encoding.Unicode, MakeKeyGrbit.NewKey);
					Api.JetSetIndexRange(session, Usage, SetIndexRangeGrbit.RangeInclusive);

					do
					{
						// NOTE(review): a mismatched name is skipped with `continue` here,
						// presumably safe because the index range already bounds the scan.
						var name = Api.RetrieveColumnAsString(session, Usage, tableColumnsCache.UsageColumns["name"]);
						if (name.Equals(filename, StringComparison.InvariantCultureIgnoreCase) == false)
							continue;

						fileInformation.Pages.Add(new PageInformation
							                          {
								                          Size = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["page_size"]).Value,
								                          Id = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["page_id"]).Value,
														  PositionInFile = Api.RetrieveColumnAsInt32(session, Usage, tableColumnsCache.UsageColumns["file_pos"]).Value
							                          });
					} while (Api.TryMoveNext(session, Usage) && fileInformation.Pages.Count < pagesToLoad);
				}
			}

			return fileInformation;
		}