Example #1
        public HttpResponseMessage Get(string name)
        {
            name = FileHeader.Canonize(name);
            FileAndPagesInformation fileAndPages = null;

            Storage.Batch(accessor => fileAndPages = accessor.GetFile(name, 0, 0));

            if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
            {
                if (log.IsDebugEnabled)
                {
                    log.Debug("File '{0}' is not accessible to get (Raven-Delete-Marker set)", name);
                }
                throw new HttpResponseException(HttpStatusCode.NotFound);
            }

            var readingStream = StorageStream.Reading(Storage, name);

            var filename = GetFileName(name, fileAndPages.Metadata);
            var result   = StreamResult(filename, readingStream);

            var etag = new Etag(fileAndPages.Metadata.Value <string>(Constants.MetadataEtagField));

            fileAndPages.Metadata.Remove(Constants.MetadataEtagField);
            WriteHeaders(fileAndPages.Metadata, etag, result);

            if (log.IsDebugEnabled)
            {
                log.Debug("File '{0}' with etag {1} is being retrieved.", name, etag);
            }

            return(result.WithNoCache());
        }
Example #2
        public HttpResponseMessage Get(string name)
        {
            name = RavenFileNameHelper.RavenPath(name);
            FileAndPagesInformation fileAndPages = null;

            try
            {
                Storage.Batch(accessor => fileAndPages = accessor.GetFile(name, 0, 0));
            }
            catch (FileNotFoundException)
            {
                log.Debug("File '{0}' was not found", name);
                throw new HttpResponseException(HttpStatusCode.NotFound);
            }

            if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
            {
                log.Debug("File '{0}' is not accessible to get (Raven-Delete-Marker set)", name);
                throw new HttpResponseException(HttpStatusCode.NotFound);
            }

            var readingStream = StorageStream.Reading(Storage, name);
            var result        = StreamResult(name, readingStream);

            var etag = new Etag(fileAndPages.Metadata.Value <string>(Constants.MetadataEtagField));

            fileAndPages.Metadata.Remove(Constants.MetadataEtagField);
            WriteHeaders(fileAndPages.Metadata, etag, result);

            return(result.WithNoCache());
        }
Example #3
        public Task <Stream> DownloadFile(FileHeader file)
        {
            var name          = file.FullPath;
            var readingStream = StorageStream.Reading(filesystem.Storage, name);

            return(new CompletedTask <Stream>(readingStream));
        }
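
Example #3 hands the open StorageStream back to the caller, so disposing it is the caller's job. The following is a minimal sketch of consuming code under that assumption; the CopyToDiskAsync name and the destinationPath parameter are illustrative and not part of the RavenFS API.

        // Hypothetical caller of DownloadFile above: copies the stored file to disk and disposes both streams.
        public async Task CopyToDiskAsync(FileHeader file, string destinationPath)
        {
            using (var source = await DownloadFile(file))      // StorageStream opened by DownloadFile
            using (var target = File.Create(destinationPath))
            {
                await source.CopyToAsync(target);              // disposing releases the underlying storage reader
            }
        }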
Example #4
        private void StreamExportToClient(Stream gzip2, string[] fileNames)
        {
            using (var gzip = new BufferedStream(gzip2))
            //     using (var gzip = new GZipStream(stream, CompressionMode.Compress,true))
            {
                var binaryWriter = new BinaryWriter(gzip);

                var buffer     = new byte[StorageConstants.MaxPageSize];
                var pageBuffer = new byte[StorageConstants.MaxPageSize];

                foreach (var name in fileNames)
                {
                    FileAndPagesInformation fileAndPages = null;
                    var cannonizedName = FileHeader.Canonize(name);

                    try
                    {
                        Storage.Batch(accessor => fileAndPages = accessor.GetFile(cannonizedName, 0, 0));
                    }
                    catch (Exception)
                    {
                        // surface storage errors (e.g. FileNotFoundException) to the caller unchanged
                        throw;
                    }

                    // if we didn't find the document, we'll write "-1" to the stream, signaling that it does not exist
                    if (fileAndPages.Metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
                    {
                        if (log.IsDebugEnabled)
                        {
                            log.Debug("File '{0}' is not accessible to get (Raven-Delete-Marker set)", name);
                        }

                        binaryWriter.Write(-1);
                        continue;
                    }

                    var fileSize = fileAndPages.UploadedSize;
                    binaryWriter.Write(fileSize);

                    var readingStream = StorageStream.Reading(Storage, cannonizedName);
                    var bytesRead     = 0;
                    do
                    {
                        try
                        {
                            bytesRead = readingStream.ReadUsingExternalTempBuffer(buffer, 0, buffer.Length, pageBuffer);
                        }
                        catch (Exception)
                        {
                            // surface read errors to the caller unchanged
                            throw;
                        }
                        gzip.Write(buffer, 0, bytesRead);
                    } while (bytesRead > 0);
                }
            }

            //gzip2.Flush();
        }
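
The export loop in Example #4 is the core read pattern for a reading StorageStream: pull page-sized chunks through ReadUsingExternalTempBuffer until it returns zero. Below is a condensed sketch of just that pattern; the CopyStoredFile name and the ITransactionalStorage parameter type are assumptions inferred from the fields used above.

        // Hypothetical helper: streams a stored file into any destination, one page-sized chunk at a time.
        private static void CopyStoredFile(ITransactionalStorage storage, string name, Stream destination)
        {
            var buffer     = new byte[StorageConstants.MaxPageSize];
            var pageBuffer = new byte[StorageConstants.MaxPageSize];   // scratch buffer reused by the reader

            using (var readingStream = StorageStream.Reading(storage, name))
            {
                int bytesRead;
                do
                {
                    bytesRead = readingStream.ReadUsingExternalTempBuffer(buffer, 0, buffer.Length, pageBuffer);
                    destination.Write(buffer, 0, bytesRead);
                } while (bytesRead > 0);
            }
        }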
Example #5
        public async Task <SynchronizationReport> UploadToAsync(IAsyncFilesSynchronizationCommands destination)
        {
            using (var sourceFileStream = StorageStream.Reading(Storage, FileName))
            {
                var fileSize = sourceFileStream.Length;

                var onlySourceNeed = new List <RdcNeed>
                {
                    new RdcNeed
                    {
                        BlockType   = RdcNeedType.Source,
                        BlockLength = (ulong)fileSize,
                        FileOffset  = 0
                    }
                };

                return(await PushByUsingMultipartRequest(destination, sourceFileStream, onlySourceNeed));
            }
        }
Example #6
        public async Task <SynchronizationReport> UploadToAsync(ISynchronizationServerClient synchronizationServerClient)
        {
            using (var sourceFileStream = StorageStream.Reading(Storage, FileName))
            {
                var fileSize = sourceFileStream.Length;

                var onlySourceNeed = new List <RdcNeed>
                {
                    new RdcNeed
                    {
                        BlockType   = RdcNeedType.Source,
                        BlockLength = (ulong)fileSize,
                        FileOffset  = 0
                    }
                };

                return(await PushByUsingMultipartRequest(synchronizationServerClient, sourceFileStream, onlySourceNeed).ConfigureAwait(false));
            }
        }
Example #7
        private async Task <SynchronizationReport> SynchronizeTo(IAsyncFilesSynchronizationCommands destination,
                                                                 ISignatureRepository localSignatureRepository,
                                                                 ISignatureRepository remoteSignatureRepository,
                                                                 SignatureManifest sourceSignatureManifest,
                                                                 SignatureManifest destinationSignatureManifest)
        {
            var seedSignatureInfo   = SignatureInfo.Parse(destinationSignatureManifest.Signatures.Last().Name);
            var sourceSignatureInfo = SignatureInfo.Parse(sourceSignatureManifest.Signatures.Last().Name);

            using (var localFile = StorageStream.Reading(Storage, FileName))
            {
                IList <RdcNeed> needList;
                using (var needListGenerator = new NeedListGenerator(remoteSignatureRepository, localSignatureRepository))
                {
                    needList = needListGenerator.CreateNeedsList(seedSignatureInfo, sourceSignatureInfo, Cts.Token);
                }

                return(await PushByUsingMultipartRequest(destination, localFile, needList));
            }
        }
Example #8
        public void StorageStream_can_read_overlaping_byte_ranges_from_last_page()
        {
            var buffer = new byte[StorageConstants.MaxPageSize];

            new Random().NextBytes(buffer);

            using (var stream = StorageStream.CreatingNewAndWritting(
                       transactionalStorage, new MockIndexStorage(),
                       new StorageOperationsTask(transactionalStorage, new MockIndexStorage(), new EmptyNotificationsPublisher()),
                       "file", EmptyETagMetadata))
            {
                stream.Write(buffer, 0, StorageConstants.MaxPageSize);
            }

            using (var stream = StorageStream.Reading(transactionalStorage, "file"))
            {
                var readBuffer = new byte[10];

                stream.Seek(StorageConstants.MaxPageSize - 10, SeekOrigin.Begin);
                stream.Read(readBuffer, 0, 10);                 // read last 10 bytes

                var subBuffer = buffer.ToList().Skip(StorageConstants.MaxPageSize - 10).Take(10).ToArray();

                for (int i = 0; i < 10; i++)
                {
                    Assert.Equal(subBuffer[i], readBuffer[i]);
                }

                readBuffer = new byte[5];

                stream.Seek(StorageConstants.MaxPageSize - 5, SeekOrigin.Begin);
                stream.Read(readBuffer, 0, 5);                 // read last 5 bytes - note that they were read last time as well

                subBuffer = buffer.ToList().Skip(StorageConstants.MaxPageSize - 5).Take(5).ToArray();

                for (int i = 0; i < 5; i++)
                {
                    Assert.Equal(subBuffer[i], readBuffer[i]);
                }
            }
        }
Example #9
        public void StorageStream_can_read_overlaping_byte_ranges_from_last_page()
        {
            var buffer = new byte[StorageConstants.MaxPageSize];

            new Random().NextBytes(buffer);

            using (var stream = StorageStream.CreatingNewAndWritting(fs, "file", new RavenJObject()))
            {
                stream.Write(buffer, 0, StorageConstants.MaxPageSize);
            }

            using (var stream = StorageStream.Reading(fs.Storage, "file"))
            {
                var readBuffer = new byte[10];

                stream.Seek(StorageConstants.MaxPageSize - 10, SeekOrigin.Begin);
                stream.Read(readBuffer, 0, 10); // read last 10 bytes

                var subBuffer = buffer.ToList().Skip(StorageConstants.MaxPageSize - 10).Take(10).ToArray();

                for (int i = 0; i < 10; i++)
                {
                    Assert.Equal(subBuffer[i], readBuffer[i]);
                }

                readBuffer = new byte[5];

                stream.Seek(StorageConstants.MaxPageSize - 5, SeekOrigin.Begin);
                stream.Read(readBuffer, 0, 5); // read last 5 bytes - note that they were read last time as well

                subBuffer = buffer.ToList().Skip(StorageConstants.MaxPageSize - 5).Take(5).ToArray();

                for (int i = 0; i < 5; i++)
                {
                    Assert.Equal(subBuffer[i], readBuffer[i]);
                }
            }
        }
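
Examples #8 and #9 show that a stream obtained from StorageStream.Reading supports seeking back into the last page and re-reading a byte range. The sketch below packages that pattern into a helper; the ReadRange name and the ITransactionalStorage parameter type are assumptions, and only the Seek/Read members exercised by the tests above are used.

        // Hypothetical helper: reads count bytes starting at offset from a stored file.
        private static byte[] ReadRange(ITransactionalStorage storage, string fileName, long offset, int count)
        {
            using (var stream = StorageStream.Reading(storage, fileName))
            {
                var result = new byte[count];
                stream.Seek(offset, SeekOrigin.Begin);

                var totalRead = 0;
                while (totalRead < count)
                {
                    var read = stream.Read(result, totalRead, count - totalRead);
                    if (read == 0)
                        break;                                 // end of file reached before count bytes
                    totalRead += read;
                }

                return result;
            }
        }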
Example #10
        public async Task <HttpResponseMessage> MultipartProceed(string fileSystemName)
        {
            if (!Request.Content.IsMimeMultipartContent())
            {
                throw new HttpResponseException(HttpStatusCode.UnsupportedMediaType);
            }

            var fileName          = Request.Headers.GetValues(SyncingMultipartConstants.FileName).FirstOrDefault();
            var canonicalFilename = FileHeader.Canonize(fileName);

            var tempFileName = RavenFileNameHelper.DownloadingFileName(canonicalFilename);

            var sourceServerInfo = ReadInnerHeaders.Value <ServerInfo>(SyncingMultipartConstants.SourceServerInfo);
            var sourceFileETag   = Guid.Parse(GetHeader(Constants.MetadataEtagField).Trim('\"'));

            var report = new SynchronizationReport(canonicalFilename, sourceFileETag, SynchronizationType.ContentUpdate);

            Log.Debug("Starting to process multipart synchronization request of a file '{0}' with ETag {1} from {2}", fileName, sourceFileETag, sourceServerInfo);

            StorageStream localFile          = null;
            var           isNewFile          = false;
            var           isConflictResolved = false;

            try
            {
                Storage.Batch(accessor =>
                {
                    AssertFileIsNotBeingSynced(canonicalFilename, accessor);
                    FileLockManager.LockByCreatingSyncConfiguration(canonicalFilename, sourceServerInfo, accessor);
                });

                SynchronizationTask.IncomingSynchronizationStarted(canonicalFilename, sourceServerInfo, sourceFileETag, SynchronizationType.ContentUpdate);

                PublishSynchronizationNotification(fileSystemName, canonicalFilename, sourceServerInfo, report.Type, SynchronizationAction.Start);

                Storage.Batch(accessor => StartupProceed(canonicalFilename, accessor));

                RavenJObject sourceMetadata = GetFilteredMetadataFromHeaders(ReadInnerHeaders);

                var localMetadata = GetLocalMetadata(canonicalFilename);
                if (localMetadata != null)
                {
                    AssertConflictDetection(canonicalFilename, localMetadata, sourceMetadata, sourceServerInfo, out isConflictResolved);
                    localFile = StorageStream.Reading(Storage, canonicalFilename);
                }
                else
                {
                    isNewFile = true;
                }

                Historian.UpdateLastModified(sourceMetadata);

                var synchronizingFile = SynchronizingFileStream.CreatingOrOpeningAndWriting(Storage, Search, StorageOperationsTask, tempFileName, sourceMetadata);

                var provider = new MultipartSyncStreamProvider(synchronizingFile, localFile);

                Log.Debug("Starting to process/read multipart content of a file '{0}'", fileName);

                await Request.Content.ReadAsMultipartAsync(provider);

                Log.Debug("Multipart content of a file '{0}' was processed/read", fileName);

                report.BytesCopied     = provider.BytesCopied;
                report.BytesTransfered = provider.BytesTransfered;
                report.NeedListLength  = provider.NumberOfFileParts;

                synchronizingFile.PreventUploadComplete = false;
                synchronizingFile.Flush();
                synchronizingFile.Dispose();
                sourceMetadata["Content-MD5"] = synchronizingFile.FileHash;

                Storage.Batch(accessor => accessor.UpdateFileMetadata(tempFileName, sourceMetadata));

                Storage.Batch(accessor =>
                {
                    StorageOperationsTask.IndicateFileToDelete(canonicalFilename);
                    accessor.RenameFile(tempFileName, canonicalFilename);

                    Search.Delete(tempFileName);
                    Search.Index(canonicalFilename, sourceMetadata);
                });

                if (isNewFile)
                {
                    Log.Debug("Temporary downloading file '{0}' was renamed to '{1}'. Indexes were updated.", tempFileName, fileName);
                }
                else
                {
                    Log.Debug("Old file '{0}' was deleted. Indexes were updated.", fileName);
                }

                if (isConflictResolved)
                {
                    ConflictArtifactManager.Delete(canonicalFilename);
                }
            }
            catch (Exception ex)
            {
                if (ShouldAddExceptionToReport(ex))
                {
                    report.Exception = ex;
                }
            }
            finally
            {
                if (localFile != null)
                {
                    localFile.Dispose();
                }
            }

            if (report.Exception == null)
            {
                Log.Debug(
                    "File '{0}' was synchronized successfully from {1}. {2} bytes were transfered and {3} bytes copied. Need list length was {4}",
                    fileName, sourceServerInfo, report.BytesTransfered, report.BytesCopied, report.NeedListLength);
            }
            else
            {
                Log.WarnException(
                    string.Format("Error has occurred during synchronization of a file '{0}' from {1}", fileName, sourceServerInfo),
                    report.Exception);
            }

            FinishSynchronization(canonicalFilename, report, sourceServerInfo, sourceFileETag);

            PublishFileNotification(fileName, isNewFile ? FileChangeAction.Add : FileChangeAction.Update);
            PublishSynchronizationNotification(fileSystemName, fileName, sourceServerInfo, report.Type, SynchronizationAction.Finish);

            if (isConflictResolved)
            {
                Publisher.Publish(new ConflictNotification
                {
                    FileName = fileName,
                    Status   = ConflictStatus.Resolved
                });
            }

            return(GetMessageWithObject(report));
        }
Example #11
        private async Task ExecuteContentUpdate(RavenJObject localMetadata, SynchronizationReport report)
        {
            var tempFileName = RavenFileNameHelper.DownloadingFileName(fileName);

            using (var localFile = localMetadata != null ? StorageStream.Reading(fs.Storage, fileName) : null)
            {
                fs.PutTriggers.Apply(trigger => trigger.OnPut(tempFileName, sourceMetadata));

                fs.Historian.UpdateLastModified(sourceMetadata);

                var synchronizingFile = SynchronizingFileStream.CreatingOrOpeningAndWriting(fs, tempFileName, sourceMetadata);

                fs.PutTriggers.Apply(trigger => trigger.AfterPut(tempFileName, null, sourceMetadata));

                var provider = new MultipartSyncStreamProvider(synchronizingFile, localFile);

                if (Log.IsDebugEnabled)
                {
                    Log.Debug("Starting to process/read multipart content of a file '{0}'", fileName);
                }

                await MultipartContent.ReadAsMultipartAsync(provider).ConfigureAwait(false);

                if (Log.IsDebugEnabled)
                {
                    Log.Debug("Multipart content of a file '{0}' was processed/read", fileName);
                }

                report.BytesCopied     = provider.BytesCopied;
                report.BytesTransfered = provider.BytesTransfered;
                report.NeedListLength  = provider.NumberOfFileParts;

                synchronizingFile.PreventUploadComplete = false;
                synchronizingFile.Flush();
                synchronizingFile.Dispose();
                sourceMetadata["Content-MD5"] = synchronizingFile.FileHash;

                FileUpdateResult updateResult = null;
                fs.Storage.Batch(accessor => updateResult = accessor.UpdateFileMetadata(tempFileName, sourceMetadata, null));

                fs.Storage.Batch(accessor =>
                {
                    using (fs.DisableAllTriggersForCurrentThread())
                    {
                        fs.Files.IndicateFileToDelete(fileName, null);
                    }

                    accessor.RenameFile(tempFileName, fileName);

                    fs.Search.Delete(tempFileName);
                    fs.Search.Index(fileName, sourceMetadata, updateResult.Etag);
                });

                if (Log.IsDebugEnabled)
                {
                    var message = localFile == null
                        ? string.Format("Temporary downloading file '{0}' was renamed to '{1}'. Indexes were updated.", tempFileName, fileName)
                        : string.Format("Old file '{0}' was deleted. Indexes were updated.", fileName);

                    Log.Debug(message);
                }

                fs.Publisher.Publish(new FileChangeNotification {
                    File = fileName, Action = localFile == null ? FileChangeAction.Add : FileChangeAction.Update
                });
            }
        }
Example #12
        private IEnumerable <SignatureInfo> PrepareSignatures(string filename)
        {
            var input = StorageStream.Reading(_transactionalStorage, filename);

            return(_sigGenerator.GenerateSignatures(input, filename, _signatureRepository));
        }
Example #13
        private void WriteFilesAsAttachments(JsonTextWriter jsonWriter)
        {
            int totalFilesCount = 0;

            fileStorage.Batch(accsesor =>
            {
                totalFilesCount = accsesor.GetFileCount();
            });
            if (totalFilesCount == 0)
            {
                return;
            }
            int currentFilesCount  = 0;
            int previousFilesCount = 0;
            var lastEtag           = Etag.Empty;

            do
            {
                try
                {
                    previousFilesCount = currentFilesCount;
                    fileStorage.Batch(accsesor =>
                    {
                        var fileHeaders = accsesor.GetFilesAfter(lastEtag, batchSize);
                        foreach (var header in fileHeaders)
                        {
                            var file = StorageStream.Reading(fileStorage, header.FullPath);
                            jsonWriter.WriteStartObject();
                            jsonWriter.WritePropertyName("Data");
                            int read = file.Read(filesBuffer, 0, filesBuffer.Length);
                            jsonWriter.WriteRaw("\"");
                            while (read > 0)
                            {
                                var base64 = Convert.ToBase64String(filesBuffer, 0, read);
                                jsonWriter.WriteRaw(base64);
                                jsonWriter.Flush();
                                read = file.Read(filesBuffer, 0, filesBuffer.Length);
                            }
                            jsonWriter.WriteRaw("\"");
                            jsonWriter.SealValue();
                            jsonWriter.WritePropertyName("Metadata");
                            fileMetadata.WriteTo(jsonWriter);
                            jsonWriter.WritePropertyName("Key");
                            jsonWriter.WriteValue(header.FullPath);
                            jsonWriter.WritePropertyName("Etag");
                            jsonWriter.WriteValue(header.Etag.ToString());
                            jsonWriter.WriteEndObject();
                            lastEtag = header.Etag;
                            currentFilesCount++;
                            if (currentFilesCount % batchSize == 0)
                            {
                                ReportProgress("files", currentFilesCount, totalFilesCount);
                            }
                        }
                    });
                }
                catch (Exception e)
                {
                    lastEtag = lastEtag.IncrementBy(1);
                    currentFilesCount++;
                    ReportCorrupted("files", currentFilesCount, e.Message);
                }
                finally
                {
                    if (currentFilesCount > previousFilesCount)
                    {
                        ReportProgress("files", currentFilesCount, totalFilesCount);
                    }
                }
            } while (currentFilesCount < totalFilesCount);
        }
Example #14
        public async Task eventually_will_cleanup_all_deleted_files_even_if_they_use_the_same_pages_and_concurrency_exceptions_are_thrown(string storage)
        {
            var client = NewAsyncClient(requestedStorage: storage);
            var rfs    = GetFileSystem();

            var bytes1 = new byte[1024 * 1024 * 3];
            var bytes2 = new byte[1024 * 1024 * 2];

            var random = new Random();

            random.NextBytes(bytes1);
            random.NextBytes(bytes2);

            await client.UploadAsync("1.bin", new MemoryStream(bytes1));

            await client.UploadAsync("1.bin", new MemoryStream(bytes2)); // will indicate old file to delete

            await client.UploadAsync("1.bin", new MemoryStream(bytes1)); // will indicate old file to delete

            await client.UploadAsync("1.bin", new MemoryStream(bytes2)); // will indicate old file to delete

            await client.UploadAsync("2.bin", new MemoryStream(bytes2));

            await client.UploadAsync("2.bin", new MemoryStream(bytes1)); // will indicate old file to delete

            await client.UploadAsync("2.bin", new MemoryStream(bytes2)); // will indicate old file to delete

            await client.DeleteAsync("1.bin");

            await client.DeleteAsync("2.bin");

            var sw = Stopwatch.StartNew();

            while (sw.Elapsed < TimeSpan.FromMinutes(1))
            {
                try
                {
                    await rfs.Files.CleanupDeletedFilesAsync();

                    break;
                }
                catch (Exception e)
                {
                    if (e.InnerException is ConcurrencyException == false)
                    {
                        throw;
                    }

                    // concurrency exceptions are expected because files indicated to deletion use the same pages
                    // so concurrent modifications can result in concurrency exception
                    // however the first deletion attempt should pass, so finally all deleted files should be cleaned up
                }
            }

            IList <FileHeader> files = null;

            rfs.Storage.Batch(accessor =>
            {
                files = accessor.GetFilesAfter(Etag.Empty, 10).ToList();
            });

            Assert.Equal(2, files.Count);
            Assert.True(files.All(x => x.IsTombstone));

            // but after upload there should be two files which aren't tombstones

            await client.UploadAsync("1.bin", new MemoryStream(bytes1));

            await client.UploadAsync("2.bin", new MemoryStream(bytes1));

            rfs.Storage.Batch(accessor =>
            {
                files = accessor.GetFilesAfter(Etag.Empty, 10).ToList();
            });

            Assert.Equal(2, files.Count);
            Assert.False(files.All(x => x.IsTombstone));

            foreach (var file in files)
            {
                var content = new MemoryStream(new byte[file.TotalSize.Value]);

                using (var stream = StorageStream.Reading(rfs.Storage, file.FullPath))
                {
                    await stream.CopyToAsync(content);
                }

                Assert.Equal(bytes1, content.ToArray()); // both files were last uploaded with bytes1
            }
        }