public Task <Library.Utility.TempFile> GetFileForTestingAsync(string remotename, long size, string remotehash) { var fe = new FileEntryItem(BackendActionType.Get, remotename); fe.VerifyHashOnly = true; return(RunRetryOnMain(fe, () => DoGet(fe))); }
public VolumeUploadRequest(BlockVolumeWriter blockvolume, FileEntryItem blockEntry, IndexVolumeWriter indexVolume, FileEntryItem indexEntry) { BlockVolume = blockvolume; BlockEntry = blockEntry; IndexVolume = indexVolume; IndexEntry = indexEntry; }
private async Task <IList <Library.Interface.IFileEntry> > DoList(FileEntryItem item) { await m_stats.SendEventAsync(BackendActionType.List, BackendEventType.Started, null, -1); var r = m_backend.List().ToList(); var sb = new StringBuilder(); sb.AppendLine("["); long count = 0; foreach (var e in r) { if (count != 0) { sb.AppendLine(","); } count++; sb.Append(JsonConvert.SerializeObject(e)); } sb.AppendLine(); sb.Append("]"); await m_database.LogRemoteOperationAsync("list", "", sb.ToString()); await m_stats.SendEventAsync(BackendActionType.List, BackendEventType.Completed, null, r.Count); return(r); }
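// Editor's sketch (hedged): the StringBuilder loop above emits a JSON array entry by entry;
// apart from the line breaks between entries, the output should match a single Json.NET call.
// SerializeListing is a hypothetical helper name, not part of the codebase.
using System.Collections.Generic;
using Newtonsoft.Json;

static class ListingSerializationSketch
{
    public static string SerializeListing<T>(IList<T> entries)
    {
        // One call produces the same JSON array the loop builds incrementally.
        return JsonConvert.SerializeObject(entries);
    }
}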
public Task DeleteFileAsync(string remotename, bool suppressCleanup = false) { var fe = new FileEntryItem(BackendActionType.Delete, remotename); return(RunRetryOnMain(fe, () => DoDelete(fe, suppressCleanup) )); }
public Task CreateFolder(string remotename) { var fe = new FileEntryItem(BackendActionType.CreateFolder, remotename); return(RunRetryOnMain(fe, () => DoCreateFolder(fe) )); }
public Task <IList <Library.Interface.IFileEntry> > ListFilesAsync() { var fe = new FileEntryItem(BackendActionType.List, null); return(RunRetryOnMain(fe, () => DoList(fe) )); }
public VolumeUploadRequest(BlockVolumeWriter blockVolume, FileEntryItem blockEntry, TemporaryIndexVolume indexVolume, Options options, BackupDatabase database) { BlockVolume = blockVolume; BlockEntry = blockEntry; IndexVolume = indexVolume; Options = options; Database = database; }
private static FileEntryItem CreateFileEntryForUpload(VolumeWriterBase volume, Options options) { var fileEntry = new FileEntryItem(BackendActionType.Put, volume.RemoteFilename); fileEntry.SetLocalfilename(volume.LocalFilename); fileEntry.Encrypt(options); fileEntry.UpdateHashAndSize(options); return(fileEntry); }
private async Task UploadVolumeWriter(VolumeWriterBase volumeWriter, Worker worker, CancellationToken cancelToken) { var fileEntry = new FileEntryItem(BackendActionType.Put, volumeWriter.RemoteFilename); fileEntry.SetLocalfilename(volumeWriter.LocalFilename); fileEntry.Encrypt(m_options); fileEntry.UpdateHashAndSize(m_options); await UploadFileAsync(fileEntry, worker, cancelToken).ConfigureAwait(false); }
private async Task <bool> DoDelete(FileEntryItem item, bool suppressCleanup) { if (m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldDeleteRemoteFile", "Would delete remote file: {0}, size: {1}", item.RemoteFilename, Library.Utility.Utility.FormatSizeString(item.Size)); return(true); } await m_stats.SendEventAsync(BackendActionType.Delete, BackendEventType.Started, item.RemoteFilename, item.Size); string result = null; try { m_backend.Delete(item.RemoteFilename); } catch (Exception ex) { var isFileMissingException = ex is Library.Interface.FileMissingException || ex is System.IO.FileNotFoundException; var wr = (ex as System.Net.WebException)?.Response as System.Net.HttpWebResponse; if (isFileMissingException || (wr != null && wr.StatusCode == System.Net.HttpStatusCode.NotFound)) { Logging.Log.WriteWarningMessage(LOGTAG, "DeleteRemoteFileFailed", ex, LC.L("Delete operation failed for {0} with FileNotFound, listing contents", item.RemoteFilename)); bool success = false; try { success = !m_backend.List().Select(x => x.Name).Contains(item.RemoteFilename); } catch { } if (success) { Logging.Log.WriteInformationMessage(LOGTAG, "DeleteRemoteFileSuccess", LC.L("Listing indicates file {0} is deleted correctly", item.RemoteFilename)); return(true); } } result = ex.ToString(); throw; } finally { await m_database.LogRemoteOperationAsync("delete", item.RemoteFilename, result); } await m_database.UpdateRemoteVolumeAsync(item.RemoteFilename, RemoteVolumeState.Deleted, -1, null, suppressCleanup, TimeSpan.FromHours(2)); await m_stats.SendEventAsync(BackendActionType.Delete, BackendEventType.Completed, item.RemoteFilename, item.Size); return(true); }
public Task <Tuple <Library.Utility.TempFile, long, string> > GetFileWithInfoAsync(string remotename) { var fe = new FileEntryItem(BackendActionType.Get, remotename); return(RunRetryOnMain(fe, async() => { var res = await DoGet(fe).ConfigureAwait(false); return new Tuple <Library.Utility.TempFile, long, string>( res, fe.Size, fe.Hash ); })); }
private async Task UploadBlockAndIndexAsync(VolumeUploadRequest upload, Worker worker, CancellationToken cancelToken) { if (await UploadFileAsync(upload.BlockEntry, worker, cancelToken).ConfigureAwait(false)) { /* We must use the BlockEntry's RemoteFilename and not the BlockVolume's, since the BlockEntry's RemoteFilename reflects files renamed during retries after errors. */ IndexVolumeWriter indexVolumeWriter = await upload.IndexVolume.CreateVolume(upload.BlockEntry.RemoteFilename, upload.BlockEntry.Hash, upload.BlockEntry.Size, upload.Options, upload.Database); FileEntryItem indexEntry = indexVolumeWriter.CreateFileEntryForUpload(upload.Options); if (await UploadFileAsync(indexEntry, worker, cancelToken).ConfigureAwait(false)) { await m_database.AddIndexBlockLinkAsync(indexVolumeWriter.VolumeID, upload.BlockVolume.VolumeID).ConfigureAwait(false); } } }
public Task PutUnencryptedAsync(string remotename, string localpath) { var fe = new FileEntryItem(BackendActionType.Put, remotename); fe.SetLocalfilename(localpath); fe.Encrypted = true; /* Prevent encryption */ fe.TrackedInDb = false; /* Prevent Db updates */ return(RunRetryOnMain <bool>(fe, async() => { await DoPut(fe).ConfigureAwait(false); m_uploadSuccess = true; return true; })); }
private async Task RenameFileAfterErrorAsync(FileEntryItem item) { var p = VolumeBase.ParseFilename(item.RemoteFilename); var guid = VolumeWriterBase.GenerateGuid(); var time = p.Time.Ticks == 0 ? p.Time : p.Time.AddSeconds(1); var newname = VolumeBase.GenerateFilename(p.FileType, p.Prefix, guid, time, p.CompressionModule, p.EncryptionModule); var oldname = item.RemoteFilename; await m_stats.SendEventAsync(item.Operation, BackendEventType.Rename, oldname, item.Size); await m_stats.SendEventAsync(item.Operation, BackendEventType.Rename, newname, item.Size); Logging.Log.WriteInformationMessage(LOGTAG, "RenameRemoteTargetFile", "Renaming \"{0}\" to \"{1}\"", oldname, newname); await m_database.RenameRemoteFileAsync(oldname, newname); item.RemoteFilename = newname; }
private static async Task UploadVolumeAndIndex(SpillVolumeRequest target, IWriteChannel <IUploadRequest> outputChannel, Options options, BackupDatabase database) { var blockEntry = CreateFileEntryForUpload(target.BlockVolume, options); IndexVolumeWriter indexVolume = null; FileEntryItem indexEntry = null; if (target.IndexVolume != null) { indexVolume = await target.IndexVolume.CreateVolume(target.BlockVolume.RemoteFilename, blockEntry.Hash, blockEntry.Size, options, database).ConfigureAwait(false); indexEntry = CreateFileEntryForUpload(indexVolume, options); } var uploadRequest = new VolumeUploadRequest(target.BlockVolume, blockEntry, indexVolume, indexEntry); await outputChannel.WriteAsync(uploadRequest).ConfigureAwait(false); }
private async Task <bool> UploadFileAsync(FileEntryItem item, Worker worker, CancellationToken cancelToken) { if (cancelToken.IsCancellationRequested) { return(false); } return(await DoWithRetry(async() => { if (item.IsRetry) { await RenameFileAfterErrorAsync(item).ConfigureAwait(false); } await DoPut(item, worker.Backend, cancelToken).ConfigureAwait(false); }, item, worker, cancelToken).ConfigureAwait(false)); }
private async Task <bool> DoCreateFolder(FileEntryItem item) { await m_stats.SendEventAsync(BackendActionType.CreateFolder, BackendEventType.Started, null, -1); string result = null; try { m_backend.CreateFolder(); } catch (Exception ex) { result = ex.ToString(); throw; } finally { await m_database.LogRemoteOperationAsync("createfolder", item.RemoteFilename, result); } await m_stats.SendEventAsync(BackendActionType.CreateFolder, BackendEventType.Completed, null, -1); return(true); }
private TempFile coreDoGetSequential(FileEntryItem item, Interface.IEncryption useDecrypter, out long retDownloadSize, out string retHashcode) { retHashcode = null; retDownloadSize = -1; TempFile retTarget, dlTarget = null, decryptTarget = null; try { dlTarget = new Library.Utility.TempFile(); if (m_backend is Library.Interface.IStreamingBackend && !m_options.DisableStreamingTransfers) { Func<string> getFileHash; /* extended to use stacked streams */ using (var fs = System.IO.File.OpenWrite(dlTarget)) using (var hs = GetFileHasherStream(fs, System.Security.Cryptography.CryptoStreamMode.Write, out getFileHash)) using (var ss = new ShaderStream(hs, true)) { using (var ts = new ThrottledStream(ss, m_options.MaxUploadPrSecond, m_options.MaxDownloadPrSecond)) using (var pgs = new Library.Utility.ProgressReportingStream(ts, item.Size, HandleProgress)) { ((Library.Interface.IStreamingBackend)m_backend).Get(item.RemoteFilename, pgs); } ss.Flush(); retDownloadSize = ss.TotalBytesWritten; retHashcode = getFileHash(); } } else { m_backend.Get(item.RemoteFilename, dlTarget); retDownloadSize = new System.IO.FileInfo(dlTarget).Length; retHashcode = CalculateFileHash(dlTarget); } /* Decryption is not placed in the stream stack because the existing code makes an effort to throw a CryptographicException on failure; if decryption sat in the main stack, we could not tell in which part of the stack an exception originated. */ if (useDecrypter != null) { decryptTarget = new Library.Utility.TempFile(); lock (m_encryptionLock) { try { useDecrypter.Decrypt(dlTarget, decryptTarget); } /* If we fail here, make sure that we throw a crypto exception */ catch (System.Security.Cryptography.CryptographicException) { throw; } catch (Exception ex) { throw new System.Security.Cryptography.CryptographicException(ex.Message, ex); } } retTarget = decryptTarget; decryptTarget = null; } else { retTarget = dlTarget; dlTarget = null; } } finally { if (dlTarget != null) dlTarget.Dispose(); if (decryptTarget != null) decryptTarget.Dispose(); } return retTarget; }
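// Editor's sketch (hedged): GetFileHasherStream above is a Duplicati helper; the stacked-stream
// idea it implements can be reproduced with standard .NET types. A minimal standalone version
// that hashes bytes as they are written through to a file; HashWhileWriting is a hypothetical name.
using System;
using System.IO;
using System.Security.Cryptography;

static class HashingStreamSketch
{
    public static string HashWhileWriting(Stream source, string targetPath)
    {
        using (var sha = SHA256.Create())
        using (var fs = File.OpenWrite(targetPath))
        using (var cs = new CryptoStream(fs, sha, CryptoStreamMode.Write))
        {
            // Every byte copied here is simultaneously written to disk and fed to the hash.
            source.CopyTo(cs);
            cs.FlushFinalBlock();
            return Convert.ToBase64String(sha.Hash);
        }
    }
}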
private TempFile coreDoGetPiping(FileEntryItem item, Interface.IEncryption useDecrypter, out long retDownloadSize, out string retHashcode) { /* With piping allowed, we will parallelize the operation with buffered pipes to maximize throughput: separated into Download (only for streaming) - Hashing - Decryption. The idea is to use DirectStreamLinks that are inserted into the stream stack, creating a fork to run the crypto operations on. */ retDownloadSize = -1; retHashcode = null; bool enableStreaming = (m_backend is Library.Interface.IStreamingBackend && !m_options.DisableStreamingTransfers); System.Threading.Tasks.Task<string> taskHasher = null; DirectStreamLink linkForkHasher = null; System.Threading.Tasks.Task taskDecrypter = null; DirectStreamLink linkForkDecryptor = null; /* keep potential temp files and their streams for cleanup (cannot use using here). */ TempFile retTarget = null, dlTarget = null, decryptTarget = null; System.IO.Stream dlToStream = null, decryptToStream = null; try { System.IO.Stream nextTierWriter = null; /* target of our stacked streams */ if (!enableStreaming) /* we will always need dlTarget if not streaming... */ dlTarget = new TempFile(); else if (enableStreaming && useDecrypter == null) { dlTarget = new TempFile(); dlToStream = System.IO.File.OpenWrite(dlTarget); nextTierWriter = dlToStream; /* actually write through to file. */ } /* setup decryption: fork off a StreamLink from the stack, and set up the decryptor task */ if (useDecrypter != null) { linkForkDecryptor = new DirectStreamLink(1 << 16, false, false, nextTierWriter); nextTierWriter = linkForkDecryptor.WriterStream; linkForkDecryptor.SetKnownLength(item.Size, false); /* Set length to allow AES-decryption (not streamable yet) */ decryptTarget = new TempFile(); decryptToStream = System.IO.File.OpenWrite(decryptTarget); taskDecrypter = new System.Threading.Tasks.Task(() => { using (var input = linkForkDecryptor.ReaderStream) using (var output = decryptToStream) lock (m_encryptionLock) { useDecrypter.Decrypt(input, output); } } ); } /* setup hashing: fork off a StreamLink from the stack, then a task computes the hash */ linkForkHasher = new DirectStreamLink(1 << 16, false, false, nextTierWriter); nextTierWriter = linkForkHasher.WriterStream; taskHasher = new System.Threading.Tasks.Task<string>(() => { using (var input = linkForkHasher.ReaderStream) return CalculateFileHash(input); } ); /* OK, forks with tasks are set up, so let's do the download, which is performed in the main thread. */ bool hadException = false; try { if (enableStreaming) { using (var ss = new ShaderStream(nextTierWriter, false)) { using (var ts = new ThrottledStream(ss, m_options.MaxUploadPrSecond, m_options.MaxDownloadPrSecond)) using (var pgs = new Library.Utility.ProgressReportingStream(ts, item.Size, HandleProgress)) { taskHasher.Start(); /* We do not start tasks earlier, to be sure the input always gets closed. */ if (taskDecrypter != null) taskDecrypter.Start(); ((Library.Interface.IStreamingBackend)m_backend).Get(item.RemoteFilename, pgs); } retDownloadSize = ss.TotalBytesWritten; } } else { m_backend.Get(item.RemoteFilename, dlTarget); retDownloadSize = new System.IO.FileInfo(dlTarget).Length; using (dlToStream = System.IO.File.OpenRead(dlTarget)) { taskHasher.Start(); /* We do not start tasks earlier, to be sure the input always gets closed. */ if (taskDecrypter != null) taskDecrypter.Start(); new DirectStreamLink.DataPump(dlToStream, nextTierWriter).Run(); } } } catch (Exception) { hadException = true; throw; } finally { /* These nested try-catch-finally blocks make sure we do not miss any exceptions and that all started tasks are properly ended and tidied up. As for what is thrown: if an exception occurred in the main thread (download), it is thrown; then the hasher task is checked, and last the decrypter. This resembles the old logic. */ try { retHashcode = taskHasher.Result; } catch (AggregateException ex) { if (!hadException) { hadException = true; throw ex.InnerExceptions[0]; } } finally { if (taskDecrypter != null) { try { taskDecrypter.Wait(); } catch (AggregateException ex) { if (!hadException) { hadException = true; if (ex.InnerExceptions[0] is System.Security.Cryptography.CryptographicException) throw ex.InnerExceptions[0]; else throw new System.Security.Cryptography.CryptographicException(ex.InnerExceptions[0].Message, ex.InnerExceptions[0]); } } } } } if (useDecrypter != null) /* return decrypted temp file */ { retTarget = decryptTarget; decryptTarget = null; } else /* return downloaded file */ { retTarget = dlTarget; dlTarget = null; } } finally { /* Be tidy: manually do some cleanup of temp files, as we could not use usings. Unclosed streams should only occur if we failed even before the tasks were started. */ if (dlToStream != null) dlToStream.Dispose(); if (dlTarget != null) dlTarget.Dispose(); if (decryptToStream != null) decryptToStream.Dispose(); if (decryptTarget != null) decryptTarget.Dispose(); } return retTarget; }
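// Editor's sketch (hedged): DirectStreamLink is a Duplicati type; the fork it creates in the
// stream stack behaves like a tee. A minimal standalone tee that duplicates every write into
// two sinks illustrates the idea; TeeStream is a hypothetical name, not the project's API.
using System;
using System.IO;

sealed class TeeStream : Stream
{
    private readonly Stream m_primary;
    private readonly Stream m_fork;

    public TeeStream(Stream primary, Stream fork) { m_primary = primary; m_fork = fork; }

    public override void Write(byte[] buffer, int offset, int count)
    {
        // Each block reaches both sinks, e.g. the target file and a hashing consumer.
        m_primary.Write(buffer, offset, count);
        m_fork.Write(buffer, offset, count);
    }

    public override void Flush() { m_primary.Flush(); m_fork.Flush(); }
    public override bool CanRead => false;
    public override bool CanSeek => false;
    public override bool CanWrite => true;
    public override long Length => throw new NotSupportedException();
    public override long Position { get => throw new NotSupportedException(); set => throw new NotSupportedException(); }
    public override int Read(byte[] buffer, int offset, int count) => throw new NotSupportedException();
    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
    public override void SetLength(long value) => throw new NotSupportedException();
}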
public void PutUnencrypted(string remotename, string localpath) { if (m_lastException != null) throw m_lastException; var req = new FileEntryItem(OperationType.Put, remotename, null); req.SetLocalfilename(localpath); req.Encrypted = true; /* Prevent encryption */ req.NotTrackedInDb = true; /* Prevent Db updates */ if (m_queue.Enqueue(req) && m_options.SynchronousUpload) { req.WaitForComplete(); if (req.Exception != null) throw req.Exception; } if (m_lastException != null) throw m_lastException; }
private void DoDelete(FileEntryItem item) { m_statwriter.SendEvent(BackendActionType.Delete, BackendEventType.Started, item.RemoteFilename, item.Size); string result = null; try { m_backend.Delete(item.RemoteFilename); } catch (Exception ex) { var isFileMissingException = ex is Library.Interface.FileMissingException || ex is System.IO.FileNotFoundException; var wr = (ex as System.Net.WebException)?.Response as System.Net.HttpWebResponse; if (isFileMissingException || (wr != null && wr.StatusCode == System.Net.HttpStatusCode.NotFound)) { m_statwriter.AddWarning(LC.L("Delete operation failed for {0} with FileNotFound, listing contents", item.RemoteFilename), ex); bool success = false; try { success = !m_backend.List().Select(x => x.Name).Contains(item.RemoteFilename); } catch { } if (success) { m_statwriter.AddMessage(LC.L("Listing indicates file {0} is deleted correctly", item.RemoteFilename)); return; } } result = ex.ToString(); throw; } finally { m_db.LogDbOperation("delete", item.RemoteFilename, result); } m_db.LogDbUpdate(item.RemoteFilename, RemoteVolumeState.Deleted, -1, null); m_statwriter.SendEvent(BackendActionType.Delete, BackendEventType.Completed, item.RemoteFilename, item.Size); }
private async Task <bool> DoPut(FileEntryItem item, bool updatedHash = false) { /* If this is not already encrypted, do it now */ item.Encrypt(m_options); updatedHash |= item.UpdateHashAndSize(m_options); if (updatedHash && item.TrackedInDb) { await m_database.UpdateRemoteVolumeAsync(item.RemoteFilename, RemoteVolumeState.Uploading, item.Size, item.Hash); } if (m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadVolume", "Would upload volume: {0}, size: {1}", item.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(item.LocalFilename).Length)); item.DeleteLocalFile(); return(true); } await m_database.LogRemoteOperationAsync("put", item.RemoteFilename, JsonConvert.SerializeObject(new { Size = item.Size, Hash = item.Hash })); await m_stats.SendEventAsync(BackendActionType.Put, BackendEventType.Started, item.RemoteFilename, item.Size); var begin = DateTime.Now; if (m_backend is Library.Interface.IStreamingBackend && !m_options.DisableStreamingTransfers) { using (var fs = System.IO.File.OpenRead(item.LocalFilename)) using (var ts = new ThrottledStream(fs, m_options.MaxUploadPrSecond, m_options.MaxDownloadPrSecond)) using (var pgs = new Library.Utility.ProgressReportingStream(ts, item.Size, pg => HandleProgress(ts, pg))) ((Library.Interface.IStreamingBackend)m_backend).Put(item.RemoteFilename, pgs); } else { m_backend.Put(item.RemoteFilename, item.LocalFilename); } var duration = DateTime.Now - begin; Logging.Log.WriteProfilingMessage(LOGTAG, "UploadSpeed", "Uploaded {0} in {1}, {2}/s", Library.Utility.Utility.FormatSizeString(item.Size), duration, Library.Utility.Utility.FormatSizeString((long)(item.Size / duration.TotalSeconds))); if (item.TrackedInDb) { await m_database.UpdateRemoteVolumeAsync(item.RemoteFilename, RemoteVolumeState.Uploaded, item.Size, item.Hash); } await m_stats.SendEventAsync(BackendActionType.Put, BackendEventType.Completed, item.RemoteFilename, item.Size); if (m_options.ListVerifyUploads) { var f = m_backend.List().Where(n => n.Name.Equals(item.RemoteFilename, StringComparison.OrdinalIgnoreCase)).FirstOrDefault(); if (f == null) { throw new Exception(string.Format("List verify failed, file was not found after upload: {0}", item.RemoteFilename)); } else if (f.Size != item.Size && f.Size >= 0) { throw new Exception(string.Format("List verify failed for file: {0}, size was {1} but expected to be {2}", f.Name, f.Size, item.Size)); } } item.DeleteLocalFile(); await m_database.CommitTransactionAsync("CommitAfterUpload"); return(true); }
public Library.Utility.TempFile GetWithInfo(string remotename, out long size, out string hash) { if (m_lastException != null) throw m_lastException; var req = new FileEntryItem(OperationType.Get, remotename, -1, null); if (m_queue.Enqueue(req)) { req.WaitForComplete(); if (req.Exception != null) throw req.Exception; } if (m_lastException != null) throw m_lastException; size = req.Size; hash = req.Hash; return (Library.Utility.TempFile)req.Result; }
private void DoGet(FileEntryItem item) { Library.Utility.TempFile tmpfile = null; m_statwriter.SendEvent(BackendActionType.Get, BackendEventType.Started, item.RemoteFilename, item.Size); try { var begin = DateTime.Now; /* We already know the filename, so we make the decision about whether and which decryptor to use prior to the download. This allows setting up stacked streams or a pipe doing decryption. */ Interface.IEncryption useDecrypter = null; if (!item.VerifyHashOnly && !m_options.NoEncryption) { useDecrypter = m_encryption; lock (m_encryptionLock) { try { /* Auto-guess the encryption module */ var ext = (System.IO.Path.GetExtension(item.RemoteFilename) ?? "").TrimStart('.'); if (!m_encryption.FilenameExtension.Equals(ext, StringComparison.InvariantCultureIgnoreCase)) { /* Check if the file is encrypted with something else */ if (DynamicLoader.EncryptionLoader.Keys.Contains(ext, StringComparer.InvariantCultureIgnoreCase)) { m_statwriter.AddVerboseMessage("Filename extension \"{0}\" does not match encryption module \"{1}\", using matching encryption module", ext, m_options.EncryptionModule); useDecrypter = DynamicLoader.EncryptionLoader.GetModule(ext, m_options.Passphrase, m_options.RawOptions); useDecrypter = useDecrypter ?? m_encryption; } /* Check if the file is not encrypted */ else if (DynamicLoader.CompressionLoader.Keys.Contains(ext, StringComparer.InvariantCultureIgnoreCase)) { m_statwriter.AddVerboseMessage("Filename extension \"{0}\" does not match encryption module \"{1}\", guessing that it is not encrypted", ext, m_options.EncryptionModule); useDecrypter = null; } /* Fallback, let's see what happens... */ else { m_statwriter.AddVerboseMessage("Filename extension \"{0}\" does not match encryption module \"{1}\", attempting to use specified encryption module as no others match", ext, m_options.EncryptionModule); } } } /* If we fail here, make sure that we throw a crypto exception */ catch (System.Security.Cryptography.CryptographicException) { throw; } catch (Exception ex) { throw new System.Security.Cryptography.CryptographicException(ex.Message, ex); } } } string fileHash; long dataSizeDownloaded; if (m_options.DisablePipedStreaming) tmpfile = coreDoGetSequential(item, useDecrypter, out dataSizeDownloaded, out fileHash); else tmpfile = coreDoGetPiping(item, useDecrypter, out dataSizeDownloaded, out fileHash); var duration = DateTime.Now - begin; Logging.Log.WriteMessage(string.Format("Downloaded {3}{0} in {1}, {2}/s", Library.Utility.Utility.FormatSizeString(dataSizeDownloaded), duration, Library.Utility.Utility.FormatSizeString((long)(dataSizeDownloaded / duration.TotalSeconds)), useDecrypter == null ? "" : "and decrypted "), Duplicati.Library.Logging.LogMessageType.Profiling); m_db.LogDbOperation("get", item.RemoteFilename, JsonConvert.SerializeObject(new { Size = dataSizeDownloaded, Hash = fileHash })); m_statwriter.SendEvent(BackendActionType.Get, BackendEventType.Completed, item.RemoteFilename, dataSizeDownloaded); if (!m_options.SkipFileHashChecks) { if (item.Size >= 0) { if (dataSizeDownloaded != item.Size) throw new Exception(Strings.Controller.DownloadedFileSizeError(item.RemoteFilename, dataSizeDownloaded, item.Size)); } else item.Size = dataSizeDownloaded; if (!string.IsNullOrEmpty(item.Hash)) { if (fileHash != item.Hash) throw new HashMismatchException(Strings.Controller.HashMismatchError(tmpfile, item.Hash, fileHash)); } else item.Hash = fileHash; } if (!item.VerifyHashOnly) { item.Result = tmpfile; tmpfile = null; } } catch { if (tmpfile != null) tmpfile.Dispose(); throw; } }
public Library.Utility.TempFile Get(string remotename, long size, string hash) { if (m_lastException != null) throw m_lastException; var req = new FileEntryItem(OperationType.Get, remotename, size, hash); if (m_queue.Enqueue(req)) ((IDownloadWaitHandle)req).Wait(); if (m_lastException != null) throw m_lastException; return (Library.Utility.TempFile)req.Result; }
public IList<Library.Interface.IFileEntry> List() { if (m_lastException != null) throw m_lastException; var req = new FileEntryItem(OperationType.List, null); if (m_queue.Enqueue(req)) { req.WaitForComplete(); if (req.Exception != null) throw req.Exception; } if (m_lastException != null) throw m_lastException; return (IList<Library.Interface.IFileEntry>)req.Result; }
public void GetForTesting(string remotename, long size, string hash) { if (m_lastException != null) throw m_lastException; if (hash == null) throw new InvalidOperationException("Cannot test a file without the hash"); var req = new FileEntryItem(OperationType.Get, remotename, size, hash); req.VerifyHashOnly = true; if (m_queue.Enqueue(req)) { req.WaitForComplete(); if (req.Exception != null) throw req.Exception; } if (m_lastException != null) throw m_lastException; }
public void CreateFolder(string remotename) { if (m_lastException != null) throw m_lastException; var req = new FileEntryItem(OperationType.CreateFolder, remotename); if (m_queue.Enqueue(req)) { req.WaitForComplete(); if (req.Exception != null) throw req.Exception; } if (m_lastException != null) throw m_lastException; }
private void DoCreateFolder(FileEntryItem item) { m_statwriter.SendEvent(BackendActionType.CreateFolder, BackendEventType.Started, null, -1); string result = null; try { m_backend.CreateFolder(); } catch (Exception ex) { result = ex.ToString(); throw; } finally { m_db.LogDbOperation("createfolder", item.RemoteFilename, result); } m_statwriter.SendEvent(BackendActionType.CreateFolder, BackendEventType.Completed, null, -1); }
public void WaitForComplete(LocalDatabase db, System.Data.IDbTransaction transaction) { m_db.FlushDbMessages(db, transaction); if (m_lastException != null) throw m_lastException; var item = new FileEntryItem(OperationType.Terminate, null); if (m_queue.Enqueue(item)) item.WaitForComplete(); m_db.FlushDbMessages(db, transaction); if (m_lastException != null) throw m_lastException; }
public Task <Library.Utility.TempFile> GetFileAsync(string remotename, long size, string remotehash) { var fe = new FileEntryItem(BackendActionType.Get, remotename, size, remotehash); return(RunRetryOnMain(fe, () => DoGet(fe))); }
public IDownloadWaitHandle GetAsync(string remotename, long size, string hash) { if (m_lastException != null) throw m_lastException; var req = new FileEntryItem(OperationType.Get, remotename, size, hash); if (m_queue.Enqueue(req)) return req; if (m_lastException != null) throw m_lastException; else throw new InvalidOperationException("GetAsync called after backend is shut down"); }
public Library.Utility.TempFile GetWithInfo(string remotename, out long size, out string hash) { if (m_lastException != null) throw m_lastException; hash = null; size = -1; var req = new FileEntryItem(OperationType.Get, remotename, -1, null); if (m_queue.Enqueue(req)) ((IDownloadWaitHandle) req).Wait(out hash, out size); if (m_lastException != null) throw m_lastException; return (Library.Utility.TempFile)req.Result; }
private async Task <Library.Utility.TempFile> DoGet(FileEntryItem item) { Library.Utility.TempFile tmpfile = null; await m_stats.SendEventAsync(BackendActionType.Get, BackendEventType.Started, item.RemoteFilename, item.Size); try { var begin = DateTime.Now; tmpfile = new Library.Utility.TempFile(); if (m_backend is Library.Interface.IStreamingBackend && !m_options.DisableStreamingTransfers) { using (var fs = System.IO.File.OpenWrite(tmpfile)) using (var ts = new ThrottledStream(fs, m_options.MaxUploadPrSecond, m_options.MaxDownloadPrSecond)) using (var pgs = new Library.Utility.ProgressReportingStream(ts, item.Size, pg => HandleProgress(ts, pg))) ((Library.Interface.IStreamingBackend)m_backend).Get(item.RemoteFilename, pgs); } else { m_backend.Get(item.RemoteFilename, tmpfile); } var duration = DateTime.Now - begin; var filehash = FileEntryItem.CalculateFileHash(tmpfile); Logging.Log.WriteProfilingMessage(LOGTAG, "DownloadSpeed", "Downloaded {0} in {1}, {2}/s", Library.Utility.Utility.FormatSizeString(item.Size), duration, Library.Utility.Utility.FormatSizeString((long)(item.Size / duration.TotalSeconds))); await m_database.LogRemoteOperationAsync("get", item.RemoteFilename, JsonConvert.SerializeObject(new { Size = new System.IO.FileInfo(tmpfile).Length, Hash = filehash })); await m_stats.SendEventAsync(BackendActionType.Get, BackendEventType.Completed, item.RemoteFilename, new System.IO.FileInfo(tmpfile).Length); if (!m_options.SkipFileHashChecks) { var nl = new System.IO.FileInfo(tmpfile).Length; if (item.Size >= 0) { if (nl != item.Size) { throw new Exception(Strings.Controller.DownloadedFileSizeError(item.RemoteFilename, nl, item.Size)); } } else { item.Size = nl; } if (!string.IsNullOrEmpty(item.Hash)) { if (filehash != item.Hash) { throw new Duplicati.Library.Main.BackendManager.HashMismatchException(Strings.Controller.HashMismatchError(tmpfile, item.Hash, filehash)); } } else { item.Hash = filehash; } } /* Fast exit */ if (item.VerifyHashOnly) { return(null); } /* Decrypt before returning */ if (!m_options.NoEncryption) { try { using (var tmpfile2 = tmpfile) { tmpfile = new Library.Utility.TempFile(); /* Auto-guess the encryption module */ var ext = (System.IO.Path.GetExtension(item.RemoteFilename) ?? "").TrimStart('.'); if (!string.Equals(m_options.EncryptionModule, ext, StringComparison.OrdinalIgnoreCase)) { /* Check if the file is encrypted with something else */ if (DynamicLoader.EncryptionLoader.Keys.Contains(ext, StringComparer.OrdinalIgnoreCase)) { using (var encmodule = DynamicLoader.EncryptionLoader.GetModule(ext, m_options.Passphrase, m_options.RawOptions)) if (encmodule != null) { Logging.Log.WriteVerboseMessage(LOGTAG, "AutomaticDecryptionDetection", "Filename extension \"{0}\" does not match encryption module \"{1}\", using matching encryption module", ext, m_options.EncryptionModule); encmodule.Decrypt(tmpfile2, tmpfile); } } /* Check if the file is not encrypted */ else if (DynamicLoader.CompressionLoader.Keys.Contains(ext, StringComparer.OrdinalIgnoreCase)) { Logging.Log.WriteVerboseMessage(LOGTAG, "AutomaticDecryptionDetection", "Filename extension \"{0}\" does not match encryption module \"{1}\", guessing that it is not encrypted", ext, m_options.EncryptionModule); } /* Fallback, let's see what happens... */ else { Logging.Log.WriteVerboseMessage(LOGTAG, "AutomaticDecryptionDetection", "Filename extension \"{0}\" does not match encryption module \"{1}\", attempting to use specified encryption module as no others match", ext, m_options.EncryptionModule); using (var encmodule = DynamicLoader.EncryptionLoader.GetModule(m_options.EncryptionModule, m_options.Passphrase, m_options.RawOptions)) encmodule.Decrypt(tmpfile2, tmpfile); } } else { using (var encmodule = DynamicLoader.EncryptionLoader.GetModule(m_options.EncryptionModule, m_options.Passphrase, m_options.RawOptions)) encmodule.Decrypt(tmpfile2, tmpfile); } } } catch (Exception ex) { /* If we fail here, make sure that we throw a crypto exception */ if (ex is System.Security.Cryptography.CryptographicException) { throw; } else { throw new System.Security.Cryptography.CryptographicException(ex.Message, ex); } } } var res = tmpfile; tmpfile = null; return(res); } finally { try { if (tmpfile != null) { tmpfile.Dispose(); } } catch { } } }
private async Task <T> DoWithRetry <T>(FileEntryItem item, Func <Task <T> > method) { item.IsRetry = false; Exception lastException = null; if (!await m_taskreader.TransferProgressAsync) { throw new OperationCanceledException(); } if (m_workerSource.IsCancellationRequested) { throw new OperationCanceledException(); } for (var i = 0; i < m_options.NumberOfRetries; i++) { if (m_options.RetryDelay.Ticks != 0 && i != 0) { await Task.Delay(m_options.RetryDelay).ConfigureAwait(false); } if (!await m_taskreader.TransferProgressAsync) { throw new OperationCanceledException(); } if (m_workerSource.IsCancellationRequested) { throw new OperationCanceledException(); } try { if (m_backend == null) { m_backend = DynamicLoader.BackendLoader.GetBackend(m_backendurl, m_options.RawOptions); } if (m_backend == null) { throw new Exception("Backend failed to re-load"); } var r = await method().ConfigureAwait(false); return(r); } catch (Exception ex) { item.IsRetry = true; lastException = ex; Logging.Log.WriteRetryMessage(LOGTAG, $"Retry{item.Operation}", ex, "Operation {0} with file {1} attempt {2} of {3} failed with message: {4}", item.Operation, item.RemoteFilename, i + 1, m_options.NumberOfRetries, ex.Message); /* If the thread is aborted, we exit here */ if (ex is System.Threading.ThreadAbortException || ex is OperationCanceledException) { break; } await m_stats.SendEventAsync(item.Operation, i < m_options.NumberOfRetries ? BackendEventType.Retrying : BackendEventType.Failed, item.RemoteFilename, item.Size); bool recovered = false; if (!m_uploadSuccess && ex is Duplicati.Library.Interface.FolderMissingException && m_options.AutocreateFolders) { try { /* If we successfully create the folder, we can re-use the connection */ m_backend.CreateFolder(); recovered = true; } catch (Exception dex) { Logging.Log.WriteWarningMessage(LOGTAG, "FolderCreateError", dex, "Failed to create folder: {0}", ex.Message); } } if (!recovered) { ResetBackend(ex); } } finally { if (m_options.NoConnectionReuse) { ResetBackend(null); } } } throw lastException; }
public Library.Utility.TempFile Get(string remotename, long size, string hash) { if (m_lastException != null) throw m_lastException; var req = new FileEntryItem(OperationType.Get, remotename, size, hash); if (m_queue.Enqueue(req)) { req.WaitForComplete(); if (req.Exception != null) throw req.Exception; } if (m_lastException != null) throw m_lastException; return (Library.Utility.TempFile)req.Result; }
private void DoDelete(FileEntryItem item) { m_statwriter.SendEvent(BackendActionType.Delete, BackendEventType.Started, item.RemoteFilename, item.Size); string result = null; try { m_backend.Delete(item.RemoteFilename); } catch (Duplicati.Library.Interface.FileMissingException fex) { m_statwriter.AddWarning(LC.L("Delete operation failed for {0} with FileNotFound, listing contents", item.RemoteFilename), fex); bool success = false; try { success = !m_backend.List().Select(x => x.Name).Contains(item.RemoteFilename); } catch { } if (success) m_statwriter.AddMessage(LC.L("Listing indicates file {0} is deleted correctly", item.RemoteFilename)); else throw; } catch (Exception ex) { result = ex.ToString(); throw; } finally { m_db.LogDbOperation("delete", item.RemoteFilename, result); } m_db.LogDbUpdate(item.RemoteFilename, RemoteVolumeState.Deleted, -1, null); m_statwriter.SendEvent(BackendActionType.Delete, BackendEventType.Completed, item.RemoteFilename, item.Size); }
public async Task UploadFileAsync(VolumeWriterBase item, Func <string, Task <IndexVolumeWriter> > createIndexFile = null) { var fe = new FileEntryItem(BackendActionType.Put, item.RemoteFilename); fe.SetLocalfilename(item.LocalFilename); var tcs = new TaskCompletionSource <bool>(); var backgroundhashAndEncrypt = Task.Run(() => { fe.Encrypt(m_options); return(fe.UpdateHashAndSize(m_options)); }); await RunOnMain(async() => { try { await DoWithRetry(fe, async() => { if (fe.IsRetry) { await RenameFileAfterErrorAsync(fe).ConfigureAwait(false); } /* Make sure the encryption and hashing have completed */ await backgroundhashAndEncrypt.ConfigureAwait(false); return(await DoPut(fe).ConfigureAwait(false)); }).ConfigureAwait(false); if (createIndexFile != null) { var ix = await createIndexFile(fe.RemoteFilename).ConfigureAwait(false); var indexFile = new FileEntryItem(BackendActionType.Put, ix.RemoteFilename); indexFile.SetLocalfilename(ix.LocalFilename); await m_database.UpdateRemoteVolumeAsync(indexFile.RemoteFilename, RemoteVolumeState.Uploading, -1, null); await DoWithRetry(indexFile, async() => { if (indexFile.IsRetry) { await RenameFileAfterErrorAsync(indexFile).ConfigureAwait(false); } var res = await DoPut(indexFile).ConfigureAwait(false); /* Register that the index file is tracking the block file */ await m_database.AddIndexBlockLinkAsync( ix.VolumeID, await m_database.GetRemoteVolumeIDAsync(fe.RemoteFilename) ).ConfigureAwait(false); return(res); }); } tcs.TrySetResult(true); } catch (Exception ex) { if (ex is System.Threading.ThreadAbortException) { tcs.TrySetCanceled(); } else { tcs.TrySetException(ex); } } }); await tcs.Task.ConfigureAwait(false); }
protected Task <T> RunRetryOnMain <T>(FileEntryItem fe, Func <Task <T> > method) { return(RunOnMain <T>(() => DoWithRetry <T>(fe, method) )); }
public void Put(VolumeWriterBase item, IndexVolumeWriter indexfile = null) { if (m_lastException != null) throw m_lastException; item.Close(); m_db.LogDbUpdate(item.RemoteFilename, RemoteVolumeState.Uploading, -1, null); var req = new FileEntryItem(OperationType.Put, item.RemoteFilename, null); req.LocalTempfile = item.TempFile; if (m_lastException != null) throw m_lastException; FileEntryItem req2 = null; /* As the network link is the bottleneck, we encrypt the dblock volume before the upload is enqueued (i.e. on the worker thread) */ if (m_encryption != null) lock (m_encryptionLock) req.Encrypt(m_encryption, m_statwriter); req.UpdateHashAndSize(m_options); /* We do not encrypt the dindex volume, because it is small and may need to be re-written if the dblock upload is retried */ if (indexfile != null) { m_db.LogDbUpdate(indexfile.RemoteFilename, RemoteVolumeState.Uploading, -1, null); req2 = new FileEntryItem(OperationType.Put, indexfile.RemoteFilename); req2.LocalTempfile = indexfile.TempFile; req.Indexfile = new Tuple<IndexVolumeWriter, FileEntryItem>(indexfile, req2); } if (m_queue.Enqueue(req) && m_options.SynchronousUpload) { req.WaitForComplete(); if (req.Exception != null) throw req.Exception; } if (req2 != null && m_queue.Enqueue(req2) && m_options.SynchronousUpload) { req2.WaitForComplete(); if (req2.Exception != null) throw req2.Exception; } if (m_lastException != null) throw m_lastException; }
private void DoDelete(FileEntryItem item) { m_statwriter.SendEvent(BackendActionType.Delete, BackendEventType.Started, item.RemoteFilename, item.Size); string result = null; try { m_backend.Delete(item.RemoteFilename); } catch (Exception ex) { result = ex.ToString(); throw; } finally { m_db.LogDbOperation("delete", item.RemoteFilename, result); } m_db.LogDbUpdate(item.RemoteFilename, RemoteVolumeState.Deleted, -1, null); m_statwriter.SendEvent(BackendActionType.Delete, BackendEventType.Completed, item.RemoteFilename, item.Size); }
public void Delete(string remotename, long size, bool synchronous = false) { if (m_lastException != null) throw m_lastException; m_db.LogDbUpdate(remotename, RemoteVolumeState.Deleting, size, null); var req = new FileEntryItem(OperationType.Delete, remotename, size, null); if (m_queue.Enqueue(req) && synchronous) { req.WaitForComplete(); if (req.Exception != null) throw req.Exception; } if (m_lastException != null) throw m_lastException; }
private void DoGet(FileEntryItem item) { Library.Utility.TempFile tmpfile = null; m_statwriter.SendEvent(BackendActionType.Get, BackendEventType.Started, item.RemoteFilename, item.Size); try { tmpfile = new Library.Utility.TempFile(); if (m_backend is Library.Interface.IStreamingBackend && !m_options.DisableStreamingTransfers) { using (var fs = System.IO.File.OpenWrite(tmpfile)) using (var ts = new ThrottledStream(fs, m_options.MaxUploadPrSecond, m_options.MaxDownloadPrSecond)) using (var pgs = new Library.Utility.ProgressReportingStream(ts, item.Size, HandleProgress)) ((Library.Interface.IStreamingBackend)m_backend).Get(item.RemoteFilename, pgs); } else m_backend.Get(item.RemoteFilename, tmpfile); m_db.LogDbOperation("get", item.RemoteFilename, JsonConvert.SerializeObject(new { Size = new System.IO.FileInfo(tmpfile).Length, Hash = FileEntryItem.CalculateFileHash(tmpfile) })); m_statwriter.SendEvent(BackendActionType.Get, BackendEventType.Completed, item.RemoteFilename, new System.IO.FileInfo(tmpfile).Length); if (!m_options.SkipFileHashChecks) { var nl = new System.IO.FileInfo(tmpfile).Length; if (item.Size >= 0) { if (nl != item.Size) throw new Exception(string.Format(Strings.Controller.DownloadedFileSizeError, item.RemoteFilename, nl, item.Size)); } else item.Size = nl; var nh = FileEntryItem.CalculateFileHash(tmpfile); if (!string.IsNullOrEmpty(item.Hash)) { if (nh != item.Hash) throw new HashMismatchException(string.Format(Strings.Controller.HashMismatchError, tmpfile, item.Hash, nh)); } else item.Hash = nh; } if (!item.VerifyHashOnly) { /* Decrypt before returning */ if (!m_options.NoEncryption) { try { using(var tmpfile2 = tmpfile) { tmpfile = new Library.Utility.TempFile(); lock(m_encryptionLock) m_encryption.Decrypt(tmpfile2, tmpfile); } } catch (Exception ex) { /* If we fail here, make sure that we throw a crypto exception */ if (ex is System.Security.Cryptography.CryptographicException) throw; else throw new System.Security.Cryptography.CryptographicException(ex.Message, ex); } } item.Result = tmpfile; tmpfile = null; } } catch { if (tmpfile != null) tmpfile.Dispose(); throw; } }
private void DoList(FileEntryItem item) { m_statwriter.SendEvent(BackendActionType.List, BackendEventType.Started, null, -1); var r = m_backend.List(); StringBuilder sb = new StringBuilder(); sb.AppendLine("["); long count = 0; foreach (var e in r) { if (count != 0) sb.AppendLine(","); count++; sb.Append(JsonConvert.SerializeObject(e)); } sb.AppendLine(); sb.Append("]"); m_db.LogDbOperation("list", "", sb.ToString()); item.Result = r; m_statwriter.SendEvent(BackendActionType.List, BackendEventType.Completed, null, r.Count); }
private async Task <bool> DoWithRetry(Func <Task> method, FileEntryItem item, Worker worker, CancellationToken cancelToken) { item.IsRetry = false; var retryCount = 0; for (retryCount = 0; retryCount <= m_options.NumberOfRetries; retryCount++) { if (m_options.RetryDelay.Ticks != 0 && retryCount != 0) { await Task.Delay(m_options.RetryDelay).ConfigureAwait(false); } if (cancelToken.IsCancellationRequested) { return(false); } try { if (worker.Backend == null) { worker.Backend = m_backendFactory(); } await method().ConfigureAwait(false); return(true); } catch (Exception ex) { item.IsRetry = true; Logging.Log.WriteRetryMessage(LOGTAG, $"Retry{item.Operation}", ex, "Operation {0} with file {1} attempt {2} of {3} failed with message: {4}", item.Operation, item.RemoteFilename, retryCount + 1, m_options.NumberOfRetries, ex.Message); if (ex is ThreadAbortException || ex is OperationCanceledException) { break; } await m_stats.SendEventAsync(item.Operation, retryCount < m_options.NumberOfRetries ? BackendEventType.Retrying : BackendEventType.Failed, item.RemoteFilename, item.Size); bool recovered = false; if (m_options.AutocreateFolders && ex is FolderMissingException) { try { /* If we successfully create the folder, we can re-use the connection */ worker.Backend.CreateFolder(); recovered = true; } catch (Exception dex) { Logging.Log.WriteWarningMessage(LOGTAG, "FolderCreateError", dex, "Failed to create folder: {0}", ex.Message); } } if (!recovered) { ResetBackend(ex, worker); } if (retryCount == m_options.NumberOfRetries) { throw; } } finally { if (m_options.NoConnectionReuse) { ResetBackend(null, worker); } } } return(false); }
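// Editor's sketch (hedged): the retry shape shared by both DoWithRetry variants above, reduced
// to a standalone helper. RetryAsync is a hypothetical name; the delay placement, attempt
// counting, and cancellation handling mirror the surrounding code, without the backend
// re-creation, folder auto-creation, and event reporting.
using System;
using System.Threading;
using System.Threading.Tasks;

static class RetrySketch
{
    public static async Task<T> RetryAsync<T>(Func<Task<T>> method, int retries, TimeSpan delay, CancellationToken token)
    {
        Exception last = null;
        for (var attempt = 0; attempt <= retries; attempt++)
        {
            // Back off before every attempt except the first, as the real code does.
            if (attempt != 0 && delay.Ticks != 0)
                await Task.Delay(delay, token).ConfigureAwait(false);
            token.ThrowIfCancellationRequested();
            try { return await method().ConfigureAwait(false); }
            catch (OperationCanceledException) { throw; }
            catch (Exception ex) { last = ex; }
        }
        throw last;
    }
}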
private void DoPut(FileEntryItem item) { if (m_encryption != null) lock(m_encryptionLock) item.Encrypt(m_encryption, m_statwriter); if (item.UpdateHashAndSize(m_options) && !item.NotTrackedInDb) m_db.LogDbUpdate(item.RemoteFilename, RemoteVolumeState.Uploading, item.Size, item.Hash); if (item.Indexfile != null && !item.IndexfileUpdated) { item.Indexfile.Item1.FinishVolume(item.Hash, item.Size); item.Indexfile.Item1.Close(); item.IndexfileUpdated = true; } m_db.LogDbOperation("put", item.RemoteFilename, JsonConvert.SerializeObject(new { Size = item.Size, Hash = item.Hash })); m_statwriter.SendEvent(BackendActionType.Put, BackendEventType.Started, item.RemoteFilename, item.Size); if (m_backend is Library.Interface.IStreamingBackend && !m_options.DisableStreamingTransfers) { using (var fs = System.IO.File.OpenRead(item.LocalFilename)) using (var ts = new ThrottledStream(fs, m_options.MaxUploadPrSecond, m_options.MaxDownloadPrSecond)) using (var pgs = new Library.Utility.ProgressReportingStream(ts, item.Size, HandleProgress)) ((Library.Interface.IStreamingBackend)m_backend).Put(item.RemoteFilename, pgs); } else m_backend.Put(item.RemoteFilename, item.LocalFilename); if (!item.NotTrackedInDb) m_db.LogDbUpdate(item.RemoteFilename, RemoteVolumeState.Uploaded, item.Size, item.Hash); m_statwriter.SendEvent(BackendActionType.Put, BackendEventType.Completed, item.RemoteFilename, item.Size); if (m_options.ListVerifyUploads) { var f = m_backend.List().Where(n => n.Name.Equals(item.RemoteFilename, StringComparison.InvariantCultureIgnoreCase)).FirstOrDefault(); if (f == null) throw new Exception(string.Format("List verify failed, file was not found after upload: {0}", item.RemoteFilename)); else if (f.Size != item.Size && f.Size >= 0) throw new Exception(string.Format("List verify failed for file: {0}, size was {1} but expected to be {2}", f.Name, f.Size, item.Size)); } item.DeleteLocalFile(m_statwriter); }
private async Task DoPut(FileEntryItem item, IBackend backend, CancellationToken cancelToken) { if (cancelToken.IsCancellationRequested) { return; } if (item.TrackedInDb) { await m_database.UpdateRemoteVolumeAsync(item.RemoteFilename, RemoteVolumeState.Uploading, item.Size, item.Hash); } if (m_options.Dryrun) { Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadVolume", "Would upload volume: {0}, size: {1}", item.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(item.LocalFilename).Length)); item.DeleteLocalFile(); return; } await m_database.LogRemoteOperationAsync("put", item.RemoteFilename, JsonConvert.SerializeObject(new { Size = item.Size, Hash = item.Hash })); await m_stats.SendEventAsync(BackendActionType.Put, BackendEventType.Started, item.RemoteFilename, item.Size, updateProgress: false); m_progressUpdater.StartFileProgress(item.RemoteFilename, item.Size); var begin = DateTime.Now; if (!m_options.DisableStreamingTransfers && backend is IStreamingBackend streamingBackend) { /* A download throttle speed is not given to the ThrottledStream, as we are only uploading data here */ using (var fs = File.OpenRead(item.LocalFilename)) using (var ts = new ThrottledStream(fs, m_initialUploadThrottleSpeed, 0)) using (var pgs = new ProgressReportingStream(ts, pg => HandleProgress(ts, pg, item.RemoteFilename))) await streamingBackend.PutAsync(item.RemoteFilename, pgs, cancelToken).ConfigureAwait(false); } else { await backend.PutAsync(item.RemoteFilename, item.LocalFilename, cancelToken).ConfigureAwait(false); } var duration = DateTime.Now - begin; m_progressUpdater.EndFileProgress(item.RemoteFilename); Logging.Log.WriteProfilingMessage(LOGTAG, "UploadSpeed", "Uploaded {0} in {1}, {2}/s", Library.Utility.Utility.FormatSizeString(item.Size), duration, Library.Utility.Utility.FormatSizeString((long)(item.Size / duration.TotalSeconds))); if (item.TrackedInDb) { await m_database.UpdateRemoteVolumeAsync(item.RemoteFilename, RemoteVolumeState.Uploaded, item.Size, item.Hash); } await m_stats.SendEventAsync(BackendActionType.Put, BackendEventType.Completed, item.RemoteFilename, item.Size); if (m_options.ListVerifyUploads) { var f = backend.List().FirstOrDefault(n => n.Name.Equals(item.RemoteFilename, StringComparison.OrdinalIgnoreCase)); if (f == null) { throw new Exception(string.Format("List verify failed, file was not found after upload: {0}", item.RemoteFilename)); } else if (f.Size != item.Size && f.Size >= 0) { throw new Exception(string.Format("List verify failed for file: {0}, size was {1} but expected to be {2}", f.Name, f.Size, item.Size)); } } item.DeleteLocalFile(); await m_database.CommitTransactionAsync("CommitAfterUpload"); }
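// Editor's sketch (hedged): ThrottledStream is a Duplicati utility; the core idea of a byte-rate
// cap can be shown standalone. This read-side variant sleeps whenever cumulative throughput gets
// ahead of the allowed rate; ThrottleSketchStream is a hypothetical name, not the project's API.
using System;
using System.Diagnostics;
using System.IO;
using System.Threading;

sealed class ThrottleSketchStream : Stream
{
    private readonly Stream m_inner;
    private readonly long m_bytesPerSecond;
    private readonly Stopwatch m_clock = Stopwatch.StartNew();
    private long m_total;

    public ThrottleSketchStream(Stream inner, long bytesPerSecond) { m_inner = inner; m_bytesPerSecond = bytesPerSecond; }

    public override int Read(byte[] buffer, int offset, int count)
    {
        var n = m_inner.Read(buffer, offset, count);
        m_total += n;
        if (m_bytesPerSecond <= 0) return n; // 0 disables throttling, as in the surrounding code
        // Sleep until the cumulative byte count is again within the configured rate.
        var expectedMs = m_total * 1000.0 / m_bytesPerSecond;
        var aheadMs = expectedMs - m_clock.ElapsedMilliseconds;
        if (aheadMs > 0) Thread.Sleep((int)aheadMs);
        return n;
    }

    public override void Flush() => m_inner.Flush();
    public override bool CanRead => true;
    public override bool CanSeek => false;
    public override bool CanWrite => false;
    public override long Length => m_inner.Length;
    public override long Position { get => m_inner.Position; set => throw new NotSupportedException(); }
    public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
    public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
    public override void SetLength(long value) => throw new NotSupportedException();
}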
private void RenameFileAfterError(FileEntryItem item) { var p = VolumeBase.ParseFilename(item.RemoteFilename); var guid = VolumeWriterBase.GenerateGuid(m_options); var time = p.Time.Ticks == 0 ? p.Time : p.Time.AddSeconds(1); var newname = VolumeBase.GenerateFilename(p.FileType, p.Prefix, guid, time, p.CompressionModule, p.EncryptionModule); var oldname = item.RemoteFilename; m_statwriter.SendEvent(item.BackendActionType, BackendEventType.Rename, oldname, item.Size); m_statwriter.SendEvent(item.BackendActionType, BackendEventType.Rename, newname, item.Size); m_statwriter.AddMessage(string.Format("Renaming \"{0}\" to \"{1}\"", oldname, newname)); m_db.LogDbRename(oldname, newname); item.RemoteFilename = newname; /* If there is an index file attached to the block file, it references the block filename, so we create a new index file which is a copy of the current one, but with the new name */ if (item.Indexfile != null) { if (!item.IndexfileUpdated) { item.Indexfile.Item1.FinishVolume(item.Hash, item.Size); item.Indexfile.Item1.Close(); item.IndexfileUpdated = true; } IndexVolumeWriter wr = null; try { var hashsize = System.Security.Cryptography.HashAlgorithm.Create(m_options.BlockHashAlgorithm).HashSize / 8; wr = new IndexVolumeWriter(m_options); using(var rd = new IndexVolumeReader(p.CompressionModule, item.Indexfile.Item2.LocalFilename, m_options, hashsize)) wr.CopyFrom(rd, x => x == oldname ? newname : x); item.Indexfile.Item1.Dispose(); item.Indexfile = new Tuple<IndexVolumeWriter, FileEntryItem>(wr, item.Indexfile.Item2); item.Indexfile.Item2.LocalTempfile.Dispose(); item.Indexfile.Item2.LocalTempfile = wr.TempFile; wr.Close(); } catch { if (wr != null) try { wr.Dispose(); } catch { } finally { wr = null; } throw; } } }
public static Task Run(BackupDatabase database, Options options, ITaskReader taskreader) { return(AutomationExtensions.RunTask( new { Input = Channels.OutputBlocks.ForRead, Output = Channels.BackendRequest.ForWrite, SpillPickup = Channels.SpillPickup.ForWrite, }, async self => { var noIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.None; var fullIndexFiles = options.IndexfilePolicy == Options.IndexFileStrategy.Full; BlockVolumeWriter blockvolume = null; TemporaryIndexVolume indexvolume = null; try { while (true) { var b = await self.Input.ReadAsync(); /* Lazily start a new block volume */ if (blockvolume == null) { /* Before we start a new volume, probe to see if the block already exists; this delays creation of volumes for differential backups. There can be a race, such that two workers both determine that the block is missing, but this is resolved by the AddBlock call, which runs atomically. */ if (await database.FindBlockIDAsync(b.HashKey, b.Size) >= 0) { b.TaskCompletion.TrySetResult(false); continue; } blockvolume = new BlockVolumeWriter(options); blockvolume.VolumeID = await database.RegisterRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeType.Blocks, RemoteVolumeState.Temporary); indexvolume = noIndexFiles ? null : new TemporaryIndexVolume(options); } var newBlock = await database.AddBlockAsync(b.HashKey, b.Size, blockvolume.VolumeID); b.TaskCompletion.TrySetResult(newBlock); if (newBlock) { blockvolume.AddBlock(b.HashKey, b.Data, b.Offset, (int)b.Size, b.Hint); if (indexvolume != null) { indexvolume.AddBlock(b.HashKey, b.Size); if (b.IsBlocklistHashes && fullIndexFiles) { indexvolume.AddBlockListHash(b.HashKey, b.Size, b.Data); } } /* If the volume is full, send it to upload */ if (blockvolume.Filesize > options.VolumeSize - options.Blocksize) { /* When uploading a new volume, we register the volumes and then flush the transaction; this ensures that the local database and remote storage stay as closely in sync as possible */ await database.UpdateRemoteVolumeAsync(blockvolume.RemoteFilename, RemoteVolumeState.Uploading, -1, null); blockvolume.Close(); await database.CommitTransactionAsync("CommitAddBlockToOutputFlush"); FileEntryItem blockEntry = blockvolume.CreateFileEntryForUpload(options); TemporaryIndexVolume indexVolumeCopy = null; if (indexvolume != null) { indexVolumeCopy = new TemporaryIndexVolume(options); indexvolume.CopyTo(indexVolumeCopy, false); } var uploadRequest = new VolumeUploadRequest(blockvolume, blockEntry, indexVolumeCopy, options, database); blockvolume = null; indexvolume = null; /* Write to the output at the end here, to prevent sending a full volume to the SpillCollector */ await self.Output.WriteAsync(uploadRequest); } } /* We ignore the stop signal, but not pause and terminate */ await taskreader.ProgressAsync; } } catch (Exception ex) { if (ex.IsRetiredException()) { /* If we have collected data, merge all pending volumes into a single volume */ if (blockvolume != null && blockvolume.SourceSize > 0) { await self.SpillPickup.WriteAsync(new SpillVolumeRequest(blockvolume, indexvolume)); } } throw; } })); }
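// Editor's note (hedged worked example): with the commonly cited Duplicati defaults
// (VolumeSize = 50 MiB, Blocksize = 100 KiB), the check above closes and queues a volume as
// soon as its Filesize exceeds 50 MiB - 100 KiB: past that point, one more block (up to
// 100 KiB) could push the volume over its target size. The final, partially filled volume is
// instead handed to the spill collector through the SpillPickup channel.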
private void DoGet(FileEntryItem item) { Library.Utility.TempFile tmpfile = null; m_statwriter.SendEvent(BackendActionType.Get, BackendEventType.Started, item.RemoteFilename, item.Size); try { var begin = DateTime.Now; tmpfile = new Library.Utility.TempFile(); if (m_backend is Library.Interface.IStreamingBackend && !m_options.DisableStreamingTransfers) { using (var fs = System.IO.File.OpenWrite(tmpfile)) using (var ts = new ThrottledStream(fs, m_options.MaxUploadPrSecond, m_options.MaxDownloadPrSecond)) using (var pgs = new Library.Utility.ProgressReportingStream(ts, item.Size, HandleProgress)) ((Library.Interface.IStreamingBackend)m_backend).Get(item.RemoteFilename, pgs); } else m_backend.Get(item.RemoteFilename, tmpfile); var duration = DateTime.Now - begin; Logging.Log.WriteMessage(string.Format("Downloaded {0} in {1}, {2}/s", Library.Utility.Utility.FormatSizeString(item.Size), duration, Library.Utility.Utility.FormatSizeString((long)(item.Size / duration.TotalSeconds))), Duplicati.Library.Logging.LogMessageType.Profiling); m_db.LogDbOperation("get", item.RemoteFilename, JsonConvert.SerializeObject(new { Size = new System.IO.FileInfo(tmpfile).Length, Hash = CalculateFileHash(tmpfile) })); m_statwriter.SendEvent(BackendActionType.Get, BackendEventType.Completed, item.RemoteFilename, new System.IO.FileInfo(tmpfile).Length); if (!m_options.SkipFileHashChecks) { var nl = new System.IO.FileInfo(tmpfile).Length; if (item.Size >= 0) { if (nl != item.Size) throw new Exception(Strings.Controller.DownloadedFileSizeError(item.RemoteFilename, nl, item.Size)); } else item.Size = nl; var nh = CalculateFileHash(tmpfile); if (!string.IsNullOrEmpty(item.Hash)) { if (nh != item.Hash) throw new HashMismatchException(Strings.Controller.HashMismatchError(tmpfile, item.Hash, nh)); } else item.Hash = nh; } if (!item.VerifyHashOnly) { /* Decrypt before returning */ if (!m_options.NoEncryption) { try { using(var tmpfile2 = tmpfile) { tmpfile = new Library.Utility.TempFile(); lock(m_encryptionLock) { /* Auto-guess the encryption module */ var ext = (System.IO.Path.GetExtension(item.RemoteFilename) ?? "").TrimStart('.'); if (!m_encryption.FilenameExtension.Equals(ext, StringComparison.InvariantCultureIgnoreCase)) { /* Check if the file is encrypted with something else */ if (DynamicLoader.EncryptionLoader.Keys.Contains(ext, StringComparer.InvariantCultureIgnoreCase)) { m_statwriter.AddVerboseMessage("Filename extension \"{0}\" does not match encryption module \"{1}\", using matching encryption module", ext, m_options.EncryptionModule); using(var encmodule = DynamicLoader.EncryptionLoader.GetModule(ext, m_options.Passphrase, m_options.RawOptions)) (encmodule ?? m_encryption).Decrypt(tmpfile2, tmpfile); } /* Check if the file is not encrypted */ else if (DynamicLoader.CompressionLoader.Keys.Contains(ext, StringComparer.InvariantCultureIgnoreCase)) { m_statwriter.AddVerboseMessage("Filename extension \"{0}\" does not match encryption module \"{1}\", guessing that it is not encrypted", ext, m_options.EncryptionModule); } /* Fallback, let's see what happens... */ else { m_statwriter.AddVerboseMessage("Filename extension \"{0}\" does not match encryption module \"{1}\", attempting to use specified encryption module as no others match", ext, m_options.EncryptionModule); m_encryption.Decrypt(tmpfile2, tmpfile); } } else { m_encryption.Decrypt(tmpfile2, tmpfile); } } } } catch (Exception ex) { /* If we fail here, make sure that we throw a crypto exception */ if (ex is System.Security.Cryptography.CryptographicException) throw; else throw new System.Security.Cryptography.CryptographicException(ex.Message, ex); } } item.Result = tmpfile; tmpfile = null; } } catch { if (tmpfile != null) tmpfile.Dispose(); throw; } }