private static FileEntryItem CreateFileEntryForUpload(VolumeWriterBase volume, Options options)
{
    var fileEntry = new FileEntryItem(BackendActionType.Put, volume.RemoteFilename);
    fileEntry.SetLocalfilename(volume.LocalFilename);
    fileEntry.Encrypt(options);
    fileEntry.UpdateHashAndSize(options);
    return fileEntry;
}

private async Task UploadVolumeWriter(VolumeWriterBase volumeWriter, Worker worker, CancellationToken cancelToken)
{
    var fileEntry = new FileEntryItem(BackendActionType.Put, volumeWriter.RemoteFilename);
    fileEntry.SetLocalfilename(volumeWriter.LocalFilename);
    fileEntry.Encrypt(m_options);
    fileEntry.UpdateHashAndSize(m_options);

    await UploadFileAsync(fileEntry, worker, cancelToken).ConfigureAwait(false);
}

private async Task<bool> DoPut(FileEntryItem item, bool updatedHash = false)
{
    // If this is not already encrypted, do it now
    item.Encrypt(m_options);
    updatedHash |= item.UpdateHashAndSize(m_options);

    if (updatedHash && item.TrackedInDb)
        await m_database.UpdateRemoteVolumeAsync(item.RemoteFilename, RemoteVolumeState.Uploading, item.Size, item.Hash);

    if (m_options.Dryrun)
    {
        Logging.Log.WriteDryrunMessage(LOGTAG, "WouldUploadVolume", "Would upload volume: {0}, size: {1}", item.RemoteFilename, Library.Utility.Utility.FormatSizeString(new FileInfo(item.LocalFilename).Length));
        item.DeleteLocalFile();
        return true;
    }

    await m_database.LogRemoteOperationAsync("put", item.RemoteFilename, JsonConvert.SerializeObject(new { Size = item.Size, Hash = item.Hash }));
    await m_stats.SendEventAsync(BackendActionType.Put, BackendEventType.Started, item.RemoteFilename, item.Size);

    var begin = DateTime.Now;

    if (m_backend is Library.Interface.IStreamingBackend && !m_options.DisableStreamingTransfers)
    {
        using (var fs = System.IO.File.OpenRead(item.LocalFilename))
        using (var ts = new ThrottledStream(fs, m_options.MaxUploadPrSecond, m_options.MaxDownloadPrSecond))
        using (var pgs = new Library.Utility.ProgressReportingStream(ts, item.Size, pg => HandleProgress(ts, pg)))
            ((Library.Interface.IStreamingBackend)m_backend).Put(item.RemoteFilename, pgs);
    }
    else
    {
        m_backend.Put(item.RemoteFilename, item.LocalFilename);
    }

    var duration = DateTime.Now - begin;
    Logging.Log.WriteProfilingMessage(LOGTAG, "UploadSpeed", "Uploaded {0} in {1}, {2}/s", Library.Utility.Utility.FormatSizeString(item.Size), duration, Library.Utility.Utility.FormatSizeString((long)(item.Size / duration.TotalSeconds)));

    if (item.TrackedInDb)
        await m_database.UpdateRemoteVolumeAsync(item.RemoteFilename, RemoteVolumeState.Uploaded, item.Size, item.Hash);

    await m_stats.SendEventAsync(BackendActionType.Put, BackendEventType.Completed, item.RemoteFilename, item.Size);

    if (m_options.ListVerifyUploads)
    {
        var f = m_backend.List().Where(n => n.Name.Equals(item.RemoteFilename, StringComparison.OrdinalIgnoreCase)).FirstOrDefault();
        if (f == null)
            throw new Exception(string.Format("List verify failed, file was not found after upload: {0}", item.RemoteFilename));
        else if (f.Size != item.Size && f.Size >= 0)
            throw new Exception(string.Format("List verify failed for file: {0}, size was {1} but expected to be {2}", f.Name, f.Size, item.Size));
    }

    item.DeleteLocalFile();
    await m_database.CommitTransactionAsync("CommitAfterUpload");
    return true;
}

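// HandleProgress is referenced above but not shown. Below is a minimal sketch
// of what it could look like, assuming the (ThrottledStream, long) signature
// implied by the call site `pg => HandleProgress(ts, pg)`; the body and the
// m_stats.UpdateBackendProgress call are assumptions, not taken from this code.
private void HandleProgress(ThrottledStream ts, long pg)
{
    // Forward the number of bytes transferred so far to the stats collector;
    // a fuller version might also re-apply changed throttle limits via ts.
    m_stats.UpdateBackendProgress(pg);
}
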
public async Task UploadFileAsync(VolumeWriterBase item, Func<string, Task<IndexVolumeWriter>> createIndexFile = null)
{
    var fe = new FileEntryItem(BackendActionType.Put, item.RemoteFilename);
    fe.SetLocalfilename(item.LocalFilename);

    var tcs = new TaskCompletionSource<bool>();

    var backgroundhashAndEncrypt = Task.Run(() =>
    {
        fe.Encrypt(m_options);
        return fe.UpdateHashAndSize(m_options);
    });

    await RunOnMain(async () =>
    {
        try
        {
            await DoWithRetry(fe, async () =>
            {
                if (fe.IsRetry)
                    await RenameFileAfterErrorAsync(fe).ConfigureAwait(false);

                // Make sure the encryption and hashing have completed
                await backgroundhashAndEncrypt.ConfigureAwait(false);

                return await DoPut(fe).ConfigureAwait(false);
            }).ConfigureAwait(false);

            if (createIndexFile != null)
            {
                var ix = await createIndexFile(fe.RemoteFilename).ConfigureAwait(false);
                var indexFile = new FileEntryItem(BackendActionType.Put, ix.RemoteFilename);
                indexFile.SetLocalfilename(ix.LocalFilename);

                await m_database.UpdateRemoteVolumeAsync(indexFile.RemoteFilename, RemoteVolumeState.Uploading, -1, null);

                await DoWithRetry(indexFile, async () =>
                {
                    if (indexFile.IsRetry)
                        await RenameFileAfterErrorAsync(indexFile).ConfigureAwait(false);

                    var res = await DoPut(indexFile).ConfigureAwait(false);

                    // Register that the index file is tracking the block file
                    await m_database.AddIndexBlockLinkAsync(
                        ix.VolumeID,
                        await m_database.GetRemoteVolumeIDAsync(fe.RemoteFilename)
                    ).ConfigureAwait(false);

                    return res;
                });
            }

            tcs.TrySetResult(true);
        }
        catch (Exception ex)
        {
            if (ex is System.Threading.ThreadAbortException)
                tcs.TrySetCanceled();
            else
                tcs.TrySetException(ex);
        }
    });

    await tcs.Task.ConfigureAwait(false);
}

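// DoWithRetry is used above but not shown. A minimal sketch of the shape such
// a helper could take, assuming retry settings exist on Options; the property
// names NumberOfRetries and RetryDelay are assumptions, not from this code.
private async Task<T> DoWithRetry<T>(FileEntryItem item, Func<Task<T>> method)
{
    for (var attempt = 0; ; attempt++)
    {
        try
        {
            return await method().ConfigureAwait(false);
        }
        catch when (attempt < m_options.NumberOfRetries)
        {
            // Mark the entry so the next attempt renames the remote file first
            item.IsRetry = true;
            await Task.Delay(m_options.RetryDelay).ConfigureAwait(false);
        }
    }
}
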
private void DoPut(FileEntryItem item)
{
    if (m_encryption != null)
        lock (m_encryptionLock)
            item.Encrypt(m_encryption, m_statwriter);

    if (item.UpdateHashAndSize(m_options) && !item.NotTrackedInDb)
        m_db.LogDbUpdate(item.RemoteFilename, RemoteVolumeState.Uploading, item.Size, item.Hash);

    if (item.Indexfile != null && !item.IndexfileUpdated)
    {
        item.Indexfile.Item1.FinishVolume(item.Hash, item.Size);
        item.Indexfile.Item1.Close();
        item.IndexfileUpdated = true;
    }

    m_db.LogDbOperation("put", item.RemoteFilename, JsonConvert.SerializeObject(new { Size = item.Size, Hash = item.Hash }));
    m_statwriter.SendEvent(BackendActionType.Put, BackendEventType.Started, item.RemoteFilename, item.Size);

    if (m_backend is Library.Interface.IStreamingBackend && !m_options.DisableStreamingTransfers)
    {
        // ThrottledStream takes (readspeed, writespeed); uploads read from the
        // local file, so the upload limit goes first, as in DoPut above
        using (var fs = System.IO.File.OpenRead(item.LocalFilename))
        using (var ts = new ThrottledStream(fs, m_options.MaxUploadPrSecond, m_options.MaxDownloadPrSecond))
        using (var pgs = new Library.Utility.ProgressReportingStream(ts, item.Size, HandleProgress))
            ((Library.Interface.IStreamingBackend)m_backend).Put(item.RemoteFilename, pgs);
    }
    else
    {
        m_backend.Put(item.RemoteFilename, item.LocalFilename);
    }

    if (!item.NotTrackedInDb)
        m_db.LogDbUpdate(item.RemoteFilename, RemoteVolumeState.Uploaded, item.Size, item.Hash);

    m_statwriter.SendEvent(BackendActionType.Put, BackendEventType.Completed, item.RemoteFilename, item.Size);

    if (m_options.ListVerifyUploads)
    {
        var f = m_backend.List().Where(n => n.Name.Equals(item.RemoteFilename, StringComparison.InvariantCultureIgnoreCase)).FirstOrDefault();
        if (f == null)
            // f is null in this branch, so report the expected filename
            throw new Exception(string.Format("List verify failed, file was not found after upload: {0}", item.RemoteFilename));
        else if (f.Size != item.Size && f.Size >= 0)
            throw new Exception(string.Format("List verify failed for file: {0}, size was {1} but expected to be {2}", f.Name, f.Size, item.Size));
    }

    item.DeleteLocalFile(m_statwriter);
}

public void Put(VolumeWriterBase item, IndexVolumeWriter indexfile = null)
{
    if (m_lastException != null)
        throw m_lastException;

    item.Close();
    m_db.LogDbUpdate(item.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
    var req = new FileEntryItem(OperationType.Put, item.RemoteFilename, null);
    req.LocalTempfile = item.TempFile;

    if (m_lastException != null)
        throw m_lastException;

    FileEntryItem req2 = null;

    // As the network link is the bottleneck,
    // we encrypt the dblock volume before the
    // upload is enqueued (i.e. on the worker thread)
    if (m_encryption != null)
        lock (m_encryptionLock)
            req.Encrypt(m_encryption, m_statwriter);

    req.UpdateHashAndSize(m_options);

    // We do not encrypt the dindex volume, because it is small,
    // and may need to be re-written if the dblock upload is retried
    if (indexfile != null)
    {
        m_db.LogDbUpdate(indexfile.RemoteFilename, RemoteVolumeState.Uploading, -1, null);
        req2 = new FileEntryItem(OperationType.Put, indexfile.RemoteFilename);
        req2.LocalTempfile = indexfile.TempFile;
        req.Indexfile = new Tuple<IndexVolumeWriter, FileEntryItem>(indexfile, req2);
    }

    if (m_queue.Enqueue(req) && m_options.SynchronousUpload)
    {
        req.WaitForComplete();
        if (req.Exception != null)
            throw req.Exception;
    }

    if (req2 != null && m_queue.Enqueue(req2) && m_options.SynchronousUpload)
    {
        req2.WaitForComplete();
        if (req2.Exception != null)
            throw req2.Exception;
    }

    if (m_lastException != null)
        throw m_lastException;
}

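// Hypothetical call site (sketch): hand a finished dblock volume and its
// dindex volume to the queue above. With the synchronous-upload option set,
// Put blocks until each transfer completes and rethrows any recorded
// exception; the m_backendManager field name is illustrative only.
private void UploadFinishedVolume(VolumeWriterBase blockVolume, IndexVolumeWriter indexVolume)
{
    m_backendManager.Put(blockVolume, indexVolume);
}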