public async Task Execute()
{
    while (true)
    {
        var totalSizeRead = await inputStream.ReadAsync(buffer);
        TotalSizeRead += totalSizeRead;

        if (totalSizeRead == 0) // nothing left to read
        {
            storage.Batch(accessor => accessor.CompleteFileUpload(filename));
            FileHash = IOExtensions.GetMD5Hex(md5Hasher.TransformFinalBlock());
            return; // task is done
        }

        ConcurrencyAwareExecutor.Execute(() => storage.Batch(accessor =>
        {
            var hashKey = accessor.InsertPage(buffer, totalSizeRead);
            accessor.AssociatePage(filename, hashKey, pos, totalSizeRead);
        }));

        md5Hasher.TransformBlock(buffer, 0, totalSizeRead);
        pos++;
    }
}
public HttpResponseMessage Delete(string name)
{
    name = RavenFileNameHelper.RavenPath(name);

    try
    {
        ConcurrencyAwareExecutor.Execute(() => Storage.Batch(accessor =>
        {
            AssertFileIsNotBeingSynced(name, accessor, true);

            var fileAndPages = accessor.GetFile(name, 0, 0);
            var metadata = fileAndPages.Metadata;

            if (metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
            {
                throw new FileNotFoundException();
            }

            StorageOperationsTask.IndicateFileToDelete(name);

            if (!name.EndsWith(RavenFileNameHelper.DownloadingFileSuffix) && // don't create a tombstone for .downloading file
                metadata != null) // and if file didn't exist
            {
                var tombstoneMetadata = new RavenJObject
                {
                    {
                        SynchronizationConstants.RavenSynchronizationHistory,
                        metadata[SynchronizationConstants.RavenSynchronizationHistory]
                    },
                    {
                        SynchronizationConstants.RavenSynchronizationVersion,
                        metadata[SynchronizationConstants.RavenSynchronizationVersion]
                    },
                    {
                        SynchronizationConstants.RavenSynchronizationSource,
                        metadata[SynchronizationConstants.RavenSynchronizationSource]
                    }
                }.WithDeleteMarker();

                Historian.UpdateLastModified(tombstoneMetadata);

                accessor.PutFile(name, 0, tombstoneMetadata, true);
                accessor.DeleteConfig(RavenFileNameHelper.ConflictConfigNameForFile(name)); // delete conflict item too
            }
        }), ConcurrencyResponseException);
    }
    catch (FileNotFoundException)
    {
        return new HttpResponseMessage(HttpStatusCode.NotFound);
    }

    Publisher.Publish(new FileChangeNotification { File = FilePathTools.Cannoicalise(name), Action = FileChangeAction.Delete });

    log.Debug("File '{0}' was deleted", name);

    StartSynchronizeDestinationsInBackground();

    return GetEmptyMessage(HttpStatusCode.NoContent);
}
public async Task Execute()
{
    while (true)
    {
        var read = await inputStream.ReadAsync(buffer);
        TotalSizeRead += read;

        if (read == 0) // nothing left to read
        {
            FileHash = IOExtensions.GetMD5Hex(md5Hasher.TransformFinalBlock());
            headers["Content-MD5"] = FileHash;

            storage.Batch(accessor =>
            {
                accessor.CompleteFileUpload(filename);
                putTriggers.Apply(trigger => trigger.AfterUpload(filename, headers));
            });

            return; // task is done
        }

        ConcurrencyAwareExecutor.Execute(() => storage.Batch(accessor =>
        {
            var hashKey = accessor.InsertPage(buffer, read);
            accessor.AssociatePage(filename, hashKey, pos, read);

            putTriggers.Apply(trigger => trigger.OnUpload(filename, headers, hashKey, pos, read));
        }));

        md5Hasher.TransformBlock(buffer, 0, read);
        pos++;
    }
}
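// The two Execute loops above stream the request body in buffer-sized chunks, hashing each chunk
// with TransformBlock and closing the hash with TransformFinalBlock once the stream is exhausted.
// Below is a minimal, self-contained sketch of that incremental MD5 pattern using only the standard
// System.Security.Cryptography API; the class name, helper name, and buffer size are illustrative
// and not taken from the code above, and the hex formatting is only roughly what IOExtensions.GetMD5Hex
// is presumed to produce.
using System;
using System.IO;
using System.Security.Cryptography;

public static class IncrementalMd5Sketch
{
    public static string ComputeMd5Hex(Stream input)
    {
        var buffer = new byte[64 * 1024]; // chunk size chosen arbitrarily for the sketch

        using (var md5 = MD5.Create())
        {
            int read;
            while ((read = input.Read(buffer, 0, buffer.Length)) > 0)
            {
                // feed each chunk into the running hash, analogous to md5Hasher.TransformBlock above
                md5.TransformBlock(buffer, 0, read, null, 0);
            }

            // an empty final block closes the hash, analogous to md5Hasher.TransformFinalBlock()
            md5.TransformFinalBlock(new byte[0], 0, 0);

            // render the digest as lowercase hex
            return BitConverter.ToString(md5.Hash).Replace("-", "").ToLowerInvariant();
        }
    }
}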
public HttpResponseMessage Patch(string name, string rename)
{
    name = RavenFileNameHelper.RavenPath(name);
    rename = RavenFileNameHelper.RavenPath(rename);

    try
    {
        ConcurrencyAwareExecutor.Execute(() => Storage.Batch(accessor =>
        {
            AssertFileIsNotBeingSynced(name, accessor, true);

            var metadata = accessor.GetFile(name, 0, 0).Metadata;

            if (metadata.Keys.Contains(SynchronizationConstants.RavenDeleteMarker))
            {
                throw new FileNotFoundException();
            }

            var existingHeader = accessor.ReadFile(rename);
            if (existingHeader != null && !existingHeader.Metadata.ContainsKey(SynchronizationConstants.RavenDeleteMarker))
            {
                throw new HttpResponseException(
                    Request.CreateResponse(HttpStatusCode.Forbidden,
                        new InvalidOperationException("Cannot rename because file " + rename + " already exists")));
            }

            Historian.UpdateLastModified(metadata);

            var operation = new RenameFileOperation
            {
                FileSystem = FileSystem.Name,
                Name = name,
                Rename = rename,
                MetadataAfterOperation = metadata
            };

            accessor.SetConfig(RavenFileNameHelper.RenameOperationConfigNameForFile(name), JsonExtensions.ToJObject(operation));
            accessor.PulseTransaction(); // commit rename operation config

            StorageOperationsTask.RenameFile(operation);
        }), ConcurrencyResponseException);
    }
    catch (FileNotFoundException)
    {
        log.Debug("Cannot rename a file '{0}' to '{1}' because a file was not found", name, rename);
        return GetEmptyMessage(HttpStatusCode.NotFound);
    }

    log.Debug("File '{0}' was renamed to '{1}'", name, rename);

    StartSynchronizeDestinationsInBackground();

    return GetMessageWithString("", HttpStatusCode.NoContent);
}
public Task ResumeFileRenamingAsync()
{
    var filesToRename = new List<RenameFileOperation>();

    storage.Batch(accessor =>
    {
        var renameOpConfigs = accessor.GetConfigsStartWithPrefix(RavenFileNameHelper.RenameOperationConfigPrefix, 0, 10);

        filesToRename = renameOpConfigs.Select(config => config.JsonDeserialization<RenameFileOperation>()).ToList();
    });

    if (filesToRename.Count == 0)
    {
        return Task.FromResult<object>(null);
    }

    var tasks = new List<Task>();

    foreach (var item in filesToRename)
    {
        var renameOperation = item;

        if (IsRenameInProgress(renameOperation.Name))
        {
            continue;
        }

        Log.Debug("Starting to resume a rename operation of a file '{0}' to '{1}'", renameOperation.Name, renameOperation.Rename);

        var renameTask = Task.Run(() =>
        {
            try
            {
                ConcurrencyAwareExecutor.Execute(() => RenameFile(renameOperation), retries: 1);
                Log.Debug("File '{0}' was renamed to '{1}'", renameOperation.Name, renameOperation.Rename);
            }
            catch (Exception e)
            {
                Log.WarnException(
                    string.Format("Could not rename file '{0}' to '{1}'", renameOperation.Name, renameOperation.Rename), e);
                throw;
            }
        });

        renameFileTasks.AddOrUpdate(renameOperation.Name, renameTask, (file, oldTask) => renameTask);

        tasks.Add(renameTask);
    }

    return Task.WhenAll(tasks);
}
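// ConcurrencyAwareExecutor.Execute is used throughout these methods, but its implementation is not
// shown here. The following is only a minimal sketch of what such a helper could look like, assuming
// it retries the wrapped batch on an optimistic-concurrency conflict and optionally maps the final
// failure through a caller-supplied converter (the role ConcurrencyResponseException plays in the
// controller calls above). The exception type, signatures, and retry count are illustrative assumptions.
using System;

public static class RetryOnConflictSketch
{
    // Hypothetical stand-in for the storage-level optimistic concurrency exception.
    public class ConflictException : Exception { }

    public static void Execute(Action action, Func<ConflictException, Exception> onFinalFailure = null, int retries = 3)
    {
        for (var attempt = 0; ; attempt++)
        {
            try
            {
                action();
                return;
            }
            catch (ConflictException e)
            {
                if (attempt < retries)
                    continue; // another writer got in first; run the whole batch again

                // out of retries: surface either the mapped exception or the original one
                throw onFinalFailure != null ? onFinalFailure(e) : e;
            }
        }
    }
}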
public HttpResponseMessage Delete(string name)
{
    ConcurrencyAwareExecutor.Execute(() => Storage.Batch(accessor => accessor.DeleteConfig(name)), ConcurrencyResponseException);

    Publisher.Publish(new ConfigurationChangeNotification { Name = name, Action = ConfigurationChangeAction.Delete });

    Log.Debug("Config '{0}' was deleted", name);

    return GetEmptyMessage(HttpStatusCode.NoContent);
}
public async Task<HttpResponseMessage> Put(string name)
{
    var json = await ReadJsonAsync();

    ConcurrencyAwareExecutor.Execute(() => Storage.Batch(accessor => accessor.SetConfig(name, json)), ConcurrencyResponseException);

    Publisher.Publish(new ConfigurationChangeNotification { Name = name, Action = ConfigurationChangeAction.Set });

    Log.Debug("Config '{0}' was inserted", name);

    return this.GetMessageWithObject(json, HttpStatusCode.Created)
               .WithNoCache();
}
public HttpResponseMessage Post(string name)
{
    name = RavenFileNameHelper.RavenPath(name);

    var headers = this.GetFilteredMetadataFromHeaders(InnerHeaders);

    Historian.UpdateLastModified(headers);
    Historian.Update(name, headers);

    try
    {
        ConcurrencyAwareExecutor.Execute(() => Storage.Batch(accessor =>
        {
            AssertFileIsNotBeingSynced(name, accessor, true);
            accessor.UpdateFileMetadata(name, headers);
        }), ConcurrencyResponseException);
    }
    catch (FileNotFoundException)
    {
        log.Debug("Cannot update metadata because file '{0}' was not found", name);
        return GetEmptyMessage(HttpStatusCode.NotFound);
    }

    Search.Index(name, headers);

    Publisher.Publish(new FileChangeNotification { File = FilePathTools.Cannoicalise(name), Action = FileChangeAction.Update });

    StartSynchronizeDestinationsInBackground();

    log.Debug("Metadata of a file '{0}' was updated", name);

    // Hack needed by jquery on the client side. We need to find a better solution for this
    return GetEmptyMessage(HttpStatusCode.NoContent);
}
public Task CleanupDeletedFilesAsync()
{
    var filesToDelete = new List<DeleteFileOperation>();

    storage.Batch(accessor =>
        filesToDelete = accessor.GetConfigsStartWithPrefix(RavenFileNameHelper.DeleteOperationConfigPrefix, 0, 10)
                                .Select(config => config.JsonDeserialization<DeleteFileOperation>())
                                .ToList());

    if (filesToDelete.Count == 0)
    {
        return Task.FromResult<object>(null);
    }

    var tasks = new List<Task>();

    foreach (var fileToDelete in filesToDelete)
    {
        var deletingFileName = fileToDelete.CurrentFileName;

        if (IsDeleteInProgress(deletingFileName))
        {
            continue;
        }

        if (IsUploadInProgress(fileToDelete.OriginalFileName))
        {
            continue;
        }

        if (IsSynchronizationInProgress(fileToDelete.OriginalFileName))
        {
            continue;
        }

        if (fileToDelete.OriginalFileName.EndsWith(RavenFileNameHelper.DownloadingFileSuffix)) // if it's .downloading file
        {
            if (IsSynchronizationInProgress(SynchronizedFileName(fileToDelete.OriginalFileName))) // and file is being synced
            {
                continue;
            }
        }

        Log.Debug("Starting to delete file '{0}' from storage", deletingFileName);

        var deleteTask = Task.Run(() =>
        {
            try
            {
                ConcurrencyAwareExecutor.Execute(() => storage.Batch(accessor => accessor.Delete(deletingFileName)), retries: 1);
            }
            catch (Exception e)
            {
                Log.WarnException(string.Format("Could not delete file '{0}' from storage", deletingFileName), e);
                return;
            }

            var configName = RavenFileNameHelper.DeleteOperationConfigNameForFile(deletingFileName);

            storage.Batch(accessor => accessor.DeleteConfig(configName));

            notificationPublisher.Publish(new ConfigurationChangeNotification { Name = configName, Action = ConfigurationChangeAction.Delete });

            Log.Debug("File '{0}' was deleted from storage", deletingFileName);
        });

        deleteFileTasks.AddOrUpdate(deletingFileName, deleteTask, (file, oldTask) => deleteTask);

        tasks.Add(deleteTask);
    }

    return Task.WhenAll(tasks);
}
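// Both ResumeFileRenamingAsync and CleanupDeletedFilesAsync keep one background Task per file name
// (renameFileTasks / deleteFileTasks) and skip files whose previous task is still running. Below is a
// minimal sketch of that bookkeeping, assuming a ConcurrentDictionary<string, Task> and an "in progress"
// check based on Task completion; the real IsDeleteInProgress / IsRenameInProgress implementations are
// not shown in this excerpt, so the class and method names here are illustrative only.
using System;
using System.Collections.Concurrent;
using System.Threading.Tasks;

public class InFlightTaskTrackerSketch
{
    private readonly ConcurrentDictionary<string, Task> tasksByFile = new ConcurrentDictionary<string, Task>();

    public bool IsInProgress(string fileName)
    {
        Task existing;
        // a file counts as "in progress" while its last scheduled task has not finished yet
        return tasksByFile.TryGetValue(fileName, out existing) && !existing.IsCompleted;
    }

    public Task Schedule(string fileName, Action work)
    {
        var task = Task.Run(work);

        // replace any previous (completed) task for this file with the new one,
        // mirroring the AddOrUpdate calls in the methods above
        tasksByFile.AddOrUpdate(fileName, task, (file, oldTask) => task);

        return task;
    }
}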
public async Task<HttpResponseMessage> Put(string name, string uploadId = null)
{
    try
    {
        FileSystem.MetricsCounters.FilesPerSecond.Mark();

        name = RavenFileNameHelper.RavenPath(name);

        var headers = this.GetFilteredMetadataFromHeaders(InnerHeaders);

        Historian.UpdateLastModified(headers);
        headers["Creation-Date"] = DateTime.UtcNow.ToString("yyyy-MM-ddTHH:mm:ss.fffffff", CultureInfo.InvariantCulture);
        Historian.Update(name, headers);

        SynchronizationTask.Cancel(name);

        long? size = -1;

        ConcurrencyAwareExecutor.Execute(() => Storage.Batch(accessor =>
        {
            AssertFileIsNotBeingSynced(name, accessor, true);
            StorageOperationsTask.IndicateFileToDelete(name);

            var contentLength = Request.Content.Headers.ContentLength;
            var sizeHeader = GetHeader("RavenFS-size");
            long sizeForParse;

            if (contentLength == 0 || long.TryParse(sizeHeader, out sizeForParse) == false)
            {
                size = contentLength;
                if (Request.Headers.TransferEncodingChunked ?? false)
                {
                    size = null;
                }
            }
            else
            {
                size = sizeForParse;
            }

            accessor.PutFile(name, size, headers);

            Search.Index(name, headers);
        }));

        log.Debug("Inserted a new file '{0}' with ETag {1}", name, headers.Value<Guid>(Constants.MetadataEtagField));

        using (var contentStream = await Request.Content.ReadAsStreamAsync())
        using (var readFileToDatabase = new ReadFileToDatabase(BufferPool, Storage, contentStream, name))
        {
            await readFileToDatabase.Execute();

            if (readFileToDatabase.TotalSizeRead != size)
            {
                Storage.Batch(accessor => { StorageOperationsTask.IndicateFileToDelete(name); });
                throw new HttpResponseException(HttpStatusCode.BadRequest);
            }

            Historian.UpdateLastModified(headers); // update with the final file size

            log.Debug("File '{0}' was uploaded. Starting to update file metadata and indexes", name);

            headers["Content-MD5"] = readFileToDatabase.FileHash;

            Storage.Batch(accessor => accessor.UpdateFileMetadata(name, headers));

            int totalSizeRead = readFileToDatabase.TotalSizeRead;
            headers["Content-Length"] = totalSizeRead.ToString(CultureInfo.InvariantCulture);

            Search.Index(name, headers);
            Publisher.Publish(new FileChangeNotification { Action = FileChangeAction.Add, File = FilePathTools.Cannoicalise(name) });

            log.Debug("Updates of '{0}' metadata and indexes were finished. New file ETag is {1}", name, headers.Value<Guid>(Constants.MetadataEtagField));

            StartSynchronizeDestinationsInBackground();
        }
    }
    catch (Exception ex)
    {
        if (uploadId != null)
        {
            Guid uploadIdentifier;
            if (Guid.TryParse(uploadId, out uploadIdentifier))
            {
                Publisher.Publish(new CancellationNotification { UploadId = uploadIdentifier, File = name });
            }
        }

        log.WarnException(string.Format("Failed to upload a file '{0}'", name), ex);

        var concurrencyException = ex as ConcurrencyException;
        if (concurrencyException != null)
        {
            throw ConcurrencyResponseException(concurrencyException);
        }

        throw;
    }

    return GetEmptyMessage(HttpStatusCode.Created);
}
public async Task<HttpResponseMessage> Put(string name, string uploadId = null, bool preserveTimestamps = false)
{
    try
    {
        FileSystem.MetricsCounters.FilesPerSecond.Mark();

        name = FileHeader.Canonize(name);

        var headers = this.GetFilteredMetadataFromHeaders(ReadInnerHeaders);

        if (preserveTimestamps)
        {
            if (!headers.ContainsKey(Constants.RavenCreationDate))
            {
                if (headers.ContainsKey(Constants.CreationDate))
                {
                    headers[Constants.RavenCreationDate] = headers[Constants.CreationDate];
                }
                else
                {
                    throw new InvalidOperationException("Preserve Timestamps requires that the client includes the Raven-Creation-Date header.");
                }
            }

            var lastModified = GetHeader(Constants.RavenLastModified);
            if (lastModified != null)
            {
                DateTimeOffset when;
                if (!DateTimeOffset.TryParse(lastModified, out when))
                {
                    when = DateTimeOffset.UtcNow;
                }

                Historian.UpdateLastModified(headers, when);
            }
            else
            {
                Historian.UpdateLastModified(headers);
            }
        }
        else
        {
            headers[Constants.RavenCreationDate] = DateTimeOffset.UtcNow;
            Historian.UpdateLastModified(headers);
        }

        // TODO: To keep current filesystems working. We should remove when adding a new migration.
        headers[Constants.CreationDate] = headers[Constants.RavenCreationDate].Value<DateTimeOffset>().ToString("yyyy-MM-ddTHH:mm:ss.fffffffZ", CultureInfo.InvariantCulture);

        Historian.Update(name, headers);

        SynchronizationTask.Cancel(name);

        long? size = -1;

        ConcurrencyAwareExecutor.Execute(() => Storage.Batch(accessor =>
        {
            AssertPutOperationNotVetoed(name, headers);
            AssertFileIsNotBeingSynced(name, accessor, true);

            var contentLength = Request.Content.Headers.ContentLength;
            var sizeHeader = GetHeader("RavenFS-size");
            long sizeForParse;

            if (contentLength == 0 || long.TryParse(sizeHeader, out sizeForParse) == false)
            {
                size = contentLength;
                if (Request.Headers.TransferEncodingChunked ?? false)
                {
                    size = null;
                }
            }
            else
            {
                size = sizeForParse;
            }

            FileSystem.PutTriggers.Apply(trigger => trigger.OnPut(name, headers));

            using (FileSystem.DisableAllTriggersForCurrentThread())
            {
                StorageOperationsTask.IndicateFileToDelete(name);
            }

            accessor.PutFile(name, size, headers);

            FileSystem.PutTriggers.Apply(trigger => trigger.AfterPut(name, size, headers));

            Search.Index(name, headers);
        }));

        log.Debug("Inserted a new file '{0}' with ETag {1}", name, headers.Value<Guid>(Constants.MetadataEtagField));

        using (var contentStream = await Request.Content.ReadAsStreamAsync())
        using (var readFileToDatabase = new ReadFileToDatabase(BufferPool, Storage, FileSystem.PutTriggers, contentStream, name, headers))
        {
            await readFileToDatabase.Execute();

            if (readFileToDatabase.TotalSizeRead != size)
            {
                StorageOperationsTask.IndicateFileToDelete(name);
                throw new HttpResponseException(HttpStatusCode.BadRequest);
            }

            if (!preserveTimestamps)
            {
                Historian.UpdateLastModified(headers); // update with the final file size.
            }

            log.Debug("File '{0}' was uploaded. Starting to update file metadata and indexes", name);

            headers["Content-MD5"] = readFileToDatabase.FileHash;

            Storage.Batch(accessor => accessor.UpdateFileMetadata(name, headers));

            int totalSizeRead = readFileToDatabase.TotalSizeRead;
            headers["Content-Length"] = totalSizeRead.ToString(CultureInfo.InvariantCulture);

            Search.Index(name, headers);
            Publisher.Publish(new FileChangeNotification { Action = FileChangeAction.Add, File = FilePathTools.Cannoicalise(name) });

            log.Debug("Updates of '{0}' metadata and indexes were finished. New file ETag is {1}", name, headers.Value<Guid>(Constants.MetadataEtagField));

            StartSynchronizeDestinationsInBackground();
        }
    }
    catch (Exception ex)
    {
        if (uploadId != null)
        {
            Guid uploadIdentifier;
            if (Guid.TryParse(uploadId, out uploadIdentifier))
            {
                Publisher.Publish(new CancellationNotification { UploadId = uploadIdentifier, File = name });
            }
        }

        log.WarnException(string.Format("Failed to upload a file '{0}'", name), ex);

        var concurrencyException = ex as ConcurrencyException;
        if (concurrencyException != null)
        {
            throw ConcurrencyResponseException(concurrencyException);
        }

        throw;
    }

    return GetEmptyMessage(HttpStatusCode.Created);
}