public Task PermanentDelete(StoreLocation location)
{
    var blob = GetBlockBlob(location);
    LeoTrace.WriteLine("Deleted blob: " + blob.Name);
    return blob.ExecuteWrap(b => b.DeleteIfExistsAsync(DeleteSnapshotsOption.IncludeSnapshots, null, null, null));
}
public async Task<ObjectWithMetadata<T>> LoadObject<T>(StoreLocation location, string snapshot = null, IEncryptor encryptor = null)
    where T : ObjectWithAuditInfo
{
    var data = await LoadData(location, snapshot, encryptor);
    if (data == null) { return null; }

    if (!data.Metadata.ContainsKey(MetadataConstants.TypeMetadataKey))
    {
        LeoTrace.WriteLine(string.Format("Warning: Data type is not in metadata. expected {0}", typeof(T).FullName));
    }
    else if (data.Metadata[MetadataConstants.TypeMetadataKey] != typeof(T).FullName)
    {
        LeoTrace.WriteLine(string.Format("Warning: Data type does not match metadata. actual '{0}' vs expected '{1}'", data.Metadata[MetadataConstants.TypeMetadataKey], typeof(T).FullName));
    }

    LeoTrace.WriteLine("Getting data object: " + location);
    var strData = await data.Stream.ReadBytes();
    var str = Encoding.UTF8.GetString(strData, 0, strData.Length);
    var obj = JsonConvert.DeserializeObject<T>(str);
    obj.Audit = data.Metadata.Audit;

    LeoTrace.WriteLine("Returning data object: " + location);
    return new ObjectWithMetadata<T>(obj, data.Metadata);
}
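// Hypothetical usage sketch for LoadObject<T> above (not part of the original source).
// Assumes a store instance exposing that method; the IObjectStore interface name,
// the StoreLocation constructor, the UserProfile type and the "Data" property on
// ObjectWithMetadata<T> are illustrative assumptions, not confirmed API.
public async Task<string> GetProfileName(IObjectStore store, long id)
{
    var location = new StoreLocation("profiles", "users", id); // assumed constructor shape
    var result = await store.LoadObject<UserProfile>(location);
    if (result == null) { return null; } // blob missing or soft-deleted

    // Deserialized object and its blob metadata travel together in the result
    return result.Data.Name; // property names assumed for illustration
}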
public async Task SoftDelete(StoreLocation location, UpdateAuditInfo audit)
{
    // In Azure we cannot delete the blob as this would lose the snapshots
    // Instead we will just add some metadata
    var blob = GetBlockBlob(location);
    var props = await GetBlobProperties(blob);
    var meta = await GetActualMetadata(blob, props, null);

    // If already deleted don't worry about it!
    if (meta == null || meta.ContainsKey(_deletedKey)) { return; }

    var fullInfo = TransformAuditInformation(meta, audit);
    meta.Audit = fullInfo;

    props.Metadata[MetadataConstants.AuditMetadataKey] = AzureStoreMetadataEncoder.EncodeMetadata(meta[MetadataConstants.AuditMetadataKey]);
    props.Metadata[_deletedKey] = DateTime.UtcNow.Ticks.ToString();

    await blob.SetMetadataAsync(props.Metadata);
    LeoTrace.WriteLine("Soft deleted (2 calls): " + blob.Name);
}
private IAsyncEnumerable<ICloudBlob> ListBlobs(CloudBlobContainer container, string prefix, BlobListingDetails options)
{
    // Clean up the prefix if required
    prefix = prefix == null ? null : SafePath.MakeSafeFilePath(prefix);

    return AsyncEnumerableEx.Create<ICloudBlob>(async (y) =>
    {
        BlobContinuationToken token = new BlobContinuationToken();
        try
        {
            do
            {
                var segment = await container.ListBlobsSegmentedAsync(prefix, true, options, null, token, null, null, y.CancellationToken).ConfigureAwait(false);
                LeoTrace.WriteLine("Listed blob segment for prefix: " + prefix);

                foreach (var blob in segment.Results.OfType<ICloudBlob>())
                {
                    await y.YieldReturn(blob).ConfigureAwait(false);
                }

                token = segment.ContinuationToken;
            } while (token != null && !y.CancellationToken.IsCancellationRequested);
        }
        catch (StorageException e)
        {
            if (e.RequestInformation.HttpStatusCode != 404)
            {
                throw e.Wrap(container.Name + "_" + (prefix ?? string.Empty) + "*");
            }
        }
    });
}
/// <summary>Removes an existing file in the directory.</summary>
public override void DeleteFile(string name)
{
    var location = GetLocation(name);
    SafeTask.SafeWait(() => _store.Delete(location, null, _options));
    LeoTrace.WriteLine(String.Format("DELETE {0}", location.BasePath));
    _memoryCache.Remove(GetCacheKey(name));
}
public Task PermanentDeleteContainer(string container)
{
    container = SafeContainerName(container);
    LeoTrace.WriteLine("Trying to delete container: " + container);

    var c = _blobStorage.GetBlobContainerClient(container);
    return c.DeleteIfExistsAsync();
}
public Task CreateContainerIfNotExists(string container)
{
    container = SafeContainerName(container);
    LeoTrace.WriteLine("Trying to create container: " + container);

    var c = _blobStorage.GetBlobContainerClient(container);
    return c.CreateIfNotExistsAsync();
}
private async Task<Metadata> GetBlobMetadata(CloudBlockBlob blob)
{
    LeoTrace.WriteLine("Downloading blob metadata: " + blob.Name);
    if (!await blob.ExecuteWrap(b => b.FetchAttributesAsync(), true).ConfigureAwait(false))
    {
        return null;
    }

    return await GetActualMetadata(blob).ConfigureAwait(false);
}
public async IAsyncEnumerable<bool> RunEvery(StoreLocation location, TimeSpan interval, Action<Exception> unhandledExceptions = null, [EnumeratorCancellation] CancellationToken token = default)
{
    var blob = GetBlockBlob(location);

    var minimum = TimeSpan.FromSeconds(5); // so we're not polling the leased blob too fast
    while (!token.IsCancellationRequested)
    {
        var timeLeft = TimeSpan.FromSeconds(0);

        // Don't allow you to throw to get out of the loop...
        bool canExecute = false;
        try
        {
            var lastPerformed = DateTimeOffset.MinValue;
            var lease = await LockInternal(blob);
            if (lease != null)
            {
                await using (var arl = lease.Item1)
                {
                    var props = await GetBlobProperties(blob, token);
                    if (props.Metadata.ContainsKey("lastPerformed"))
                    {
                        DateTimeOffset.TryParseExact(props.Metadata["lastPerformed"], "R", CultureInfo.InvariantCulture, DateTimeStyles.AdjustToUniversal, out lastPerformed);
                    }

                    if (DateTimeOffset.UtcNow >= lastPerformed + interval)
                    {
                        lastPerformed = DateTimeOffset.UtcNow;
                        props.Metadata["lastPerformed"] = lastPerformed.ToString("R", CultureInfo.InvariantCulture);
                        await blob.SetMetadataAsync(props.Metadata, new BlobRequestConditions { LeaseId = lease.Item2 }, token);
                        canExecute = true;
                    }
                }
            }

            timeLeft = (lastPerformed + interval) - DateTimeOffset.UtcNow;
        }
        catch (TaskCanceledException) { throw; }
        catch (Exception e)
        {
            unhandledExceptions?.Invoke(e);
            LeoTrace.WriteLine("Error on lock loop: " + e.Message);
        }

        if (canExecute)
        {
            yield return true;
        }

        // Do this outside the exception to prevent it going out of control
        await Task.Delay(timeLeft > minimum ? timeLeft : minimum, token);
    }
}
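// Hypothetical consumer sketch for RunEvery above (not part of the original source).
// Assumes a store instance exposing that async iterator; the AzureStore type name,
// the StoreLocation constructor and the per-tick work are illustrative assumptions.
public async Task RunCleanupLoop(AzureStore store, CancellationToken ct)
{
    var location = new StoreLocation("maintenance", "cleanup-lock"); // assumed constructor shape

    // Each yielded value means this process currently holds the blob lease and the
    // interval has elapsed, so the work runs at most once per interval across instances.
    await foreach (var _ in store.RunEvery(location, TimeSpan.FromMinutes(10), e => Console.WriteLine(e), ct))
    {
        await DoCleanup(ct); // hypothetical work item
    }
}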
private async Task<Metadata> GetBlobMetadata(BlockBlobClient blob, string snapshot)
{
    LeoTrace.WriteLine("Downloading blob metadata: " + blob.Name);
    try
    {
        var props = await blob.GetPropertiesAsync();
        return await GetActualMetadata(blob, props, snapshot);
    }
    catch (RequestFailedException e) when (e.ErrorCode == BlobErrorCode.BlobNotFound)
    {
        return null;
    }
}
public async Task PermanentDeleteContainer(string container)
{
    container = SafeContainerName(container);
    LeoTrace.WriteLine("Trying to delete container: " + container);

    var c = _blobStorage.GetContainerReference(container);
    try
    {
        await c.DeleteIfExistsAsync().ConfigureAwait(false);
    }
    catch (StorageException e)
    {
        throw e.Wrap(c.Name);
    }
}
public IAsyncEnumerable<bool> RunEvery(StoreLocation location, TimeSpan interval, Action<Exception> unhandledExceptions = null)
{
    return AsyncEnumerableEx.Create<bool>(async (y) =>
    {
        var blob = GetBlockBlob(location);

        var minimum = TimeSpan.FromSeconds(5); // so we're not polling the leased blob too fast
        while (!y.CancellationToken.IsCancellationRequested)
        {
            var timeLeft = TimeSpan.FromSeconds(0);

            // Don't allow you to throw to get out of the loop...
            try
            {
                var lastPerformed = DateTimeOffset.MinValue;
                var lease = await LockInternal(blob).ConfigureAwait(false);
                if (lease != null)
                {
                    using (var arl = lease.Item1)
                    {
                        await blob.ExecuteWrap(b => b.FetchAttributesAsync(null, null, null, y.CancellationToken)).ConfigureAwait(false);
                        if (blob.Metadata.ContainsKey("lastPerformed"))
                        {
                            DateTimeOffset.TryParseExact(blob.Metadata["lastPerformed"], "R", CultureInfo.InvariantCulture, DateTimeStyles.AdjustToUniversal, out lastPerformed);
                        }

                        if (DateTimeOffset.UtcNow >= lastPerformed + interval)
                        {
                            await y.YieldReturn(true).ConfigureAwait(false);
                            lastPerformed = DateTimeOffset.UtcNow;
                            blob.Metadata["lastPerformed"] = lastPerformed.ToString("R", CultureInfo.InvariantCulture);
                            await blob.ExecuteWrap(b => b.SetMetadataAsync(AccessCondition.GenerateLeaseCondition(lease.Item2), null, null, y.CancellationToken)).ConfigureAwait(false);
                        }
                    }
                }

                timeLeft = (lastPerformed + interval) - DateTimeOffset.UtcNow;
            }
            catch (TaskCanceledException) { throw; }
            catch (Exception e)
            {
                unhandledExceptions?.Invoke(e);
                LeoTrace.WriteLine("Error on lock loop: " + e.Message);
            }

            // Do this outside the exception to prevent it going out of control
            await Task.Delay(timeLeft > minimum ? timeLeft : minimum, y.CancellationToken).ConfigureAwait(false);
        }
    });
}
public async Task Complete(CancellationToken ct)
{
    if (!_hasCompleted)
    {
        _hasCompleted = true;

        var data = _buff.GetBuffer();
        var length = (int)_buff.Length;

        if (_partNumber == 1)
        {
            // We haven't even uploaded one block yet... just upload it straight...
            using (var ms = new MemoryStream(data, 0, length, false))
            {
                await _blob.UploadAsync(ms, new BlobUploadOptions { Conditions = _condition, Metadata = _metadata });
            }
            LeoTrace.WriteLine("Uploaded Single Block: " + _blob.Name);
        }
        else
        {
            if (length > 0)
            {
                var key = GetKey(_partNumber);
                await PutBlobAsync(key, data, length, ct);
                _partNumber++;
            }

            var blocks = new List<string>();
            for (var i = 1; i < _partNumber; i++)
            {
                blocks.Add(GetKey(i));
            }

            await _blob.CommitBlockListAsync(blocks, new CommitBlockListOptions { Conditions = _condition, Metadata = _metadata }, ct);
            LeoTrace.WriteLine("Finished Put Blocks using " + _partNumber + " total blocks: " + _blob.Name);
        }

        _buff.SetLength(0);
    }
}
public async Task Complete(CancellationToken ct)
{
    if (!_hasCompleted)
    {
        _hasCompleted = true;

        var data = _buff.GetBuffer();
        var length = (int)_buff.Length;

        if (_partNumber == 1)
        {
            // We haven't even uploaded one block yet... just upload it straight...
            await _blob.UploadFromByteArrayAsync(data, 0, length, _condition, null, null, ct).ConfigureAwait(false);
            LeoTrace.WriteLine("Uploaded Single Block: " + _blob.Name);
        }
        else
        {
            if (length > 0)
            {
                var key = GetKey(_partNumber);
                await PutBlobAsync(key, data, length, ct).ConfigureAwait(false);
                _partNumber++;
            }

            var blocks = new List<string>();
            for (var i = 1; i < _partNumber; i++)
            {
                blocks.Add(GetKey(i));
            }

            await _blob.PutBlockListAsync(blocks, null, null, null, ct).ConfigureAwait(false);
            LeoTrace.WriteLine("Finished Put Blocks using " + _partNumber + " total blocks: " + _blob.Name);
        }

        _buff.SetLength(0);
    }
}
public async Task SoftDelete(StoreLocation location, UpdateAuditInfo audit)
{
    // In Azure we cannot delete the blob as this would lose the snapshots
    // Instead we will just add some metadata
    var blob = GetBlockBlob(location);
    var metadata = await GetBlobMetadata(blob).ConfigureAwait(false);

    // If already deleted don't worry about it!
    if (metadata == null || metadata.ContainsKey(_deletedKey)) { return; }

    var fullInfo = TransformAuditInformation(metadata, audit);
    metadata.Audit = fullInfo;

    blob.Metadata[MetadataConstants.AuditMetadataKey] = metadata[MetadataConstants.AuditMetadataKey];
    blob.Metadata[_deletedKey] = DateTime.UtcNow.Ticks.ToString();

    await blob.ExecuteWrap(b => b.SetMetadataAsync()).ConfigureAwait(false);
    LeoTrace.WriteLine("Soft deleted (2 calls): " + blob.Name);
}
private async Task<Tuple<IAsyncDisposable, string>> LockInternal(BlockBlobClient blob)
{
    BlobLease lease;

    var client = blob.GetBlobLeaseClient();
    try
    {
        lease = await client.AcquireAsync(LeaseDuration);
    }
    catch (RequestFailedException e) when (e.ErrorCode == BlobErrorCode.LeaseAlreadyPresent)
    {
        return null;
    }
    catch (RequestFailedException e) when (e.ErrorCode == BlobErrorCode.BlobNotFound)
    {
        lease = null;
    }

    // May not have had a blob pushed...
    if (lease == null)
    {
        try
        {
            using (var stream = new MemoryStream(new byte[1]))
            {
                try
                {
                    await blob.UploadAsync(stream, new BlobUploadOptions { Conditions = new BlobRequestConditions { IfNoneMatch = ETag.All } });
                }
                catch (RequestFailedException e) when (e.ErrorCode == BlobErrorCode.BlobAlreadyExists || e.ErrorCode == BlobErrorCode.ConditionNotMet)
                {
                    // Just eat storage exceptions at this point... something was created obviously
                }
            }
            lease = await client.AcquireAsync(LeaseDuration);
        }
        catch (RequestFailedException e) when (e.ErrorCode == BlobErrorCode.LeaseAlreadyPresent)
        {
            return null;
        }
    }

    var condition = new BlobRequestConditions { LeaseId = lease.LeaseId };

    // Every 30 secs keep the lock renewed
    var keepAlive = LeaseRenewInterval(client, condition)
        .TakeUntilDisposed(onDispose: async () =>
        {
            try
            {
                await client.ReleaseAsync(condition);
            }
            catch (Exception e)
            {
                LeoTrace.WriteLine("Release failed: " + e.Message);
            }
        });

    return Tuple.Create(keepAlive, lease.LeaseId);
}
private async Task<OptimisticStoreWriteResult> SaveDataInternal(StoreLocation location, Metadata metadata, UpdateAuditInfo audit, Func<IWriteAsyncStream, Task<long?>> savingFunc, CancellationToken token, bool isOptimistic)
{
    var blob = GetBlockBlob(location);

    // We always want to save the new audit information when saving!
    var props = await GetBlobProperties(blob);
    var currentMetadata = await GetActualMetadata(blob, props, null);
    var auditInfo = TransformAuditInformation(currentMetadata, audit);
    metadata = metadata ?? new Metadata();
    metadata.Audit = auditInfo;

    var result = new OptimisticStoreWriteResult() { Result = true };
    try
    {
        // If the ETag value is empty then the store value must not exist yet...
        var condition = isOptimistic
            ? (string.IsNullOrEmpty(metadata.ETag)
                ? new BlobRequestConditions { IfNoneMatch = ETag.All }
                : new BlobRequestConditions { IfMatch = new ETag(metadata.ETag) })
            : null;

        // Copy the metadata across
        var newMeta = new Dictionary<string, string>();
        foreach (var m in metadata)
        {
            newMeta[m.Key] = AzureStoreMetadataEncoder.EncodeMetadata(m.Value);
        }

        // Always store the version - We use this to do more efficient things on read
        newMeta[StoreVersionKey] = StoreVersionValue;

        long? length;
        using (var stream = new AzureWriteBlockBlobStream(blob, condition, newMeta))
        {
            length = await savingFunc(stream);
            await stream.Complete(token);
        }

        if (length.HasValue && (metadata == null || !metadata.ContentLength.HasValue))
        {
            newMeta[MetadataConstants.ContentLengthMetadataKey] = length.Value.ToString(CultureInfo.InvariantCulture);

            // Save the length straight away before the snapshot...
            await blob.SetMetadataAsync(newMeta, cancellationToken: token);
        }

        // Create a snapshot straight away on azure
        // Note: this shouldn't matter for cost as any blocks that are the same do not cost extra
        if (_enableSnapshots)
        {
            var snapshotBlob = await blob.CreateSnapshotAsync(cancellationToken: token);

            // Save the snapshot back to original blob...
            newMeta[InternalSnapshotKey] = snapshotBlob.Value.Snapshot;
            await blob.SetMetadataAsync(newMeta, cancellationToken: token);

            LeoTrace.WriteLine("Created Snapshot: " + blob.Name);
        }

        var newProps = await blob.GetPropertiesAsync();
        result.Metadata = await GetActualMetadata(blob, newProps, null);
    }
    catch (RequestFailedException e)
    {
        if (isOptimistic)
        {
            // First condition occurs when the eTags do not match
            // Second condition when we specified no eTag (ie must be new blob)
            if (e.Status == (int)HttpStatusCode.PreconditionFailed
                || (e.Status == (int)HttpStatusCode.Conflict && e.ErrorCode == BlobErrorCode.BlobAlreadyExists))
            {
                result.Result = false;
            }
            else
            {
                // Might have been a different error?
                throw;
            }
        }
        else
        {
            if (e.Status == (int)HttpStatusCode.Conflict || e.ErrorCode == BlobErrorCode.LeaseIdMissing)
            {
                throw new LockException("The underlying storage is currently locked for save");
            }

            // Might have been a different error?
            throw;
        }
    }

    return result;
}
private async Task<OptimisticStoreWriteResult> SaveDataInternal(StoreLocation location, Metadata metadata, UpdateAuditInfo audit, Func<IWriteAsyncStream, Task<long?>> savingFunc, CancellationToken token, bool isOptimistic)
{
    var blob = GetBlockBlob(location);

    // We always want to save the new audit information when saving!
    var currentMetadata = await GetBlobMetadata(blob).ConfigureAwait(false);
    var auditInfo = TransformAuditInformation(currentMetadata, audit);
    metadata = metadata ?? new Metadata();
    metadata.Audit = auditInfo;

    var result = new OptimisticStoreWriteResult() { Result = true };
    try
    {
        // If the ETag value is empty then the store value must not exist yet...
        var condition = isOptimistic
            ? (string.IsNullOrEmpty(metadata.ETag)
                ? AccessCondition.GenerateIfNoneMatchCondition("*")
                : AccessCondition.GenerateIfMatchCondition(metadata.ETag))
            : null;

        // Copy the metadata across
        blob.Metadata.Clear();
        foreach (var m in metadata)
        {
            blob.Metadata[m.Key] = m.Value;
        }

        // Always store the version - We use this to do more efficient things on read
        blob.Metadata[StoreVersionKey] = StoreVersionValue;

        long? length;
        using (var stream = new AzureWriteBlockBlobStream(blob, condition))
        {
            length = await savingFunc(stream).ConfigureAwait(false);
            await stream.Complete(token).ConfigureAwait(false);
        }

        if (length.HasValue && (metadata == null || !metadata.ContentLength.HasValue))
        {
            blob.Metadata[MetadataConstants.ContentLengthMetadataKey] = length.Value.ToString(CultureInfo.InvariantCulture);

            // Save the length straight away before the snapshot...
            await blob.SetMetadataAsync(null, null, null, token).ConfigureAwait(false);
        }

        // Create a snapshot straight away on azure
        // Note: this shouldn't matter for cost as any blocks that are the same do not cost extra
        if (_enableSnapshots)
        {
            var snapshotBlob = await blob.CreateSnapshotAsync(blob.Metadata, null, null, null, token).ConfigureAwait(false);
            var snapshot = snapshotBlob.SnapshotTime.Value.UtcTicks.ToString(CultureInfo.InvariantCulture);

            // Save the snapshot back to original blob...
            blob.Metadata[InternalSnapshotKey] = snapshot;
            await blob.SetMetadataAsync(null, null, null, token).ConfigureAwait(false);

            LeoTrace.WriteLine("Created Snapshot: " + blob.Name);
        }

        result.Metadata = await GetActualMetadata(blob).ConfigureAwait(false);
    }
    catch (StorageException exc)
    {
        if (isOptimistic)
        {
            // First condition occurs when the eTags do not match
            // Second condition when we specified no eTag (ie must be new blob)
            if (exc.RequestInformation.HttpStatusCode == (int)HttpStatusCode.PreconditionFailed
                || (exc.RequestInformation.HttpStatusCode == (int)HttpStatusCode.Conflict && exc.RequestInformation.ExtendedErrorInformation.ErrorCode == "BlobAlreadyExists"))
            {
                result.Result = false;
            }
            else
            {
                // Might have been a different error?
                throw exc.Wrap(blob.Name);
            }
        }
        else
        {
            if (exc.RequestInformation.HttpStatusCode == (int)HttpStatusCode.Conflict || exc.RequestInformation.ExtendedErrorInformation.ErrorCode == "LeaseIdMissing")
            {
                throw new LockException("The underlying storage is currently locked for save");
            }

            // Might have been a different error?
            throw exc.Wrap(blob.Name);
        }
    }

    return result;
}
private async Task<Tuple<IDisposable, string>> LockInternal(ICloudBlob blob)
{
    string leaseId;

    try
    {
        leaseId = await blob.AcquireLeaseAsync(TimeSpan.FromMinutes(1), null).ConfigureAwait(false);
        LeoTrace.WriteLine("Leased Blob: " + blob.Name);
    }
    catch (StorageException e)
    {
        // If we have a conflict this blob is already locked...
        if (e.RequestInformation.HttpStatusCode == 409)
        {
            return null;
        }

        if (e.RequestInformation.HttpStatusCode == 404)
        {
            leaseId = null;
        }
        else
        {
            throw e.Wrap(blob.Name);
        }
    }

    // May not have had a blob pushed...
    if (leaseId == null)
    {
        try
        {
            using (var stream = new MemoryStream(new byte[1]))
            {
                try
                {
                    await blob.UploadFromStreamAsync(stream).ConfigureAwait(false);
                }
                catch (StorageException)
                {
                    // Just eat storage exceptions at this point... something was created obviously
                }
            }
            leaseId = await blob.AcquireLeaseAsync(TimeSpan.FromMinutes(1), null).ConfigureAwait(false);
            LeoTrace.WriteLine("Created new blob and lease (2 calls): " + blob.Name);
        }
        catch (StorageException e)
        {
            // If we have a conflict this blob is already locked...
            if (e.RequestInformation.HttpStatusCode == 409)
            {
                return null;
            }

            if (e.RequestInformation.HttpStatusCode == 404)
            {
                return null;
            }
            else
            {
                throw e.Wrap(blob.Name);
            }
        }
    }

    var condition = AccessCondition.GenerateLeaseCondition(leaseId);

    // Every 30 secs keep the lock renewed
    var keepAlive = AsyncEnumerableEx.CreateTimer(TimeSpan.FromSeconds(30))
        .Select(t =>
        {
            LeoTrace.WriteLine("Renewed Lease: " + blob.Name);
            return blob.RenewLeaseAsync(condition);
        })
        .Unwrap()
        .TakeUntilDisposed(null, t =>
        {
            try
            {
                // We need to do this to make sure after the dispose the lease is gone
                blob.ReleaseLeaseAsync(condition).GetAwaiter().GetResult();
            }
            catch (Exception e)
            {
                LeoTrace.WriteLine("Release failed: " + e.Message);
            }
        });

    return Tuple.Create((IDisposable)keepAlive, leaseId);
}
public async Task<Metadata> SaveData(StoreLocation location, Metadata mdata, UpdateAuditInfo audit, Func<IWriteAsyncStream, Task> savingFunc, CancellationToken token, IEncryptor encryptor = null, SecureStoreOptions options = SecureStoreOptions.All)
{
    LeoTrace.WriteLine("Saving: " + location.Container + ", " + location.BasePath + ", " + (location.Id.HasValue ? location.Id.Value.ToString() : "null"));
    var metadata = new Metadata(mdata);

    /****************************************************
     *  SETUP METADATA
     ****************************************************/
    if (encryptor != null)
    {
        metadata[MetadataConstants.EncryptionMetadataKey] = encryptor.Algorithm;
    }
    else
    {
        metadata.Remove(MetadataConstants.EncryptionMetadataKey);
    }

    if (options.HasFlag(SecureStoreOptions.Compress))
    {
        if (_compressor == null)
        {
            throw new ArgumentException("Compression option should not be used if no compressor has been implemented", "options");
        }
        metadata[MetadataConstants.CompressionMetadataKey] = _compressor.Algorithm;
    }
    else
    {
        metadata.Remove(MetadataConstants.CompressionMetadataKey);
    }

    /****************************************************
     *  PREPARE THE SAVE STREAM
     ****************************************************/
    var m = await _store.SaveData(location, metadata, audit, async (stream) =>
    {
        LengthCounterStream counter = null;
        stream = stream.AddTransformer(s =>
        {
            // Encrypt just before writing to the stream (if we need)
            if (encryptor != null)
            {
                s = encryptor.Encrypt(s, false);
            }

            // Compression comes right before encryption
            if (options.HasFlag(SecureStoreOptions.Compress))
            {
                s = _compressor.CompressWriteStream(s);
            }

            // Always place the length counter stream
            counter = new LengthCounterStream(s);
            return counter;
        });

        await savingFunc(stream);
        await stream.Complete(token);
        return counter.Length;
    }, token);

    /****************************************************
     *  POST SAVE TASKS (BACKUP, INDEX)
     ****************************************************/
    // The rest of the tasks are done asynchronously
    var tasks = new List<Task>();

    if (options.HasFlag(SecureStoreOptions.Backup))
    {
        if (_backupQueue == null)
        {
            throw new ArgumentException("Backup option should not be used if no backup queue has been defined", "options");
        }

        tasks.Add(_backupQueue.SendMessage(GetMessageDetails(location, metadata)));
    }

    if (options.HasFlag(SecureStoreOptions.Index))
    {
        tasks.Add(ForceIndex(location, mdata));
    }

    if (tasks.Count > 0)
    {
        await Task.WhenAll(tasks);
    }

    return m;
}
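// Hypothetical usage sketch for SaveData above (not part of the original source).
// Assumes a store instance exposing that method; the ISecureStore interface name,
// the StoreLocation constructor, and the IWriteAsyncStream.WriteAsync overload used
// by the saving delegate are illustrative assumptions, not confirmed API.
public async Task SaveDocument(ISecureStore store, IEncryptor encryptor, byte[] payload, CancellationToken ct)
{
    var location = new StoreLocation("documents", "invoices/example"); // assumed constructor shape
    var metadata = new Metadata(); // optional caller-supplied metadata
    var audit = new UpdateAuditInfo(); // audit fields omitted; populate per your app's conventions

    // The saving delegate writes the raw payload; compression and encryption wrapping
    // happen inside SaveData via the stream transformers shown above.
    var saved = await store.SaveData(location, metadata, audit,
        stream => stream.WriteAsync(payload, 0, payload.Length, ct), // assumed write overload
        ct, encryptor, SecureStoreOptions.Compress | SecureStoreOptions.Index);

    LeoTrace.WriteLine("Saved with ETag: " + saved.ETag);
}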