/// <summary>
/// Resolves (and creates when necessary) the append blob used as the log target.
/// </summary>
/// <param name="storageAccount">Account used to build the blob client.</param>
/// <param name="blobContainerName">Name of the container holding the blob.</param>
/// <param name="blobName">Name of the append blob.</param>
/// <param name="bypassBlobCreationValidation">Forwarded to container creation; skips validation of the create result.</param>
/// <returns>The append blob reference with its attributes already fetched.</returns>
private async Task<CloudAppendBlob> GetBlobReferenceAsync(CloudStorageAccount storageAccount, string blobContainerName, string blobName, bool bypassBlobCreationValidation)
{
    CloudBlobClient cloudBlobClient = storageAccount.CreateCloudBlobClient();
    CloudBlobContainer cloudBlobContainer = cloudBlobClient.GetContainerReference(blobContainerName);
    await CreateBlobContainerIfNotExistsAsync(cloudBlobContainer, bypassBlobCreationValidation).ConfigureAwait(false);

    CloudAppendBlob newCloudAppendBlob = null;
    try
    {
        newCloudAppendBlob = cloudBlobContainer.GetAppendBlobReference(blobName);

        // BUGFIX: previously blocked with GetAwaiter().GetResult() inside an async
        // method (deadlock / thread-pool starvation risk); await the call instead,
        // consistent with the rest of this method.
        await newCloudAppendBlob.CreateOrReplaceAsync(AccessCondition.GenerateIfNotExistsCondition(), null, null).ConfigureAwait(false);
    }
    catch (StorageException ex) when (ex.RequestInformation?.HttpStatusCode == (int)HttpStatusCode.Conflict && ex.RequestInformation?.ErrorCode == "BlobAlreadyExists")
    {
        // Expected: the IfNotExists access condition fails with 409/BlobAlreadyExists
        // when the append blob already exists. Nothing to do.
    }
    catch (Exception ex)
    {
        Debugging.SelfLog.WriteLine($"Failed to create blob: {ex}");
        throw;
    }

    if (newCloudAppendBlob != null)
    {
        // First time this code gets its hands on this blob reference: pull the blob
        // properties from Azure so the committed-block count is available later to
        // decide when to roll over (50.000 max blocks per append blob).
        await newCloudAppendBlob.FetchAttributesAsync().ConfigureAwait(false);
    }

    return newCloudAppendBlob;
}
/// <summary>
/// Re-creates a <see cref="CloudBlob"/> reference as its concrete type
/// (block / page / append blob) and fetches its attributes.
/// </summary>
/// <param name="blob">Source blob whose properties determine the concrete type.</param>
/// <returns>A typed blob reference with attributes populated.</returns>
/// <exception cref="InvalidOperationException">The blob type is not a known concrete type.</exception>
public static CloudBlob GetCorrespondingTypeBlobReference(CloudBlob blob)
{
    CloudBlob targetBlob;
    switch (blob.Properties.BlobType)
    {
        case BlobType.BlockBlob:
            targetBlob = new CloudBlockBlob(blob.SnapshotQualifiedUri, blob.ServiceClient.Credentials);
            break;
        case BlobType.PageBlob:
            targetBlob = new CloudPageBlob(blob.SnapshotQualifiedUri, blob.ServiceClient.Credentials);
            break;
        case BlobType.AppendBlob:
            targetBlob = new CloudAppendBlob(blob.SnapshotQualifiedUri, blob.ServiceClient.Credentials);
            break;
        default:
            throw new InvalidOperationException(string.Format(
                CultureInfo.CurrentCulture,
                Resources.InvalidBlobType,
                blob.Properties.BlobType,
                blob.Name));
    }

    try
    {
        Task.Run(() => targetBlob.FetchAttributesAsync()).Wait();
    }
    catch (AggregateException e) when (e.InnerException is StorageException)
    {
        // CONSISTENCY: the OperationContext overload unwraps the AggregateException so
        // callers see the StorageException directly; do the same here, preserving the
        // original stack trace instead of resetting it with "throw inner".
        System.Runtime.ExceptionServices.ExceptionDispatchInfo.Capture(e.InnerException).Throw();
        throw; // unreachable; satisfies the compiler's reachability analysis
    }

    return targetBlob;
}
// Small margin to the practical max of 50k blocks, in case of many multiple writers to the same blob.
private static readonly int MaxBlocksOnBlobBeforeRoll = 49500;

/// <summary>
/// Returns the append blob to write to, rolling over to a new blob when the current
/// one exceeds the block-count or (optional) size limit.
/// </summary>
/// <param name="cloudBlobClient">Client used to resolve container/blob references.</param>
/// <param name="blobContainerName">Container that holds the log blobs.</param>
/// <param name="blobName">Target blob name.</param>
/// <param name="bypassBlobCreationValidation">Forwarded to blob creation; skips validation of the create result.</param>
/// <param name="blobSizeLimitBytes">Optional maximum blob size before rolling over; null means no size limit.</param>
/// <returns>The current usable append blob reference.</returns>
public async Task<CloudAppendBlob> GetCloudBlobAsync(CloudBlobClient cloudBlobClient, string blobContainerName, string blobName, bool bypassBlobCreationValidation, long? blobSizeLimitBytes = null)
{
    // Check if the current known blob is the targeted blob.
    if (currentCloudAppendBlob != null && currentBlobName.Equals(blobName, StringComparison.OrdinalIgnoreCase))
    {
        // Before validating, fetch attributes so the current file size / block count are up to date.
        await currentCloudAppendBlob.FetchAttributesAsync().ConfigureAwait(false);

        // Check if the current blob is within the block count and file size limits.
        if (ValidateBlobProperties(currentCloudAppendBlob, blobSizeLimitBytes))
        {
            return currentCloudAppendBlob;
        }
        else
        {
            // The blob is correct but needs to be rolled over.
            currentBlobRollSequence++;

            // BUGFIX: forward blobSizeLimitBytes here too (the first-time branch below
            // already did); otherwise the size limit was silently dropped on roll-over.
            await GetCloudAppendBlobAsync(cloudBlobClient, blobContainerName, blobName, bypassBlobCreationValidation, blobSizeLimitBytes).ConfigureAwait(false);
        }
    }
    else
    {
        // First time to get a cloud blob, or the blob name has changed.
        currentBlobRollSequence = 0;
        await GetCloudAppendBlobAsync(cloudBlobClient, blobContainerName, blobName, bypassBlobCreationValidation, blobSizeLimitBytes).ConfigureAwait(false);
    }

    return currentCloudAppendBlob;
}
/// <summary>
/// Ensures the current append blob still has room for more blocks; when the committed
/// block count reaches the limit, switches to the first unused "--NN" suffixed blob.
/// </summary>
private async Task CheckBloksCountAsync()
{
    // The committed block count is only populated after a round-trip; fetch it if missing.
    if (!_blob.Properties.AppendBlobCommittedBlockCount.HasValue)
    {
        await _blob.FetchAttributesAsync();
    }

    if (_blob.Properties.AppendBlobCommittedBlockCount.Value < _maxBlocksCount)
    {
        return;
    }

    // BUGFIX: build every candidate name from the ORIGINAL blob name. Previously the
    // name was derived from _blob.Name after _blob had been reassigned inside the loop,
    // so suffixes compounded ("name--01--02") instead of incrementing ("name--02").
    // InitBlobAsync already probes from a fixed base; this now matches it.
    string baseName = _blob.Name;
    int i = 1;
    while (true)
    {
        var fileName = $"{baseName}--{i:00}";
        _blob = _blobContainer.GetAppendBlobReference(fileName);
        bool exists = await _blob.ExistsAsync();
        if (!exists)
        {
            break;
        }
        ++i;
    }

    _log.WriteInfo("BlobSaver.CheckBloksCountAsync", _container, $"Created additional blob - {_blob.Name}");
    await InitBlobPropertiesAsync();
}
/// <summary>
/// Delivers blob information like length, lastmodified, etag.
/// </summary>
/// <returns>The blob's properties, or null when the blob does not exist.</returns>
public async Task<BlobProperties> GetBlobProperties()
{
    // Guard: a non-existent blob has no properties to report.
    if (!await blob.ExistsAsync())
    {
        return null;
    }

    await blob.FetchAttributesAsync();
    return blob.Properties;
}
/// <summary>
/// Builds a faked <see cref="CloudAppendBlob"/> with the given name and block count,
/// stubs its create/fetch calls, and wires it into the container fake.
/// </summary>
/// <param name="blobName">Name the fake blob reports and is registered under.</param>
/// <param name="blockCount">Committed block count the fake should expose.</param>
/// <returns>The configured fake append blob.</returns>
private CloudAppendBlob SetupCloudAppendBlobReference(string blobName, int blockCount)
{
    var blobUri = new Uri("https://account.suffix.blobs.com/logcontainer/" + blobName);
    CloudAppendBlob fakeBlob = A.Fake<CloudAppendBlob>(opt => opt.WithArgumentsForConstructor(new object[] { blobUri }));

    SetCloudBlobBlockCount(fakeBlob, blockCount);
    A.CallTo(() => fakeBlob.Name).Returns(blobName);
    A.CallTo(() => fakeBlob.CreateOrReplaceAsync(A<AccessCondition>.Ignored, null, null)).Returns(Task.FromResult(true));
    A.CallTo(() => fakeBlob.FetchAttributesAsync()).Returns(Task.FromResult(true));
    A.CallTo(() => blobContainer.GetAppendBlobReference(blobName)).Returns(fakeBlob);

    return fakeBlob;
}
/// <summary>
/// Reads the new-format flag from the blob's metadata.
/// </summary>
/// <param name="buffer">Unused here; kept for signature compatibility with callers.</param>
/// <param name="filledCount">Unused here; kept for signature compatibility with callers.</param>
/// <param name="prevDelimiterIndex">Unused here; kept for signature compatibility with callers.</param>
/// <param name="blob">Blob whose metadata is inspected.</param>
/// <returns>True when the metadata flag parses to true; false when absent or unparsable.</returns>
private async Task<bool> DetectNewFormatAsync(
    byte[] buffer,
    int filledCount,
    int prevDelimiterIndex,
    CloudAppendBlob blob)
{
    // NOTE(review): metadata is assumed to be unpopulated until attributes are fetched -
    // confirm the SDK actually returns null here rather than an empty dictionary.
    if (blob.Metadata == null)
    {
        await blob.FetchAttributesAsync();
    }

    // IDIOM: single dictionary lookup via TryGetValue instead of ContainsKey + indexer.
    if (blob.Metadata.TryGetValue(_newFormatKey, out string flagValue) &&
        bool.TryParse(flagValue, out bool isNewFormat))
    {
        return isNewFormat;
    }

    return false;
}
/// <summary>
/// Rolls the target over to today's dated log blob, creating it and stamping
/// its metadata when it does not exist yet.
/// </summary>
/// <remarks>
/// NOTE(review): blocks on async calls (.Result / .Wait()) - deadlock risk on a
/// synchronization context; kept as-is to preserve the existing synchronous contract.
/// </remarks>
private void Roll()
{
    _day = DateTime.Now;
    var stamp = _day.ToString("yyyy-MM-dd");
    _blob = _container.GetAppendBlobReference($"{_baseBlobName}_{stamp}.log");

    // Existing blob for today: nothing to create or stamp.
    if (_blob.ExistsAsync().Result)
    {
        return;
    }

    _blob.CreateOrReplaceAsync().Wait();
    _blob.FetchAttributesAsync().Wait();
    _blob.Metadata["Day"] = stamp;
    _blob.Metadata["Name"] = _baseBlobName;
    _blob.SetMetadataAsync().Wait();
}
/// <summary>
/// Re-creates a <see cref="CloudBlob"/> reference as its concrete type
/// (block / page / append blob) and fetches its attributes under the given context.
/// </summary>
/// <param name="blob">Source blob whose properties determine the concrete type.</param>
/// <param name="operationContext">Context passed through to the attributes fetch.</param>
/// <returns>A typed blob reference with attributes populated.</returns>
/// <exception cref="InvalidOperationException">The blob type is not a known concrete type.</exception>
public static CloudBlob GetCorrespondingTypeBlobReference(CloudBlob blob, OperationContext operationContext)
{
    CloudBlob targetBlob;
    switch (blob.Properties.BlobType)
    {
        case BlobType.BlockBlob:
            targetBlob = new CloudBlockBlob(blob.SnapshotQualifiedUri, blob.ServiceClient.Credentials);
            break;
        case BlobType.PageBlob:
            targetBlob = new CloudPageBlob(blob.SnapshotQualifiedUri, blob.ServiceClient.Credentials);
            break;
        case BlobType.AppendBlob:
            targetBlob = new CloudAppendBlob(blob.SnapshotQualifiedUri, blob.ServiceClient.Credentials);
            break;
        default:
            throw new InvalidOperationException(string.Format(
                CultureInfo.CurrentCulture,
                Resources.InvalidBlobType,
                blob.Properties.BlobType,
                blob.Name));
    }

    try
    {
        Task.Run(() => targetBlob.FetchAttributesAsync(null, null, operationContext)).Wait();
    }
    catch (AggregateException e) when (e.InnerException is StorageException)
    {
        // BUGFIX: "throw e.InnerException;" resets the inner exception's stack trace.
        // ExceptionDispatchInfo rethrows it with the original trace preserved.
        System.Runtime.ExceptionServices.ExceptionDispatchInfo.Capture(e.InnerException).Throw();
        throw; // unreachable; satisfies the compiler's reachability analysis
    }

    return targetBlob;
}
/// <summary>
/// Resolves the append blob for the given storage path. Reuses the existing blob when
/// it is under the block limit and matches the configured compression / format flags;
/// otherwise probes for the first unused "--NN" suffixed name.
/// </summary>
/// <param name="storagePath">Base blob path within the container.</param>
private async Task InitBlobAsync(string storagePath)
{
    _blob = _blobContainer.GetAppendBlobReference(storagePath);
    if (await _blob.ExistsAsync())
    {
        // The committed block count is only populated after a round-trip.
        if (!_blob.Properties.AppendBlobCommittedBlockCount.HasValue)
        {
            await _blob.FetchAttributesAsync();
        }

        // ROBUSTNESS: TryGetValue + TryParse instead of ContainsKey + bool.Parse,
        // so malformed metadata values are treated as "flag not set" rather than
        // throwing FormatException (matches the defensive style of DetectNewFormatAsync).
        bool isBlobCompressed = _blob.Metadata.TryGetValue(_compressedKey, out string compressedValue)
            && bool.TryParse(compressedValue, out bool compressed)
            && compressed;
        bool isNewFormat = _blob.Metadata.TryGetValue(_newFormatKey, out string newFormatValue)
            && bool.TryParse(newFormatValue, out bool newFormat)
            && newFormat;

        // Existing blob is still usable: under the block limit and flags match the config.
        if (_blob.Properties.AppendBlobCommittedBlockCount < _maxBlocksCount
            && isBlobCompressed == _compressData
            && isNewFormat)
        {
            return;
        }

        // Probe sequentially for the first unused "--NN" suffix.
        int i = 1;
        while (true)
        {
            var fileName = $"{storagePath}--{i:00}";
            _blob = _blobContainer.GetAppendBlobReference(fileName);
            bool exists = await _blob.ExistsAsync();
            if (!exists)
            {
                break;
            }
            ++i;
        }
    }

    _log.WriteInfo("BlobSaver.InitBlobAsync", _container, $"Created blob - {_blob.Name}");
    await InitBlobPropertiesAsync();
}
/// <inheritdoc />
public Task FetchAttributesAsync(CancellationToken cancellationToken) =>
    _sdk.FetchAttributesAsync(cancellationToken);
/// <inheritdoc />
public Task FetchAttributesAsync(CancellationToken cancellationToken) =>
    _sdk.FetchAttributesAsync(
        accessCondition: null,
        options: null,
        operationContext: null,
        cancellationToken: cancellationToken);
/// <summary>
/// Refreshes the file's attributes from the service and returns its length in bytes.
/// </summary>
private async Task<long> GetFileSize()
{
    await file.FetchAttributesAsync();
    var sizeInBytes = file.Properties.Length;
    return sizeInBytes;
}