/// <summary>
/// Uploads every item in <c>Items</c> to the configured Azure storage container in parallel.
/// </summary>
/// <param name="ct">Token used to cancel the overall upload operation.</param>
/// <returns><c>false</c> when no items were provided or any upload failed; otherwise <c>true</c>.</returns>
public async Task<bool> ExecuteAsync(CancellationToken ct)
{
    if (Items.Length == 0)
    {
        Log.LogError("No items were provided for upload.");
        return false;
    }

    Log.LogMessage("Begin uploading blobs to Azure account {0} in container {1}.", AccountName, ContainerName);

    try
    {
        AzureStorageUtils blobUtils = new AzureStorageUtils(AccountName, AccountKey, ContainerName);
        List<Task> uploadTasks = new List<Task>();

        foreach (var item in Items)
        {
            uploadTasks.Add(Task.Run(async () =>
            {
                string relativeBlobPath = item.GetMetadata("RelativeBlobPath");
                if (string.IsNullOrEmpty(relativeBlobPath))
                {
                    // Specific exception types instead of bare Exception; the catch below logs them all,
                    // so callers observe the same false return value as before.
                    throw new InvalidOperationException(string.Format("Metadata 'RelativeBlobPath' is missing for item '{0}'.", item.ItemSpec));
                }

                if (!File.Exists(item.ItemSpec))
                {
                    throw new FileNotFoundException(string.Format("The file '{0}' does not exist.", item.ItemSpec), item.ItemSpec);
                }

                BlobClient blobReference = blobUtils.GetBlob(relativeBlobPath);

                if (!Overwrite && await blobReference.ExistsAsync(ct))
                {
                    // Tolerate an existing blob only when it is identical and the caller opted in.
                    if (PassIfExistingItemIdentical &&
                        await blobUtils.IsFileIdenticalToBlobAsync(item.ItemSpec, blobReference))
                    {
                        return;
                    }

                    throw new InvalidOperationException(string.Format("The blob '{0}' already exists.", relativeBlobPath));
                }

                // FIX: the original created a CancellationTokenSource that was never disposed and
                // ignored the caller's token entirely. Link the per-item timeout to the external
                // token so either one cancels the upload, and dispose the source deterministically.
                using (CancellationTokenSource timeoutTokenSource = CancellationTokenSource.CreateLinkedTokenSource(ct))
                {
                    timeoutTokenSource.CancelAfter(TimeSpan.FromMinutes(UploadTimeoutInMinutes));

                    using (Stream localFileStream = File.OpenRead(item.ItemSpec))
                    {
                        await blobReference.UploadAsync(localFileStream, timeoutTokenSource.Token);
                    }
                }
            }, ct));
        }

        await Task.WhenAll(uploadTasks);
        Log.LogMessage("Upload to Azure is complete, a total of {0} items were uploaded.", Items.Length);
    }
    catch (Exception e)
    {
        // Any upload failure (including cancellation) is surfaced as a logged error;
        // Log.HasLoggedErrors then drives the return value below.
        Log.LogErrorFromException(e, true);
    }

    return !Log.HasLoggedErrors;
}
/// <summary>
/// Uploads the given stream to a blob at <paramref name="path"/> inside the file-share container.
/// </summary>
/// <param name="stream">Content to upload; read from its current position.</param>
/// <param name="path">Blob name (path) within the container.</param>
public async Task UploadFileToStorageContainer(Stream stream, string path)
{
    // Resolve the target blob inside the file-share container, then push the stream to it.
    var targetBlob = GetFileShareContainer().GetBlobClient(path);
    await targetBlob.UploadAsync(stream);
}
// Azure Function (HTTP trigger): reads a "model.json" from a blob container (container name
// suggests a Common Data Service export — TODO confirm), extracts the attribute names of the
// first entity, then parses a hard-coded opportunity snapshot CSV from the same container
// into OpportunityHistory records and returns them as JSON.
public static async Task<IActionResult> Run(
    [HttpTrigger(AuthorizationLevel.Function, "get", "post", Route = null)] HttpRequest req,
    ILogger log)
{
    log.LogInformation("C# HTTP trigger function processed a request.");

    // "name" may arrive either as a query parameter or inside the JSON request body.
    string name = req.Query["name"];
    string requestBody = await new StreamReader(req.Body).ReadToEndAsync();
    dynamic data = JsonConvert.DeserializeObject(requestBody);
    name = name ?? data?.name;

    // Storage connection string comes from app settings; the container name is hard-coded.
    var connection = Environment.GetEnvironmentVariable("azureconnection");
    var containerName = "commondataservice-kapish2020-ca7639d241a143b695207c2fb17d54";
    BlobContainerClient container = new BlobContainerClient(connection, containerName);

    // Locate the model description blob. NOTE(review): if "model.json" is absent, 'model' is
    // null and the next line throws a NullReferenceException — confirm the container is
    // guaranteed to contain it.
    var model = container.GetBlobs().Where(blb => blb.Name == "model.json").FirstOrDefault();
    var blobC = new BlobClient(connection, containerName, model.Name);
    var resp = await blobC.DownloadAsync();

    // Read the blob line by line into one string (newlines are dropped, which is harmless
    // for JSON parsing).
    StringBuilder sb = new StringBuilder();
    using (var streamReader = new StreamReader(resp.Value.Content))
    {
        while (!streamReader.EndOfStream)
        {
            var line = await streamReader.ReadLineAsync();
            sb.Append(line);
        }
    }

    // Pull the attribute names of the first entity out of the model document. Assumes the
    // shape { "entities": [ { "attributes": [ { "name": ... } ] } ] } — TODO confirm against
    // the actual model.json schema.
    dynamic modeldata = JsonConvert.DeserializeObject(sb.ToString());
    JObject jobject = modeldata;
    JObject entity = (JObject)jobject.GetValue("entities").FirstOrDefault();
    var attributes = entity.GetValue("attributes");
    List<string> attributesList = new List<string>();
    foreach (var att in attributes)
    {
        attributesList.Add(((JObject)att).GetValue("name").ToString());
    }

    // Currently pinned to a single snapshot file; the filter could be widened here to pick
    // the right blobs, e.g. one snapshot per month.
    var blobs = container.GetBlobs().Where(blb => blb.Name == "opportunity/Snapshot/2020_1608038345.csv");
    List<OpportunityHistory> opportunities = new List<OpportunityHistory>();
    foreach (var blob in blobs)
    {
        log.LogInformation(blob.Name);
        var blockBlob = new BlockBlobClient(connection, containerName, blob.Name);
        opportunities.AddRange(await QueryFile(blockBlob, log, attributesList));
    }

    return (new OkObjectResult(opportunities));
}
/// <summary>
/// Performs one synchronous upload of a randomly generated payload of the configured size.
/// </summary>
/// <param name="cancellationToken">Token used to cancel the upload.</param>
public override void Run(CancellationToken cancellationToken)
{
    using (var payload = RandomStream.Create(Options.Size))
    {
        BlobClient.Upload(
            payload,
            transferOptions: Options.StorageTransferOptions,
            cancellationToken: cancellationToken);
    }
}
/// <summary>
/// Attempts to claim ownership of partitions for processing.
/// </summary>
///
/// <param name="partitionOwnership">An enumerable containing all the ownership to claim.</param>
///
/// <returns>An enumerable containing the successfully claimed ownership.</returns>
///
public override async Task<IEnumerable<PartitionOwnership>> ClaimOwnershipAsync(IEnumerable<PartitionOwnership> partitionOwnership)
{
    var claimedOwnership = new List<PartitionOwnership>();

    // Reused across iterations; every key is overwritten for each ownership, so no stale
    // values leak between partitions.
    var metadata = new Dictionary<string, string>();

    Response<BlobContentInfo> contentInfoResponse;
    Response<BlobInfo> infoResponse;

    foreach (PartitionOwnership ownership in partitionOwnership)
    {
        // Ownership state is persisted as blob metadata, not blob content.
        metadata[BlobMetadataKey.OwnerIdentifier] = ownership.OwnerIdentifier;
        metadata[BlobMetadataKey.Offset] = ownership.Offset?.ToString() ?? string.Empty;
        metadata[BlobMetadataKey.SequenceNumber] = ownership.SequenceNumber?.ToString() ?? string.Empty;

        var blobAccessConditions = new BlobAccessConditions();

        // One blob per partition, namespaced by fully qualified namespace, hub and consumer group.
        var blobName = $"{ ownership.FullyQualifiedNamespace }/{ ownership.EventHubName }/{ ownership.ConsumerGroup }/{ ownership.PartitionId }";
        BlobClient blobClient = _containerClient.GetBlobClient(blobName);

        try
        {
            // Even though documentation states otherwise, we cannot use UploadAsync when the blob already exists in
            // the current storage SDK. For this reason, we are using the specified ETag as an indication of what
            // method to use.
            if (ownership.ETag == null)
            {
                // No ETag: the claim is valid only if the blob does not exist yet (If-None-Match: *).
                blobAccessConditions.HttpAccessConditions = new HttpAccessConditions { IfNoneMatch = new ETag("*") };

                MemoryStream blobContent = null;

                try
                {
                    // Zero-length content; the ownership information lives entirely in the metadata.
                    blobContent = new MemoryStream(new byte[0]);
                    contentInfoResponse = await blobClient.UploadAsync(blobContent, metadata : metadata, accessConditions : blobAccessConditions).ConfigureAwait(false);
                }
                catch (StorageRequestFailedException ex) when(ex.ErrorCode == BlobErrorCode.BlobAlreadyExists)
                {
                    // A blob could have just been created by another Event Processor that claimed ownership of this
                    // partition. In this case, there's no point in retrying because we don't have the correct ETag.
                    Log($"Ownership with partition id = '{ ownership.PartitionId }' is not claimable.");
                    continue;
                }
                finally
                {
                    blobContent?.Dispose();
                }

                ownership.LastModifiedTime = contentInfoResponse.Value.LastModified;
                ownership.ETag = contentInfoResponse.Value.ETag.ToString();
            }
            else
            {
                // An ETag is present: the blob must still match it (If-Match); otherwise another
                // processor has updated the ownership since we last saw it.
                blobAccessConditions.HttpAccessConditions = new HttpAccessConditions { IfMatch = new ETag(ownership.ETag) };

                try
                {
                    infoResponse = await blobClient.SetMetadataAsync(metadata, blobAccessConditions).ConfigureAwait(false);
                }
                catch (StorageRequestFailedException ex) when(ex.ErrorCode == BlobErrorCode.BlobNotFound)
                {
                    // No ownership was found, which means the ETag should have been set to null in order to
                    // claim this ownership. For this reason, we consider it a failure and don't try again.
                    Log($"Ownership with partition id = '{ ownership.PartitionId }' is not claimable.");
                    continue;
                }

                ownership.LastModifiedTime = infoResponse.Value.LastModified;
                ownership.ETag = infoResponse.Value.ETag.ToString();
            }

            // Small workaround to retrieve the eTag. The current storage SDK returns it enclosed in
            // double quotes ('"ETAG_VALUE"' instead of 'ETAG_VALUE').
            Match match = s_doubleQuotesExpression.Match(ownership.ETag);

            if (match.Success)
            {
                ownership.ETag = match.Groups[1].ToString();
            }

            claimedOwnership.Add(ownership);
            Log($"Ownership with partition id = '{ ownership.PartitionId }' claimed.");
        }
        catch (StorageRequestFailedException ex) when(ex.ErrorCode == BlobErrorCode.ConditionNotMet)
        {
            // The access condition failed: another processor holds (or just took) this partition.
            Log($"Ownership with partition id = '{ ownership.PartitionId }' is not claimable.");
        }
    }

    return (claimedOwnership);
}
/// <summary>
/// Demonstrates the claim-check pattern: a payload too large for a Service Bus message is
/// stored as a blob, and only the blob name travels through the queue. The #region/#if SNIPPET
/// markers feed docs snippet extraction and must be preserved.
/// </summary>
public async Task ClaimCheck()
{
    await using (var scope = await ServiceBusScope.CreateWithQueue(enablePartitioning: false, enableSession: false))
    {
        #region Snippet:CreateBlobContainer
#if SNIPPET
        var containerClient = new BlobContainerClient("<storage connection string>", "claim-checks");
#else
        var containerClient = new BlobContainerClient(TestEnvironment.StorageClaimCheckConnectionString, "claim-checks");
#endif
        await containerClient.CreateIfNotExistsAsync();
        #endregion
        try
        {
            #region Snippet:UploadMessage
            // Store the large (~1 MB) payload in a blob and reference it from the message.
            byte[] body = ServiceBusTestUtilities.GetRandomBuffer(1000000);
            string blobName = Guid.NewGuid().ToString();
            await containerClient.UploadBlobAsync(blobName, new BinaryData(body));
            var message = new ServiceBusMessage
            {
                ApplicationProperties =
                {
                    ["blob-name"] = blobName
                }
            };
            #endregion
            #region Snippet:ClaimCheckSendMessage
#if SNIPPET
            var client = new ServiceBusClient("<service bus connection string>");
#else
            var client = new ServiceBusClient(TestEnvironment.ServiceBusConnectionString);
#endif
            ServiceBusSender sender = client.CreateSender(scope.QueueName);
            await sender.SendMessageAsync(message);
            #endregion
            #region Snippet:ReceiveClaimCheck
            ServiceBusReceiver receiver = client.CreateReceiver(scope.QueueName);
            ServiceBusReceivedMessage receivedMessage = await receiver.ReceiveMessageAsync();
            // The receiver resolves the blob reference back into the original payload.
            if (receivedMessage.ApplicationProperties.TryGetValue("blob-name", out object blobNameReceived))
            {
#if SNIPPET
                var blobClient = new BlobClient("<storage connection string>", "claim-checks", (string)blobNameReceived);
#else
                var blobClient = new BlobClient(
                    TestEnvironment.StorageClaimCheckConnectionString,
                    "claim-checks",
                    (string)blobNameReceived);
#endif
                BlobDownloadResult downloadResult = await blobClient.DownloadContentAsync();
                BinaryData messageBody = downloadResult.Content;

                // Once we determine that we are done with the message, we complete it and delete the corresponding blob.
                await receiver.CompleteMessageAsync(receivedMessage);
                await blobClient.DeleteAsync();
#if !SNIPPET
                Assert.AreEqual(body, messageBody.ToArray());
#endif
            }
            #endregion
        }
        finally
        {
            // Always clean up the container, even when the assertions above fail.
            await containerClient.DeleteAsync();
        }
    }
}
/// <summary>
/// Sample: sets up Azure Blob Storage source/target containers, uploads a document, generates
/// SAS URIs, and runs a Document Translation operation end to end. The #if SNIPPET sections
/// feed docs snippet extraction and must be preserved.
/// </summary>
public async Task StartTranslationWithAzureBlob()
{
    /**
     * FILE: SampleTranslationWithAzureBlob.cs
     * DESCRIPTION:
     *   This sample demonstrates how to use Azure Blob Storage to set up the necessary resources to create a
     *   translation operation. Run the sample to create containers, upload documents, and generate SAS tokens
     *   for the source/target containers. Once the operation is completed, use the storage library to download
     *   your documents locally.
     *
     * PREREQUISITE:
     *   This sample requires you install Azure.Storage.Blobs nuget package:
     *   https://www.nuget.org/packages/Azure.Storage.Blobs
     *
     * USAGE:
     *   Set the environment variables with your own values before running the sample:
     *   1) DOCUMENT_TRANSLATION_ENDPOINT - the endpoint to your Document Translation resource.
     *   2) DOCUMENT_TRANSLATION_API_KEY - your Document Translation API key.
     *   3) DOCUMENT_TRANSLATION_CONNECTION_STRING - the connection string to your Storage account
     *   4) AZURE_DOCUMENT_PATH - (optional) the path and file extension of your document in this directory,
     *      e.g. "path/mydocument.txt"
     *   Optionally, you can also set the following variables in code:
     *   5) sourceContainerName - the name of your source container
     *   6) targetContainerName - the name of your target container
     **/
#if SNIPPET
    string endpoint = "<Document Translator Resource Endpoint>";
    string apiKey = "<Document Translator Resource API Key>";
#else
    string endpoint = TestEnvironment.Endpoint;
    string apiKey = TestEnvironment.ApiKey;
#endif
    var client = new DocumentTranslationClient(new Uri(endpoint), new AzureKeyCredential(apiKey));

    var storageConnectionString = Environment.GetEnvironmentVariable("DOCUMENT_TRANSLATION_CONNECTION_STRING");
#if SNIPPET
    string sourceContainerName = "<Source Container Name>";
    string targetContainerName = "<Target Container Name>";
#else
    string sourceContainerName = GenerateRandomName("source");
    string targetContainerName = GenerateRandomName("target");
#endif
    string documentPath = Environment.GetEnvironmentVariable("AZURE_DOCUMENT_PATH");

    // Create source and target storage containers.
    // NOTE(review): sourceContainerName/targetContainerName are always assigned above, so the
    // "?? ..." fallbacks can never take effect — confirm whether a null path was intended.
    BlobServiceClient blobServiceClient = new BlobServiceClient(storageConnectionString);
    BlobContainerClient sourceContainerClient = await blobServiceClient.CreateBlobContainerAsync(sourceContainerName ?? "translation-source-container", PublicAccessType.BlobContainer).ConfigureAwait(false);
    BlobContainerClient targetContainerClient = await blobServiceClient.CreateBlobContainerAsync(targetContainerName ?? "translation-target-container", PublicAccessType.BlobContainer).ConfigureAwait(false);

    // Upload blob (file) to the source container: either the file at AZURE_DOCUMENT_PATH,
    // or a small built-in example document when no path is configured.
    BlobClient srcBlobClient = sourceContainerClient.GetBlobClient(!string.IsNullOrWhiteSpace(documentPath) ? Path.GetFileName(documentPath) : "example_source_document.txt");
    if (!string.IsNullOrWhiteSpace(documentPath))
    {
        using (FileStream uploadFileStream = File.OpenRead(documentPath))
        {
            await srcBlobClient.UploadAsync(uploadFileStream, true).ConfigureAwait(false);
        }
    }
    else
    {
        await srcBlobClient.UploadAsync(new MemoryStream(Encoding.UTF8.GetBytes("Hello.\nThis is a testing text.")), true).ConfigureAwait(false);
    }
    Console.WriteLine($"Uploaded document {srcBlobClient.Uri} to source storage container");

    // Generate SAS tokens: the service needs read/list on the source and list/write/delete on
    // the target. Both expire after 30 minutes.
    Uri srcSasUri = sourceContainerClient.GenerateSasUri(BlobContainerSasPermissions.List | BlobContainerSasPermissions.Read, DateTime.UtcNow.AddMinutes(30));
    Uri tgtSasUri = targetContainerClient.GenerateSasUri(BlobContainerSasPermissions.List | BlobContainerSasPermissions.Write | BlobContainerSasPermissions.Delete, DateTime.UtcNow.AddMinutes(30));

    // Submit the translation operation (target language "es") and block until it finishes.
    var operationRequest = new DocumentTranslationInput(srcSasUri, tgtSasUri, "es");
    DocumentTranslationOperation operationResult = await client.StartTranslationAsync(operationRequest);
    await operationResult.WaitForCompletionAsync();

    Console.WriteLine($"Operation status: {operationResult.Status}");
    Console.WriteLine($"Operation created on: {operationResult.CreatedOn}");
    Console.WriteLine($"Operation last updated on: {operationResult.LastModified}");
    Console.WriteLine($"Total number of translations on documents: {operationResult.DocumentsTotal}");
    Console.WriteLine("\nOf total documents...");
    Console.WriteLine($"{operationResult.DocumentsFailed} failed");
    Console.WriteLine($"{operationResult.DocumentsSucceeded} succeeded");

    // Report the per-document outcome for every document in the operation.
    await foreach (DocumentStatusResult document in operationResult.GetDocumentStatusesAsync())
    {
        if (document.Status == DocumentTranslationStatus.Succeeded)
        {
            Console.WriteLine($"Document at {document.SourceDocumentUri} was translated to {document.TranslatedToLanguageCode} language.You can find translated document at {document.TranslatedDocumentUri}");
        }
        else
        {
            Console.WriteLine($"Document ID: {document.Id}, Error Code: {document.Error.Code}, Message: {document.Error.Message}");
        }
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="AzureBlobStorageImageResolver"/> class.
/// </summary>
/// <param name="blob">The Azure blob client this resolver reads from.</param>
public AzureBlobStorageImageResolver(BlobClient blob)
{
    this.blob = blob;
}
/// <summary>
/// Initializes a new instance of the <see cref="DefaultBlobReaderFtpResponseAction"/> class,
/// delegating all behavior to the base response action.
/// </summary>
/// <param name="blobClient">Blob client passed through to the base class — presumably used there to read blob content; confirm in the base type.</param>
/// <param name="mapper">Optional mapper passed through to the base class; may be null.</param>
public DefaultBlobReaderFtpResponseAction(BlobClient blobClient, IMapper mapper = null) : base(blobClient, mapper) { }
/// <summary>
/// Initializes a new instance of the <see cref="AzureBlobFileHandle"/> class wrapping the given blob client.
/// </summary>
/// <param name="client">The blob client exposed through <c>Client</c>.</param>
public AzureBlobFileHandle(BlobClient client) => Client = client;
/// <summary>
/// Builds a <see cref="ChangeFeed"/> positioned at the first segment to read, starting either
/// from a serialized continuation cursor or from the supplied start/end times.
/// </summary>
/// <param name="startTime">Lower time bound; ignored when <paramref name="continuation"/> is supplied.</param>
/// <param name="endTime">Upper time bound; ignored when <paramref name="continuation"/> is supplied.</param>
/// <param name="continuation">Serialized <c>ChangeFeedCursor</c> from a previous iteration, or null.</param>
/// <param name="async">Selects between async and sync storage calls throughout this method.</param>
/// <param name="cancellationToken">Token used to cancel the storage operations.</param>
public async Task<ChangeFeed> BuildChangeFeed(
    DateTimeOffset? startTime,
    DateTimeOffset? endTime,
    string continuation,
    bool async,
    CancellationToken cancellationToken)
{
    DateTimeOffset lastConsumable;
    Queue<string> years = new Queue<string>();
    Queue<string> segments = new Queue<string>();
    ChangeFeedCursor cursor = null;

    // Create cursor: a continuation token overrides any caller-supplied time window.
    if (continuation != null)
    {
        cursor = JsonSerializer.Deserialize<ChangeFeedCursor>(continuation);
        ValidateCursor(_containerClient, cursor);
        startTime = BlobChangeFeedExtensions.ToDateTimeOffset(cursor.CurrentSegmentCursor.SegmentPath).Value;
        endTime = cursor.EndTime;
    }
    // Round start and end time if we are not using the cursor.
    else
    {
        startTime = startTime.RoundDownToNearestHour();
        endTime = endTime.RoundUpToNearestHour();
    }

    // Check if Change Feed has been enabled for this account (the change feed container only
    // exists once the feature is on).
    bool changeFeedContainerExists;

    if (async)
    {
        changeFeedContainerExists = await _containerClient.ExistsAsync(cancellationToken: cancellationToken).ConfigureAwait(false);
    }
    else
    {
        changeFeedContainerExists = _containerClient.Exists(cancellationToken: cancellationToken);
    }

    if (!changeFeedContainerExists)
    {
        throw new ArgumentException("Change Feed hasn't been enabled on this account, or is currently being enabled.");
    }

    // Get last consumable: the meta/segments blob records how far the feed can safely be read.
    BlobClient blobClient = _containerClient.GetBlobClient(Constants.ChangeFeed.MetaSegmentsPath);
    BlobDownloadStreamingResult blobDownloadInfo;
    if (async)
    {
        blobDownloadInfo = await blobClient.DownloadStreamingAsync(cancellationToken: cancellationToken).ConfigureAwait(false);
    }
    else
    {
        blobDownloadInfo = blobClient.DownloadStreaming(cancellationToken: cancellationToken);
    }

    JsonDocument jsonMetaSegment;
    if (async)
    {
        jsonMetaSegment = await JsonDocument.ParseAsync(
            blobDownloadInfo.Content,
            cancellationToken: cancellationToken)
            .ConfigureAwait(false);
    }
    else
    {
        jsonMetaSegment = JsonDocument.Parse(blobDownloadInfo.Content);
    }

    lastConsumable = jsonMetaSegment.RootElement.GetProperty("lastConsumable").GetDateTimeOffset();

    // Get year paths (the feed is laid out as one virtual directory per year).
    years = await GetYearPathsInternal(
        async,
        cancellationToken).ConfigureAwait(false);

    // Dequeue any years that occur before start time.
    if (startTime.HasValue)
    {
        while (years.Count > 0 && BlobChangeFeedExtensions.ToDateTimeOffset(years.Peek()) < startTime.RoundDownToNearestYear())
        {
            years.Dequeue();
        }
    }

    // There are no years.
    if (years.Count == 0)
    {
        return (ChangeFeed.Empty());
    }

    // Walk forward through the years until one yields segments inside the requested window,
    // capped by the last consumable timestamp.
    while (segments.Count == 0 && years.Count > 0)
    {
        // Get Segments for year
        segments = await BlobChangeFeedExtensions.GetSegmentsInYearInternal(
            containerClient: _containerClient,
            yearPath: years.Dequeue(),
            startTime: startTime,
            endTime: BlobChangeFeedExtensions.MinDateTime(lastConsumable, endTime),
            async: async,
            cancellationToken: cancellationToken)
            .ConfigureAwait(false);
    }

    // We were on the last year, and there were no more segments.
    if (segments.Count == 0)
    {
        return (ChangeFeed.Empty());
    }

    // The first segment becomes the current one; the cursor (if any) positions us within it.
    Segment currentSegment = await _segmentFactory.BuildSegment(
        async,
        segments.Dequeue(),
        cursor?.CurrentSegmentCursor)
        .ConfigureAwait(false);

    return (new ChangeFeed(
        _containerClient,
        _segmentFactory,
        years,
        segments,
        currentSegment,
        lastConsumable,
        startTime,
        endTime));
}
/// <summary>
/// Deletes the named blob from the configured container if it exists.
/// Best-effort: <c>DeleteIfExistsAsync</c> does not throw when the blob is absent.
/// </summary>
/// <param name="blobName">Name of the blob to delete.</param>
private static async Task DeleteBlobAsync(string blobName)
{
    var blob = new BlobClient(_connectionString, _blobContainerName, blobName);
    await blob.DeleteIfExistsAsync();
}
/// <summary>
/// Exercises a blob SAS by attempting create, write (metadata), read, and delete operations,
/// reporting on the console which of them the SAS permits.
/// </summary>
/// <param name="sasUri">A URI with a blob SAS appended; it supplies all authentication.</param>
/// <param name="blobContent">Content to write to the blob.</param>
static void TestBlobSAS(Uri sasUri, string blobContent)
{
    // No account credentials needed — the SAS on the URI authorizes each operation.
    var blobClient = new BlobClient(sasUri);

    // Create: upload the blob (created if absent, overwritten if present).
    try
    {
        blobClient.Upload(BinaryData.FromString(blobContent));
        Console.WriteLine("Create operation succeeded for SAS " + sasUri);
        Console.WriteLine();
    }
    catch (RequestFailedException e)
    {
        Console.WriteLine("Create operation failed for SAS " + sasUri);
        Console.WriteLine("Additional error information: " + e.Message);
        Console.WriteLine();
    }

    // Write: attach a metadata entry to the blob.
    try
    {
        IDictionary<string, string> metadata = new Dictionary<string, string>
        {
            { "name", "value" }
        };
        blobClient.SetMetadata(metadata);
        Console.WriteLine("Write operation succeeded for SAS " + sasUri);
        Console.WriteLine();
    }
    catch (RequestFailedException e)
    {
        Console.WriteLine("Write operation failed for SAS " + sasUri);
        Console.WriteLine("Additional error information: " + e.Message);
        Console.WriteLine();
    }

    // Read: download and print the blob's contents.
    try
    {
        BlobDownloadResult downloadResult = blobClient.DownloadContent();
        Console.WriteLine(downloadResult.Content.ToString());
        Console.WriteLine();
        Console.WriteLine("Read operation succeeded for SAS " + sasUri);
        Console.WriteLine();
    }
    catch (RequestFailedException e)
    {
        Console.WriteLine("Read operation failed for SAS " + sasUri);
        Console.WriteLine("Additional error information: " + e.Message);
        Console.WriteLine();
    }

    // Delete: remove the blob.
    try
    {
        blobClient.Delete();
        Console.WriteLine("Delete operation succeeded for SAS " + sasUri);
        Console.WriteLine();
    }
    catch (RequestFailedException e)
    {
        Console.WriteLine("Delete operation failed for SAS " + sasUri);
        Console.WriteLine("Additional error information: " + e.Message);
        Console.WriteLine();
    }
}
/// <summary>
/// Exercises a container SAS by attempting write, list, read, and delete operations,
/// reporting on the console which of them the SAS permits.
/// </summary>
/// <param name="sasUri">A URI with a container SAS appended; it supplies all authentication.</param>
/// <param name="blobName">Name of the blob to create inside the container.</param>
/// <param name="blobContent">Content to write to the blob.</param>
static void TestContainerSAS(Uri sasUri, string blobName, string blobContent)
{
    // Storage account credentials are not required here; the SAS on the URI provides the
    // necessary authentication information for every call below.
    var containerClient = new BlobContainerClient(sasUri);
    BlobClient blobClient = containerClient.GetBlobClient(blobName);

    // Write: upload a new blob into the container.
    try
    {
        blobClient.Upload(BinaryData.FromString(blobContent));
        Console.WriteLine("Write operation succeeded for SAS " + sasUri);
        Console.WriteLine();
    }
    catch (RequestFailedException e)
    {
        Console.WriteLine("Write operation failed for SAS " + sasUri);
        Console.WriteLine("Additional error information: " + e.Message);
        Console.WriteLine();
    }

    // List: enumerate the blobs in the container.
    try
    {
        foreach (BlobItem item in containerClient.GetBlobs())
        {
            Console.WriteLine(item.Name);
        }
        Console.WriteLine("List operation succeeded for SAS " + sasUri);
        Console.WriteLine();
    }
    catch (RequestFailedException e)
    {
        Console.WriteLine("List operation failed for SAS " + sasUri);
        Console.WriteLine("Additional error information: " + e.Message);
        Console.WriteLine();
    }

    // Read: download the blob written above and report its length.
    try
    {
        BlobDownloadInfo downloadInfo = blobClient.Download();
        Console.WriteLine(downloadInfo.ContentLength);
        Console.WriteLine();
        Console.WriteLine("Read operation succeeded for SAS " + sasUri);
        Console.WriteLine();
    }
    catch (RequestFailedException e)
    {
        Console.WriteLine("Read operation failed for SAS " + sasUri);
        Console.WriteLine("Additional error information: " + e.Message);
        Console.WriteLine();
    }

    Console.WriteLine();

    // Delete: remove the blob written above.
    try
    {
        blobClient.Delete();
        Console.WriteLine("Delete operation succeeded for SAS " + sasUri);
        Console.WriteLine();
    }
    catch (RequestFailedException e)
    {
        Console.WriteLine("Delete operation failed for SAS " + sasUri);
        Console.WriteLine("Additional error information: " + e.Message);
        Console.WriteLine();
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="AzureStoredBlob"/> class.
/// </summary>
/// <param name="storage">Owning storage abstraction; must not be null.</param>
/// <param name="blob">Underlying blob client, forwarded to the base class.</param>
/// <param name="properties">Materialized blob properties; must not be null.</param>
public AzureStoredBlob(AzureBlobStorage storage, BlobClient blob, BlobProperties properties) : base(blob)
{
    if (storage == null)
    {
        throw new ArgumentNullException(nameof(storage));
    }

    if (properties == null)
    {
        throw new ArgumentNullException(nameof(properties));
    }

    Storage = storage;
    Properties = properties;
}
/// <summary>
/// Initializes a new instance of the <see cref="BlobTriggerBinding"/> class, validating every
/// required dependency and capturing it in a field. Also resolves the data account's blob
/// client up front so the account name, converter and binding-data contract are ready for use.
/// </summary>
public BlobTriggerBinding(ParameterInfo parameter,
    IArgumentBinding<IStorageBlob> argumentBinding,
    IStorageAccount hostAccount,
    IStorageAccount dataAccount,
    IBlobPathSource path,
    IHostIdProvider hostIdProvider,
    IQueueConfiguration queueConfiguration,
    JobHostBlobsConfiguration blobsConfiguration,
    IWebJobsExceptionHandler exceptionHandler,
    IContextSetter<IBlobWrittenWatcher> blobWrittenWatcherSetter,
    IContextSetter<IMessageEnqueuedWatcher> messageEnqueuedWatcherSetter,
    ISharedContextProvider sharedContextProvider,
    SingletonManager singletonManager,
    TraceWriter trace,
    ILoggerFactory loggerFactory)
{
    // Guard clauses first: nothing is assigned until every required argument is validated.
    // NOTE: loggerFactory is intentionally not checked, matching the original contract.
    if (parameter == null)
    {
        throw new ArgumentNullException(nameof(parameter));
    }

    if (argumentBinding == null)
    {
        throw new ArgumentNullException(nameof(argumentBinding));
    }

    if (hostAccount == null)
    {
        throw new ArgumentNullException(nameof(hostAccount));
    }

    if (dataAccount == null)
    {
        throw new ArgumentNullException(nameof(dataAccount));
    }

    if (path == null)
    {
        throw new ArgumentNullException(nameof(path));
    }

    if (hostIdProvider == null)
    {
        throw new ArgumentNullException(nameof(hostIdProvider));
    }

    if (queueConfiguration == null)
    {
        throw new ArgumentNullException(nameof(queueConfiguration));
    }

    if (blobsConfiguration == null)
    {
        throw new ArgumentNullException(nameof(blobsConfiguration));
    }

    if (exceptionHandler == null)
    {
        throw new ArgumentNullException(nameof(exceptionHandler));
    }

    if (blobWrittenWatcherSetter == null)
    {
        throw new ArgumentNullException(nameof(blobWrittenWatcherSetter));
    }

    if (messageEnqueuedWatcherSetter == null)
    {
        throw new ArgumentNullException(nameof(messageEnqueuedWatcherSetter));
    }

    if (sharedContextProvider == null)
    {
        throw new ArgumentNullException(nameof(sharedContextProvider));
    }

    if (singletonManager == null)
    {
        throw new ArgumentNullException(nameof(singletonManager));
    }

    if (trace == null)
    {
        throw new ArgumentNullException(nameof(trace));
    }

    _parameter = parameter;
    _argumentBinding = argumentBinding;
    _hostAccount = hostAccount;
    _dataAccount = dataAccount;

    // Create the blob client for the data account; the bound parameter drives client selection.
    StorageClientFactoryContext context = new StorageClientFactoryContext
    {
        Parameter = parameter
    };
    _blobClient = dataAccount.CreateBlobClient(context);
    _accountName = BlobClient.GetAccountName(_blobClient);

    _path = path;
    _hostIdProvider = hostIdProvider;
    _queueConfiguration = queueConfiguration;
    _blobsConfiguration = blobsConfiguration;
    _exceptionHandler = exceptionHandler;
    _blobWrittenWatcherSetter = blobWrittenWatcherSetter;
    _messageEnqueuedWatcherSetter = messageEnqueuedWatcherSetter;
    _sharedContextProvider = sharedContextProvider;
    _singletonManager = singletonManager;
    _trace = trace;
    _loggerFactory = loggerFactory;

    // Derived members built from the validated inputs above.
    _converter = CreateConverter(_blobClient);
    _bindingDataContract = CreateBindingDataContract(path);
}
/// <summary> /// Test some of the file storage operations. /// </summary> public async Task RunFileStorageOperationsAsync() { // These are used in the finally block to clean up the objects created during the demo. ShareClient shareClient = null; ShareFileClient shareFileClient = null; ShareDirectoryClient fileDirectory = null; BlobClient targetBlob = null; BlobContainerClient blobContainer = null; string destFile = null; string downloadFolder = null; // Name to be used for the file when downloading it so you can inspect it locally string downloadFile = null; try { //***** Setup *****// Console.WriteLine("Getting reference to the storage account."); // How to create a storage connection string - http://msdn.microsoft.com/en-us/library/azure/ee758697.aspx string storageConnectionString = ConfigurationManager.AppSettings.Get("StorageConnectionString"); string storageAccountName = ConfigurationManager.AppSettings.Get("StorageAccountName"); string storageAccountKey = ConfigurationManager.AppSettings.Get("StorageAccountKey"); Console.WriteLine("Instantiating file client."); // Create a share client for interacting with the file service. var shareServiceClient = new ShareServiceClient(storageConnectionString); // Create the share name -- use a guid in the name so it's unique. // This will also be used as the container name for blob storage when copying the file to blob storage. string shareName = "demotest-" + System.Guid.NewGuid().ToString(); // Name of folder to put the files in string sourceFolder = "testfolder"; // Name of file to upload and download string testFile = "HelloWorld.png"; // Folder where the HelloWorld.png file resides string localFolder = @".\"; // It won't let you download in the same folder as the exe file, // so use a temporary folder with the same name as the share. downloadFolder = Path.Combine(Path.GetTempPath(), shareName); //***** Create a file share *****// // Create the share if it doesn't already exist. 
Console.WriteLine("Creating share with name {0}", shareName); shareClient = shareServiceClient.GetShareClient(shareName); try { await shareClient.CreateIfNotExistsAsync(); Console.WriteLine(" Share created successfully."); } catch (RequestFailedException exRequest) { Common.WriteException(exRequest); Console.WriteLine("Please make sure your storage account has storage file endpoint enabled and specified correctly in the app.config - then restart the sample."); Console.WriteLine("Press any key to exit"); Console.ReadLine(); throw; } catch (Exception ex) { Console.WriteLine(" Exception thrown creating share."); Common.WriteException(ex); throw; } //***** Create a directory on the file share *****// // Create a directory on the share. Console.WriteLine("Creating directory named {0}", sourceFolder); ShareDirectoryClient rootDirectory = shareClient.GetRootDirectoryClient(); // If the source folder is null, then use the root folder. // If the source folder is specified, then get a reference to it. if (string.IsNullOrWhiteSpace(sourceFolder)) { // There is no folder specified, so return a reference to the root directory. fileDirectory = rootDirectory; Console.WriteLine(" Using root directory."); } else { // There was a folder specified, so return a reference to that folder. fileDirectory = rootDirectory.GetSubdirectoryClient(sourceFolder); await fileDirectory.CreateIfNotExistsAsync(); Console.WriteLine(" Directory created successfully."); } //***** Upload a file to the file share *****// // Get a file client. shareFileClient = fileDirectory.GetFileClient(testFile); // Upload a file to the share. Console.WriteLine("Uploading file {0} to share", testFile); // Set up the name and path of the local file. string sourceFile = Path.Combine(localFolder, testFile); if (File.Exists(sourceFile)) { using (FileStream stream = File.OpenRead(sourceFile)) { // Upload from the local file to the file share in azure. 
await shareFileClient.CreateAsync(stream.Length); await shareFileClient.UploadAsync(stream); } Console.WriteLine(" Successfully uploaded file to share."); } else { Console.WriteLine("File not found, so not uploaded."); } //***** Get list of all files/directories on the file share*****// // List all files/directories under the root directory. Console.WriteLine("Getting list of all files/directories under the root directory of the share."); var fileList = rootDirectory.GetFilesAndDirectoriesAsync(); // Print all files/directories listed above. await foreach (ShareFileItem listItem in fileList) { // listItem type will be ShareClient or ShareDirectoryClient. Console.WriteLine(" - {0} (type: {1})", listItem.Name, listItem.GetType()); } Console.WriteLine("Getting list of all files/directories in the file directory on the share."); // Now get the list of all files/directories in your directory. // Ordinarily, you'd write something recursive to do this for all directories and subdirectories. fileList = fileDirectory.GetFilesAndDirectoriesAsync(); // Print all files/directories in the folder. await foreach (ShareFileItem listItem in fileList) { // listItem type will be a file or directory Console.WriteLine(" - {0} (IsDirectory: {1})", listItem.Name, listItem.IsDirectory); } //***** Download a file from the file share *****// // Download the file to the downloadFolder in the temp directory. // Check and if the directory doesn't exist (which it shouldn't), create it. Console.WriteLine("Downloading file from share to local temp folder {0}.", downloadFolder); if (!Directory.Exists(downloadFolder)) { Directory.CreateDirectory(downloadFolder); } // Download the file. 
ShareFileDownloadInfo download = await shareFileClient.DownloadAsync(); downloadFile = Path.Combine(downloadFolder, testFile); using (FileStream stream = File.OpenWrite(downloadFile)) { await download.Content.CopyToAsync(stream); } Console.WriteLine(" Successfully downloaded file from share to local temp folder."); //***** Copy a file from the file share to blob storage, then abort the copy *****// // Copies can sometimes complete before there's a chance to abort. // If that happens with the file you're testing with, try copying the file // to a storage account in a different region. If it still finishes too fast, // try using a bigger file and copying it to a different region. That will almost always // take long enough to give you time to abort the copy. // If you want to change the file you're testing the Copy with without changing the value for the // rest of the sample code, upload the file to the share, then assign the name of the file // to the testFile variable right here before calling GetFileClient. // Then it will use the new file for the copy and abort but the rest of the code // will still use the original file. ShareFileClient shareFileCopy = fileDirectory.GetFileClient(testFile); // Upload a file to the share. Console.WriteLine("Uploading file {0} to share", testFile); // Set up the name and path of the local file. string sourceFileCopy = Path.Combine(localFolder, testFile); using (FileStream stream = File.OpenRead(sourceFile)) { // Upload from the local file to the file share in azure. await shareFileCopy.CreateAsync(stream.Length); await shareFileCopy.UploadAsync(stream); } Console.WriteLine(" Successfully uploaded file to share."); // Copy the file to blob storage. Console.WriteLine("Copying file to blob storage. Container name = {0}", shareName); // First get a blob service client. var blobServiceClient = new BlobServiceClient(storageConnectionString); // Get a blob container client and create it if it doesn't already exist. 
blobContainer = blobServiceClient.GetBlobContainerClient(shareName); await blobContainer.CreateIfNotExistsAsync(); // Get a blob client to the target blob. targetBlob = blobContainer.GetBlobClient(testFile); string copyId = string.Empty; // Get a share file client to be copied. shareFileClient = fileDirectory.GetFileClient(testFile); // Create a SAS for the file that's valid for 24 hours. // Note that when you are copying a file to a blob, or a blob to a file, you must use a SAS // to authenticate access to the source object, even if you are copying within the same // storage account. var sas = new AccountSasBuilder { // Allow access to Files Services = AccountSasServices.Files, // Allow access to the service level APIs ResourceTypes = AccountSasResourceTypes.All, // Access expires in 1 day! ExpiresOn = DateTime.UtcNow.AddDays(1) }; sas.SetPermissions(AccountSasPermissions.Read); var credential = new StorageSharedKeyCredential(storageAccountName, storageAccountKey); // Build a SAS URI var sasUri = new UriBuilder(shareFileClient.Uri) { Query = sas.ToSasQueryParameters(credential).ToString() }; // Start the copy of the file to the blob. CopyFromUriOperation operation = await targetBlob.StartCopyFromUriAsync(sasUri.Uri); copyId = operation.Id; Console.WriteLine(" File copy started successfully. copyID = {0}", copyId); // Now clean up after yourself. Console.WriteLine("Deleting the files from the file share."); // Delete the files because cloudFile is a different file in the range sample. shareFileClient = fileDirectory.GetFileClient(testFile); await shareFileClient.DeleteIfExistsAsync(); Console.WriteLine("Setting up files to test WriteRange and ListRanges."); //***** Write 2 ranges to a file, then list the ranges *****// // This is the code for trying out writing data to a range in a file, // and then listing those ranges. // Get a reference to a file and write a range of data to it . // Then write another range to it. // Then list the ranges. 
// Start at the very beginning of the file. long startOffset = 0; // Set the destination file name -- this is the file on the file share that you're writing to. destFile = "rangeops.txt"; shareFileClient = fileDirectory.GetFileClient(destFile); // Create a string with 512 a's in it. This will be used to write the range. int testStreamLen = 512; string textToStream = string.Empty; textToStream = textToStream.PadRight(testStreamLen, 'a'); using (MemoryStream ms = new MemoryStream(Encoding.Default.GetBytes(textToStream))) { // Max size of the output file; have to specify this when you create the file // I picked this number arbitrarily. long maxFileSize = 65536; Console.WriteLine("Write first range."); // Set the stream back to the beginning, in case it's been read at all. ms.Position = 0; // If the file doesn't exist, create it. // The maximum file size is passed in. It has to be big enough to hold // all the data you're going to write, so don't set it to 256k and try to write two 256-k blocks to it. if (!shareFileClient.Exists()) { Console.WriteLine("File doesn't exist, create empty file to write ranges to."); // Create a file with a maximum file size of 64k. await shareFileClient.CreateAsync(maxFileSize); Console.WriteLine(" Empty file created successfully."); } // Write the stream to the file starting at startOffset for the length of the stream. Console.WriteLine("Writing range to file."); var range = new HttpRange(startOffset, textToStream.Length); await shareFileClient.UploadRangeAsync(range, ms); // Download the file to your temp directory so you can inspect it locally. 
downloadFile = Path.Combine(downloadFolder, "__testrange.txt"); Console.WriteLine("Downloading file to examine."); download = await shareFileClient.DownloadAsync(); using (FileStream stream = File.OpenWrite(downloadFile)) { await download.Content.CopyToAsync(stream); } Console.WriteLine(" Successfully downloaded file with ranges in it to examine."); } // Now add the second range, but don't make it adjacent to the first one, or it will show only // one range, with the two combined. Put it like 1000 spaces away. When you get the range back, it will // start at the position at the 512-multiple border prior or equal to the beginning of the data written, // and it will end at the 512-multliple border after the actual end of the data. //For example, if you write to 2000-3000, the range will be the 512-multiple prior to 2000, which is // position 1536, or offset 1535 (because it's 0-based). // And the right offset of the range will be the 512-multiple after 3000, which is position 3072, // or offset 3071 (because it's 0-based). Console.WriteLine("Getting ready to write second range to file."); startOffset += testStreamLen + 1000; //randomly selected number // Create a string with 512 b's in it. This will be used to write the range. textToStream = string.Empty; textToStream = textToStream.PadRight(testStreamLen, 'b'); using (MemoryStream ms = new MemoryStream(Encoding.Default.GetBytes(textToStream))) { ms.Position = 0; // Write the stream to the file starting at startOffset for the length of the stream. Console.WriteLine("Write second range to file."); var range = new HttpRange(startOffset, textToStream.Length); await shareFileClient.UploadRangeAsync(range, ms); Console.WriteLine(" Successful writing second range to file."); // Download the file to your temp directory so you can examine it. 
downloadFile = Path.Combine(downloadFolder, "__testrange2.txt"); Console.WriteLine("Downloading file with two ranges in it to examine."); download = await shareFileClient.DownloadAsync(); using (FileStream stream = File.OpenWrite(downloadFile)) { await download.Content.CopyToAsync(stream); } Console.WriteLine(" Successfully downloaded file to examine."); } // Query and view the list of ranges. Console.WriteLine("Call to get the list of ranges."); var listOfRanges = await shareFileClient.GetRangeListAsync(new HttpRange()); Console.WriteLine(" Successfully retrieved list of ranges."); foreach (HttpRange range in listOfRanges.Value.Ranges) { Console.WriteLine(" --> filerange startOffset = {0}, endOffset = {1}", range.Offset, range.Offset + range.Length); } //***** Clean up *****// } catch (Exception ex) { Console.WriteLine(" Exception thrown. Message = {0}{1} Strack Trace = {2}", ex.Message, Environment.NewLine, ex.StackTrace); } finally { //Clean up after you're done. Console.WriteLine("Removing all files, folders, shares, blobs, and containers created in this demo."); // ****NOTE: You can just delete the file share, and everything will be removed. // This samples deletes everything off of the file share first for the purpose of // showing you how to delete specific files and directories. // Delete the file with the ranges in it. destFile = "rangeops.txt"; shareFileClient = fileDirectory.GetFileClient(destFile); await shareFileClient.DeleteIfExistsAsync(); Console.WriteLine("Deleting the directory on the file share."); // Delete the directory. bool success = await fileDirectory.DeleteIfExistsAsync(); if (success) { Console.WriteLine(" Directory on the file share deleted successfully."); } else { Console.WriteLine(" Directory on the file share NOT deleted successfully; may not exist."); } Console.WriteLine("Deleting the file share."); // Delete the share. 
await shareClient.DeleteAsync(); Console.WriteLine(" Deleted the file share successfully."); Console.WriteLine("Deleting the temporary download directory and the file in it."); // Delete the download folder and its contents. Directory.Delete(downloadFolder, true); Console.WriteLine(" Successfully deleted the temporary download directory."); Console.WriteLine("Deleting the container and blob used in the Copy/Abort test."); await targetBlob.DeleteIfExistsAsync(); await blobContainer.DeleteIfExistsAsync(); Console.WriteLine(" Successfully deleted the blob and its container."); } }
// Blob-trigger callback: publishes the bound client so the awaiting test can observe it.
public static void Call([BlobTrigger(BlobPath)] BlobClient blob) => TaskSource.TrySetResult(blob);
/// <summary>
/// Deletes the named image blob, including any snapshots, from the "images" container.
/// The connection string is read from the "ImageBlob" configuration entry.
/// </summary>
/// <param name="fileName">Name of the image blob to delete.</param>
public async Task DeleteBlobImage(string fileName)
{
    var imagesContainer = new BlobContainerClient(Configuration["ImageBlob"], "images");
    // DeleteIfExistsAsync is a no-op when the blob is already gone.
    await imagesContainer.GetBlobClient(fileName).DeleteIfExistsAsync(DeleteSnapshotsOption.IncludeSnapshots, null, default);
}
// Verifies the [Blob] binding produced a non-null client that points at the expected blob name.
public static void BindToBlobClient([Blob(BlobPath)] BlobClient blob)
{
    Assert.NotNull(blob);
    Assert.AreEqual(BlobName, blob.Name);
}
// Captures the bound blob client so the test harness can inspect it afterwards.
public void Run([Blob(BlobPath)] BlobClient blob) => Result = blob;
/// <summary>
/// Creates a new instance of the <see cref="AzureBlobXmlRepository"/>.
/// </summary>
/// <param name="blobClient">A <see cref="BlobClient"/> that is connected to the blob we are reading from and writing to.</param>
/// <exception cref="ArgumentNullException"><paramref name="blobClient"/> is <c>null</c>.</exception>
public AzureBlobXmlRepository(BlobClient blobClient)
{
    // Fail fast on a null client instead of deferring to an NRE on first use.
    _blobClient = blobClient ?? throw new ArgumentNullException(nameof(blobClient));
    _random = new Random();
}
/// <summary>
/// Uploads a randomly generated payload of the configured size to the blob,
/// honoring the configured transfer options and the supplied cancellation token.
/// </summary>
public override async Task RunAsync(CancellationToken cancellationToken)
{
    using Stream payload = RandomStream.Create(Options.Size);
    await BlobClient.UploadAsync(payload, transferOptions: Options.StorageTransferOptions, cancellationToken: cancellationToken);
}
/// <summary>
/// Creates (and resets) the storage entities this fixture uses: input/output queues,
/// blob containers, a "test" table seeded with known entities, and a Service Bus queue.
/// Entity names are suffixed with <c>FixtureId</c> so fixtures don't collide.
/// </summary>
protected virtual void CreateTestStorageEntities()
{
    // Per-fixture input queue; cleared so runs start from a known-empty state.
    TestQueue = QueueClient.GetQueueReference(string.Format("test-input-{0}", FixtureId));
    TestQueue.CreateIfNotExists();
    TestQueue.Clear();

    // This queue name should really be suffixed by -fsharp, -csharp, -node etc.
    MobileTablesQueue = QueueClient.GetQueueReference("mobiletables-input");
    MobileTablesQueue.CreateIfNotExists(); // do not clear this queue since it is currently shared between fixtures

    TestInputContainer = BlobClient.GetContainerReference(string.Format("test-input-{0}", FixtureId));
    TestInputContainer.CreateIfNotExists();
    // Processing a large number of blobs on startup can take a while,
    // so let's start with an empty container.
    TestHelpers.ClearContainer(TestInputContainer);

    TestOutputContainer = BlobClient.GetContainerReference(string.Format("test-output-{0}", FixtureId));
    TestOutputContainer.CreateIfNotExists();
    TestHelpers.ClearContainer(TestOutputContainer);

    // Reset the "test" table: remove both partitions, then re-insert known rows.
    TestTable = TableClient.GetTableReference("test");
    TestTable.CreateIfNotExists();
    DeleteEntities(TestTable, "AAA");
    DeleteEntities(TestTable, "BBB");

    // Seed partition AAA (5 rows) in a single batch.
    var batch = new TableBatchOperation();
    batch.Insert(new TestEntity { PartitionKey = "AAA", RowKey = "001", Region = "West", Name = "Test Entity 1", Status = 0 });
    batch.Insert(new TestEntity { PartitionKey = "AAA", RowKey = "002", Region = "East", Name = "Test Entity 2", Status = 1 });
    batch.Insert(new TestEntity { PartitionKey = "AAA", RowKey = "003", Region = "West", Name = "Test Entity 3", Status = 1 });
    batch.Insert(new TestEntity { PartitionKey = "AAA", RowKey = "004", Region = "West", Name = "Test Entity 4", Status = 1 });
    batch.Insert(new TestEntity { PartitionKey = "AAA", RowKey = "005", Region = "East", Name = "Test Entity 5", Status = 0 });
    TestTable.ExecuteBatch(batch);

    // Seed partition BBB (3 rows) in a second batch (batches cannot span partitions).
    batch = new TableBatchOperation();
    batch.Insert(new TestEntity { PartitionKey = "BBB", RowKey = "001", Region = "South", Name = "Test Entity 1", Status = 0 });
    batch.Insert(new TestEntity { PartitionKey = "BBB", RowKey = "002", Region = "West", Name = "Test Entity 2", Status = 1 });
    batch.Insert(new TestEntity { PartitionKey = "BBB", RowKey = "003", Region = "West", Name = "Test Entity 3", Status = 0 });
    TestTable.ExecuteBatch(batch);

    // Recreate the Service Bus queue (delete + create) so it starts empty.
    string serviceBusQueueName = string.Format("test-input-{0}", FixtureId);
    string connectionString = AmbientConnectionStringProvider.Instance.GetConnectionString(ConnectionStringNames.ServiceBus);
    var namespaceManager = NamespaceManager.CreateFromConnectionString(connectionString);
    namespaceManager.DeleteQueue(serviceBusQueueName);
    namespaceManager.CreateQueue(serviceBusQueueName);
    ServiceBusQueueClient = Microsoft.ServiceBus.Messaging.QueueClient.CreateFromConnectionString(connectionString, serviceBusQueueName);
}
/// <summary>
/// Builds a blob-trigger binding: validates all required collaborators, derives the
/// blob client and account name from the storage account, and precomputes the
/// value converter and binding data contract for the given path.
/// </summary>
public BlobTriggerBinding(string parameterName, IArgumentBinding<IStorageBlob> argumentBinding, IStorageAccount account, IBlobPathSource path, IHostIdProvider hostIdProvider, IQueueConfiguration queueConfiguration, IBackgroundExceptionDispatcher backgroundExceptionDispatcher, IContextSetter<IBlobWrittenWatcher> blobWrittenWatcherSetter, IContextSetter<IMessageEnqueuedWatcher> messageEnqueuedWatcherSetter, ISharedContextProvider sharedContextProvider, TextWriter log)
{
    // Guard clauses: every collaborator (except parameterName, as before) is required.
    if (argumentBinding == null)
    {
        throw new ArgumentNullException(nameof(argumentBinding));
    }
    if (account == null)
    {
        throw new ArgumentNullException(nameof(account));
    }
    if (path == null)
    {
        throw new ArgumentNullException(nameof(path));
    }
    if (hostIdProvider == null)
    {
        throw new ArgumentNullException(nameof(hostIdProvider));
    }
    if (queueConfiguration == null)
    {
        throw new ArgumentNullException(nameof(queueConfiguration));
    }
    if (backgroundExceptionDispatcher == null)
    {
        throw new ArgumentNullException(nameof(backgroundExceptionDispatcher));
    }
    if (blobWrittenWatcherSetter == null)
    {
        throw new ArgumentNullException(nameof(blobWrittenWatcherSetter));
    }
    if (messageEnqueuedWatcherSetter == null)
    {
        throw new ArgumentNullException(nameof(messageEnqueuedWatcherSetter));
    }
    if (sharedContextProvider == null)
    {
        throw new ArgumentNullException(nameof(sharedContextProvider));
    }
    if (log == null)
    {
        throw new ArgumentNullException(nameof(log));
    }

    _parameterName = parameterName;
    _argumentBinding = argumentBinding;
    _account = account;
    // The client must be created before the account name can be resolved from it.
    _client = account.CreateBlobClient();
    _accountName = BlobClient.GetAccountName(_client);
    _path = path;
    _hostIdProvider = hostIdProvider;
    _queueConfiguration = queueConfiguration;
    _backgroundExceptionDispatcher = backgroundExceptionDispatcher;
    _blobWrittenWatcherSetter = blobWrittenWatcherSetter;
    _messageEnqueuedWatcherSetter = messageEnqueuedWatcherSetter;
    _sharedContextProvider = sharedContextProvider;
    _log = log;
    _converter = CreateConverter(_client);
    _bindingDataContract = CreateBindingDataContract(path);
}
// Wires up a blob client against the given storage account name and key.
public BlobTests(string account, string key)
{
    var storageAccount = new CloudStorageAccount(account, key);
    this.client = new BlobClient(storageAccount);
}
/// <summary>
/// Accepts a batch of uploaded media files, validates every file's name, extension and
/// size limit (5 MB images, 12 MB audio, 100 MB video) BEFORE uploading anything, then
/// uploads each file to the blob container named after <paramref name="group"/> and
/// records a <c>MediaModel</c> row per file.
/// </summary>
/// <param name="files">The uploaded files.</param>
/// <param name="group">Target media group; must be one of audio/video/profiles/campgrounds/campsites.</param>
/// <param name="groupidentifier">Identifier prefixed to each blob name and used in alt text.</param>
/// <returns>202 Accepted on success, 400 BadRequest on any validation failure.</returns>
public async Task <IActionResult> Post([FromForm] IFormFileCollection files, string group, string groupidentifier)
{
    // NOTE(review): these regexes are rebuilt on every request; consider hoisting them to
    // static readonly fields (interface of this action keeps them local for now).
    Regex FileNameRegex = new Regex(@"([a-zA-Z0-9\s_\.-:])+\.+.*$");
    Regex PictureRegexExtension = new Regex(@"^.*\.(jpg|JPG|gif|GIF|png|PNG|jpeg|JPEG)$");
    Regex AudioRegexExtension = new Regex(@"^.*\.(mp3|wav|WAV|MP3|flac|FLAC)$");
    Regex VideoRegexExtension = new Regex(@"^.*\.(mp4|MP4|MOV|mov|WMV|wmv|AVI|avi|WEBM|webm)$");
    Regex Extensions = new Regex(@"\.(mp3|wav|WAV|MP3|jpg|JPG|gif|GIF|png|PNG|flac|FLAC|jpeg|JPEG|mp4|MP4|MOV|mov|WMV|wmv|AVI|avi|WEBM|webm)$");

    if (!files.Any())
    {
        return(BadRequest("No files given"));
    }

    // Validation pass: reject the whole batch before any file is uploaded.
    foreach (var file in files)
    {
        if (!FileNameRegex.IsMatch(file.FileName))
        {
            return(BadRequest("Invalid file name"));
        }
        if ((!PictureRegexExtension.IsMatch(file.FileName)) && (!AudioRegexExtension.IsMatch(file.FileName)) && (!VideoRegexExtension.IsMatch(file.FileName)))
        {
            // NOTE(review): "extention" is a typo in a client-visible message; left as-is
            // in case callers match on the exact text.
            return(BadRequest("Invalid file extention"));
        }
        if (PictureRegexExtension.IsMatch(file.FileName))
        {
            if (file.Length > (5 * 1024 * 1024))
            {
                return(BadRequest("File too large (5mb Max)"));
            }
        }
        if (AudioRegexExtension.IsMatch(file.FileName))
        {
            if (file.Length > (12 * 1024 * 1024))
            {
                return(BadRequest("File too large (12mb Max)"));
            }
        }
        if (VideoRegexExtension.IsMatch(file.FileName))
        {
            if (file.Length > (100 * 1024 * 1024))
            {
                return(BadRequest("File too large (100mb Max)"));
            }
        }
    }

    BlobServiceClient blobServiceClient = new BlobServiceClient(_configuration.GetConnectionString("storage"));

    // Upload pass: one blob + one MediaModel row per validated file.
    foreach (var file in files)
    {
        Match extensionmatch = Extensions.Match(file.FileName);
        string extension = extensionmatch.ToString();
        MediaModel model = new MediaModel();
        model.Group = group;
        model.GroupIdentifier = groupidentifier;
        switch (group)
        {
            case "audio":
            case "video":
            case "profiles":
            case "campgrounds":
            case "campsites":
            {
                _logger.LogDebug("uploading media");
                BlobContainerClient containerClient = blobServiceClient.GetBlobContainerClient(model.Group);
                // Guid in the blob name prevents collisions between uploads sharing an identifier.
                BlobClient blobClient = containerClient.GetBlobClient(model.GroupIdentifier + System.Guid.NewGuid().ToString() + extension);
                // FIX: dispose the form-file stream after upload; it was previously leaked.
                using (Stream uploadStream = file.OpenReadStream())
                {
                    await blobClient.UploadAsync(uploadStream);
                }
                _logger.LogDebug("uploaded media");
                model.Uri = blobClient.Uri.ToString();
                if (PictureRegexExtension.IsMatch(file.FileName))
                {
                    model.AltText = "Picture of " + model.GroupIdentifier;
                }
                else if (AudioRegexExtension.IsMatch(file.FileName))
                {
                    model.AltText = "Audio of " + model.GroupIdentifier;
                }
                else if (VideoRegexExtension.IsMatch(file.FileName))
                {
                    model.AltText = "Video of " + model.GroupIdentifier;
                }
                _logger.LogDebug("adding media model");
                await _unitOfWork.Media.InsertAsync(model);
                await _unitOfWork.CommitAsync();
                _logger.LogInformation($"added media model");
                break;
            }
            default:
            {
                return(BadRequest("Invalid group entered"));
            }
        }
    }
    return(Accepted());
}
#pragma warning disable CA1806 // Do not ignore method results
/// <summary>
/// Exercises every client accessor and constructor overload in the blob client
/// hierarchy. Results are deliberately discarded (hence the CA1806 suppression);
/// only the calls themselves are being measured/verified.
/// </summary>
public override void Run(CancellationToken cancellationToken)
{
    // traverse hierarchy down
    BlobServiceClient.GetBlobContainerClient(ContainerName);
    BlobContainerClient.GetBlobClient(BlobName);
    BlobContainerClient.GetBlobBaseClient(BlobName);
    BlobContainerClient.GetBlockBlobClient(BlobName);
    BlobContainerClient.GetPageBlobClient(BlobName);
    BlobContainerClient.GetAppendBlobClient(BlobName);

    // traverse hierarchy up
    BlobClient.GetParentBlobContainerClient();
    BlobContainerClient.GetParentBlobServiceClient();

    // BlobServiceClient ctors — connection string, URI, and each credential flavor.
    new BlobServiceClient(s_connectionString);
    new BlobServiceClient(BlobServiceClient.Uri);
    new BlobServiceClient(BlobServiceClient.Uri, s_azureSasCredential);
    new BlobServiceClient(BlobServiceClient.Uri, s_tokenCredential);
    new BlobServiceClient(BlobServiceClient.Uri, StorageSharedKeyCredential);

    // BlobContainerClient ctors
    new BlobContainerClient(s_connectionString, ContainerName);
    new BlobContainerClient(BlobContainerClient.Uri);
    new BlobContainerClient(BlobContainerClient.Uri, s_azureSasCredential);
    new BlobContainerClient(BlobContainerClient.Uri, s_tokenCredential);
    new BlobContainerClient(BlobContainerClient.Uri, StorageSharedKeyCredential);

    // BlobClient ctors
    new BlobClient(s_connectionString, ContainerName, BlobName);
    new BlobClient(BlobContainerClient.Uri);
    new BlobClient(BlobContainerClient.Uri, s_azureSasCredential);
    new BlobClient(BlobContainerClient.Uri, s_tokenCredential);
    new BlobClient(BlobContainerClient.Uri, StorageSharedKeyCredential);

    // BlobBaseClient ctors
    new BlobBaseClient(s_connectionString, ContainerName, BlobName);
    new BlobBaseClient(BlobContainerClient.Uri);
    new BlobBaseClient(BlobContainerClient.Uri, s_azureSasCredential);
    new BlobBaseClient(BlobContainerClient.Uri, s_tokenCredential);
    new BlobBaseClient(BlobContainerClient.Uri, StorageSharedKeyCredential);

    // AppendBlobClient ctors
    new AppendBlobClient(s_connectionString, ContainerName, BlobName);
    new AppendBlobClient(BlobContainerClient.Uri);
    new AppendBlobClient(BlobContainerClient.Uri, s_azureSasCredential);
    new AppendBlobClient(BlobContainerClient.Uri, s_tokenCredential);
    new AppendBlobClient(BlobContainerClient.Uri, StorageSharedKeyCredential);

    // BlockBlobClient ctors
    new BlockBlobClient(s_connectionString, ContainerName, BlobName);
    new BlockBlobClient(BlobContainerClient.Uri);
    new BlockBlobClient(BlobContainerClient.Uri, s_azureSasCredential);
    new BlockBlobClient(BlobContainerClient.Uri, s_tokenCredential);
    new BlockBlobClient(BlobContainerClient.Uri, StorageSharedKeyCredential);

    // PageBlobClient ctors
    new PageBlobClient(s_connectionString, ContainerName, BlobName);
    new PageBlobClient(BlobContainerClient.Uri);
    new PageBlobClient(BlobContainerClient.Uri, s_azureSasCredential);
    new PageBlobClient(BlobContainerClient.Uri, s_tokenCredential);
    new PageBlobClient(BlobContainerClient.Uri, StorageSharedKeyCredential);
}
/// <summary>
/// Attempts to create an empty blob. Returns true when the blob was created,
/// false when it already exists or is leased (HTTP 409/412). If the container
/// is missing (HTTP 404), creates the container and retries the upload once.
/// </summary>
/// <param name="blob">Client for the blob to create.</param>
/// <param name="cancellationToken">Token observed by both upload attempts.</param>
private static async Task <bool> TryCreateAsync(BlobClient blob, CancellationToken cancellationToken)
{
    // First attempt: upload an empty payload directly.
    try
    {
        var bytes = Encoding.UTF8.GetBytes(string.Empty);
        using (var stream = new MemoryStream(bytes))
        {
            await blob.UploadAsync(stream, cancellationToken).ConfigureAwait(false);
        }
        return(true);
    }
    catch (RequestFailedException exception)
    {
        switch (exception.Status)
        {
            case 404:
                // Container doesn't exist yet — fall through and create it below.
                break;

            case 409:
            case 412:
                // The blob already exists, or is leased by someone else
                return(false);

            default:
                throw;
        }
    }

    var container = blob.GetParentBlobContainerClient();
    try
    {
        await container.CreateIfNotExistsAsync(cancellationToken : cancellationToken).ConfigureAwait(false);
    }
    catch (RequestFailedException exc) when(exc.Status == 409 && exc.ErrorCode == BlobErrorCode.ContainerBeingDeleted)
    {
        // NOTE(review): this filter catches ContainerBeingDeleted only to rethrow —
        // effectively a no-op; confirm whether a retry/backoff was intended here.
        throw;
    }

    // Second attempt, now that the container exists.
    try
    {
        var bytes = Encoding.UTF8.GetBytes(string.Empty);
        using (var stream = new MemoryStream(bytes))
        {
            await blob.UploadAsync(stream, cancellationToken).ConfigureAwait(false);
        }
        return(true);
    }
    catch (RequestFailedException exception)
    {
        if (exception.Status == 409 || exception.Status == 412)
        {
            // The blob already exists, or is leased by someone else
            return(false);
        }
        throw;
    }
}
/// <summary>
/// Bulk-deletes up to <paramref name="deviceCount"/> devices from the IoT hub registry
/// by uploading a delete-request file to blob storage, running a registry import job
/// against it, polling the job to completion, and finally deleting the storage container.
/// A still-running job is cancelled in the finally block if this method exits early.
/// </summary>
private async Task CleanupDevices(int deviceCount)
{
    Console.WriteLine($"Using storage container {_blobContainerClient.Name} for importing device delete requests.");

    // Step 1: Collect the devices that need to be deleted.
    IReadOnlyList <ExportImportDevice> devicesToBeDeleted = await GetDeviceIdsToDeleteAsync(deviceCount);
    Console.WriteLine($"Discovered {devicesToBeDeleted.Count} devices for deletion.");

    string currentJobId = null;
    if (devicesToBeDeleted.Any())
    {
        try
        {
            // Step 2: Write the new import data back to the blob.
            using Stream devicesFile = ImportExportDevicesHelpers.BuildDevicesStream(devicesToBeDeleted);

            // Retrieve the SAS Uri that will be used to grant access to the storage containers.
            BlobClient blobClient = _blobContainerClient.GetBlobClient(ImportExportDevicesFileName);
            // NOTE(review): uploadResult is never read; the assignment could be dropped.
            var uploadResult = await blobClient.UploadAsync(devicesFile, overwrite : true);
            string storageAccountSasUri = GetStorageAccountSasUriForCleanupJob(_blobContainerClient).ToString();

            // Step 3: Call import using the same blob to delete all devices.
            JobProperties importDevicesToBeDeletedProperties = JobProperties
                .CreateForImportJob(
                    inputBlobContainerUri: storageAccountSasUri,
                    outputBlobContainerUri: storageAccountSasUri,
                    inputBlobName: ImportExportDevicesFileName,
                    storageAuthenticationType: StorageAuthenticationType.KeyBased);

            JobProperties importDevicesToBeDeletedJob = null;
            // Submit the import job, retrying while the hub's job quota is exhausted,
            // up to the overall s_maxJobDuration deadline.
            Stopwatch jobTimer = Stopwatch.StartNew();
            do
            {
                try
                {
                    importDevicesToBeDeletedJob = await _registryManager.ImportDevicesAsync(importDevicesToBeDeletedProperties);
                    currentJobId = importDevicesToBeDeletedJob.JobId;
                    break;
                }
                // Wait for pending jobs to finish.
                catch (JobQuotaExceededException)
                {
                    Console.WriteLine($"JobQuotaExceededException... waiting.");
                    await Task.Delay(s_waitDuration);
                }
            } while (jobTimer.Elapsed < s_maxJobDuration);

            // Wait until job is finished.
            jobTimer.Restart();
            while (importDevicesToBeDeletedJob != null && jobTimer.Elapsed < s_maxJobDuration)
            {
                importDevicesToBeDeletedJob = await _registryManager.GetJobAsync(importDevicesToBeDeletedJob.JobId);
                if (s_completedJobs.Contains(importDevicesToBeDeletedJob.Status))
                {
                    // Job has finished executing.
                    Console.WriteLine($"Job {importDevicesToBeDeletedJob.JobId} is {importDevicesToBeDeletedJob.Status}.");
                    // Clear the id so the finally block doesn't try to cancel a finished job.
                    currentJobId = null;
                    break;
                }

                Console.WriteLine($"Job {importDevicesToBeDeletedJob.JobId} is {importDevicesToBeDeletedJob.Status} after {jobTimer.Elapsed}.");
                await Task.Delay(s_waitDuration);
            }

            if (importDevicesToBeDeletedJob?.Status != JobStatus.Completed)
            {
                throw new Exception("Importing devices job failed; exiting.");
            }
        }
        finally
        {
            // Cancel a job that is still in flight (submission succeeded but completion wasn't observed).
            if (!String.IsNullOrWhiteSpace(currentJobId))
            {
                Console.WriteLine($"Cancelling job {currentJobId}");
                await _registryManager.CancelJobAsync(currentJobId);
            }
        }
    }

    // Step 4: Delete the storage container created.
    await _blobContainerClient.DeleteAsync();
    Console.WriteLine($"Storage container {_blobContainerClient.Name} deleted.");
}