/// <summary>
/// Verifies that UploadComputeNodeBatchServiceLogs uploads at least one log file
/// to the supplied storage container within the allowed window.
/// </summary>
public async Task ComputeNodeUploadLogs()
{
    Func<Task> test = async () =>
    {
        // Fix: await the client instead of blocking on .Result inside an async lambda
        // (deadlock risk on sync-context frameworks, thread-pool starvation elsewhere).
        using (BatchClient batchCli = await TestUtilities.OpenBatchClientFromEnvironmentAsync())
        {
            const string containerName = "computenodelogscontainer";

            // Generate a storage container URL
            StagingStorageAccount storageAccount = TestUtilities.GetStorageCredentialsFromEnvironment();
            BlobServiceClient blobClient = BlobUtilities.GetBlobServiceClient(storageAccount);
            BlobContainerClient containerClient = BlobUtilities.GetBlobContainerClient(containerName, blobClient, storageAccount);

            try
            {
                containerClient.CreateIfNotExists();
                string sasUri = BlobUtilities.GetWriteableSasUri(containerClient, storageAccount);

                var blobs = containerClient.GetAllBlobs();

                // Ensure that there are no items in the container to begin with
                Assert.Empty(blobs);

                var startTime = DateTime.UtcNow.Subtract(TimeSpan.FromMinutes(5));
                var node = batchCli.PoolOperations.ListComputeNodes(this.poolFixture.PoolId).First();
                var result = batchCli.PoolOperations.UploadComputeNodeBatchServiceLogs(
                    this.poolFixture.PoolId,
                    node.Id,
                    sasUri,
                    startTime);

                Assert.NotEqual(0, result.NumberOfFilesUploaded);
                Assert.NotEmpty(result.VirtualDirectoryName);

                // Allow up to 2m for files to get uploaded
                DateTime timeoutAt = DateTime.UtcNow.AddMinutes(2);
                while (DateTime.UtcNow < timeoutAt)
                {
                    blobs = containerClient.GetAllBlobs();
                    if (blobs.Any())
                    {
                        break;
                    }

                    // Fix: back off between polls instead of spinning in a tight loop
                    // that hammers the storage API for up to 2 minutes.
                    await Task.Delay(TimeSpan.FromSeconds(5));
                }

                Assert.NotEmpty(blobs);
            }
            finally
            {
                await containerClient.DeleteIfExistsAsync();
            }
        }
    };

    await SynchronizationContextHelper.RunTestAsync(test, TestTimeout);
}
/// <summary>
/// Create a container if it doesn't exist, setting permission with policy, and return the associated SAS signature.
/// </summary>
/// <param name="account">Storage account name.</param>
/// <param name="key">Storage account key.</param>
/// <param name="blobUri">Blob endpoint URI.</param>
/// <param name="containerName">Name of the container to be created.</param>
/// <param name="policy">Name for the stored access policy.</param>
/// <param name="start">Start time of the policy.</param>
/// <param name="end">Expire time of the policy.</param>
/// <param name="permissions">Blob access permissions.</param>
/// <returns>The SAS for the container, in full URI format.</returns>
private static async Task<string> CreateContainerWithPolicySASIfNotExistAsync(string account, string key, Uri blobUri, string containerName, string policy, DateTime start, DateTime end, string permissions)
{
    // 1. form the credential and initial client
    StagingStorageAccount stagingCredentials = new StagingStorageAccount(account, key, blobUri.ToString());
    StorageSharedKeyCredential sharedKeyCredentials = new StorageSharedKeyCredential(account, key);
    BlobContainerClient containerClient = BlobUtilities.GetBlobContainerClient(containerName, stagingCredentials);

    // 2. create container if it doesn't exist
    // Fix: use the async variant instead of blocking inside an async method.
    await containerClient.CreateIfNotExistsAsync();

    // 3. validate policy, create/overwrite if it doesn't match
    BlobSignedIdentifier identifier = new BlobSignedIdentifier
    {
        Id = policy,
        AccessPolicy = new BlobAccessPolicy
        {
            Permissions = permissions,
            StartsOn = start,
            ExpiresOn = end,
        },
    };

    var accessPolicy = (await containerClient.GetAccessPolicyAsync()).Value;

    // Fix: the original compared with reference equality (i == identifier), which is always
    // false for a freshly constructed BlobSignedIdentifier, so the policy was rewritten on
    // every call. Compare the fields that define the policy instead.
    bool policyFound = accessPolicy.SignedIdentifiers.Any(i =>
        i.Id == identifier.Id &&
        i.AccessPolicy.Permissions == identifier.AccessPolicy.Permissions &&
        i.AccessPolicy.StartsOn == identifier.AccessPolicy.StartsOn &&
        i.AccessPolicy.ExpiresOn == identifier.AccessPolicy.ExpiresOn);

    if (!policyFound)
    {
        await containerClient.SetAccessPolicyAsync(PublicAccessType.BlobContainer, permissions: new List<BlobSignedIdentifier> { identifier });
    }

    BlobSasBuilder sasBuilder = new BlobSasBuilder
    {
        BlobContainerName = containerName,
        StartsOn = start,
        ExpiresOn = end,
    };
    sasBuilder.SetPermissions(permissions);

    BlobUriBuilder builder = new BlobUriBuilder(containerClient.Uri)
    {
        Sas = sasBuilder.ToSasQueryParameters(sharedKeyCredentials)
    };

    string fullSas = builder.ToString();
    return fullSas;
}
/// <summary>
/// Stage a single file: upload it to the staging container (skipping the upload when a
/// blob of the same name and length already exists) and record the resulting SAS as a
/// <see cref="ResourceFile"/> on <paramref name="stageThisFile"/>.
/// </summary>
/// <param name="stageThisFile">The local file to stage; its StagedFiles property is set on success.</param>
/// <param name="seqArtifacts">Staging artifacts holding the target container name and its SAS.</param>
private static async Task StageOneFileAsync(FileToStage stageThisFile, SequentialFileStagingArtifact seqArtifacts)
{
    StagingStorageAccount storecreds = stageThisFile.StagingStorageAccount;
    string containerName = seqArtifacts.BlobContainerCreated;

    // TODO: this flattens all files to the top of the compute node/task relative file directory. solve the hiearchy problem (virt dirs?)
    string blobName = Path.GetFileName(stageThisFile.LocalFileToStage);

    BlobContainerClient blobContainerClient = BlobUtilities.GetBlobContainerClient(containerName, storecreds);
    BlockBlobClient blobClient = blobContainerClient.GetBlockBlobClient(blobName);

    bool doesBlobExist = await blobClient.ExistsAsync();
    bool mustUploadBlob = true; // we do not re-upload blobs if they have already been uploaded

    if (doesBlobExist) // if the blob exists, compare
    {
        FileInfo fi = new FileInfo(stageThisFile.LocalFileToStage);
        var properties = await blobClient.GetPropertiesAsync();
        var length = properties.Value.ContentLength;

        // since we don't have a hash of the contents... we check length
        if (length == fi.Length)
        {
            mustUploadBlob = false;
        }
    }

    if (mustUploadBlob)
    {
        // Fix: open read-only with shared read access — FileMode.Open alone requests
        // read/write access with an exclusive lock, which fails on read-only files and
        // blocks concurrent readers. Also await the upload directly instead of via a
        // temporary Task variable.
        using FileStream stream = new FileStream(stageThisFile.LocalFileToStage, FileMode.Open, FileAccess.Read, FileShare.Read);
        await blobClient.UploadAsync(stream).ConfigureAwait(continueOnCapturedContext: false);
    }

    // get the SAS for the blob
    string blobSAS = ConstructBlobSource(seqArtifacts.DefaultContainerSAS, blobName);
    string nodeFileName = stageThisFile.NodeFileName;

    // create a new ResourceFile and populate it. This file is now staged!
    stageThisFile.StagedFiles = new ResourceFile[] { ResourceFile.FromUrl(blobSAS, nodeFileName) };
}
/// <summary>
/// Runs a task with an OutputFiles specification and verifies that the expected files are
/// uploaded to the target container with the configured blob prefix and upload headers.
/// </summary>
public async Task RunTaskAndUploadFiles_FilesAreSuccessfullyUploaded()
{
    async Task test()
    {
        using BatchClient batchCli = TestUtilities.OpenBatchClient(TestUtilities.GetCredentialsFromEnvironment());

        string jobId = "RunTaskAndUploadFiles-" + TestUtilities.GetMyName();
        string containerName = "runtaskanduploadfiles";
        StagingStorageAccount storageAccount = TestUtilities.GetStorageCredentialsFromEnvironment();
        BlobServiceClient blobClient = BlobUtilities.GetBlobServiceClient(storageAccount);
        BlobContainerClient containerClient = BlobUtilities.GetBlobContainerClient(containerName, blobClient, storageAccount);

        try
        {
            // Create container and writeable SAS
            containerClient.CreateIfNotExists();
            string sasUri = BlobUtilities.GetWriteableSasUri(containerClient, storageAccount);

            CloudJob createJob = batchCli.JobOperations.CreateJob(jobId, new PoolInformation { PoolId = poolFixture.PoolId });
            // Fix: commit asynchronously instead of blocking inside an async test body.
            await createJob.CommitAsync();

            const string blobPrefix = "foo/bar";
            const string taskId = "simpletask";

            OutputFileBlobContainerDestination containerDestination = new OutputFileBlobContainerDestination(sasUri, blobPrefix);
            containerDestination.UploadHeaders = new List<HttpHeader> { new HttpHeader("x-ms-blob-content-type", "test-type") };

            OutputFileDestination destination = new OutputFileDestination(containerDestination);
            OutputFileUploadOptions uploadOptions = new OutputFileUploadOptions(uploadCondition: OutputFileUploadCondition.TaskCompletion);
            CloudTask unboundTask = new CloudTask(taskId, "echo test")
            {
                OutputFiles = new List<OutputFile>
                {
                    new OutputFile(@"../*.txt", destination, uploadOptions)
                }
            };

            batchCli.JobOperations.AddTask(jobId, unboundTask);

            IPagedEnumerable<CloudTask> tasks = batchCli.JobOperations.ListTasks(jobId);
            TaskStateMonitor monitor = batchCli.Utilities.CreateTaskStateMonitor();
            monitor.WaitAll(tasks, TaskState.Completed, TimeSpan.FromMinutes(1));

            // Ensure that the correct files got uploaded.
            // There are 4 .txt files created: stdout, stderr, fileuploadout, and fileuploaderr.
            List<BlobItem> blobs = containerClient.GetAllBlobs();
            Assert.Equal(4, blobs.Count); // Fix: List<T>.Count property instead of LINQ Count() extension

            foreach (BlobItem blob in blobs)
            {
                Assert.StartsWith(blobPrefix, blob.Name);
                Assert.Equal("test-type", blob.Properties.ContentType); // Ensure test Upload header was applied to blob.
            }
        }
        finally
        {
            await TestUtilities.DeleteJobIfExistsAsync(batchCli, jobId).ConfigureAwait(false);
            // Fix: delete asynchronously, consistent with the awaited job cleanup above.
            await containerClient.DeleteIfExistsAsync();
        }
    }

    await SynchronizationContextHelper.RunTestAsync(test, TestTimeout);
}