/// <summary>
/// Creates the Data Lake file and uploads <paramref name="content"/> to it,
/// choosing between a single-shot Append+Flush (for streams whose length is
/// known and below the single-upload threshold) and a parallel partitioned
/// upload for everything else.
/// </summary>
/// <param name="content">Stream to upload. May be unseekable; length detection is best-effort.</param>
/// <param name="httpHeaders">Standard HTTP headers to set on the file.</param>
/// <param name="metadata">Custom metadata to set on the file at creation.</param>
/// <param name="permissions">POSIX access permissions for the file owner/group/others.</param>
/// <param name="umask">POSIX umask applied when creating the file.</param>
/// <param name="conditions">Request conditions for creation; only the lease ID remains valid afterwards.</param>
/// <param name="progressHandler">Optional progress notifications for bytes transferred.</param>
/// <param name="cancellationToken">Token to cancel the operation.</param>
/// <returns>A <see cref="Response{PathInfo}"/> describing the uploaded file.</returns>
public async Task<Response<PathInfo>> UploadAsync(
    Stream content,
    PathHttpHeaders httpHeaders,
    Metadata metadata,
    string permissions,
    string umask,
    DataLakeRequestConditions conditions,
    IProgress<long> progressHandler,
    CancellationToken cancellationToken)
{
    await _client.CreateAsync(
        httpHeaders: httpHeaders,
        metadata: metadata,
        permissions: permissions,
        umask: umask,
        conditions: conditions,
        cancellationToken: cancellationToken).ConfigureAwait(false);

    // After the file is created, the lease ID is the only request condition
    // that remains valid for the subsequent Append/Flush calls.
    conditions = new DataLakeRequestConditions
    {
        LeaseId = conditions?.LeaseId
    };

    // If we can compute the size and it's small enough, upload in one shot.
    if (PartitionedUploadExtensions.TryGetLength(content, out long contentLength)
        && contentLength < _singleUploadThreshold)
    {
        // Append all the data at offset 0.
        await _client.AppendAsync(
            content,
            offset: 0,
            leaseId: conditions?.LeaseId,
            progressHandler: progressHandler,
            cancellationToken: cancellationToken).ConfigureAwait(false);

        // Flush the data to commit the file.
        // BUGFIX: previously the cancellation token was not forwarded here,
        // so cancellation could not interrupt the final flush.
        return await _client.FlushAsync(
            position: contentLength,
            httpHeaders: httpHeaders,
            conditions: conditions,
            cancellationToken: cancellationToken)
            .ConfigureAwait(false);
    }

    // If the caller provided an explicit block size, we'll use it.
    // Otherwise we'll adjust dynamically based on the size of the
    // content. (If the length was undeterminable, contentLength is 0
    // here, which selects the default buffer size.)
    int blockSize =
        _blockSize != null ? _blockSize.Value :
        contentLength < Constants.LargeUploadThreshold ?
            Constants.DefaultBufferSize :
            Constants.LargeBufferSize;

    // Otherwise stage individual blocks in parallel.
    return await UploadInParallelAsync(
        content,
        blockSize,
        httpHeaders,
        conditions,
        progressHandler,
        cancellationToken).ConfigureAwait(false);
}
public async Task TraverseAsync()
{
    // Write a temporary Lorem Ipsum file to disk so we have something to upload.
    string localFilePath = CreateTempFile(SampleFileContent);

    // Build a StorageSharedKeyCredential for the service client.
    string accountName = StorageAccountName;
    string accountKey = StorageAccountKey;
    Uri accountUri = StorageAccountBlobUri;
    StorageSharedKeyCredential credential = new StorageSharedKeyCredential(accountName, accountKey);

    // Create the DataLakeServiceClient using the shared key credential.
    DataLakeServiceClient service = new DataLakeServiceClient(accountUri, credential);

    // Create a filesystem with a randomized "sample-filesystem-traverse" name.
    DataLakeFileSystemClient fileSystem = service.GetFileSystemClient(Randomize("sample-filesystem-traverse"));
    await fileSystem.CreateAsync();
    try
    {
        // Build a small directory tree with files inside it.
        DataLakeDirectoryClient dirFirst = await fileSystem.CreateDirectoryAsync("first");
        await dirFirst.CreateSubDirectoryAsync("a");
        await dirFirst.CreateSubDirectoryAsync("b");
        DataLakeDirectoryClient dirSecond = await fileSystem.CreateDirectoryAsync("second");
        await dirSecond.CreateSubDirectoryAsync("c");
        await dirSecond.CreateSubDirectoryAsync("d");
        await fileSystem.CreateDirectoryAsync("third");
        DataLakeDirectoryClient dirFourth = await fileSystem.CreateDirectoryAsync("fourth");
        DataLakeDirectoryClient dirDeepest = await dirFourth.CreateSubDirectoryAsync("e");

        // Upload a DataLake file named "file" into the deepest directory.
        DataLakeFileClient uploadedFile = dirDeepest.GetFileClient("file");
        await uploadedFile.CreateAsync();
        using (FileStream localFile = File.OpenRead(localFilePath))
        {
            await uploadedFile.AppendAsync(localFile, 0);
        }

        // Recursively list every path and record each name we see.
        List<string> seen = new List<string>();
        await foreach (PathItem path in fileSystem.ListPathsAsync(recursive: true))
        {
            seen.Add(path.Name);
        }

        // Confirm the full tree was enumerated.
        Assert.AreEqual(10, seen.Count);
        Assert.Contains("first", seen);
        Assert.Contains("second", seen);
        Assert.Contains("third", seen);
        Assert.Contains("fourth", seen);
        Assert.Contains("first/a", seen);
        Assert.Contains("first/b", seen);
        Assert.Contains("second/c", seen);
        Assert.Contains("second/d", seen);
        Assert.Contains("fourth/e", seen);
        Assert.Contains("fourth/e/file", seen);
    }
    finally
    {
        // Clean up after the test when we're finished.
        await fileSystem.DeleteAsync();
    }
}