/// <summary>
/// Attempt to update expiry of all children. Pin parent node if all children were extended successfully.
/// </summary>
private async Task<PinResult> TryPinChildrenAsync(Context context, VstsDedupIdentifier parentNode, IEnumerable<VstsDedupIdentifier> dedupIdentifiers, CancellationToken cts)
{
    // Partition children into chunks vs. nodes in a single pass over the sequence.
    var childrenByIsChunk = dedupIdentifiers.ToLookup(id => id.AlgorithmId == Hashing.ChunkDedupIdentifier.ChunkAlgorithmId);
    var chunkIds = childrenByIsChunk[true].ToList();
    var nodeIds = childrenByIsChunk[false].ToList();

    // Attempt to save all children.
    Tracer.Debug(context, $"Pinning children: nodes=[{string.Join(",", nodeIds.Select(x => x.ValueString))}] chunks=[{string.Join(",", chunkIds.Select(x => x.ValueString))}]");

    // '&' deliberately combines both results without short-circuiting: both batches are always attempted.
    var childResult = await TryPinNodesAsync(context, nodeIds, cts) & await TryPinChunksAsync(context, chunkIds, cts);
    if (childResult != PinResult.Success)
    {
        return childResult;
    }

    // All children were extended successfully, so it is safe to pin the parent itself.
    return await TryPinNodeAsync(context, parentNode, cts);
}
/// <summary>
/// Downloads the content with the given hash to <paramref name="path"/> using parallel segmented HTTP download,
/// with an outer retry layer for transient HTTP/timeout failures.
/// </summary>
/// <param name="context">Tracing context.</param>
/// <param name="contentHash">Hash of the content to place.</param>
/// <param name="path">Destination file path.</param>
/// <param name="fileMode">File creation mode; <see cref="FileMode.CreateNew"/> triggers partial-file cleanup on failure.</param>
/// <param name="cts">Cancellation token.</param>
/// <returns>The length of the downloaded content, or null if the content was not found.</returns>
private Task<long?> PlaceFileInternalAsync(
    Context context,
    ContentHash contentHash,
    string path,
    FileMode fileMode,
    CancellationToken cts)
{
    return AsyncHttpRetryHelper<long?>.InvokeAsync(
        async () =>
        {
            Stream httpStream = null;
            try
            {
                httpStream = await GetStreamInternalAsync(context, contentHash, null, cts).ConfigureAwait(false);
                if (httpStream == null)
                {
                    // Content does not exist in the backing store.
                    return null;
                }

                try
                {
                    var success = DownloadUriCache.Instance.TryGetDownloadUri(contentHash, out PreauthenticatedUri preauthUri);
                    var uri = success ? preauthUri.NotNullUri : new Uri("http://empty.com");

                    Directory.CreateDirectory(Directory.GetParent(path).FullName);

                    // TODO: Investigate using ManagedParallelBlobDownloader instead (bug 1365340)
                    await ParallelHttpDownload.Download(
                        _parallelSegmentDownloadConfig,
                        uri,
                        httpStream,
                        null,
                        path,
                        fileMode,
                        cts,
                        (destinationPath, offset, endOffset) =>
                            Tracer.Debug(context, $"Download {destinationPath} [{offset}, {endOffset}) start."),
                        (destinationPath, offset, endOffset) =>
                            Tracer.Debug(context, $"Download {destinationPath} [{offset}, {endOffset}) end."),
                        (destinationPath, offset, endOffset, message) =>
                            Tracer.Debug(context, $"Download {destinationPath} [{offset}, {endOffset}) failed. (message: {message})"),
                        async (offset, token) =>
                        {
                            // Per-segment stream factory: open a fresh ranged stream positioned at the segment offset.
                            var offsetStream = await GetStreamInternalAsync(
                                context,
                                contentHash,
                                _parallelSegmentDownloadConfig.SegmentSizeInBytes,
                                cts).ConfigureAwait(false);
                            offsetStream.Position = offset;
                            return offsetStream;
                        },
                        () => BufferPool.Get()).ConfigureAwait(false);
                }
                catch (Exception e) when (fileMode == FileMode.CreateNew && !IsErrorFileExists(e))
                {
                    try
                    {
                        // Need to delete here so that a partial download doesn't run afoul of FileReplacementMode.FailIfExists upon retry.
                        // Don't do this if the error itself was that the file already existed.
                        File.Delete(path);
                    }
                    catch (Exception ex)
                    {
                        Tracer.Warning(context, $"Error deleting file at {path}: {ex}");
                    }

                    throw;
                }

                return httpStream.Length;
            }
            catch (StorageException storageEx) when (storageEx.InnerException is WebException)
            {
                var webEx = (WebException)storageEx.InnerException;

                // BUGFIX: WebException.Response can be null (e.g. connection failures, timeouts); the previous
                // unconditional cast threw NullReferenceException here, masking the original storage error.
                // Only treat a genuine HTTP 404 as "content not found"; rethrow everything else.
                if (webEx.Response is HttpWebResponse response && response.StatusCode == HttpStatusCode.NotFound)
                {
                    return null;
                }

                throw;
            }
            finally
            {
                httpStream?.Dispose();
            }
        },
        maxRetries: 5,
        tracer: new AppTraceSourceContextAdapter(context, "BlobReadOnlyContentSession", SourceLevels.All),
        canRetryDelegate: exception =>
        {
            // HACK HACK: This is an additional layer of retries specifically to catch the SSL exceptions seen in DM.
            // Newer versions of Artifact packages have this retry automatically, but the packages deployed with the M119.0 release do not.
            // Once the next release is completed, these retries can be removed.
            if (exception is HttpRequestException && exception.InnerException is WebException)
            {
                return true;
            }

            // Walk the inner-exception chain looking for a timeout anywhere in the causal chain.
            while (exception != null)
            {
                if (exception is TimeoutException)
                {
                    return true;
                }

                exception = exception.InnerException;
            }

            return false;
        },
        cancellationToken: cts,
        continueOnCapturedContext: false,
        context: context.Id.ToString());
}