/// <inheritdoc />
protected override async Task<PutResult> PutStreamCoreAsync(OperationContext context, HashType hashType, Stream stream, UrgencyHint urgencyHint, Counter retryCounter)
{
    if (!hashType.IsValidDedup())
    {
        return new PutResult(
            new ContentHash(hashType),
            $"DedupStore client requires a HashType that supports dedup. Given hash type: {hashType}.");
    }

    try
    {
        // Spool the incoming stream to a temp file first: the dedup path needs seekable,
        // on-disk content for chunking, so we reuse the file-based implementation below.
        // NOTE(review): assumes TempDirectory owns cleanup of the random file — confirm.
        var tempFile = TempDirectory.CreateRandomFileName().Path;
        using (Stream writer = new FileStream(tempFile, FileMode.Create, FileAccess.Write, FileShare.None, 4096, FileOptions.Asynchronous | FileOptions.SequentialScan))
        {
            // Honor cancellation during a potentially large copy and avoid capturing the
            // sync context, consistent with PutFileCoreAsync. 81920 is the framework's
            // default CopyToAsync buffer size.
            await stream.CopyToAsync(writer, 81920, context.Token).ConfigureAwait(false);
        }

        // Cast is necessary because ContentSessionBase implements IContentSession explicitly
        return await (this as IContentSession).PutFileAsync(context, hashType, new AbsolutePath(tempFile), FileRealizationMode.None, context.Token, urgencyHint).ConfigureAwait(false);
    }
    catch (Exception e)
    {
        // Surface any I/O or upload failure as a failed PutResult rather than throwing.
        return new PutResult(e, new ContentHash(hashType));
    }
}
/// <inheritdoc />
protected override async Task<PutResult> PutFileCoreAsync(
    OperationContext context,
    HashType hashType,
    AbsolutePath path,
    FileRealizationMode realizationMode,
    UrgencyHint urgencyHint,
    Counter retryCounter)
{
    if (!hashType.IsValidDedup())
    {
        return new PutResult(
            new ContentHash(hashType),
            $"DedupStore client requires a HashType that supports dedup. Given hash type: {hashType}.");
    }

    try
    {
        var contentSize = GetContentSize(path);

        // Chunk the file locally and derive its dedup content hash before touching the service.
        var dedupNode = await GetDedupNodeFromFileAsync(hashType, path.Path).ConfigureAwait(false);
        var contentHash = dedupNode.ToContentHash(hashType);

        if (contentHash.HashType != hashType)
        {
            return new PutResult(
                contentHash,
                $"Failed to add a DedupStore reference due to hash type mismatch: provided=[{hashType}] calculated=[{contentHash.HashType}]");
        }

        // Try to pin first: if the service already has the content, we avoid the upload entirely.
        var pinResult = await PinAsync(context, contentHash, context.Token, urgencyHint).ConfigureAwait(false);

        if (pinResult.Succeeded)
        {
            return new PutResult(contentHash, contentSize);
        }

        // A hard pin error (as opposed to a miss) is terminal — propagate it.
        if (pinResult.Code == PinResult.ResultCode.Error)
        {
            return new PutResult(pinResult, contentHash);
        }

        // Pin missed: upload the content via dedup.
        var putResult = await UploadWithDedupAsync(context, path, hashType, dedupNode).ConfigureAwait(false);
        if (!putResult.Succeeded)
        {
            return new PutResult(
                putResult,
                contentHash,
                $"Failed to add a DedupStore reference to content with hash=[{contentHash}]");
        }

        return new PutResult(contentHash, contentSize);
    }
    catch (Exception e)
    {
        return new PutResult(e, new ContentHash(hashType));
    }
}
[InlineData(HashType.Dedup1024K, DedupNode.MaxDirectChildrenPerNode / 8, 64, 16)] // 4MB | 64 * 4 * 1024K 256MB
public void HashOfChunksInNodeMatchesChunkHashAlgorithmNegative(HashType hashType, int expectedChunkCount, int multiplier, int divider)
{
    Assert.True(hashType.IsValidDedup(), $"Hash type: {hashType} is not a valid dedup.");

    // Build a chunker configuration whose average chunk size deviates from the
    // hash type's native size — the implementation is expected to reject it.
    var config = new ChunkerConfiguration((multiplier * hashType.GetAvgChunkSize()) / divider);

    Assert.Throws<NotImplementedException>(
        () => HashOfChunksInNodeMatchesChunkHashAlgorithmInner(expectedChunkCount, config, new ManagedChunker(config)));

    // The COM chunker only exists for the single supported chunk size with Dedup64K,
    // so repeat the negative check there only when that combination applies.
    bool comChunkerApplies =
        Chunker.IsComChunkerSupported
        && config.AvgChunkSize == ChunkerConfiguration.SupportedComChunkerConfiguration.AvgChunkSize
        && hashType == HashType.Dedup64K;

    if (comChunkerApplies)
    {
        Assert.Throws<NotImplementedException>(
            () => HashOfChunksInNodeMatchesChunkHashAlgorithmInner(expectedChunkCount, config, new ComChunker(config)));
    }
}
/// <nodoc />
public static NodeAlgorithmId GetNodeAlgorithmId(this HashType hashType)
{
    // Only dedup-capable hash types have a node algorithm id.
    if (!hashType.IsValidDedup())
    {
        throw new NotImplementedException($"{hashType.Serialize()} doesn't support chunking.");
    }

    if (TypeToAlgorithmId.TryGetValue(hashType, out var nodeAlgorithmId))
    {
        return nodeAlgorithmId;
    }

    throw new NotImplementedException($"{nameof(GetNodeAlgorithmId)}: No algorithm id found for hash type {hashType.Serialize()}.");
}
/// <nodoc />
public static int GetAvgChunkSize(this HashType hashType)
{
    // Only dedup-capable hash types define an average chunk size.
    if (!hashType.IsValidDedup())
    {
        throw new NotImplementedException($"{hashType.Serialize()} doesn't support chunking.");
    }

    if (TypeToAvgChunkSize.TryGetValue(hashType, out var avgChunkSize))
    {
        return avgChunkSize;
    }

    throw new NotImplementedException($"{nameof(GetAvgChunkSize)}: No average chunk size found for hash type {hashType.Serialize()}.");
}
/// <summary>
/// Clients may switch between hashing algorithms. Must be set at the beginning of the build.
/// </summary>
public static void SetDefaultHashType(HashType hashType)
{
    s_isInitialized = true;
    HashInfo = HashInfoLookup.Find(hashType);

    // Chunker selection logging applies only to dedup-capable hash types.
    if (!hashType.IsValidDedup())
    {
        return;
    }

    if (!Chunker.IsComChunkerSupported)
    {
        Logger.Log.ChunkerType(Events.StaticContext, nameof(ManagedChunker));
        return;
    }

    var loadError = Chunker.ComChunkerLoadError.Value;
    if (loadError != null)
    {
        // COM chunker is supported on this platform but failed to load; record why.
        Logger.Log.ComChunkerFailulre(Events.StaticContext, loadError.ToString());
    }
    else
    {
        Logger.Log.ChunkerType(Events.StaticContext, nameof(ComChunker));
    }
}
/// <nodoc />
public static ChunkerConfiguration GetChunkerConfiguration(this HashType hashType)
{
    // Non-dedup hash types have no chunker configuration.
    if (!hashType.IsValidDedup())
    {
        throw new NotImplementedException($"Unsupported enum {hashType} of type {nameof(HashType)} encountered.");
    }

    return new ChunkerConfiguration(hashType.GetAvgChunkSize());
}