/// <summary>
/// Converts a <see cref="ContentHash"/> to a string appropriate for the JSON fingerprint.
/// For compactness, the hash is truncated to just the first 10 characters.
/// This will cause data loss and should only be used for displaying hashes.
/// </summary>
public static string ContentHashToString(ContentHash hash)
{
    Contract.Requires(hash.HashType != HashType.Unknown);
    var fullString = hash.ToHex();
    return fullString.Substring(0, 10);
}
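// Hedged usage sketch: the truncated form is display-only, since distinct
// hashes can share a 10-character prefix. The printed values are illustrative.
ContentHash hash = ContentHash.Random(HashType.Vso0);
string display = ContentHashToString(hash);
Console.WriteLine(display);       // e.g. "1EAF5C3D90" (always 10 characters)
Console.WriteLine(hash.ToHex());  // full hex string; use this when identity matters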
public void ToHexCorrect(HashType hashType, string expected)
{
    var hashLength = HashInfoLookup.Find(hashType).ByteLength;
    var hash = new ContentHash(hashType, Enumerable.Range(0, hashLength).Select(i => (byte)i).ToArray());
    Assert.Equal(expected, hash.ToHex());
}
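// Illustrative theory data (assumed, not from the source): for SHA1 the byte
// length is 20, so bytes 0..19 should render as uppercase hex.
// [InlineData(HashType.SHA1, "000102030405060708090A0B0C0D0E0F10111213")]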
public void CreateFromString(HashType hashType, string value)
{
    var hash = new ContentHash(value);
    Assert.Equal(hashType, hash.HashType);
    Assert.Equal(value.Split(':')[1], hash.ToHex());
}
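// Illustrative theory data (assumed): the serialized form inferred from the
// Split(':') above is "<HashType>:<hex>".
// [InlineData(HashType.MD5, "MD5:000102030405060708090A0B0C0D0E0F")]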
/// <inheritdoc />
public AbsolutePath GeneratePath(ContentHash contentHash, byte[] contentLocationIdContent)
{
    string contentHashString = contentHash.ToHex();
    var pathRoot = new AbsolutePath(Encoding.UTF8.GetString(contentLocationIdContent));

    return pathRoot / contentHash.HashType.ToString() / contentHashString.Substring(0, 3) / (contentHashString + BlobFileExtension);
}
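// Sketch of the resulting layout (root and hash are illustrative). The first
// three hex characters shard blobs into subdirectories so that no single
// directory accumulates every file:
//
//   <root>/Vso0/1EA/1EAF5C3D90....blob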
/// <inheritdoc />
public override string GetCommandLineArgs(LocalServerConfiguration localContentServerConfiguration = null, string scenario = null, bool logAutoFlush = false, bool passMaxConfigurations = false)
{
    var args = new StringBuilder(base.GetCommandLineArgs(localContentServerConfiguration, scenario, logAutoFlush, false));
    args.AppendFormat(" /hash:{0}", ContentHash.ToHex());
    args.AppendFormat(" /hashType:{0}", ContentHash.HashType.ToString());
    args.AppendFormat(" /path:{0}", DestinationPath.Path);
    return args.ToString();
}
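// Example of the appended arguments (values are illustrative):
//
//   /hash:1EAF5C3D90... /hashType:Vso0 /path:C:\out\payload.dat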
public void RoundtripPartialBuffer(HashType hashType)
{
    var buffer = new byte[ContentHash.SerializedLength];
    var h1 = ContentHash.Random(hashType);
    h1.SerializeHashBytes(buffer);
    var h2 = new ContentHash(hashType, buffer);
    Assert.Equal(hashType, h2.HashType);
    Assert.Equal(h1.ToHex(), h2.ToHex());
}
public void RoundtripPartialBufferPositiveOffset(HashType hashType)
{
    const int offset = 5;
    var buffer = new byte[ContentHash.SerializedLength + offset];
    var h1 = ContentHash.Random(hashType);
    h1.SerializeHashBytes(buffer, offset);
    var h2 = new ContentHash(hashType, buffer, offset);
    Assert.Equal(hashType, h2.HashType);
    Assert.Equal(h1.ToHex(), h2.ToHex());
}
internal void OpenStream
    (
    [Description("Cache root directory path (using in-process cache)")] string cachePath,
    [Description("Cache name (using cache service)")] string cacheName,
    [Required, Description("Content hash value of referenced content to place")] string hash,
    [Description(HashTypeDescription)] string hashType,
    [Description("File name where the GRPC port can be found when using cache service. 'CASaaS GRPC port' if not specified.")] string grpcPortFileName,
    [Description("The GRPC port."), DefaultValue(0)] int grpcPort
    )
{
    var ht = GetHashTypeByNameOrDefault(hashType);
    var contentHash = new ContentHash(ht, HexUtilities.HexToBytes(hash));
    ServiceClientRpcConfiguration rpcConfig = null;
    if (cacheName != null)
    {
        if (grpcPort == 0)
        {
            grpcPort = Helpers.GetGrpcPortFromFile(_logger, grpcPortFileName);
        }

        rpcConfig = new ServiceClientRpcConfiguration(grpcPort);
    }

    RunContentStore(cacheName, cachePath, rpcConfig, async (context, session) =>
    {
        var r = await session.OpenStreamAsync(context, contentHash, CancellationToken.None).ConfigureAwait(false);
        if (r.Succeeded)
        {
            using (r.Stream)
            {
                var path = _fileSystem.GetTempPath() / $"{contentHash.ToHex()}.dat";
                using (Stream fileStream = await _fileSystem.OpenSafeAsync(path, FileAccess.Write, FileMode.Create, FileShare.None))
                {
                    await r.Stream.CopyToAsync(fileStream);
                    context.Always($"Content streamed to file path=[{path}]");
                }
            }
        }
        else
        {
            context.Error(r.ToString());
        }
    });
}
public void RoundtripPartialBinary(HashType hashType)
{
    using (var ms = new MemoryStream())
    {
        using (var writer = new BinaryWriter(ms))
        {
            var h1 = ContentHash.Random(hashType);
            h1.SerializeHashBytes(writer);
            Assert.Equal(HashInfoLookup.Find(hashType).ByteLength, ms.Length);

            ms.Position = 0;
            using (var reader = new BinaryReader(ms))
            {
                var h2 = new ContentHash(hashType, reader);
                Assert.Equal(hashType, h2.HashType);
                Assert.Equal(h1.ToHex(), h2.ToHex());
            }
        }
    }
}
/// <summary>
/// Records a <see cref="ContentHash" /> for the given file handle. This hash mapping will be persisted to disk if the
/// table is saved with <see cref="SaveAsync" />. The given file handle should be opened with at most Read sharing
/// (having the handle should ensure the file is not being written).
/// This returns a <see cref="VersionedFileIdentityAndContentInfo"/>:
/// - The identity has the kind <see cref="VersionedFileIdentity.IdentityKind.StrongUsn"/> if a USN-based identity was successfully established;
///   the identity may have kind <see cref="VersionedFileIdentity.IdentityKind.Anonymous"/> if such an identity was unavailable.
/// - Regardless, the contained <see cref="FileContentInfo"/> contains the actual length of the stream corresponding to <paramref name="hash"/>.
/// </summary>
/// <remarks>
/// An overload taking a file path is intentionally not provided. This should be called after hashing or writing a file,
/// but before closing the handle. This way, there is no race between establishing the file's hash, some unrelated writer,
/// and recording its file version (e.g., USN) to hash mapping.
/// Note that this results in a small amount of I/O (e.g., on Windows, a file open and USN query), but never hashes the file or reads its contents.
/// The <paramref name="strict"/> parameter corresponds to the <c>flush</c> parameter of <see cref="VersionedFileIdentity.TryEstablishStrong"/>.
/// </remarks>
public VersionedFileIdentity RecordContentHash(
    string path,
    SafeFileHandle handle,
    ContentHash hash,
    long length,
    bool? strict = default)
{
    Contract.Requires(handle != null);
    Contract.Requires(!string.IsNullOrWhiteSpace(path));

    using (Counters.StartStopwatch(FileContentTableCounters.RecordContentHashDuration))
    {
        // TODO: The contract below looks very nice but breaks tons of UT
        // Fix the tests and enable the contract.
        // Contract.Requires(FileContentInfo.IsValidLength(length, hash));

        // Here we write a new change journal record for this file to get a 'strong' identity. This means that the USN -> hash table
        // only ever contains USNs whose records have the 'close' reason set. Recording USNs without that
        // reason set would not be correct; it would be possible that multiple separate changes (e.g. writes)
        // were represented with the same USN, and so intermediate USNs do not necessarily correspond to exactly
        // one snapshot of a file. See http://msdn.microsoft.com/en-us/library/windows/desktop/aa363803(v=vs.85).aspx
        Possible<VersionedFileIdentity, Failure<VersionedFileIdentity.IdentityUnavailabilityReason>> possibleVersionedIdentity =
            TryEstablishStrongIdentity(handle, flush: strict == true);

        if (!possibleVersionedIdentity.Succeeded)
        {
            if (Interlocked.CompareExchange(ref m_changeJournalWarningLogged, 1, 0) == 0)
            {
                Tracing.Logger.Log.StorageFileContentTableIgnoringFileSinceVersionedFileIdentityIsNotSupported(
                    Events.StaticContext,
                    path,
                    possibleVersionedIdentity.Failure.DescribeIncludingInnerFailures());
            }

            return VersionedFileIdentity.Anonymous;
        }

        VersionedFileIdentity identity = possibleVersionedIdentity.Result;
        var newEntry = new Entry(identity.Usn, hash, length, EntryTimeToLive);

        // We allow concurrent update attempts with different observed USNs.
        // This is useful and relevant for two reasons:
        // - Querying a 'strong' identity (TryEstablishStrongIdentity) generates a new CLOSE record every time.
        // - Creating hardlinks generates 'hardlink change' records.
        // So, concurrently creating and recording (or even just recording) different links is possible, and
        // keeping the last stored entry (rather than highest-USN entry) can introduce false positives.
        var fileIdAndVolumeId = new FileIdAndVolumeId(identity.VolumeSerialNumber, identity.FileId);
        m_entries.AddOrUpdate(
            fileIdAndVolumeId,
            newEntry,
            updateValueFactory: (key, existingEntry) =>
            {
                if (existingEntry.Usn > newEntry.Usn)
                {
                    return existingEntry;
                }

                if (newEntry.Hash == existingEntry.Hash)
                {
                    Counters.IncrementCounter(FileContentTableCounters.NumUsnMismatch);
                    Tracing.Logger.Log.StorageUsnMismatchButContentMatch(
                        Events.StaticContext,
                        path,
                        existingEntry.Usn.Value,
                        newEntry.Usn.Value,
                        existingEntry.Hash.ToHex());
                }
                else
                {
                    // Stale USN.
                    Counters.IncrementCounter(FileContentTableCounters.NumContentMismatch);
                }

                return newEntry;
            });

        Tracing.Logger.Log.StorageRecordNewKnownUsn(
            Events.StaticContext,
            path,
            identity.FileId.High,
            identity.FileId.Low,
            identity.VolumeSerialNumber,
            identity.Usn.Value,
            hash.ToHex());

        return identity;
    }
}
public static AbsolutePath GetContentPath(AbsolutePath rootPath, ContentHash contentHash)
{
    string hash = contentHash.ToHex();
    return rootPath / "Shared" / contentHash.HashType.ToString() / hash.Substring(0, 3) / (hash + ".blob");
}
/// <inheritdoc />
public override string ToString()
{
    return m_hash.ToHex();
}
private async Task UpgradeLegacyVsoHashedContent(bool holdHandleToFile)
{
    var context = new Context(Logger);
    using (var testDirectory = new DisposableDirectory(FileSystem))
    {
        // Create an empty store.
        using (var store = Create(testDirectory.Path, _clock))
        {
            await store.StartupAsync(context).ShouldBeSuccess();
            await store.ShutdownAsync(context).ShouldBeSuccess();
        }

        // Write a file into the cache corresponding to the generated content directory entry.
        string emptyFileVsoHashHex = _emptyFileVsoHash.ToHex();
        var oldRoot = testDirectory.Path / "Shared" / ((int)HashType.DeprecatedVso0).ToString();
        var oldPath = oldRoot / emptyFileVsoHashHex.Substring(0, 3) / (emptyFileVsoHashHex + ".blob");
        var newPath = testDirectory.Path / "Shared" / ContentHashType.Serialize() / emptyFileVsoHashHex.Substring(0, 3) / (emptyFileVsoHashHex + ".blob");
        FileSystem.CreateDirectory(oldPath.Parent);
        FileSystem.WriteAllBytes(oldPath, new byte[0]);

        // Load the store, checking that its content has been upgraded.
        // In this first pass, we might fail a file move.
        await TestStore(
            context,
            _clock,
            testDirectory,
            async store =>
            {
                Assert.Equal(holdHandleToFile, FileSystem.FileExists(oldPath));
                Assert.True(FileSystem.FileExists(newPath));
                var contentHash = new ContentHash(ContentHashType, HexUtilities.HexToBytes(emptyFileVsoHashHex));
                var contains = await store.ContainsAsync(context, contentHash, null);
                Assert.True(contains);
            },
            preStartupAction: store =>
            {
                if (holdHandleToFile)
                {
                    store.ThrowOnUpgradeLegacyVsoHashedContentDirectoryRename = oldRoot;
                    store.ThrowOnUpgradeLegacyVsoHashedContentDirectoryDelete = oldRoot;
                    store.ThrowOnUpgradeLegacyVsoHashedContentFileRename = oldPath;
                }
            });
        Assert.Equal(holdHandleToFile, FileSystem.DirectoryExists(oldRoot));

        // Load the store again, checking that its content has been upgraded.
        // In this second pass, no file move should fail.
        await TestStore(
            context,
            _clock,
            testDirectory,
            async store =>
            {
                // Make sure the cleanup completed.
                Assert.False(FileSystem.FileExists(oldPath));
                Assert.True(FileSystem.FileExists(newPath));
                var contentHash = new ContentHash(ContentHashType, HexUtilities.HexToBytes(emptyFileVsoHashHex));
                var contentFileInfo = await store.GetCacheFileInfo(contentHash);
                Assert.NotNull(contentFileInfo);
            });
        Assert.False(FileSystem.DirectoryExists(oldRoot));
    }
}
private const int GetBuildManifestHashFromLocalFileRetryLimit = 5; // Starts from 0; the retry multiplier is applied up to (GetBuildManifestHashFromLocalFileRetryLimit - 1).

/// <summary>
/// EngineEnvironmentSettings.BuildManifestHashCacheSalt is used to create a salted weak fingerprint for [VSO:SHA] cache entries using the file's VSO hash as input.
/// </summary>
private WeakContentFingerprint GenerateSaltedWeakFingerprint(ContentHash hash) =>
    new WeakContentFingerprint(FingerprintUtilities.Hash($"Hash: '{hash.ToHex()}' Salt: '{m_buildManifestHashCacheSalt}'")); // Changes to this string will invalidate all existing cache entries.
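// Illustrative fingerprint input (hash and salt values below are hypothetical):
//
//   "Hash: '1EAF5C3D90...' Salt: 'v2'"
//
// Because the salt participates in the hashed string, changing
// BuildManifestHashCacheSalt invalidates every previously stored [VSO:SHA]
// cache entry at once.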