/// <summary>
/// Maps an entry count to the number of bits it occupies (64 bits per entry).
/// The multiplication is checked, so an entry count large enough to overflow
/// <see cref="int"/> throws rather than silently wrapping.
/// </summary>
private static int GetBitCount(int entryCount)
{
    int bitCount = checked(entryCount * 64);
    Contract.AssertDebug(IsValidBitCount(bitCount));
    return bitCount;
}
/// <summary>
/// Rounds up the given bit count as needed to an allowable size for a <see cref="BitSet"/>
/// (i.e., the returned value satisfies <see cref="IsValidBitCount"/>)
/// </summary>
public static int RoundToValidBitCount(int unroundedBitCount)
{
    // (x + 63) & ~63 rounds x up to the next multiple of 64. Only the addition
    // can overflow, so it alone is wrapped in a checked context.
    int rounded = checked(unroundedBitCount + 63) & ~63;
    Contract.AssertDebug(IsValidBitCount(rounded));
    return rounded;
}
/// <summary>
/// Renders this object to a string which when parsed with <see cref="Parse(string)"/>
/// returns a <see cref="FileContentInfo"/> object that is equal to this object.
///
/// The format of the rendered value is $"{Hash}_{Length}".
/// </summary>
public string Render()
{
    // Each rendered property must not contain the separator, or the result
    // would be ambiguous to Parse.
    string renderProperty(string name, object value)
    {
        var valueStr = value.ToString();
        Contract.AssertDebug(
            !valueStr.Contains(RenderSeparator),
            I($"Rendered value of the '{name}' property ('{valueStr}') must not contain the designated separator string ('{RenderSeparator}')"));
        return valueStr;
    }

    var renderedHash = renderProperty(nameof(Hash), Hash);
    var renderedLength = renderProperty(nameof(RawLength), m_length);
    return I($"{renderedHash}{RenderSeparator}{renderedLength}");
}
/// <summary>
/// Splits the current instance into smaller instances, each of which is estimated
/// to serialize to fewer than <paramref name="maxEstimatedSize"/> bytes.
/// </summary>
/// <param name="maxEstimatedSize">Upper bound (exclusive) on the estimated serialized size of each resulting instance.</param>
/// <returns>A list of instances of the same concrete kind as this one, whose hashes together cover <c>ContentHashes</c>.</returns>
public IReadOnlyList<ContentLocationEventData> Split(long maxEstimatedSize)
{
    // First, need to compute the number of hashes that will fit into maxEstimatedSize.
    // NOTE(review): if maxEstimatedSize is at or below SerializedSizeBase this yields a
    // non-positive count; presumably callers pass a size large enough for at least one
    // hash — TODO confirm against ContentHashes.Split's handling of such counts.
    var maxHashCount = MaxContentHashesCount(maxEstimatedSize);

    // Then we need to split the instance into a sequence of instances.
    var hashes = ContentHashes.Split(maxHashCount).ToList();
    var result = new List<ContentLocationEventData>(hashes.Count);

    // Sanity check: splitting must neither drop nor duplicate hashes.
    Contract.Assert(hashes.Sum(c => c.Count) == ContentHashes.Count);

    // Rebuild one event per chunk, preserving the concrete event kind and its
    // per-kind payload (sizes for Add, access time for Touch).
    switch (this)
    {
        case AddContentLocationEventData addContentLocationEventData:
            // ContentSizes is parallel to ContentHashes, so splitting with the same
            // chunk size keeps each sizes chunk aligned with its hashes chunk.
            var contentSizes = addContentLocationEventData.ContentSizes.Split(maxHashCount).ToList();
            Contract.Assert(hashes.Count == contentSizes.Count);
            result.AddRange(hashes.Select((t, index) => new AddContentLocationEventData(Sender, t, contentSizes[index], addContentLocationEventData.Touch)));
            break;
        case RemoveContentLocationEventData _:
            result.AddRange(hashes.Select(t => new RemoveContentLocationEventData(Sender, t)));
            break;
        case TouchContentLocationEventData touchContentLocationEventData:
            result.AddRange(hashes.Select(t => new TouchContentLocationEventData(Sender, t, touchContentLocationEventData.AccessTime)));
            break;
    }

    // Debug-only verification that every produced chunk actually fits the budget.
    Contract.AssertDebug(result.TrueForAll(v => v.EstimateSerializedInstanceSize() < maxEstimatedSize));
    return(result);

    // Computes how many hashes fit into estimatedSize after subtracting the fixed
    // per-event overhead. Add events carry an extra long (the content size) per hash.
    int MaxContentHashesCount(long estimatedSize)
    {
        switch (this)
        {
            case AddContentLocationEventData _:
                return((int)(estimatedSize - SerializedSizeBase) / (ShortHash.SerializedLength + sizeof(long)));
            default:
                return((int)(estimatedSize - SerializedSizeBase) / (ShortHash.SerializedLength));
        }
    }
}
/// <summary>
/// Finalizes the rolling hash and returns the resulting hash bytes.
/// </summary>
/// <remarks>
/// If any bytes are pending in the block buffer they form the last block;
/// otherwise (no data was ever hashed) an empty block is hashed so the
/// rolling id is finalized over a well-defined value.
/// </remarks>
/// <inheritdoc />
protected override byte[] HashFinal()
{
    Contract.AssertDebug(_buffer != null);

    // Here, either the buffer has data, or there were no blocks.
    // Flush out buffer
    if (_currentOffset != 0)
    {
        var blockHash = HashBlock(_buffer, _currentOffset);
        return _rollingId.Finalize(blockHash).Bytes;
    }

    // If there are no blocks, add an empty block. Array.Empty<byte>() avoids
    // allocating a fresh zero-length array on every call (idiomatic replacement
    // for `new byte[] { }`).
    var emptyBlockHash = HashBlock(Array.Empty<byte>(), 0);
    return _rollingId.Finalize(emptyBlockHash).Bytes;
}
/// <summary>
/// Records a <see cref="ContentHash" /> for the given file handle. This hash mapping will be persisted to disk if the
/// table is saved with <see cref="SaveAsync" />. The given file handle should be opened with at most Read sharing
/// (having the handle should ensure the file is not being written).
/// This returns a <see cref="VersionedFileIdentityAndContentInfo"/>:
/// - The identity has the kind <see cref="VersionedFileIdentity.IdentityKind.StrongUsn"/> if a USN-based identity was successfully established;
///   the identity may have kind <see cref="VersionedFileIdentity.IdentityKind.Anonymous"/> if such an identity was unavailable.
/// - Regardless, the contained <see cref="FileContentInfo"/> contains the actual length of the stream corresponding to <paramref name="hash"/>.
/// </summary>
/// <remarks>
/// An overload taking a file path is intentionally not provided. This should be called after hashing or writing a file,
/// but before closing the handle. This way, there is no race between establishing the file's hash, some unrelated writer,
/// and recording its file version (e.g., USN) to hash mapping.
/// Note that this results in a small amount of I/O (e.g., on Windows, a file open and USN query), but never hashes the file or reads its contents.
/// The <paramref name="strict"/> corresponds to the <c>flush</c> parameter of <see cref="VersionedFileIdentity.TryEstablishStrong"/>
/// </remarks>
public VersionedFileIdentityAndContentInfo RecordContentHash(
    FileStream stream,
    ContentHash hash,
    bool? strict = default)
{
    Contract.Requires(stream != null);
    Contract.AssertDebug(stream.SafeFileHandle != null && stream.Name != null);

    // Default strictness: a writable stream may still have pending writes, so the
    // identity must be established strictly (flushed) unless the caller says otherwise.
    bool effectiveStrict = strict ?? stream.CanWrite;

    long streamLength = stream.Length;
    VersionedFileIdentity identity = RecordContentHash(
        stream.Name,
        stream.SafeFileHandle,
        hash,
        streamLength,
        strict: effectiveStrict);

    return new VersionedFileIdentityAndContentInfo(identity, new FileContentInfo(hash, streamLength));
}
/// <summary>
/// Consumes input bytes, accumulating them into the fixed-size block buffer and
/// folding each completed block into the rolling id.
/// </summary>
/// <inheritdoc />
protected override void HashCore(byte[] array, int ibStart, int cbSize)
{
    Contract.AssertDebug(_buffer != null);

    // Work on locals rather than mutating the parameters.
    int sourceOffset = ibStart;
    int remaining = cbSize;

    while (remaining > 0)
    {
        // A full buffer is hashed and folded into the rolling id before
        // accepting any more bytes; the buffer is then reused from the start.
        if (_currentOffset == _buffer.Length)
        {
            _rollingId.Update(HashBlock(_buffer, _buffer.Length));
            _currentOffset = 0;
        }

        // Copy as much as fits into the remaining buffer space.
        int chunk = Math.Min(remaining, _buffer.Length - _currentOffset);
        Buffer.BlockCopy(array, sourceOffset, _buffer, _currentOffset, chunk);

        _currentOffset += chunk;
        sourceOffset += chunk;
        remaining -= chunk;
    }
}
/// <summary>
/// Walks the edges incident to <paramref name="walkFromNodes"/> (via <paramref name="getEdges"/>),
/// tracking the min/max node id of neighbors that pass <paramref name="incidentNodeFilter"/>.
/// Returns true (with <paramref name="intersection"/> set) as soon as such a neighbor is also
/// contained in <paramref name="intersectWith"/>; otherwise returns false with
/// <paramref name="intersection"/> set to <c>NodeId.Invalid</c>. In both cases
/// <paramref name="range"/> spans the filtered neighbors seen so far (empty if none passed the filter).
/// </summary>
/// <exception cref="BuildXLException">
/// Thrown when an edge violates <paramref name="validateEdgeTopoProperty"/> and
/// <paramref name="skipOutOfOrderNodes"/> is false.
/// </exception>
private static bool RangeIncidentNodesAndIntersect(
    IReadonlyDirectedGraph graph,
    RangedNodeSet walkFromNodes,
    Func<IReadonlyDirectedGraph, NodeId, IEnumerable<Edge>> getEdges,
    Func<NodeId, NodeId, bool> validateEdgeTopoProperty,
    NodeRange incidentNodeFilter,
    RangedNodeSet intersectWith,
    bool skipOutOfOrderNodes,
    out NodeRange range,
    out NodeId intersection)
{
    // Note that initially, currentMin > currentMax so NodeRange.CreatePossiblyEmpty
    // would return an empty range. We return an empty range iff no nodes pass incidentNodeFilter below.
    uint currentMin = NodeId.MaxValue;
    uint currentMax = NodeId.MinValue;
    foreach (NodeId existingNode in walkFromNodes)
    {
        IEnumerable<Edge> edges = getEdges(graph, existingNode);
        foreach (Edge edge in edges)
        {
            NodeId other = edge.OtherNode;

            // Edges that violate the topological property are either skipped
            // (best-effort mode) or treated as a hard failure.
            if (!validateEdgeTopoProperty(existingNode, other))
            {
                if (skipOutOfOrderNodes)
                {
                    continue;
                }

                throw new BuildXLException(I($"Topological order violated due to an edge between nodes {existingNode} and {other}"));
            }

            // Neighbors outside the filter contribute to neither the range nor the intersection.
            if (!incidentNodeFilter.Contains(other))
            {
                continue;
            }

            // Widen the tracked [currentMin, currentMax] window to include this neighbor.
            // NOTE(review): these asserts check the running values stay within NodeId's
            // declared bounds — presumably NodeId.MaxValue/MinValue are narrower than the
            // full uint range, otherwise they are vacuous; confirm against NodeId.
            if (other.Value > currentMax)
            {
                currentMax = edge.OtherNode.Value;
                Contract.AssertDebug(currentMax <= NodeId.MaxValue && currentMax >= NodeId.MinValue);
            }

            if (other.Value < currentMin)
            {
                currentMin = edge.OtherNode.Value;
                Contract.AssertDebug(currentMin <= NodeId.MaxValue && currentMin >= NodeId.MinValue);
            }

            // Early exit on the first intersecting neighbor; the reported range covers
            // only the neighbors visited up to this point (it is not the full range).
            if (intersectWith.Contains(other))
            {
                intersection = other;
                Contract.AssertDebug(currentMin <= NodeId.MaxValue && currentMin >= NodeId.MinValue);
                Contract.AssertDebug(currentMax <= NodeId.MaxValue && currentMax >= NodeId.MinValue);
                range = NodeRange.CreatePossiblyEmpty(new NodeId(currentMin), new NodeId(currentMax));
                return(true);
            }
        }
    }

    // No intersection found; report the full range of filtered neighbors (possibly empty).
    intersection = NodeId.Invalid;
    Contract.AssertDebug(currentMin <= NodeId.MaxValue && currentMin >= NodeId.MinValue);
    Contract.AssertDebug(currentMax <= NodeId.MaxValue && currentMax >= NodeId.MinValue);
    range = NodeRange.CreatePossiblyEmpty(new NodeId(currentMin), new NodeId(currentMax));
    return(false);
}