// Verifies the 8-byte little-endian SipHash stored at array[offset] against the
// hash of the preceding bytes array[0, offset). Advances `offset` past the stored
// hash regardless of the outcome. Returns true iff the hashes match.
public static bool VerifyHash(byte[] array, ref int offset) {
  // Hash must be computed before the read below bumps `offset`.
  ulong computed = SipHash.ComputeHash(array, 0, offset);
  ulong stored = UInt64LE.Read(array, ref offset);
  return stored == computed;
}
// Serializes this meter into the first Size bytes of `array`: ChunkBeginPosition
// as a little-endian uint64, followed by the SipHash of those serialized bytes.
public void WriteTo(byte[] array) {
  Debug.Assert(array.Length >= Size);
  Debug.Assert(Format.IsValidPosition(ChunkBeginPosition));
  int pos = 0;
  UInt64LE.Write(array, ref pos, (ulong)ChunkBeginPosition);
  // The hash covers exactly the bytes written so far, i.e. array[0, pos).
  ulong hash = SipHash.ComputeHash(array, 0, pos);
  UInt64LE.Write(array, ref pos, hash);
}
// Serializes this chunk header into the first Size bytes of `array`:
// UserData, then ContentLength and ContentHash as little-endian uint64s,
// and finally the SipHash of all header bytes written so far.
public void WriteTo(byte[] array) {
  Debug.Assert(array.Length >= Size);
  Debug.Assert(Format.IsValidContentLength(ContentLength));
  int pos = 0;
  UserData.WriteTo(array, ref pos);
  UInt64LE.Write(array, ref pos, (ulong)ContentLength);
  UInt64LE.Write(array, ref pos, ContentHash);
  // Trailing hash covers everything serialized above: array[0, pos).
  ulong hash = SipHash.ComputeHash(array, 0, pos);
  UInt64LE.Write(array, ref pos, hash);
}
// Writes one chunk consisting of `userData` plus `count` content bytes taken from
// array[offset, offset + count). The chunk is preceded by its header; if the
// previous write failed (_torn), padding is written first so readers can resync.
//
// Throws ArgumentNullException if `array` is null, ArgumentException on an invalid
// [offset, offset + count) range or oversized content, and Exception if the chunk
// would end past the maximum representable file position. On a failed write the
// writer is marked torn so the next WriteAsync pads before writing.
public async Task WriteAsync(UserData userData, byte[] array, int offset, int count) {
  if (array == null) {
    throw new ArgumentNullException(nameof(array));
  }
  // ArgumentException instead of bare Exception (CA2201): callers can now
  // distinguish caller bugs, and since ArgumentException derives from Exception
  // any existing catch clauses still work. Messages are unchanged.
  if (offset < 0 || count < 0 || array.Length - offset < count) {
    throw new ArgumentException($"Invalid range for array of length {array.Length}: [{offset}, {offset} + {count})");
  }
  if (count > MaxContentLength) {
    throw new ArgumentException($"Chunk too big: {count}");
  }
  if (_torn) {
    // Realign the stream after a previously failed (torn) chunk write.
    await WritePadding();
    _torn = false;
  }
  var meter = new Meter() { ChunkBeginPosition = _writer.Position };
  var header = new ChunkHeader() {
    UserData = userData,
    ContentLength = count,
    ContentHash = SipHash.ComputeHash(array, offset, count),
  };
  // EndPosition is null when the chunk would overflow the file's address space.
  if (!header.EndPosition(meter.ChunkBeginPosition).HasValue) {
    throw new Exception($"File too big: {meter.ChunkBeginPosition}");
  }
  meter.WriteTo(_meter);
  header.WriteTo(_header);
  try {
    await WriteMetered(_header, 0, _header.Length);
    await WriteMetered(array, offset, count);
  } catch {
    // A partially written chunk leaves the file torn; remember to pad next time.
    _torn = true;
    throw;
  }
}
// Returns true if it's safe to read the chunk that follows the specified chunk.
// There are two cases where blindly reading the next chunk can backfire:
//
// 1. By reading the next chunk you may actually skip valid chunks, so the chunk
//    you read won't really be the next one.
// 2. Even if the next chunk decodes correctly (both its header and content hashes
//    match), it may not be a real chunk but a fragment of some larger chunk's content.
//
// To see these horrors in action, replace the implementation of this method with
// `return true` and run the tests: TrickyTruncateTest() and TrickyEmbedTest() should
// fail. They correspond to the two cases above. Note that there is no malicious
// action in these tests -- the chunkio files are produced by ChunkWriter. There are
// also file truncations, but those can naturally happen when a process using
// ChunkWriter crashes.
async Task<bool> IsSkippable(long begin, ChunkHeader header) {
  long end = header.EndPosition(begin).Value;
  // A chunk that doesn't cross a meter boundary cannot embed or hide anything.
  if (begin / MeterInterval == (end - 1) / MeterInterval) {
    return true;
  }
  // The meter closest before the chunk's last byte must point back at `begin`.
  Meter? m = await ReadMeter(MeterBefore(end - 1));
  if (m.HasValue) {
    return m.Value.ChunkBeginPosition == begin;
  }
  // No meter available: fall back to verifying the chunk's content hash.
  var buf = new byte[header.ContentLength];
  long contentPos = MeteredPosition(begin, ChunkHeader.Size).Value;
  if (!await ReadMetered(contentPos, buf, 0, buf.Length)) {
    return false;
  }
  return SipHash.ComputeHash(buf) == header.ContentHash;
}
// Reads the chunk's content into array[offset, offset + ContentLength) and
// verifies it against the header's content hash. Returns true iff the read
// succeeded and the hash matched; the result is cached in _skippable.
public async Task<bool> ReadContentAsync(byte[] array, int offset) {
  if (array == null) {
    throw new ArgumentNullException(nameof(array));
  }
  if (offset < 0) {
    throw new ArgumentException($"Negative offset: {offset}");
  }
  if (array.Length - offset < ContentLength) {
    throw new ArgumentException($"Array too short: {array.Length}");
  }
  long contentPos = MeteredPosition(BeginPosition, ChunkHeader.Size).Value;
  // Only hash the content if the read itself succeeded (short-circuit).
  bool ok = await _reader.ReadMetered(contentPos, array, offset, ContentLength)
            && SipHash.ComputeHash(array, offset, ContentLength) == _header.ContentHash;
  _skippable = ok;
  return ok;
}