/// <summary>
/// Reads the 256-entry fan-out table from the "OIDF" chunk.
/// </summary>
/// <returns>
/// The 256 cumulative object counts, or <c>null</c> when the chunk is
/// missing or truncated.
/// </returns>
protected virtual async ValueTask<uint[]?> ReadFanOutAsync()
{
    // 256 big-endian 32-bit values.
    byte[] raw = new byte[256 * sizeof(int)];

    int bytesRead = await ReadFromChunkAsync("OIDF", 0, raw).ConfigureAwait(false);
    if (bytesRead != raw.Length)
    {
        return null; // Short read; treat the table as unavailable.
    }

    var table = new uint[256];
    for (int i = 0; i < table.Length; i++)
    {
        table[i] = NetBitConverter.ToUInt32(raw, i * sizeof(int));
    }
    return table;
}
/// <summary>
/// Decodes and validates a serialized file token string.
/// </summary>
/// <param name="tokenStr">The encoded token produced by the matching encoder.</param>
/// <returns>The decoded <see cref="FileToken"/>.</returns>
/// <exception cref="InvalidDataException">The MD5 signature does not match.</exception>
/// <exception cref="NotSupportedException">The token version byte differs from <c>CurrentVersion</c>.</exception>
public FileToken Decode(string tokenStr)
{
    byte[] raw = _urlDataCodec.Decode(tokenStr);

    // Verify the signature: the first 16 bytes are MD5(secret + payload).
    const int hashLen = 16;
    byte[] payload = new byte[raw.Length - hashLen];
    Array.Copy(raw, hashLen, payload, 0, payload.Length);

    byte[] signInput = ArrayUtil.Addition(_appSecretBytes, payload);
    byte[] digest = Md5(signInput);
    if (!ArrayUtil.Equals(digest, 0, raw, 0, hashLen))
    {
        throw new InvalidDataException("bad sign");
    }

    // Payload byte 0 carries the serialization version.
    if (payload[0] != CurrentVersion)
    {
        throw new NotSupportedException("bad token version");
    }

    // Deserialize the fixed-layout fields; offset 0 (version) was handled above.
    int offset = 1;
    uint pseudoId = NetBitConverter.ToUInt32(payload, offset);
    offset += 4;
    int fileId = NetBitConverter.ToInt32(payload, offset);
    offset += 4;
    int ownerId = NetBitConverter.ToInt32(payload, offset);
    offset += 4;
    uint mimeId = NetBitConverter.ToUInt32(payload, offset);
    offset += 4;
    var expires = ToDateTime(payload, offset);
    offset += sizeof(long);
    var created = ToDateTime(payload, offset);

    return new FileToken
    {
        PseudoId = pseudoId,
        FileId = fileId,
        FileOwnerId = ownerId,
        MimeId = mimeId,
        ExpireTime = expires,
        FileCreateTime = created
    };
}
// Lazily opens the pack index (.idx), detects its version and caches the
// fan-out table. _ver == 0 means "not probed yet"; -1 marks an unsupported
// future format. Safe to call repeatedly; later calls are no-ops once _ver
// is set. NOTE(review): if the 8-byte header read comes up short, _ver stays
// 0 and the probe is retried on the next call — confirm this is intended.
async ValueTask InitAsync()
{
    if (_ver == 0)
    {
        _fIdx ??= FileBucket.OpenRead(Path.ChangeExtension(PackFile, ".idx"));

        byte[] header = new byte[8];
        long fanOutOffset = -1;

        if (header.Length == await _fIdx.ReadAtAsync(0, header).ConfigureAwait(false))
        {
            // Pack index v2 signature: \xFF t O c followed by version number 2.
            var index = new byte[] { 255, (byte)'t', (byte)'O', (byte)'c', 0, 0, 0, 2 };

            if (header.SequenceEqual(index))
            {
                // We have a v2 header.
                fanOutOffset = 8;
                _ver = 2;
            }
            else if (header.Take(4).SequenceEqual(index.Take(4)))
            {
                // We have an unsupported future header
                _ver = -1;
                _fIdx.Dispose();
                _fIdx = null;
                return;
            }
            else
            {
                // We have a v0/v1 header, which is no header
                fanOutOffset = 0;
                _ver = 1;
            }
        }

        if (_fanOut == null && _ver > 0)
        {
            // Fan-out table: 256 big-endian 32-bit cumulative counts.
            byte[] fanOut = new byte[4 * 256];

            if (fanOut.Length == await _fIdx.ReadAtAsync(fanOutOffset, fanOut).ConfigureAwait(false))
            {
                _fanOut = new uint[256];
                for (int i = 0; i < 256; i++)
                {
                    _fanOut[i] = NetBitConverter.ToUInt32(fanOut, i * 4);
                }
            }

            // Remember whether a reachability bitmap exists alongside the pack.
            _hasBitmap = File.Exists(Path.ChangeExtension(PackFile, ".bitmap"));
        }
    }
}
/// <summary>
/// Reads the four EWAH bitmaps from the test .bitmap resource, XORs them
/// pairwise and verifies the combined result is all ones (except possibly
/// the last partial byte).
/// </summary>
public async Task Read4BitmapsXor()
{
    string bmpFile = FindResource("*.bitmap") ?? throw new InvalidOperationException("Bitmap not found");

    var fb = FileBucket.OpenRead(bmpFile);

    var headers = await fb.ReadFullAsync(32); // Skip headers
    uint count = NetBitConverter.ToUInt32(headers, 8);
    Assert.AreEqual(106u, count);

    var buckets = new List<GitEwahBitmapBucket>();

    for (int i = 0; i < 4; i++)
    {
        // Each bucket decodes from its own duplicate positioned at the
        // start of the i-th EWAH bitmap; we then skip that bitmap on fb.
        buckets.Add(new GitEwahBitmapBucket(fb.Duplicate(false)));

        await fb.ReadNetworkUInt32Async(); // Bitlength
        uint u2 = await fb.ReadNetworkUInt32Async(); // Compressed length

        for (uint n = 0; n < u2; n++)
        {
            await fb.ReadNetworkUInt64Async();
        }

        await fb.ReadNetworkUInt32Async(); // Last RLW start
    }

    var allXor = new BitwiseXorBucket(new BitwiseXorBucket(buckets[0], buckets[1]), new BitwiseXorBucket(buckets[2], buckets[3]));

    // Await the bit lengths instead of blocking via .AsTask().Result;
    // sync-over-async inside an async test risks thread-pool starvation.
    int maxBits = 0;
    foreach (var bucket in buckets)
    {
        maxBits = Math.Max(maxBits, await bucket.ReadBitLengthAsync());
    }
    Assert.AreEqual(2369, maxBits);

    var bb = await allXor.ReadFullAsync((maxBits + 7) / 8);

    // All whole bytes of the XOR must be 0xFF; the final (partial) byte is skipped.
    for (int i = 0; i < bb.Length - 1; i++)
    {
        Assert.AreEqual((byte)0xFF, bb[i]);
    }
}
/// <summary>
/// Dumps the four EWAH bitmaps of the test .bitmap resource, then decodes
/// each through <c>GitEwahBitmapBucket</c> and checks the expanded length
/// and peek behavior.
/// </summary>
public async Task ReadBitmap()
{
    string bmpFile = FindResource("*.bitmap") ?? throw new InvalidOperationException("Bitmap not found");

    var fb = FileBucket.OpenRead(bmpFile);

    var headers = await fb.ReadFullAsync(32); // Skip headers
    // Value at offset 8 — presumably the bitmap entry count; verify against gitformat-bitmap.
    uint count = NetBitConverter.ToUInt32(headers, 8);
    Assert.AreEqual(106u, count);

    //BitArray
    // First pass: walk the raw EWAH structures on a duplicate of fb, record
    // each bitmap's bit length and log the compressed words.
    var bitLengths = new int[4];
    {
        var c = fb.Duplicate(false);
        Assert.AreEqual(32, c.Position);

        for (int i = 0; i < 4; i++)
        {
            bitLengths[i] = (int)await c.ReadNetworkUInt32Async(); // uncompressed bit length
            uint u2 = await c.ReadNetworkUInt32Async();            // compressed word count
            List<ulong> w = new List<ulong>();
            for (uint n = 0; n < u2; n++)
            {
                w.Add(await c.ReadNetworkUInt64Async());
            }
            await c.ReadNetworkUInt32Async(); // trailing RLW start position

            TestContext.WriteLine($"EWAH {i}: {bitLengths[i]}\t{u2}");
            foreach (var v in w)
            {
                TestContext.Write($"{v:X16} ");
            }
            TestContext.WriteLine();
        }
        //GC.KeepAlive(u1 + u2 + u3);
    }

    // Second pass: decode each bitmap via GitEwahBitmapBucket reading fb
    // itself (NoClose keeps fb usable for the next iteration).
    for (int i = 0; i < 4; i++)
    {
        using var ewah = new GitEwahBitmapBucket(fb.NoClose(true));

        Assert.AreEqual(0L, ewah.Position);

        // Expansion is done in whole 64-bit words, so round bits up to 8-byte units.
        int expectedBytes = (int)(8 * ((bitLengths[i] + 63) / 64));

        long? p = await ewah.ReadRemainingBytesAsync();
        int peekLen = ewah.Peek().Length;
        Assert.IsTrue(peekLen > 0, "Can peek something");
        Assert.IsTrue(peekLen <= expectedBytes, "No overshoot");
        Assert.AreEqual(expectedBytes, (int)p, "ReadRemaining returned expected value");

        var bb = await ewah.ReadFullAsync(65536);
        Assert.AreEqual(expectedBytes, bb.Length, $"Read {bb.Length}, expected {bitLengths[i]} bits, what would be {(bitLengths[i] + 7) / 8} bytes, or {expectedBytes} bytes when reading longs");

        // Hex-dump the expanded bitmap, truncated to the meaningful bytes.
        StringBuilder sb = new StringBuilder();
        for (int ii = 0; ii < bb.Length; ii++)
        {
            sb.Append(bb[ii].ToString("x2"));
        }
        TestContext.WriteLine();

        int removeAfter = 2 * ((bitLengths[i] + 7) / 8);
        if (removeAfter < sb.Length)
        {
            sb.Remove(removeAfter, sb.Length - removeAfter);
        }
        TestContext.WriteLine(sb.ToString());
    }
}
/// <summary>
/// Looks up commit metadata (parents, generation/chain level, optional
/// corrected commit-time offset) for <paramref name="id"/> in the
/// commit-graph file.
/// </summary>
/// <param name="id">The commit id to look up.</param>
/// <returns>The commit info, or <c>null</c> when the id is not in this graph
/// (or a chunk read came up short).</returns>
/// <remarks>
/// Fixes formatter-displaced <c>};</c> tokens in the original
/// (<c>… } } ;</c>) that made the method syntactically invalid; the logic
/// is unchanged. 0x70000000 is the "no parent" sentinel; a second-parent
/// value with the high bit set redirects into the "EDGE" extra-parents list.
/// </remarks>
internal override async ValueTask<IGitCommitGraphInfo?> GetCommitInfo(GitId id)
{
    await InitAsync().ConfigureAwait(false);

    var (success, index) = await TryFindIdAsync(id).ConfigureAwait(false);

    if (!success)
        return null;

    int hashLength = GitId.HashLength(IdType);
    // CDAT record: root tree hash + 2 parent positions + generation/date word.
    int commitDataSz = hashLength + 2 * sizeof(uint) + sizeof(ulong);
    byte[] commitData = new byte[commitDataSz];

    if (commitDataSz != await ReadFromChunkAsync("CDAT", index * commitDataSz, commitData).ConfigureAwait(false))
        return null; // Short read; treat as not present.

    // commitData now contains the root hash, 2 parent indexes and the topological level
    uint parent0 = NetBitConverter.ToUInt32(commitData, hashLength);
    uint parent1 = NetBitConverter.ToUInt32(commitData, hashLength + sizeof(uint));
    ulong chainLevel = NetBitConverter.ToUInt64(commitData, hashLength + 2 * sizeof(uint));

    Task<GitId>[] parents;
    if (parent0 == 0x70000000)
    {
        // No parents at all (root commit).
        return new GitCommitGraphInfo(Array.Empty<GitId>(), chainLevel,
            _haveV2 ? await ReadCommitTimeOffset(index).ConfigureAwait(false) : long.MinValue);
    }
    else if (parent1 == 0x70000000)
    {
        // Exactly one parent.
        parents = new[] { GetOidAsync((int)parent0).AsTask() };
    }
    else if (parent1 >= 0x80000000)
    {
        // Octopus merge: remaining parents live in the EDGE chunk, starting
        // at position (parent1 & 0x7FFFFFFF); the list ends at the first
        // entry whose high bit is set.
        var extraParents = new byte[sizeof(uint) * 256];
        int len = await ReadFromChunkAsync("EDGE", 4 * (parent1 & 0x7FFFFFFF), extraParents).ConfigureAwait(false) / sizeof(uint);

        if (len == 0 || len >= 256)
        {
            return null; // Handle as if not exists in chain. Should never happen
        }

        int? stopAfter = null;
        parents = new[] { GetOidAsync((int)parent0).AsTask() }.Concat(
            Enumerable.Range(0, len)
                .Select(i => NetBitConverter.ToUInt32(extraParents, i * sizeof(uint)))
                .TakeWhile((v, i) =>
                {
                    // (i > null) is false, so nothing stops until the
                    // terminator entry sets stopAfter.
                    if (i > stopAfter)
                        return false;
                    else if ((v & 0x80000000) != 0)
                        stopAfter = i;

                    return true;
                })
                .Select(v => GetOidAsync((int)(v & 0x7FFFFFFF)).AsTask())).ToArray();
    }
    else
    {
        // Regular two-parent commit.
        parents = new[] { GetOidAsync((int)parent0).AsTask(), GetOidAsync((int)parent1).AsTask() };
    }

    IEnumerable<Task> waits;
    Task<long>? v2 = null;

    if (_haveV2)
    {
        // Fetch the corrected commit-time offset concurrently with the parent ids.
        v2 = ReadCommitTimeOffset(index).AsTask();
        waits = parents.Concat(new Task[] { v2 });
    }
    else
    {
        waits = parents;
    }

    await Task.WhenAll(waits).ConfigureAwait(false);

    long offset = v2 != null ? await v2.ConfigureAwait(false) : long.MinValue;

    return new GitCommitGraphInfo(parents.Select(x => x.Result).ToArray(), chainLevel, offset);
}
// Decompresses the next stretch of the EWAH bitmap into _buffer and exposes
// it via _readable. Returns true when bytes were produced, false when the
// stream is exhausted (or nothing is buffered and allowWait is false).
// State machine: start (read run-length word) -> same (expand the run) ->
// raw (copy literal words) -> footer (consume trailing RLW position) -> done.
private async ValueTask<bool> RefillAsync(bool allowWait)
{
    // Not started / between ops, caller refuses to block, and no data is
    // immediately available: report "nothing yet" without reading.
    if (_state <= ewah_state.start && !allowWait && Inner.Peek().IsEmpty)
    {
        return(false);
    }

    // First call: read the EWAH header — uncompressed bit length followed
    // by the compressed 64-bit word count.
    if (_lengthBits is null)
    {
        var bb = await Inner.ReadFullAsync(4 + 4).ConfigureAwait(false);

        _lengthBits = NetBitConverter.ToUInt32(bb, 0);
        _compressedSize = NetBitConverter.ToInt32(bb, 4);
        _left = _compressedSize;
        _state = ewah_state.start;
    }

    // Number of whole 64-bit words readable without blocking.
    int peekLength = Inner.Peek().Length / sizeof(ulong);
    _wpos = 0;

    switch (_state)
    {
        case ewah_state.start:
            // Run-length word: bit 0 = run bit value, bits 1..32 = run
            // length (in 64-bit words), bits 33..63 = literal word count.
            ulong curOp = await Inner.ReadNetworkUInt64Async().ConfigureAwait(false);

            _repBit = (curOp & 1UL) != 0;
            _repCount = (uint)(curOp >> 1);
            _rawCount = (int)(curOp >> 33);
            _left--;
            peekLength--;

            _state = ewah_state.same;
            goto case ewah_state.same;

        case ewah_state.same:
            // Expand the run: each repeated word becomes 8 identical bytes.
            byte val = _repBit ? (byte)0xFF : (byte)0;

            while (_repCount > 0 && _wpos + 8 < _buffer.Length)
            {
                _buffer[_wpos++] = val;
                _buffer[_wpos++] = val;
                _buffer[_wpos++] = val;
                _buffer[_wpos++] = val;
                _buffer[_wpos++] = val;
                _buffer[_wpos++] = val;
                _buffer[_wpos++] = val;
                _buffer[_wpos++] = val;
                _repCount--;
            }

            // Buffer full mid-run: hand out what we have, resume here next call.
            if (_repCount > 0)
            {
                _readable = new BucketBytes(_buffer, 0, _wpos);
                return(true);
            }

            _state = ewah_state.raw;
            goto case ewah_state.raw;

        case ewah_state.raw:
            while (_rawCount > 0)
            {
                if ((_wpos > 8 && peekLength < 8) || (_wpos + 8 >= _buffer.Length))
                {
                    // Avoid new reads if we already have something. Return result
                    _readable = new BucketBytes(_buffer, 0, _wpos);
                    return(true);
                }

                var bb = await Inner.ReadFullAsync(sizeof(ulong)).ConfigureAwait(false);

                if (bb.Length != sizeof(ulong))
                {
                    throw new BucketEofException(Inner);
                }
                peekLength--;
                _left--;
                _rawCount--;

                // Copy the literal word reversed — presumably converting the
                // big-endian wire word to the in-buffer byte order expected
                // by readers; TODO confirm against the EWAH bit layout.
                for (int i = bb.Length - 1; i >= 0; i--)
                {
                    _buffer[_wpos++] = bb[i];
                }
            }

            if (_left == 0)
            {
                // All compressed words consumed; footer remains.
                _state = ewah_state.footer;
                _readable = new BucketBytes(_buffer, 0, _wpos);
                return(true);
            }

            _state = ewah_state.start;
            goto case ewah_state.start;

        case ewah_state.footer:
            // Trailing 32-bit value (position of the last RLW); discard it.
            await Inner.ReadNetworkUInt32Async().ConfigureAwait(false);
            _state = ewah_state.done;
            goto case ewah_state.done;

        case ewah_state.done:
        default:
            return(false);
    }
}