Example 1
0
        public void TrickyEmbedTest()
        {
            WithFile(Body).Wait();
            async Task Body(string fname)
            {
                // Write one tiny chunk and snapshot the resulting file bytes
                // (a complete, valid file image starting with FileHeader).
                using (var writer = new ChunkWriter(fname)) {
                    await Write(writer, Content(1, count: 1));
                }
                byte[] snapshot = File.ReadAllBytes(fname);
                CollectionAssert.AreEqual(FileHeader, snapshot.Take(FileHeader.Length).ToArray());
                File.Delete(fname);

                // Write a chunk spanning a full meter interval, then truncate the file
                // to exactly MeterInterval bytes so the chunk is cut short.
                using (var writer = new ChunkWriter(fname)) {
                    await Write(writer, Content(1, count: MeterInterval));
                }
                using (var file = new FileStream(fname, FileMode.Open, FileAccess.Write)) {
                    Assert.AreEqual(MeterInterval + 2 * Meter.Size + ChunkHeader.Size, file.Length);
                    file.SetLength(MeterInterval);
                }

                // Append the snapshot as the content of a fresh chunk. Its bytes now sit
                // inside the file while also *looking* like a chunkio file themselves.
                using (var writer = new ChunkWriter(fname)) {
                    await Write(writer, snapshot);
                }

                using (var reader = new ChunkReader(fname)) {
                    IChunk chunk = await reader.ReadFirstAsync(1, long.MaxValue);

                    Assert.IsNotNull(chunk);
                    byte[] actual = new byte[chunk.ContentLength];
                    Assert.IsTrue(await chunk.ReadContentAsync(actual, 0));
                    // A naive implementation can give Content(1, 1) instead, which is an awful thing to do
                    // because Content(1, 1) is embedded in the middle of a real chunk.
                    CollectionAssert.AreEqual(snapshot, actual);
                }
            }
        }
Example 2
0
        public void TrickyTruncateTest()
        {
            WithFile(Body).Wait();
            async Task Body(string fname)
            {
                // Write a chunk sized so the file ends one byte past a meter boundary,
                // then truncate back to exactly MeterInterval bytes.
                using (var writer = new ChunkWriter(fname)) {
                    await Write(writer, Content(1, count: MeterInterval - Meter.Size + 1));
                }
                using (var file = new FileStream(fname, FileMode.Open, FileAccess.Write)) {
                    Assert.AreEqual(MeterInterval + Meter.Size + ChunkHeader.Size + 1, file.Length);
                    file.SetLength(MeterInterval);
                }

                // Reopen for writing: the writer should resume at the original
                // (pre-truncation) length, and accept further chunks.
                using (var writer = new ChunkWriter(fname)) {
                    await Write(writer, Content(2, count: 1));

                    Assert.AreEqual(MeterInterval + Meter.Size + ChunkHeader.Size + 1, writer.Length);
                    await Write(writer, Content(3, count: 1));
                }

                using (var reader = new ChunkReader(fname)) {
                    IChunk chunk = await reader.ReadFirstAsync(1, long.MaxValue);

                    Assert.IsNotNull(chunk);
                    byte[] actual = new byte[chunk.ContentLength];
                    Assert.IsTrue(await chunk.ReadContentAsync(actual, 0));
                    // A naive implementation can give Content(3, 1) instead of Content(2, 1), which means
                    // effectively skipping a valid chunk.
                    CollectionAssert.AreEqual(Content(2, count: 1), actual);
                }
            }
        }
Example 3
0
        public void MaxChunkSizeTest()
        {
            WithFile(Body).Wait();
            async Task Body(string fname)
            {
                // A maximum-size chunk sandwiched between two tiny ones.
                using (var writer = new ChunkWriter(fname)) {
                    await Write(writer, Content(1, count: 1));
                    await Write(writer, Content(2, count: MaxContentLength));
                    await Write(writer, Content(3, count: 1));
                }

                using (var reader = new ChunkReader(fname)) {
                    // First chunk: the tiny leading one.
                    IChunk chunk = await reader.ReadFirstAsync(0, long.MaxValue);

                    Assert.IsNotNull(chunk);
                    byte[] data = new byte[chunk.ContentLength];
                    Assert.IsTrue(await chunk.ReadContentAsync(data, 0));
                    CollectionAssert.AreEqual(Content(1, count: 1), data);

                    // Second chunk: the maximum-size one; every byte must equal 2.
                    chunk = await reader.ReadFirstAsync(chunk.EndPosition, long.MaxValue);

                    Assert.IsNotNull(chunk);
                    Assert.AreEqual(MaxContentLength, chunk.ContentLength);
                    data = new byte[chunk.ContentLength];
                    Assert.IsTrue(await chunk.ReadContentAsync(data, 0));
                    for (int i = 0; i != data.Length; ++i)
                    {
                        byte x = data[i];
                        if (x != 2)
                        {
                            Assert.Fail($"Invalid byte: {x}");
                        }
                    }

                    // Third chunk: the tiny trailing one.
                    chunk = await reader.ReadFirstAsync(chunk.EndPosition, long.MaxValue);

                    Assert.IsNotNull(chunk);
                    data = new byte[chunk.ContentLength];
                    Assert.IsTrue(await chunk.ReadContentAsync(data, 0));
                    CollectionAssert.AreEqual(Content(3, count: 1), data);

                    // Nothing after the last chunk.
                    chunk = await reader.ReadFirstAsync(chunk.EndPosition, long.MaxValue);

                    Assert.IsNull(chunk);
                }
            }
        }
Example 4
0
        // Reads the chunk's raw content and decompresses it into a new InputChunk.
        // Returns null when the content cannot be read or fails to decompress;
        // callers treat both conditions as a missing chunk.
        static async Task<InputChunk> Decompress(IChunk chunk)
        {
            var compressed = new byte[chunk.ContentLength];
            if (!await chunk.ReadContentAsync(compressed, 0))
            {
                return null;
            }

            var decompressed = new InputChunk(chunk.BeginPosition, chunk.EndPosition, chunk.UserData);
            try {
                Compression.DecompressTo(compressed, 0, compressed.Length, decompressed);
            } catch {
                decompressed.Dispose();
                // This translation of decompression errors into missing chunks is the only reason
                // why ReadAtPartitionAsync is implemented in BufferedReader rather than ChunkReader.
                return null;
            }
            return decompressed;
        }