/// <summary>
/// Writes an on-disk multi-chunk file covering logical chunks [chunkStartNum, chunkEndNum]:
/// a chunk header, a zero-filled physical data section and a footer with an empty MD5 hash.
/// </summary>
/// <param name="config">Database configuration supplying the logical chunk size.</param>
/// <param name="chunkStartNum">First logical chunk number covered by the file.</param>
/// <param name="chunkEndNum">Last logical chunk number covered by the file (inclusive).</param>
/// <param name="filename">Path of the chunk file to create.</param>
/// <param name="physicalSize">Physical data size in bytes; defaults to one chunk size.</param>
/// <param name="logicalSize">Logical data size in bytes; defaults to the full size of all covered chunks.</param>
/// <exception cref="ArgumentException">If chunkStartNum is greater than chunkEndNum.</exception>
public static void CreateMultiChunk(TFChunkDbConfig config, int chunkStartNum, int chunkEndNum, string filename, int? physicalSize = null, long? logicalSize = null)
{
    if (chunkStartNum > chunkEndNum)
        throw new ArgumentException("chunkStartNum must not be greater than chunkEndNum.", "chunkStartNum");

    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, config.ChunkSize, chunkStartNum, chunkEndNum, true, Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var physicalDataSize = physicalSize ?? config.ChunkSize;
    // Widen to long BEFORE multiplying: (chunk count) * (chunk size) can overflow Int32
    // for a wide chunk range even though each factor fits in an int.
    var logicalDataSize = logicalSize ?? (chunkEndNum - chunkStartNum + 1) * (long)config.ChunkSize;

    var buf = new byte[ChunkHeader.Size + physicalDataSize + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);

    // Footer goes at the very end of the buffer; checksum is left zeroed for test files.
    var chunkFooter = new ChunkFooter(true, true, physicalDataSize, logicalDataSize, 0, new byte[ChunkFooter.ChecksumSize]);
    chunkBytes = chunkFooter.AsByteArray();
    Buffer.BlockCopy(chunkBytes, 0, buf, buf.Length - ChunkFooter.Size, chunkBytes.Length);

    File.WriteAllBytes(filename, buf);
}
/// <summary>
/// Writes an on-disk single-chunk file for logical chunk <paramref name="chunkNum"/>:
/// a chunk header, a data section (optionally pre-filled with <paramref name="contents"/>)
/// and a footer with an empty MD5 hash.
/// </summary>
/// <param name="config">Database configuration supplying the logical chunk size.</param>
/// <param name="chunkNum">Logical chunk number (start == end for a single chunk).</param>
/// <param name="filename">Path of the chunk file to create.</param>
/// <param name="actualDataSize">Data section size in bytes; defaults to one chunk size.</param>
/// <param name="isScavenged">Whether the header should mark the chunk as scavenged.</param>
/// <param name="contents">Optional data payload; must be exactly the data section size.</param>
/// <exception cref="ArgumentException">If contents is provided with a length other than the data size.</exception>
public static void CreateSingleChunk(TFChunkDbConfig config, int chunkNum, string filename, int? actualDataSize = null, bool isScavenged = false, byte[] contents = null)
{
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, config.ChunkSize, chunkNum, chunkNum, isScavenged, Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var dataSize = actualDataSize ?? config.ChunkSize;

    var buf = new byte[ChunkHeader.Size + dataSize + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);

    // Footer goes at the very end of the buffer; checksum is left zeroed for test files.
    var chunkFooter = new ChunkFooter(true, true, dataSize, dataSize, 0, new byte[ChunkFooter.ChecksumSize]);
    chunkBytes = chunkFooter.AsByteArray();
    Buffer.BlockCopy(chunkBytes, 0, buf, buf.Length - ChunkFooter.Size, chunkBytes.Length);

    if (contents != null)
    {
        // Use a specific exception type with a diagnostic message instead of a bare Exception;
        // validate before copying so a mismatched payload never half-fills the buffer.
        if (contents.Length != dataSize)
            throw new ArgumentException(
                string.Format("Wrong contents size: expected {0} bytes but got {1}.", dataSize, contents.Length),
                "contents");
        Buffer.BlockCopy(contents, 0, buf, ChunkHeader.Size, contents.Length);
    }

    File.WriteAllBytes(filename, buf);
}
// Finalizes initialization of an already-completed chunk: marks it read-only, opens the
// reader streams, validates the header version, footer and exact on-disk file length,
// builds the midpoint cache and optionally verifies the file's MD5 hash.
private void InitCompleted(bool verifyHash)
{
    if (!File.Exists(_filename))
        throw new CorruptDatabaseException(new ChunkNotFoundException(_filename));

    _isReadonly = true;
    CreateReaderStreams();

    var workItem = GetReaderWorkItem();
    try
    {
        Debug.Assert(!workItem.IsMemory);

        _chunkHeader = ReadHeader(workItem.Stream);
        if (_chunkHeader.Version != CurrentChunkVersion)
            throw new CorruptDatabaseException(
                new WrongTFChunkVersionException(_filename, _chunkHeader.Version, CurrentChunkVersion));

        _chunkFooter = ReadFooter(workItem.Stream);
        _actualDataSize = _chunkFooter.ActualDataSize;

        // The file must be exactly header + data + map + footer; anything else means corruption.
        var expectedFileSize = _chunkFooter.ActualChunkSize + _chunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
        if (workItem.Stream.Length != expectedFileSize)
        {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should have file size {1} bytes, but instead has {2} bytes length.",
                              _filename, expectedFileSize, workItem.Stream.Length)));
        }

        _midpoints = PopulateMidpoints(_midpointsDepth);
    }
    finally
    {
        ReturnReaderWorkItem(workItem);
    }

    SetAttributes();
    if (verifyHash)
        VerifyFileHash();
}
// Completes a scavenged chunk: writes the footer together with the position map for the
// retained data, flushes everything to disk, then seals the chunk as read-only.
public void CompleteScavenge(ICollection<PosMap> mapping)
{
    if (_isReadonly)
        throw new InvalidOperationException("Cannot complete a read-only TFChunk.");

    _chunkFooter = WriteFooter(_actualDataSize, mapping);
    Flush();

    _isReadonly = true;
    CloseWriterStream();
}
// Completes a regular (non-scavenged) chunk: writes a footer sized to the full chunk
// (no position map), flushes everything to disk, then seals the chunk as read-only.
public void Complete()
{
    if (_isReadonly)
        throw new InvalidOperationException("Cannot complete a read-only TFChunk.");

    _chunkFooter = WriteFooter(_chunkHeader.ChunkSize, null);
    Flush();

    _isReadonly = true;
    CloseWriterStream();
}
// Writes the chunk footer — and, for scavenged chunks, the position map preceding it —
// to the writer stream, finalizes the running MD5 over the footer's hashed portion and
// embeds the resulting hash into the footer before writing it. Finally sets the file to
// its exact completed length. Returns the footer that was written.
// NOTE(review): relies on the writer stream's current position / WriteRawData to place the
// map immediately after the data section — confirm against the writer work item's usage.
private ChunkFooter WriteFooter(int actualChunkSize, ICollection<PosMap> mapping)
{
    int mapSize = 0;
    if (mapping != null)
    {
        // A cached chunk would take these writes on its in-memory copy, silently
        // diverging from the on-disk file — forbid writing a map in that state.
        if (_cached)
        {
            throw new InvalidOperationException("Trying to write mapping while chunk is cached! "
                                                + "You probably are writing scavenged chunk as cached. "
                                                + "Don't do this!");
        }
        // Each PosMap entry serializes to one ulong (8 bytes).
        mapSize = mapping.Count * sizeof(ulong);
        _writerWorkItem.Buffer.SetLength(mapSize);
        _writerWorkItem.Buffer.Position = 0;
        foreach (var map in mapping)
        {
            _writerWorkItem.BufferWriter.Write(map.AsUInt64());
        }
        WriteRawData(_writerWorkItem.Buffer);
    }

    var footer = new ChunkFooter(true, actualChunkSize, _actualDataSize, mapSize, new byte[ChunkFooter.ChecksumSize]);

    //MD5: hash the footer bytes excluding the trailing checksum field itself,
    //     finalizing the digest accumulated over the chunk so far.
    _writerWorkItem.MD5.TransformFinalBlock(footer.AsByteArray(), 0, ChunkFooter.Size - ChunkFooter.ChecksumSize);
    //FILE: embed the finished hash and write the footer right after data + map.
    footer.MD5Hash = _writerWorkItem.MD5.Hash;
    var footerBytes = footer.AsByteArray();
    var footerPos = ChunkHeader.Size + actualChunkSize + mapSize;
    _writerWorkItem.Stream.Position = footerPos;
    _writerWorkItem.Stream.Write(footerBytes, 0, ChunkFooter.Size);

    // Pad or truncate the file so its length is exactly header + data + map + footer.
    var fileSize = ChunkHeader.Size + actualChunkSize + mapSize + ChunkFooter.Size;
    if (_writerWorkItem.Stream.Length != fileSize)
        _writerWorkItem.Stream.SetLength(fileSize);

    return footer;
}
// Writes a fresh chunk file to disk: a header, a zero-filled data section of
// actualSize bytes, and a footer with an empty checksum.
private void CreateChunk(string filename, int actualSize, int chunkSize)
{
    var header = new ChunkHeader(1, chunkSize, 0, 0, 0);
    var footer = new ChunkFooter(true, actualSize, actualSize, 0, new byte[ChunkFooter.ChecksumSize]);

    var buf = new byte[ChunkHeader.Size + actualSize + ChunkFooter.Size];

    // Header at the front, footer at the very end; the middle stays zeroed.
    var headerBytes = header.AsByteArray();
    Buffer.BlockCopy(headerBytes, 0, buf, 0, headerBytes.Length);
    var footerBytes = footer.AsByteArray();
    Buffer.BlockCopy(footerBytes, 0, buf, buf.Length - ChunkFooter.Size, footerBytes.Length);

    File.WriteAllBytes(filename, buf);
}