/// <summary>
/// Serializes the optional scavenge position map and the chunk footer (carrying an MD5
/// checksum over everything that precedes the checksum field) to the writer stream,
/// then trims the backing stream to the exact on-disk chunk size.
/// </summary>
/// <param name="mapping">Position-map entries for a scavenged chunk; null when the chunk carries no map.</param>
/// <returns>The completed footer containing the final MD5 hash.</returns>
private ChunkFooter WriteFooter(ICollection<PosMap> mapping) {
    var item = _writerWorkItem;

    var mapBytes = 0;
    if (mapping != null) {
        // A map must never be written into a cached on-disk chunk; only in-memory chunks are exempt.
        if (!_inMem && _isCached != 0) {
            throw new InvalidOperationException("Trying to write mapping while chunk is cached! "
                                                + "You probably are writing scavenged chunk as cached. "
                                                + "Don't do this!");
        }

        mapBytes = mapping.Count * PosMap.FullSize;
        item.Buffer.SetLength(mapBytes);
        item.Buffer.Position = 0;
        foreach (var posMap in mapping)
            posMap.Write(item.BufferWriter);

        if (_inMem)
            ResizeMemStream(item, mapBytes);
        WriteRawData(item, item.Buffer);
    }

    // Serialize the footer twice: first with a zeroed checksum so the MD5 can cover the
    // footer prefix, then again with the finished hash, which is what gets appended.
    var provisionalFooter = new ChunkFooter(true, true, _physicalDataSize, LogicalDataSize, mapBytes,
                                            new byte[ChunkFooter.ChecksumSize]);

    //MD5
    item.MD5.TransformFinalBlock(provisionalFooter.AsByteArray(), 0, ChunkFooter.Size - ChunkFooter.ChecksumSize);

    //FILE
    var completedFooter = new ChunkFooter(true, true, _physicalDataSize, LogicalDataSize, mapBytes, item.MD5.Hash);
    item.AppendData(completedFooter.AsByteArray(), 0, ChunkFooter.Size);
    Flush();

    // trying to prevent bug with resized file, but no data in it
    var targetSize = ChunkHeader.Size + _physicalDataSize + mapBytes + ChunkFooter.Size;
    if (item.StreamLength != targetSize) {
        item.ResizeStream(targetSize);
        _fileSize = targetSize;
    }

    return completedFooter;
}
/// <summary>
/// Finalizes a chunk written through the normal (non-raw) path: writes the footer
/// (with the optional position map), flushes, marks the chunk read-only, and
/// releases the writer work item.
/// </summary>
/// <param name="mapping">Position-map entries for a scavenged chunk, or null.</param>
/// <exception cref="InvalidOperationException">Thrown when the chunk is already read-only.</exception>
private void CompleteNonRaw(ICollection<PosMap> mapping) {
    if (_isReadOnly)
        throw new InvalidOperationException("Cannot complete a read-only TFChunk.");

    _chunkFooter = WriteFooter(mapping);
    Flush();
    _isReadOnly = true;

    CleanUpWriterWorkItem(_writerWorkItem);
    _writerWorkItem = null;
    SetAttributes();
}
/// <summary>
/// Finalizes a chunk that was filled via the raw (byte-for-byte copy) path. The raw data
/// already includes a footer, so it is read back from the stream rather than written.
/// </summary>
/// <exception cref="InvalidOperationException">
/// Thrown when the chunk is already read-only, or when the raw copy did not fill the stream completely.
/// </exception>
public void CompleteRaw() {
    if (_isReadOnly)
        throw new InvalidOperationException("Cannot complete a read-only TFChunk.");
    if (_writerWorkItem.StreamPosition != _writerWorkItem.StreamLength)
        throw new InvalidOperationException("The raw chunk is not completely written.");

    Flush();
    // The copied bytes already carry a footer; load it from the working stream.
    _chunkFooter = ReadFooter(_writerWorkItem.WorkingStream);
    _isReadOnly = true;

    CleanUpWriterWorkItem(_writerWorkItem);
    _writerWorkItem = null;
    SetAttributes();
}
/// <summary>
/// Initializes this chunk from an already-completed chunk file: validates the header
/// version, footer completeness and exact file size, wires up the read side, and
/// optionally verifies the file hash.
/// </summary>
/// <param name="verifyHash">When true, the whole file's hash is verified after initialization.</param>
/// <exception cref="CorruptDatabaseException">Thrown when the file is missing, has a wrong version, an incomplete footer, or an unexpected size.</exception>
private void InitCompleted(bool verifyHash) {
    var info = new FileInfo(_filename);
    if (!info.Exists)
        throw new CorruptDatabaseException(new ChunkNotFoundException(_filename));

    _fileSize = (int)info.Length;
    _isReadOnly = true;
    SetAttributes();
    CreateReaderStreams();

    var readItem = GetReaderWorkItem();
    try {
        _chunkHeader = ReadHeader(readItem.Stream);
        if (_chunkHeader.Version != CurrentChunkVersion)
            throw new CorruptDatabaseException(
                new WrongFileVersionException(_filename, _chunkHeader.Version, CurrentChunkVersion));

        _chunkFooter = ReadFooter(readItem.Stream);
        if (!_chunkFooter.IsCompleted) {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should be completed, but is not.", _filename)));
        }

        _logicalDataSize = _chunkFooter.LogicalDataSize;
        _physicalDataSize = _chunkFooter.PhysicalDataSize;

        // Sanity check: on-disk length must be exactly header + data + map + footer.
        var expectedFileSize = _chunkFooter.PhysicalDataSize + _chunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
        if (readItem.Stream.Length != expectedFileSize) {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should have file size {1} bytes, but instead has {2} bytes length.",
                              _filename, expectedFileSize, readItem.Stream.Length)));
        }
    } finally {
        ReturnReaderWorkItem(readItem);
    }

    _readSide = _chunkHeader.IsScavenged
        ? (IChunkReadSide)new TFChunkReadSideScavenged(this)
        : new TFChunkReadSideUnscavenged(this);
    _readSide.Cache();

    if (verifyHash)
        VerifyFileHash();
}
/// <summary>
/// Serializes the optional position map (as packed 64-bit entries) and the chunk footer
/// (with an MD5 checksum) to the writer stream, then trims the stream to the exact chunk size.
/// </summary>
/// <param name="mapping">Position-map entries for a scavenged chunk; null when the chunk carries no map.</param>
/// <returns>The completed footer containing the final MD5 hash.</returns>
private ChunkFooter WriteFooter(ICollection<PosMap> mapping) {
    var item = _writerWorkItem;

    var mapBytes = 0;
    if (mapping != null) {
        // A map must never be written into a cached chunk.
        if (_isCached != 0) {
            throw new InvalidOperationException("Trying to write mapping while chunk is cached! "
                                                + "You probably are writing scavenged chunk as cached. "
                                                + "Don't do this!");
        }

        mapBytes = mapping.Count * sizeof(ulong);
        item.Buffer.SetLength(mapBytes);
        item.Buffer.Position = 0;
        foreach (var posMap in mapping)
            item.BufferWriter.Write(posMap.AsUInt64());
        WriteRawData(item, item.Buffer);
    }

    // Footer goes out twice: once with a zeroed checksum for the MD5 to cover,
    // once with the finished hash for the file.
    var provisionalFooter = new ChunkFooter(true, _actualDataSize, _actualDataSize, mapBytes,
                                            new byte[ChunkFooter.ChecksumSize]);

    //MD5
    item.MD5.TransformFinalBlock(provisionalFooter.AsByteArray(), 0, ChunkFooter.Size - ChunkFooter.ChecksumSize);

    //FILE
    var completedFooter = new ChunkFooter(true, _actualDataSize, _actualDataSize, mapBytes, item.MD5.Hash);
    item.Stream.Write(completedFooter.AsByteArray(), 0, ChunkFooter.Size);

    var targetSize = ChunkHeader.Size + _actualDataSize + mapBytes + ChunkFooter.Size;
    if (item.Stream.Length != targetSize)
        item.Stream.SetLength(targetSize);

    return completedFooter;
}
/// <summary>
/// Finalizes a scavenged chunk: writes the footer together with the position map,
/// flushes, marks the chunk read-only, and releases the writer work item.
/// </summary>
/// <param name="mapping">Position-map entries produced by the scavenge.</param>
/// <exception cref="InvalidOperationException">Thrown when the chunk is already read-only.</exception>
public void CompleteScavenge(ICollection<PosMap> mapping) {
    if (_isReadOnly)
        throw new InvalidOperationException("Cannot complete a read-only TFChunk.");

    _chunkFooter = WriteFooter(mapping);
    Flush();
    _isReadOnly = true;

    CleanUpWriterWorkItem(_writerWorkItem);
    _writerWorkItem = null;
}
/// <summary>
/// Serializes the optional position map, zero-padding for aligned chunk versions, and
/// the chunk footer (with an MD5 checksum) to the writer stream, then records the
/// resulting file size.
/// </summary>
/// <param name="mapping">Position-map entries for a scavenged chunk; null when the chunk carries no map.</param>
/// <returns>The completed footer containing the final MD5 hash.</returns>
private ChunkFooter WriteFooter(ICollection<PosMap> mapping) {
    var item = _writerWorkItem;
    // Trim any pre-allocated tail so the map/footer land directly after the data.
    item.ResizeStream((int)item.StreamPosition);

    var mapBytes = 0;
    if (mapping != null) {
        // A map must never be written into a cached on-disk chunk; only in-memory chunks are exempt.
        if (!_inMem && _isCached != 0) {
            throw new InvalidOperationException("Trying to write mapping while chunk is cached. "
                                                + "You probably are writing scavenged chunk as cached. "
                                                + "Do not do this.");
        }

        mapBytes = mapping.Count * PosMap.FullSize;
        item.Buffer.SetLength(mapBytes);
        item.Buffer.Position = 0;
        foreach (var posMap in mapping)
            posMap.Write(item.BufferWriter);

        if (_inMem) {
            ResizeMemStream(item, mapBytes);
        }
        WriteRawData(item, item.Buffer);
    }

    item.FlushToDisk();

    // Aligned chunk versions are zero-padded so the footer ends on an aligned boundary.
    if (_chunkHeader.Version >= (byte)ChunkVersions.Aligned) {
        var alignedSize = GetAlignedSize(ChunkHeader.Size + _physicalDataSize + mapBytes + ChunkFooter.Size);
        var paddingSize = alignedSize - item.StreamPosition - ChunkFooter.Size;
        Log.Debug("Buffer size is {bufferSize}", paddingSize);
        if (paddingSize > 0) {
            var padding = new byte[paddingSize];
            WriteRawData(item, padding, padding.Length);
        }
    }

    Flush();

    // Footer goes out twice: once with a zeroed checksum for the MD5 to cover,
    // once with the finished hash, which is what gets appended.
    var provisionalFooter = new ChunkFooter(true, true, _physicalDataSize, LogicalDataSize, mapBytes,
                                            new byte[ChunkFooter.ChecksumSize]);

    //MD5
    item.MD5.TransformFinalBlock(provisionalFooter.AsByteArray(), 0, ChunkFooter.Size - ChunkFooter.ChecksumSize);

    //FILE
    var completedFooter = new ChunkFooter(true, true, _physicalDataSize, LogicalDataSize, mapBytes, item.MD5.Hash);
    item.AppendData(completedFooter.AsByteArray(), 0, ChunkFooter.Size);
    Flush();

    _fileSize = (int)item.StreamLength;
    return completedFooter;
}
/// <summary>
/// Initializes this chunk from an already-completed chunk file: validates the header
/// version (unaligned or aligned), the unbuffered-mode requirement, the footer and —
/// for unaligned chunks — the exact file size; then wires up the read side and
/// optionally verifies the file hash.
/// </summary>
/// <param name="verifyHash">When true, the whole file's hash is verified after initialization.</param>
/// <param name="optimizeReadSideCache">Passed to the scavenged read side to enable its cache optimization.</param>
/// <exception cref="CorruptDatabaseException">Thrown when the file is missing, has a wrong version, an incomplete footer, or an unexpected size.</exception>
private void InitCompleted(bool verifyHash, bool optimizeReadSideCache) {
    var info = new FileInfo(_filename);
    if (!info.Exists) {
        throw new CorruptDatabaseException(new ChunkNotFoundException(_filename));
    }

    _fileSize = (int)info.Length;
    _isReadOnly = true;
    SetAttributes(_filename, true);
    CreateReaderStreams();

    var readItem = GetReaderWorkItem();
    try {
        _chunkHeader = ReadHeader(readItem.Stream);
        Log.Debug("Opened completed {chunk} as version {version}", _filename, _chunkHeader.Version);

        if (_chunkHeader.Version != (byte)ChunkVersions.Unaligned
            && _chunkHeader.Version != (byte)ChunkVersions.Aligned) {
            throw new CorruptDatabaseException(
                new WrongFileVersionException(_filename, _chunkHeader.Version, CurrentChunkVersion));
        }

        // Unbuffered I/O relies on the aligned on-disk layout.
        if (_chunkHeader.Version != (byte)ChunkVersions.Aligned && _unbuffered) {
            throw new Exception(
                "You can only run unbuffered mode on v3 or higher chunk files. Please run scavenge on your database to upgrade your transaction file to v3.");
        }

        _chunkFooter = ReadFooter(readItem.Stream);
        if (!_chunkFooter.IsCompleted) {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should be completed, but is not.", _filename)));
        }

        _logicalDataSize = _chunkFooter.LogicalDataSize;
        _physicalDataSize = _chunkFooter.PhysicalDataSize;

        // Only unaligned chunks have an exactly predictable file size; aligned chunks carry padding.
        var expectedFileSize = _chunkFooter.PhysicalDataSize + _chunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
        if (_chunkHeader.Version == (byte)ChunkVersions.Unaligned && readItem.Stream.Length != expectedFileSize) {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format(
                    "Chunk file '{0}' should have a file size of {1} bytes, but it has a size of {2} bytes.",
                    _filename, expectedFileSize, readItem.Stream.Length)));
        }
    } finally {
        ReturnReaderWorkItem(readItem);
    }

    _readSide = _chunkHeader.IsScavenged
        ? (IChunkReadSide)new TFChunkReadSideScavenged(this, optimizeReadSideCache)
        : new TFChunkReadSideUnscavenged(this);
    _readSide.Cache();

    if (verifyHash) {
        VerifyFileHash();
    }
}