// Creates the writer work item for a brand-new chunk file.
// The chunk is first materialized as a uniquely-named *.tmp file, sized and
// stamped with its header, flushed to disk, and only then moved into its
// final place — so a crash can never leave a header-less file at _filename.
private void CreateWriterWorkItemForNewChunk(ChunkHeader chunkHeader, int fileSize) {
	var md5 = MD5.Create();

	// create temp file first and set desired length
	// if there is not enough disk space or something else prevents file to be resized as desired
	// we'll end up with empty temp file, which won't trigger false error on next DB verification
	var tempFilename = string.Format("{0}.{1}.tmp", _filename, Guid.NewGuid());
	var tempFile = new FileStream(tempFilename, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.Read,
		WriteBufferSize, FileOptions.SequentialScan);
	tempFile.SetLength(fileSize);

	// we need to write header into temp file before moving it into correct chunk place, so in case of crash
	// we don't end up with seemingly valid chunk file with no header at all...
	// NOTE(review): tempFile and md5 are not disposed if SetLength/WriteHeader throws
	// (e.g. disk full) — confirm whether callers treat that as fatal.
	WriteHeader(md5, tempFile, chunkHeader);
	tempFile.FlushToDisk();
	tempFile.Close();
	File.Move(tempFilename, _filename);

	// Reopen the finalized file for appending, positioned just past the header.
	Stream stream = GetWriteStream(_filename);
	stream.Position = ChunkHeader.Size;
	_writerWorkItem = new WriterWorkItem(stream, null, md5);
	Flush(); // persist file move result
}
// Creates a purely in-memory chunk backed by a single unmanaged allocation:
// one writer stream positioned just past the header, plus _maxReaderCount
// pooled reader streams over the same buffer.
private void CreateInMemChunk(ChunkHeader chunkHeader, int fileSize) {
	var md5 = MD5.Create();

	// ALLOCATE MEM
	Interlocked.Exchange(ref _isCached, 1); // publish "cached" state before streams exist
	_cachedLength = fileSize;
	// Unmanaged allocation — NOTE(review): must be released elsewhere; confirm the free path.
	_cachedData = Marshal.AllocHGlobal(_cachedLength);

	// WRITER STREAM
	var memStream = new UnmanagedMemoryStream((byte *)_cachedData, _cachedLength, _cachedLength, FileAccess.ReadWrite);
	WriteHeader(md5, memStream, chunkHeader);
	memStream.Position = ChunkHeader.Size; // appends start right after the header

	// READER STREAMS — one read-only view per pooled reader
	Interlocked.Add(ref _memStreamCount, _maxReaderCount);
	for (int i = 0; i < _maxReaderCount; i++) {
		var stream = new UnmanagedMemoryStream((byte *)_cachedData, _cachedLength);
		var reader = new BinaryReader(stream);
		_memStreams.Enqueue(new ReaderWorkItem(stream, reader, isMemory: true));
	}

	_writerWorkItem = new WriterWorkItem(null, memStream, md5);
}
// Appends the whole contents of the MemoryStream to the chunk.
// Delegates to the byte[] overload via the stream's backing array (no copy).
// Returns the data position at which the write started.
private static long WriteRawData(WriterWorkItem workItem, MemoryStream buffer) {
	return WriteRawData(workItem, buffer.GetBuffer(), (int)buffer.Length);
}
// Reopens an ongoing (not yet completed) chunk file for writing.
// Reads the existing header — upgrading it in place from the Unaligned (v2)
// to the Aligned (v3) layout when needed — then replays the bytes written so
// far through the MD5 so the running hash matches the file contents.
private void CreateWriterWorkItemForExistingChunk(int writePosition, out ChunkHeader chunkHeader) {
	var md5 = MD5.Create();
	var stream = GetWriteStream(_filename);
	try {
		chunkHeader = ReadHeader(stream);
		if (chunkHeader.Version == (byte)ChunkVersions.Unaligned) {
			Log.Verbose("Upgrading ongoing file {chunk} to version 3", _filename);
			// Rebuild the header in the aligned format, preserving the other fields.
			var newHeader = new ChunkHeader((byte)ChunkVersions.Aligned,
				chunkHeader.ChunkSize,
				chunkHeader.ChunkStartNumber,
				chunkHeader.ChunkEndNumber,
				false,
				chunkHeader.ChunkId);
			stream.Seek(0, SeekOrigin.Begin);
			chunkHeader = newHeader;
			var head = newHeader.AsByteArray();
			stream.Write(head, 0, head.Length);
			stream.Flush();
			stream.Seek(0, SeekOrigin.Begin);
		}
	} catch {
		// Don't leak the handle or the hasher if the header is unreadable.
		stream.Dispose();
		((IDisposable)md5).Dispose();
		throw;
	}
	var realPosition = GetRawPosition(writePosition);
	// NOTE(review): ContinuousHashFor runs outside the try — a failure here
	// would leak stream/md5; confirm this is intended.
	MD5Hash.ContinuousHashFor(md5, stream, 0, realPosition);
	stream.Position = realPosition; // this reordering fixes bug in Mono implementation of FileStream
	_writerWorkItem = new WriterWorkItem(stream, null, md5);
}
// Appends len bytes from buf to the chunk, folding them into the running MD5.
// Returns the logical data position at which the write started.
private static long WriteRawData(WriterWorkItem workItem, byte[] buf, int len) {
	// Capture the position first — AppendData advances the stream.
	long startPosition = GetDataPosition(workItem);
	workItem.MD5.TransformBlock(buf, 0, len, null, 0);
	workItem.AppendData(buf, 0, len);
	return startPosition;
}
// Disposes the writer work item; a chunk opened read-only never creates one,
// in which case there is nothing to clean up.
private void CleanUpWriterWorkItem(WriterWorkItem writerWorkItem) {
	if (writerWorkItem != null) {
		writerWorkItem.Dispose();
	}
}
// Finalizes a non-raw chunk: writes the footer (with the position map),
// flushes, marks the chunk read-only, tears down the writer and makes the
// file read-only on disk. Throws if the chunk is already completed.
private void CompleteNonRaw(ICollection<PosMap> mapping) {
	if (_isReadOnly)
		throw new InvalidOperationException("Cannot complete a read-only TFChunk.");

	_chunkFooter = WriteFooter(mapping);
	Flush();
	_isReadOnly = true;

	var workItem = _writerWorkItem;
	_writerWorkItem = null;
	CleanUpWriterWorkItem(workItem);
	SetAttributes(_filename, true);
}
// Finalizes a chunk that was written raw (e.g. replicated byte-for-byte).
// The stream must be fully written; the footer is read back from it rather
// than generated. Marks the chunk read-only and releases the writer.
public void CompleteRaw() {
	if (_isReadOnly)
		throw new InvalidOperationException("Cannot complete a read-only TFChunk.");
	var workItem = _writerWorkItem;
	if (workItem.StreamPosition != workItem.StreamLength)
		throw new InvalidOperationException("The raw chunk is not completely written.");

	Flush();
	_chunkFooter = ReadFooter(workItem.WorkingStream);
	_isReadOnly = true;

	_writerWorkItem = null;
	CleanUpWriterWorkItem(workItem);
	SetAttributes(_filename, true);
}
// Appends raw bytes at the current write position, mirroring the write into
// the unmanaged in-memory copy (when one exists) and folding the bytes into
// the running MD5. Returns the data position at which the write started.
private static long WriteRawData(WriterWorkItem workItem, byte[] buf, int len) {
	var curPos = GetDataPosition(workItem);

	//MD5
	workItem.MD5.TransformBlock(buf, 0, len, null, 0);
	//FILE
	workItem.Stream.Write(buf, 0, len); // as we are always append-only, stream's position should be right here
	//MEMORY
	var memStream = workItem.UnmanagedMemoryStream;
	if (memStream != null) {
		// The memory copy is seeked explicitly to the raw offset mirroring the file write.
		var realMemPos = GetRawPosition(curPos);
		memStream.Seek(realMemPos, SeekOrigin.Begin);
		memStream.Write(buf, 0, len);
	}
	return(curPos);
}
// Releases the writer's file stream and (when present) its unmanaged memory
// stream. No-op when no writer work item exists.
// NOTE(review): the work item's MD5 is not disposed here — confirm it is
// released elsewhere.
private void CleanUpWriterWorkItem(WriterWorkItem writerWorkItem) {
	if (writerWorkItem == null)
		return;

	var fileStream = writerWorkItem.Stream;
	if (fileStream != null)
		fileStream.Dispose();

	var memStream = writerWorkItem.UnmanagedMemoryStream;
	if (memStream != null) {
		memStream.Dispose();
		writerWorkItem.UnmanagedMemoryStream = null;
	}
}
// Reopens an ongoing (not yet completed) chunk file for writing: reads the
// existing header, replays the bytes written so far through the MD5 so the
// running hash matches the file, then positions the stream at writePosition.
//
// FIX: previously only ReadHeader was guarded — if MD5Hash.ContinuousHashFor
// or the position seek threw, the FileStream and the MD5 leaked. The whole
// setup is now inside the try so the resources are disposed on any failure.
private void CreateWriterWorkItemForExistingChunk(int writePosition, out ChunkHeader chunkHeader) {
	var md5 = MD5.Create();
	var stream = new FileStream(_filename, FileMode.Open, FileAccess.ReadWrite, FileShare.Read,
		WriteBufferSize, FileOptions.SequentialScan);
	try {
		chunkHeader = ReadHeader(stream);
		var realPosition = GetRawPosition(writePosition);
		// Replay existing bytes so the hash continues from the correct state.
		MD5Hash.ContinuousHashFor(md5, stream, 0, realPosition);
		stream.Position = realPosition; // this reordering fixes bug in Mono implementation of FileStream
	} catch {
		stream.Dispose();
		((IDisposable)md5).Dispose();
		throw;
	}
	_writerWorkItem = new WriterWorkItem(stream, null, md5);
}
// Grows the unmanaged in-memory chunk buffer so the position map plus footer
// (mapSize + ChunkFooter.Size bytes) fit after the current write position.
// Copies existing contents into a fresh allocation, retires the old reader
// streams, swaps the writer stream, and re-creates the reader pool.
private void ResizeMemStream(WriterWorkItem workItem, int mapSize) {
	var newFileSize = (int)workItem.StreamPosition + mapSize + ChunkFooter.Size;
	if (workItem.StreamLength < newFileSize) {
		var pos = workItem.StreamPosition;
		var newCachedData = Marshal.AllocHGlobal(newFileSize);
		// New stream starts at the old length but may grow up to newFileSize.
		var memStream = new UnmanagedMemoryStream((byte *)newCachedData, workItem.StreamLength, newFileSize, FileAccess.ReadWrite);
		workItem.WorkingStream.Position = 0;
		workItem.WorkingStream.CopyTo(memStream);

		// All old readers must be retired before their backing memory goes away.
		if (!TryDestructMemStreams()) {
			throw new Exception("MemStream readers are in use when writing scavenged chunk.");
		}

		_cachedLength = newFileSize;
		// NOTE(review): the previous _cachedData allocation appears to be freed by
		// TryDestructMemStreams — confirm, otherwise this swap leaks it.
		_cachedData = newCachedData;
		memStream.Position = pos; // resume writing where the old stream left off
		workItem.SetMemStream(memStream);

		// READER STREAMS — rebuild the pool over the new buffer
		Interlocked.Add(ref _memStreamCount, _maxReaderCount);
		for (int i = 0; i < _maxReaderCount; i++) {
			var stream = new UnmanagedMemoryStream((byte *)_cachedData, _cachedLength);
			var reader = new BinaryReader(stream);
			_memStreams.Enqueue(new ReaderWorkItem(stream, reader, isMemory: true));
		}
	}
}
// Logical data position: the raw stream position with the fixed-size
// chunk header subtracted.
private static long GetDataPosition(WriterWorkItem workItem) {
	long rawPosition = workItem.StreamPosition;
	return rawPosition - ChunkHeader.Size;
}
// Appends raw bytes at the current write position, mirroring the write into
// the unmanaged in-memory copy (when one exists) and folding the bytes into
// the running MD5. Returns the logical position at which the write started.
private static long WriteRawData(WriterWorkItem workItem, byte[] buf, int len) {
	var curPos = GetLogicalPosition(workItem);

	//MD5
	workItem.MD5.TransformBlock(buf, 0, len, null, 0);
	//FILE
	workItem.Stream.Write(buf, 0, len); // as we are always append-only, stream's position should be right here
	//MEMORY
	var memStream = workItem.UnmanagedMemoryStream;
	if (memStream != null) {
		// The memory copy is seeked explicitly to the real offset mirroring the file write.
		var realMemPos = GetRealPosition(curPos);
		memStream.Seek(realMemPos, SeekOrigin.Begin);
		memStream.Write(buf, 0, len);
	}
	return curPos;
}
// Finalizes a scavenged chunk: writes the footer (with the position map),
// flushes, marks the chunk read-only and releases the writer work item.
// Throws if the chunk is already completed.
public void CompleteScavenge(ICollection<PosMap> mapping) {
	if (_isReadOnly)
		throw new InvalidOperationException("Cannot complete a read-only TFChunk.");

	_chunkFooter = WriteFooter(mapping);
	Flush();
	_isReadOnly = true;

	var workItem = _writerWorkItem;
	_writerWorkItem = null;
	CleanUpWriterWorkItem(workItem);
}
// Releases the writer's streams. No-op when the chunk never had a writer.
private void CleanUpWriterWorkItem(WriterWorkItem writerWorkItem) {
	if (writerWorkItem == null)
		return;

	// Read both stream references up front, then dispose whichever exist.
	var fileStream = writerWorkItem.Stream;
	var memoryStream = writerWorkItem.UnmanagedMemoryStream;

	if (fileStream != null)
		fileStream.Dispose();

	if (memoryStream != null) {
		memoryStream.Dispose();
		writerWorkItem.UnmanagedMemoryStream = null;
	}
}
// Creates a fully in-memory chunk over one unmanaged allocation: a writer
// stream positioned just past the header plus _maxReaderCount pooled reader
// streams sharing the same buffer.
private void CreateInMemChunk(ChunkHeader chunkHeader, int fileSize) {
	var md5 = MD5.Create();

	// ALLOCATE MEM
	Interlocked.Exchange(ref _isCached, 1); // publish "cached" state first
	_cachedLength = fileSize;
	// Unmanaged allocation — NOTE(review): must be freed elsewhere; confirm the release path.
	_cachedData = Marshal.AllocHGlobal(_cachedLength);

	// WRITER STREAM
	var memStream = new UnmanagedMemoryStream((byte*)_cachedData, _cachedLength, _cachedLength, FileAccess.ReadWrite);
	WriteHeader(md5, memStream, chunkHeader);
	memStream.Position = ChunkHeader.Size; // appends start right after the header

	// READER STREAMS — one view per pooled reader
	Interlocked.Add(ref _memStreamCount, _maxReaderCount);
	for (int i = 0; i < _maxReaderCount; i++) {
		var stream = new UnmanagedMemoryStream((byte*)_cachedData, _cachedLength);
		var reader = new BinaryReader(stream);
		_memStreams.Enqueue(new ReaderWorkItem(stream, reader, isMemory: true));
	}

	_writerWorkItem = new WriterWorkItem(null, memStream, md5);
}
// Finalizes a chunk written raw (byte-for-byte): the stream must be fully
// written and the footer is read back from it rather than generated.
// Marks the chunk read-only, releases the writer and updates file attributes.
public void CompleteRaw() {
	if (_isReadOnly)
		throw new InvalidOperationException("Cannot complete a read-only TFChunk.");
	var workItem = _writerWorkItem;
	if (workItem.StreamPosition != workItem.StreamLength)
		throw new InvalidOperationException("The raw chunk is not completely written.");

	Flush();
	_chunkFooter = ReadFooter(workItem.WorkingStream);
	_isReadOnly = true;

	_writerWorkItem = null;
	CleanUpWriterWorkItem(workItem);
	SetAttributes();
}
// Finalizes a non-raw chunk: writes the footer (with the position map),
// flushes, flips the read-only flag, releases the writer and updates the
// file attributes. Throws if the chunk is already completed.
private void CompleteNonRaw(ICollection<PosMap> mapping) {
	if (_isReadOnly)
		throw new InvalidOperationException("Cannot complete a read-only TFChunk.");

	_chunkFooter = WriteFooter(mapping);
	Flush();
	_isReadOnly = true;

	var workItem = _writerWorkItem;
	_writerWorkItem = null;
	CleanUpWriterWorkItem(workItem);
	SetAttributes();
}
// Appends the whole MemoryStream to the chunk via the byte[] overload,
// using the stream's backing array directly to avoid a copy.
// Returns the data position at which the write started.
private static long WriteRawData(WriterWorkItem workItem, MemoryStream buffer) {
	var bytes = buffer.GetBuffer();
	var count = (int)buffer.Length;
	return WriteRawData(workItem, bytes, count);
}
// Converts the writer's raw stream position into a data position by
// skipping the fixed-size chunk header.
private static long GetDataPosition(WriterWorkItem workItem) {
	var position = workItem.StreamPosition;
	return position - ChunkHeader.Size;
}
// Creates the writer work item for a brand-new chunk file. The chunk is
// first materialized as a uniquely-named *.tmp file, sized and stamped with
// its header, flushed to disk, and only then moved into its final place —
// so a crash can never leave a header-less file at _filename.
//
// FIX: the temp FileStream is now wrapped in a using block, so the handle is
// released even when SetLength/WriteHeader/FlushToDisk throws (e.g. disk
// full); previously it leaked on any failure between creation and Close().
private void CreateWriterWorkItemForNewChunk(ChunkHeader chunkHeader, int fileSize) {
	var md5 = MD5.Create();

	// create temp file first and set desired length
	// if there is not enough disk space or something else prevents file to be resized as desired
	// we'll end up with empty temp file, which won't trigger false error on next DB verification
	var tempFilename = string.Format("{0}.{1}.tmp", _filename, Guid.NewGuid());
	using (var tempFile = new FileStream(tempFilename, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.Read,
		WriteBufferSize, FileOptions.SequentialScan)) {
		tempFile.SetLength(fileSize);

		// we need to write header into temp file before moving it into correct chunk place, so in case of crash
		// we don't end up with seemingly valid chunk file with no header at all...
		WriteHeader(md5, tempFile, chunkHeader);
		tempFile.FlushToDisk();
	}
	File.Move(tempFilename, _filename);

	// Reopen the finalized file for appending, positioned just past the header.
	var stream = new FileStream(_filename, FileMode.Open, FileAccess.ReadWrite, FileShare.Read,
		WriteBufferSize, FileOptions.SequentialScan);
	stream.Position = ChunkHeader.Size;
	_writerWorkItem = new WriterWorkItem(stream, null, md5);
	Flush(); // persist file move result
}
// Logical position: the writer stream's raw offset with the fixed-size
// chunk header skipped.
private static int GetLogicalPosition(WriterWorkItem workItem) {
	var rawPosition = (int)workItem.Stream.Position;
	return rawPosition - ChunkHeader.Size;
}
// Creates a brand-new chunk file at the requested size, stamps its header
// and exposes the stream (plus a BinaryWriter) through the writer work item.
private void CreateWriterWorkItemForNewChunk(ChunkHeader chunkHeader, int fileSize) {
	var md5 = MD5.Create();
	var fileStream = new FileStream(_filename, FileMode.Create, FileAccess.ReadWrite, FileShare.Read,
		WriteBufferSize, FileOptions.SequentialScan);
	var writer = new BinaryWriter(fileStream);
	fileStream.SetLength(fileSize);
	WriteHeader(md5, fileStream, chunkHeader);
	_writerWorkItem = new WriterWorkItem(fileStream, writer, md5);
	Flush();
}
// Grows the unmanaged in-memory chunk buffer so the position map plus footer
// (mapSize + ChunkFooter.Size bytes) fit after the current write position.
// Copies existing contents into a fresh allocation, retires old reader
// streams, swaps the writer stream and rebuilds the reader pool.
private void ResizeMemStream(WriterWorkItem workItem, int mapSize) {
	var newFileSize = (int) workItem.StreamPosition + mapSize + ChunkFooter.Size;
	if (workItem.StreamLength < newFileSize) {
		var pos = workItem.StreamPosition;
		var newCachedData = Marshal.AllocHGlobal(newFileSize);
		// New stream starts at the old length but may grow up to newFileSize.
		var memStream = new UnmanagedMemoryStream((byte*) newCachedData, workItem.StreamLength, newFileSize, FileAccess.ReadWrite);
		workItem.WorkingStream.Position = 0;
		workItem.WorkingStream.CopyTo(memStream);

		// All old readers must be retired before their backing memory goes away.
		if (!TryDestructMemStreams())
			throw new Exception("MemStream readers are in use when writing scavenged chunk!");

		_cachedLength = newFileSize;
		// NOTE(review): the previous _cachedData allocation appears to be freed by
		// TryDestructMemStreams — confirm, otherwise this swap leaks it.
		_cachedData = newCachedData;

		memStream.Position = pos; // resume writing where the old stream left off
		workItem.SetMemStream(memStream);

		// READER STREAMS — rebuild the pool over the new buffer
		Interlocked.Add(ref _memStreamCount, _maxReaderCount);
		for (int i = 0; i < _maxReaderCount; i++) {
			var stream = new UnmanagedMemoryStream((byte*) _cachedData, _cachedLength);
			var reader = new BinaryReader(stream);
			_memStreams.Enqueue(new ReaderWorkItem(stream, reader, isMemory: true));
		}
	}
}
// Appends len bytes from buf to the chunk and keeps the running MD5 in sync.
// Returns the logical data position at which the write started.
private static long WriteRawData(WriterWorkItem workItem, byte[] buf, int len) {
	// Position must be captured before AppendData advances the stream.
	var positionBeforeWrite = GetDataPosition(workItem);
	workItem.MD5.TransformBlock(buf, 0, len, null, 0);
	workItem.AppendData(buf, 0, len);
	return positionBeforeWrite;
}
// Disposes the writer work item when one exists; read-only chunks have none.
private void CleanUpWriterWorkItem(WriterWorkItem writerWorkItem) {
	if (writerWorkItem != null)
		writerWorkItem.Dispose();
}