// Writes a single prepare record into a pre-created chunk (checkpoint starting
// at offset 137) and verifies both the advanced checkpoint and the on-disk bytes.
// FIX: removed leftover debug Console.WriteLine calls (record sizes, directory listing).
public void a_record_can_be_written() {
    var filename = Path.Combine(PathName, "prefix.tf0");
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    File.WriteAllBytes(filename, buf);

    // Checkpoint starts 137 bytes into the chunk's data area.
    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               chunkHeader.ChunkSize,
                                               0,
                                               _checkpoint,
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();
    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 0,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      transactionPosition: 0,
                                      transactionOffset: 0,
                                      eventStreamId: "WorldEnding",
                                      expectedVersion: 1234,
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.None,
                                      eventType: "type",
                                      data: new byte[8000],
                                      metadata: new byte[] { 7, 17 });
    long pos;
    Assert.IsTrue(writer.Write(record, out pos));
    writer.Close();
    db.Dispose();

    // The checkpoint must have advanced by exactly one framed record.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    // Read the record back from disk, skipping header, initial 137 bytes and length prefix.
    using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record, read);
    }
}
// Writes one prepare record with a payload sized to produce a 4097-byte framed
// record, then verifies the checkpoint advance and the bytes persisted on disk.
public void a_record_can_be_written() {
    // Lay down a chunk file that contains only a valid header.
    var chunkFileName = GetFilePathFor("chunk-000000.000000");
    var header = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
    var headerBytes = header.AsByteArray();
    var fileContents = new byte[ChunkHeader.Size + ChunkFooter.Size + header.ChunkSize];
    Buffer.BlockCopy(headerBytes, 0, fileContents, 0, headerBytes.Length);
    File.WriteAllBytes(chunkFileName, fileContents);

    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new VersionedPatternFileNamingStrategy(PathName, "chunk-"),
                                               header.ChunkSize,
                                               0,
                                               _checkpoint,
                                               new InMemoryCheckpoint(),
                                               new InMemoryCheckpoint(-1),
                                               new InMemoryCheckpoint(-1)));
    db.Open();

    // 3994 data bytes give exactly a 4097-byte framed record; with 3993 (record size 4096) everything works fine.
    var payload = new byte[3994];
    new Random().NextBytes(payload);

    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 137,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      transactionPosition: 789,
                                      transactionOffset: 543,
                                      eventStreamId: "WorldEnding",
                                      expectedVersion: 1234,
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.SingleWrite,
                                      eventType: "type",
                                      data: payload,
                                      metadata: new byte[] { 0x07, 0x17 });
    long newPosition;
    Assert.IsTrue(writer.Write(record, out newPosition));
    writer.Close();
    db.Dispose();

    // Checkpoint must advance by exactly one framed record past the initial 137 bytes.
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    // Read the record straight back from disk, skipping the length prefix.
    using (var stream = File.Open(chunkFileName, FileMode.Open, FileAccess.Read)) {
        stream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var binaryReader = new BinaryReader(stream);
        var readBack = LogRecord.ReadFrom(binaryReader);
        Assert.AreEqual(record, readBack);
    }
}
// Writes a completed, scavenged multi-chunk file covering chunk numbers
// [chunkStartNum..chunkEndNum]: header, zeroed data area, completed footer.
// physicalSize/logicalSize default to the configured chunk size(s) when omitted.
public static void CreateMultiChunk(TFChunkDbConfig config, int chunkStartNum, int chunkEndNum, string filename, int? physicalSize = null, long? logicalSize = null) {
    // The chunk range must be non-empty and ascending.
    if (chunkStartNum > chunkEndNum)
        throw new ArgumentException("chunkStartNum");

    var header = new ChunkHeader(TFChunk.CurrentChunkVersion, config.ChunkSize, chunkStartNum, chunkEndNum, true, Guid.NewGuid());
    var physicalDataSize = physicalSize ?? config.ChunkSize;
    var logicalDataSize = logicalSize ?? (chunkEndNum - chunkStartNum + 1) * config.ChunkSize;

    var fileBuf = new byte[ChunkHeader.Size + physicalDataSize + ChunkFooter.Size];

    // Header goes at the very start of the file...
    var headerBytes = header.AsByteArray();
    Buffer.BlockCopy(headerBytes, 0, fileBuf, 0, headerBytes.Length);

    // ...and a completed footer at the very end.
    var footer = new ChunkFooter(true, true, physicalDataSize, logicalDataSize, 0, new byte[ChunkFooter.ChecksumSize]);
    var footerBytes = footer.AsByteArray();
    Buffer.BlockCopy(footerBytes, 0, fileBuf, fileBuf.Length - ChunkFooter.Size, footerBytes.Length);

    File.WriteAllBytes(filename, fileBuf);
}
// Writes a small prepare record into a pre-created chunk and verifies the
// checkpoint advance and the record bytes persisted on disk.
public void a_record_can_be_written() {
    // Lay down a chunk file containing a valid header and an empty data area.
    var chunkFile = Path.Combine(PathName, "prefix.tf0");
    var header = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var headerBytes = header.AsByteArray();
    var fileBytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(headerBytes, 0, fileBytes, 0, headerBytes.Length);
    File.WriteAllBytes(chunkFile, fileBytes);

    // 137 is fluff assigned to the beginning of the checkpoint.
    _checkpoint = new InMemoryCheckpoint(137);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               _checkpoint,
                                               new InMemoryCheckpoint(),
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();

    var writer = new TFChunkWriter(db);
    var record = new PrepareLogRecord(logPosition: 0,
                                      correlationId: _correlationId,
                                      eventId: _eventId,
                                      expectedVersion: 1234,
                                      transactionPosition: 0,
                                      transactionOffset: 0,
                                      eventStreamId: "WorldEnding",
                                      timeStamp: new DateTime(2012, 12, 21),
                                      flags: PrepareFlags.None,
                                      eventType: "type",
                                      data: new byte[] { 1, 2, 3, 4, 5 },
                                      metadata: new byte[] { 7, 17 });
    long writtenPos;
    writer.Write(record, out writtenPos);
    writer.Close();
    db.Dispose();

    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());

    // Read the record back from disk, skipping header, fluff, and length prefix.
    using (var stream = File.Open(chunkFile, FileMode.Open, FileAccess.Read)) {
        stream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
        var binaryReader = new BinaryReader(stream);
        var readBack = LogRecord.ReadFrom(binaryReader);
        Assert.AreEqual(record, readBack);
    }
}
// Writes an ongoing (not yet completed) chunk file: header followed by either the
// supplied contents or zeroed data; the footer area is left zeroed.
// FIX: throw ArgumentException (not bare Exception) when 'contents' has the wrong
// length — it is an argument-validation failure; ArgumentException still derives
// from Exception, so existing catch clauses keep working.
public static void CreateOngoingChunk(TFChunkDbConfig config, int chunkNum, string filename, int? actualSize = null, byte[] contents = null) {
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, config.ChunkSize, chunkNum, chunkNum, false, Guid.NewGuid());
    var chunkBytes = chunkHeader.AsByteArray();
    var dataSize = actualSize ?? config.ChunkSize;
    var buf = new byte[ChunkHeader.Size + dataSize + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
    if (contents != null) {
        if (contents.Length != dataSize)
            throw new ArgumentException("Wrong contents size.", "contents");
        Buffer.BlockCopy(contents, 0, buf, ChunkHeader.Size, contents.Length);
    }
    File.WriteAllBytes(filename, buf);
}
// Initializes this chunk as a completed (read-only) chunk: reads the footer and
// header, validates the physical file size, and precomputes midpoints.
private void InitCompleted() {
    if (!File.Exists(_filename)) {
        throw new CorruptDatabaseException(new ChunkNotFoundException(_filename));
    }
    _isReadonly = true;
    CreateReaderStreams();
    var reader = GetReaderWorkItem();
    try {
        Debug.Assert(!reader.IsMemory);
        // NOTE(review): footer is read before header here — presumably
        // ReadFooter/ReadHeader position the stream themselves; confirm before reordering.
        _chunkFooter = ReadFooter(reader.Stream);
        _actualDataSize = _chunkFooter.ActualDataSize;
        _chunkHeader = ReadHeader(reader.Stream);
        // A completed chunk must be exactly header + data + map + footer bytes long.
        var expectedFileSize = _chunkFooter.ActualChunkSize + _chunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
        if (reader.Stream.Length != expectedFileSize) {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should have file size {1} bytes, but instead has {2} bytes length.",
                              _filename, expectedFileSize, reader.Stream.Length)));
        }
        _midpoints = PopulateMidpoints(_midpointsDepth);
    } finally {
        ReturnReaderWorkItem(reader);
    }
    SetAttributes();
    //VerifyFileHash();
}
// Re-opens an existing chunk file for appending: reads back its header, rebuilds
// the running MD5 over the bytes written so far, and sets up the writer work item.
private void CreateWriterWorkItemForExistingChunk(int writePosition, out ChunkHeader chunkHeader) {
    var md5 = MD5.Create();
    var fileStream = new FileStream(_filename, FileMode.Open, FileAccess.ReadWrite, FileShare.Read,
                                    WriteBufferSize, FileOptions.SequentialScan);
    var binaryWriter = new BinaryWriter(fileStream);
    try {
        chunkHeader = ReadHeader(fileStream);
    } catch {
        // Don't leak the stream or the hash object if the header is unreadable.
        fileStream.Dispose();
        ((IDisposable)md5).Dispose();
        throw;
    }
    var realPosition = GetRealPosition(writePosition, inMemory: false);
    fileStream.Position = realPosition;
    MD5Hash.ContinuousHashFor(md5, fileStream, 0, realPosition);
    _writerWorkItem = new WriterWorkItem(fileStream, binaryWriter, md5);
}
// Creates a new ongoing chunk right after the current last chunk and registers it.
// Throws if the requested chunk number does not continue the existing sequence.
public TFChunk.TFChunk AddNewChunk(ChunkHeader chunkHeader, int fileSize) {
    Ensure.NotNull(chunkHeader, "chunkHeader");
    Ensure.Positive(fileSize, "fileSize");

    lock (_chunksLocker) {
        // A new ongoing chunk may only be appended immediately after the last known chunk.
        if (chunkHeader.ChunkStartNumber != _chunksCount) {
            throw new Exception(string.Format(
                "Received request to create a new ongoing chunk #{0}-{1}, but current chunks count is {2}.",
                chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber, _chunksCount));
        }

        var fileName = _config.FileNamingStrategy.GetFilenameFor(chunkHeader.ChunkStartNumber, 0);
        var newChunk = TFChunk.TFChunk.CreateWithHeader(fileName, chunkHeader, fileSize, _config.InMemDb,
                                                        unbuffered: _config.Unbuffered,
                                                        writethrough: _config.WriteThrough);
        AddChunk(newChunk);
        return newChunk;
    }
}
// Validates and stores the parameters describing the chunk to create.
public CreateChunk(Guid masterId, Guid subscriptionId, ChunkHeader chunkHeader, int fileSize, bool isCompletedChunk) {
    Ensure.NotEmptyGuid(masterId, "masterId");
    Ensure.NotEmptyGuid(subscriptionId, "subscriptionId");
    Ensure.NotNull(chunkHeader, "chunkHeader");

    MasterId = masterId;
    SubscriptionId = subscriptionId;
    ChunkHeader = chunkHeader;
    FileSize = fileSize;
    IsCompletedChunk = isCompletedChunk;
}
// Truncates an unscavenged single chunk in place: restores the file to its full
// on-disk size and overwrites everything from the truncation point onward with zeros.
private void TruncateChunkAndFillWithZeros(ChunkHeader chunkHeader, string chunkFilename, long truncateChk) {
    // Only a single, unscavenged chunk whose range contains the truncation point qualifies.
    var isSuitable = !chunkHeader.IsScavenged
                     && chunkHeader.ChunkStartNumber == chunkHeader.ChunkEndNumber
                     && truncateChk >= chunkHeader.ChunkStartPosition
                     && truncateChk < chunkHeader.ChunkEndPosition;
    if (!isSuitable) {
        throw new Exception(
            string.Format("Chunk #{0}-{1} ({2}) is not correct unscavenged chunk. TruncatePosition: {3}, ChunkHeader: {4}.",
                          chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber, chunkFilename, truncateChk, chunkHeader));
    }

    using (var stream = new FileStream(chunkFilename, FileMode.Open, FileAccess.ReadWrite, FileShare.Read)) {
        stream.SetLength(ChunkHeader.Size + chunkHeader.ChunkSize + ChunkFooter.Size);
        stream.Position = ChunkHeader.Size + chunkHeader.GetLocalLogPosition(truncateChk);
        // Zero everything from the truncation point to the end, in 64 KiB slices.
        var zeroBuffer = new byte[65536];
        var remaining = stream.Length - stream.Position;
        while (remaining > 0) {
            var sliceSize = (int)Math.Min(remaining, zeroBuffer.Length);
            stream.Write(zeroBuffer, 0, sliceSize);
            remaining -= sliceSize;
        }
        stream.FlushToDisk();
    }
}
// Initializes this chunk as a completed (read-only) chunk: validates the chunk
// version and physical file size, precomputes midpoints, and optionally verifies
// the file hash.
private void InitCompleted(bool verifyHash) {
    if (!File.Exists(_filename))
        throw new CorruptDatabaseException(new ChunkNotFoundException(_filename));

    _isReadonly = true;
    CreateReaderStreams();

    var reader = GetReaderWorkItem();
    try {
        Debug.Assert(!reader.IsMemory);

        _chunkHeader = ReadHeader(reader.Stream);
        // Refuse to open chunks written by a different (incompatible) chunk version.
        if (_chunkHeader.Version != CurrentChunkVersion)
            throw new CorruptDatabaseException(new WrongTFChunkVersionException(_filename, _chunkHeader.Version, CurrentChunkVersion));

        _chunkFooter = ReadFooter(reader.Stream);
        _actualDataSize = _chunkFooter.ActualDataSize;

        // A completed chunk must be exactly header + data + map + footer bytes long.
        var expectedFileSize = _chunkFooter.ActualChunkSize + _chunkFooter.MapSize + ChunkHeader.Size + ChunkFooter.Size;
        if (reader.Stream.Length != expectedFileSize) {
            throw new CorruptDatabaseException(new BadChunkInDatabaseException(
                string.Format("Chunk file '{0}' should have file size {1} bytes, but instead has {2} bytes length.",
                              _filename, expectedFileSize, reader.Stream.Length)));
        }

        _midpoints = PopulateMidpoints(_midpointsDepth);
    } finally {
        ReturnReaderWorkItem(reader);
    }

    SetAttributes();
    if (verifyHash)
        VerifyFileHash();
}
// Initializes this chunk as a brand-new, writable chunk with the given header.
private void InitNew(ChunkHeader chunkHeader) {
    Ensure.NotNull(chunkHeader, "chunkHeader");

    // Fresh chunk: writable, header as supplied, no data written yet.
    _isReadonly = false;
    _chunkHeader = chunkHeader;
    _actualDataSize = 0;

    CreateWriterWorkItemForNewChunk(chunkHeader);
    CreateReaderStreams();
    SetAttributes();
}
// Creates the backing file for a brand-new chunk, writes its header, and sets up
// the writer work item with a running MD5 hash.
private void CreateWriterWorkItemForNewChunk(ChunkHeader chunkHeader) {
    var md5 = MD5.Create();
    var fileStream = new FileStream(_filename, FileMode.Create, FileAccess.ReadWrite, FileShare.Read,
                                    WriteBufferSize, FileOptions.SequentialScan);
    var binaryWriter = new BinaryWriter(fileStream);
    // Pre-size the file to header + data + footer so later writes never grow it.
    fileStream.SetLength(chunkHeader.ChunkSize + ChunkHeader.Size + ChunkFooter.Size);
    WriteHeader(md5, fileStream, chunkHeader);
    _writerWorkItem = new WriterWorkItem(fileStream, binaryWriter, md5);
    Flush();
}
// Serializes the chunk header to the stream and folds its bytes into the running MD5.
private void WriteHeader(MD5 md5, Stream stream, ChunkHeader chunkHeader) {
    var headerBytes = chunkHeader.AsByteArray();
    md5.TransformBlock(headerBytes, 0, ChunkHeader.Size, null, 0);
    stream.Write(headerBytes, 0, ChunkHeader.Size);
}
// Factory: creates a brand-new chunk file for the given chunk number and size.
// The chunk is disposed if initialization fails part-way through.
public static TFChunk CreateNew(string filename, int chunkSize, int chunkNumber, int chunkScavengeVersion) {
    var header = new ChunkHeader(CurrentChunkVersion, chunkSize, chunkNumber, chunkNumber, chunkScavengeVersion);
    var chunk = new TFChunk(filename, TFConsts.TFChunkReaderCount, TFConsts.MidpointsDepth);
    try {
        chunk.InitNew(header);
    } catch {
        // Release the partially constructed chunk before propagating.
        chunk.Dispose();
        throw;
    }
    return chunk;
}
// Truncates the database to the given checkpoint position: deletes chunk files
// beyond the truncation point, zeroes the tail of the chunk containing it (or
// rewinds to the chunk start if that chunk is scavenged), and rewinds the epoch,
// chaser, writer, and truncate checkpoints accordingly.
public void TruncateDb(long truncateChk) {
    var writerChk = _config.WriterCheckpoint.Read();
    var requestedTruncation = writerChk - truncateChk;
    // Guard: refuse truncations larger than the configured maximum (when one is set).
    if (_config.MaxTruncation >= 0 && requestedTruncation > _config.MaxTruncation) {
        Log.Error(
            "MaxTruncation is set and truncate checkpoint is out of bounds. MaxTruncation {maxTruncation} vs requested truncation {requestedTruncation} [{writerChk} => {truncateChk}]. To proceed, set MaxTruncation to -1 (no max) or greater than {requestedTruncationHint}.",
            _config.MaxTruncation, requestedTruncation, writerChk, truncateChk, requestedTruncation);
        throw new Exception(
            string.Format("MaxTruncation is set ({0}) and truncate checkpoint is out of bounds (requested truncation is {1} [{2} => {3}]).",
                          _config.MaxTruncation, requestedTruncation, writerChk, truncateChk));
    }

    var oldLastChunkNum = (int)(writerChk / _config.ChunkSize);
    var newLastChunkNum = (int)(truncateChk / _config.ChunkSize);
    var chunkEnumerator = new TFChunkEnumerator(_config.FileNamingStrategy);

    // Sanity check: there must be no chunk files beyond the old last chunk.
    var excessiveChunks = _config.FileNamingStrategy.GetAllVersionsFor(oldLastChunkNum + 1);
    if (excessiveChunks.Length > 0) {
        throw new Exception(string.Format("During truncation of DB excessive TFChunks were found:\n{0}.", string.Join("\n", excessiveChunks)));
    }

    ChunkHeader newLastChunkHeader = null;
    string newLastChunkFilename = null;

    // find the chunk to truncate to
    foreach (var chunkInfo in chunkEnumerator.EnumerateChunks(oldLastChunkNum)) {
        switch (chunkInfo) {
            case LatestVersion(var fileName, var _, var end):
                if (newLastChunkFilename != null || end < newLastChunkNum) {
                    break;
                }
                newLastChunkHeader = ReadChunkHeader(fileName);
                newLastChunkFilename = fileName;
                break;
            case MissingVersion(var fileName, var chunkNum):
                // Chunks before the truncation target must all exist.
                if (chunkNum < newLastChunkNum) {
                    throw new Exception($"Could not find any chunk #{fileName}.");
                }
                break;
        }
    }

    // it's not bad if there is no file, it could have been deleted on previous run
    if (newLastChunkHeader != null) {
        var chunksToDelete = new List<string>();
        var chunkNumToDeleteFrom = newLastChunkNum + 1;
        // A scavenged chunk cannot be partially truncated, so the whole chunk
        // (from its start number) must be deleted instead.
        if (newLastChunkHeader.IsScavenged) {
            Log.Information(
                "Deleting ALL chunks from #{chunkStartNumber} inclusively " +
                "as truncation position is in the middle of scavenged chunk.",
                newLastChunkHeader.ChunkStartNumber);
            chunkNumToDeleteFrom = newLastChunkHeader.ChunkStartNumber;
        }

        foreach (var chunkInfo in chunkEnumerator.EnumerateChunks(oldLastChunkNum)) {
            switch (chunkInfo) {
                case LatestVersion(var fileName, var start, _):
                    if (start >= chunkNumToDeleteFrom) {
                        chunksToDelete.Add(fileName);
                    }
                    break;
                case OldVersion(var fileName, var start):
                    if (start >= chunkNumToDeleteFrom) {
                        chunksToDelete.Add(fileName);
                    }
                    break;
            }
        }

        // we need to remove excessive chunks from largest number to lowest one, so in case of crash
        // mid-process, we don't end up with broken non-sequential chunks sequence.
        chunksToDelete.Reverse();
        foreach (var chunkFile in chunksToDelete) {
            Log.Information("File {chunk} will be deleted during TruncateDb procedure.", chunkFile);
            File.SetAttributes(chunkFile, FileAttributes.Normal);
            File.Delete(chunkFile);
        }

        if (!newLastChunkHeader.IsScavenged) {
            TruncateChunkAndFillWithZeros(newLastChunkHeader, newLastChunkFilename, truncateChk);
        } else {
            // The scavenged chunk was deleted whole; move the truncation point
            // back to that chunk's start position.
            truncateChk = newLastChunkHeader.ChunkStartPosition;
            Log.Information(
                "Setting TruncateCheckpoint to {truncateCheckpoint} " +
                "as truncation position is in the middle of scavenged chunk.",
                truncateChk);
        }
    }

    // Rewind each checkpoint that is at or ahead of the truncation point.
    if (_config.EpochCheckpoint.Read() >= truncateChk) {
        var epochChk = _config.EpochCheckpoint.Read();
        Log.Information("Truncating epoch from {epochFrom} (0x{epochFrom:X}) to {epochTo} (0x{epochTo:X}).", epochChk, epochChk, -1, -1);
        _config.EpochCheckpoint.Write(-1);
        _config.EpochCheckpoint.Flush();
    }

    if (_config.ChaserCheckpoint.Read() > truncateChk) {
        var chaserChk = _config.ChaserCheckpoint.Read();
        Log.Information(
            "Truncating chaser from {chaserCheckpoint} (0x{chaserCheckpoint:X}) to {truncateCheckpoint} (0x{truncateCheckpoint:X}).",
            chaserChk, chaserChk, truncateChk, truncateChk);
        _config.ChaserCheckpoint.Write(truncateChk);
        _config.ChaserCheckpoint.Flush();
    }

    if (_config.WriterCheckpoint.Read() > truncateChk) {
        var writerCheckpoint = _config.WriterCheckpoint.Read();
        Log.Information(
            "Truncating writer from {writerCheckpoint} (0x{writerCheckpoint:X}) to {truncateCheckpoint} (0x{truncateCheckpoint:X}).",
            writerCheckpoint, writerCheckpoint, truncateChk, truncateChk);
        _config.WriterCheckpoint.Write(truncateChk);
        _config.WriterCheckpoint.Flush();
    }

    // Truncation finished: reset the truncate checkpoint so it does not run again.
    Log.Information("Resetting TruncateCheckpoint to {truncateCheckpoint} (0x{truncateCheckpoint:X}).", -1, -1);
    _config.TruncateCheckpoint.Write(-1);
    _config.TruncateCheckpoint.Flush();
}
// Truncates the database to the given checkpoint position (older implementation):
// deletes chunk files past the truncation point, zeroes the tail of the chunk
// containing it (or deletes it whole when scavenged), and rewinds the epoch,
// chaser, writer, and truncate checkpoints.
public void TruncateDb(long truncateChk) {
    var writerChk = _config.WriterCheckpoint.Read();
    var oldLastChunkNum = (int)(writerChk / _config.ChunkSize);
    var newLastChunkNum = (int)(truncateChk / _config.ChunkSize);

    // Sanity check: there must be no chunk files beyond the old last chunk.
    var excessiveChunks = _config.FileNamingStrategy.GetAllVersionsFor(oldLastChunkNum + 1);
    if (excessiveChunks.Length > 0) {
        throw new Exception(string.Format("During truncation of DB excessive TFChunks were found:\n{0}.", string.Join("\n", excessiveChunks)));
    }

    // Walk forward from chunk 0 to find the chunk whose range covers newLastChunkNum.
    ChunkHeader newLastChunkHeader = null;
    string newLastChunkFilename = null;
    for (int chunkNum = 0; chunkNum <= newLastChunkNum;) {
        var chunks = _config.FileNamingStrategy.GetAllVersionsFor(chunkNum);
        if (chunks.Length == 0) {
            // Only the final (target) chunk is allowed to be missing.
            if (chunkNum != newLastChunkNum) {
                throw new Exception(string.Format("Couldn't find any chunk #{0}.", chunkNum));
            }
            break;
        }
        using (var fs = File.OpenRead(chunks[0])) {
            var chunkHeader = ChunkHeader.FromStream(fs);
            if (chunkHeader.ChunkEndNumber >= newLastChunkNum) {
                newLastChunkHeader = chunkHeader;
                newLastChunkFilename = chunks[0];
                break;
            }
            // Multi-chunk files span several chunk numbers: jump past the whole range.
            chunkNum = chunkHeader.ChunkEndNumber + 1;
        }
    }

    // we need to remove excessive chunks from largest number to lowest one, so in case of crash
    // mid-process, we don't end up with broken non-sequential chunks sequence.
    for (int i = oldLastChunkNum; i > newLastChunkNum; i -= 1) {
        foreach (var chunkFile in _config.FileNamingStrategy.GetAllVersionsFor(i)) {
            Log.Info("File {0} will be deleted during TruncateDb procedure.", chunkFile);
            File.SetAttributes(chunkFile, FileAttributes.Normal);
            File.Delete(chunkFile);
        }
    }

    // it's not bad if there is no file, it could have been deleted on previous run
    if (newLastChunkHeader != null) {
        // if the chunk we want to truncate into is already scavenged
        // we have to truncate (i.e., delete) the whole chunk, not just part of it
        if (newLastChunkHeader.IsScavenged) {
            truncateChk = newLastChunkHeader.ChunkStartPosition;
            // we need to delete EVERYTHING from ChunkStartNumber up to newLastChunkNum, inclusive
            Log.Info("Setting TruncateCheckpoint to {0} and deleting ALL chunks from #{1} inclusively " +
                     "as truncation position is in the middle of scavenged chunk.",
                     truncateChk, newLastChunkHeader.ChunkStartNumber);
            for (int i = newLastChunkNum; i >= newLastChunkHeader.ChunkStartNumber; --i) {
                var chunksToDelete = _config.FileNamingStrategy.GetAllVersionsFor(i);
                foreach (var chunkFile in chunksToDelete) {
                    Log.Info("File {0} will be deleted during TruncateDb procedure.", chunkFile);
                    File.SetAttributes(chunkFile, FileAttributes.Normal);
                    File.Delete(chunkFile);
                }
            }
        } else {
            TruncateChunkAndFillWithZeros(newLastChunkHeader, newLastChunkFilename, truncateChk);
        }
    }

    // Rewind each checkpoint that is at or ahead of the truncation point.
    if (_config.EpochCheckpoint.Read() >= truncateChk) {
        Log.Info("Truncating epoch from {0} (0x{0:X}) to {1} (0x{1:X}).", _config.EpochCheckpoint.Read(), -1);
        _config.EpochCheckpoint.Write(-1);
        _config.EpochCheckpoint.Flush();
    }
    if (_config.ChaserCheckpoint.Read() > truncateChk) {
        Log.Info("Truncating chaser from {0} (0x{0:X}) to {1} (0x{1:X}).", _config.ChaserCheckpoint.Read(), truncateChk);
        _config.ChaserCheckpoint.Write(truncateChk);
        _config.ChaserCheckpoint.Flush();
    }
    if (_config.WriterCheckpoint.Read() > truncateChk) {
        Log.Info("Truncating writer from {0} (0x{0:X}) to {1} (0x{1:X}).",
                 _config.WriterCheckpoint.Read(), truncateChk);
        _config.WriterCheckpoint.Write(truncateChk);
        _config.WriterCheckpoint.Flush();
    }

    // Truncation finished: reset the truncate checkpoint so it does not run again.
    Log.Info("Resetting TruncateCheckpoint to {0} (0x{0:X}).", -1);
    _config.TruncateCheckpoint.Write(-1);
    _config.TruncateCheckpoint.Flush();
}
// Creates a chunk backed by a temporary file name from the naming strategy.
public TFChunk.TFChunk CreateTempChunk(ChunkHeader chunkHeader, int fileSize) {
    var tempFileName = _config.FileNamingStrategy.GetTempFilename();
    return TFChunk.TFChunk.CreateWithHeader(tempFileName, chunkHeader, fileSize);
}
// Creates a chunk backed by a temporary file name, honoring the in-memory DB setting.
public TFChunk.TFChunk CreateTempChunk(ChunkHeader chunkHeader, int fileSize) {
    var tempFileName = _config.FileNamingStrategy.GetTempFilename();
    return TFChunk.TFChunk.CreateWithHeader(tempFileName, chunkHeader, fileSize, _config.InMemDb);
}
// Verifies that the chaser can still read a record when the writer checkpoint is
// ahead of the record's actual end position.
// FIX: assert that a record was actually read BEFORE dereferencing it — the
// original asserted on record.GetSizeWithLengthPrefixAndSuffix() first, so a
// failed read surfaced as NullReferenceException instead of a clean assert failure.
public void try_read_returns_record_when_writerchecksum_ahead() {
    var recordToWrite = new PrepareLogRecord(logPosition: 0,
                                             correlationId: _correlationId,
                                             eventId: _eventId,
                                             transactionPosition: 0,
                                             transactionOffset: 0,
                                             eventStreamId: "WorldEnding",
                                             expectedVersion: 1234,
                                             timeStamp: new DateTime(2012, 12, 21),
                                             flags: PrepareFlags.None,
                                             eventType: "type",
                                             data: new byte[] { 1, 2, 3, 4, 5 },
                                             metadata: new byte[] { 7, 17 });
    // Lay down a chunk file containing the header and one framed record.
    using (var fs = new FileStream(GetFilePathFor("prefix.tf0"), FileMode.CreateNew, FileAccess.Write)) {
        fs.SetLength(ChunkHeader.Size + ChunkFooter.Size + 10000);
        var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false).AsByteArray();
        var writer = new BinaryWriter(fs);
        writer.Write(chunkHeader);
        recordToWrite.WriteWithLengthPrefixAndSuffixTo(writer);
        fs.Close();
    }

    // Writer checkpoint (128) is ahead of the record's end; chaser starts at 0.
    var writerchk = new InMemoryCheckpoint(128);
    var chaserchk = new InMemoryCheckpoint(Checkpoint.Chaser, 0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               writerchk,
                                               chaserchk,
                                               new[] { chaserchk }));
    db.OpenVerifyAndClean();

    var chaser = new TFChunkChaser(db, writerchk, chaserchk);
    chaser.Open();
    LogRecord record;
    var recordRead = chaser.TryReadNext(out record);
    chaser.Close();

    Assert.IsTrue(recordRead);
    Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix(), chaserchk.Read());
    Assert.AreEqual(recordToWrite, record);
    db.Close();
}
// Creates a new ongoing chunk right after the current last chunk and registers it.
// Throws if the requested chunk number does not continue the existing sequence.
public TFChunk.TFChunk AddNewChunk(ChunkHeader chunkHeader, int fileSize) {
    Ensure.NotNull(chunkHeader, "chunkHeader");
    Ensure.Positive(fileSize, "fileSize");

    lock (_chunksLocker) {
        // A new ongoing chunk may only be appended immediately after the last known chunk.
        if (chunkHeader.ChunkStartNumber != _chunksCount)
            throw new Exception(string.Format(
                "Received request to create a new ongoing chunk #{0}-{1}, but current chunks count is {2}.",
                chunkHeader.ChunkStartNumber, chunkHeader.ChunkEndNumber, _chunksCount));

        var fileName = _config.FileNamingStrategy.GetFilenameFor(chunkHeader.ChunkStartNumber, 0);
        var newChunk = TFChunk.TFChunk.CreateWithHeader(fileName, chunkHeader, fileSize);
        AddChunk(newChunk);
        return newChunk;
    }
}
// Writes a completed chunk file: header, actualSize bytes of zeroed data, and a
// completed footer recording actualSize as both the physical and logical size.
private void CreateChunk(string filename, int actualSize, int chunkSize) {
    var header = new ChunkHeader(1, chunkSize, 0, 0, 0);
    var fileBuf = new byte[ChunkHeader.Size + actualSize + ChunkFooter.Size];

    var headerBytes = header.AsByteArray();
    Buffer.BlockCopy(headerBytes, 0, fileBuf, 0, headerBytes.Length);

    var footer = new ChunkFooter(true, actualSize, actualSize, 0, new byte[ChunkFooter.ChecksumSize]);
    var footerBytes = footer.AsByteArray();
    Buffer.BlockCopy(footerBytes, 0, fileBuf, fileBuf.Length - ChunkFooter.Size, footerBytes.Length);

    File.WriteAllBytes(filename, fileBuf);
}
// Verifies chunk roll-over: a record that does not fit in the remaining space of
// the current chunk fails to write, and a smaller record afterwards succeeds in
// the newly created next chunk.
public void a_record_is_not_written_at_first_but_written_on_second_try() {
    var filename1 = Path.Combine(PathName, "prefix.tf0");
    var filename2 = Path.Combine(PathName, "prefix.tf1");
    // Lay down the first chunk file containing only a valid header.
    var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
    var chunkBytes = chunkHeader.AsByteArray();
    var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
    Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
    File.WriteAllBytes(filename1, bytes);

    _checkpoint = new InMemoryCheckpoint(0);
    var db = new TFChunkDb(new TFChunkDbConfig(PathName,
                                               new PrefixFileNamingStrategy(PathName, "prefix.tf"),
                                               10000,
                                               0,
                                               _checkpoint,
                                               new ICheckpoint[0]));
    db.OpenVerifyAndClean();
    var tf = new TFChunkWriter(db);
    long pos;

    var record1 = new PrepareLogRecord(logPosition: 0,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       expectedVersion: 1234,
                                       transactionPosition: 0,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[8000]);
    Assert.IsTrue(tf.Write(record1, out pos)); // almost fill up first chunk

    var record2 = new PrepareLogRecord(logPosition: pos,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       expectedVersion: 1234,
                                       transactionPosition: pos,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[8000]);
    Assert.IsFalse(tf.Write(record2, out pos)); // chunk has too small space

    // A smaller record (2000-byte metadata) fits into the newly opened second chunk.
    var record3 = new PrepareLogRecord(logPosition: pos,
                                       correlationId: _correlationId,
                                       eventId: _eventId,
                                       expectedVersion: 1234,
                                       transactionPosition: pos,
                                       transactionOffset: 0,
                                       eventStreamId: "WorldEnding",
                                       timeStamp: new DateTime(2012, 12, 21),
                                       flags: PrepareFlags.None,
                                       eventType: "type",
                                       data: new byte[] { 1, 2, 3, 4, 5 },
                                       metadata: new byte[2000]);
    Assert.IsTrue(tf.Write(record3, out pos));
    tf.Close();
    db.Dispose();

    // Checkpoint lands past the full first chunk (10000) plus the third framed record.
    Assert.AreEqual(record3.GetSizeWithLengthPrefixAndSuffix() + 10000, _checkpoint.Read());
    // Read record3 back from the SECOND chunk file, skipping header and length prefix.
    using (var filestream = File.Open(filename2, FileMode.Open, FileAccess.Read)) {
        filestream.Seek(ChunkHeader.Size + sizeof(int), SeekOrigin.Begin);
        var reader = new BinaryReader(filestream);
        var read = LogRecord.ReadFrom(reader);
        Assert.AreEqual(record3, read);
    }
}
// Writes an ongoing (not yet completed) chunk file: a valid header followed by
// zeroed data; the footer area at the end of the buffer is left zeroed.
private void CreateOngoingChunk(string filename, int actualSize, int chunkSize) {
    var header = new ChunkHeader(TFChunk.CurrentChunkVersion, chunkSize, 0, 0, 0);
    var fileBuf = new byte[ChunkHeader.Size + actualSize + ChunkFooter.Size];
    var headerBytes = header.AsByteArray();
    Buffer.BlockCopy(headerBytes, 0, fileBuf, 0, headerBytes.Length);
    File.WriteAllBytes(filename, fileBuf);
}