private void CreateWriterWorkItemForExistingChunk(int writePosition, out ChunkHeader chunkHeader) {
	var md5 = MD5.Create();
	var stream = GetWriteStream(_filename);
	try {
		chunkHeader = ReadHeader(stream);
		if (chunkHeader.Version == (byte)ChunkVersions.Unaligned) {
			Log.Verbose("Upgrading ongoing file {chunk} to version 3", _filename);
			var newHeader = new ChunkHeader((byte)ChunkVersions.Aligned,
				chunkHeader.ChunkSize,
				chunkHeader.ChunkStartNumber,
				chunkHeader.ChunkEndNumber,
				false,
				chunkHeader.ChunkId);
			stream.Seek(0, SeekOrigin.Begin);
			chunkHeader = newHeader;
			var head = newHeader.AsByteArray();
			stream.Write(head, 0, head.Length);
			stream.Flush();
			stream.Seek(0, SeekOrigin.Begin);
		}
	} catch {
		stream.Dispose();
		((IDisposable)md5).Dispose();
		throw;
	}

	var realPosition = GetRawPosition(writePosition);
	MD5Hash.ContinuousHashFor(md5, stream, 0, realPosition);
	stream.Position = realPosition; // this reordering fixes a bug in the Mono implementation of FileStream
	_writerWorkItem = new WriterWorkItem(stream, null, md5);
}
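// For context, a minimal sketch of what MD5Hash.ContinuousHashFor (used above) plausibly does:
// replay the already-written region of the stream through the running MD5 so the hash carried in
// the WriterWorkItem stays consistent across restarts. This is an assumption about the helper's
// behavior, not the actual EventStore implementation.
private static void ContinuousHashForSketch(MD5 md5, Stream stream, long startPosition, long count) {
	stream.Position = startPosition;
	var buffer = new byte[4096];
	while (count > 0) {
		var read = stream.Read(buffer, 0, (int)Math.Min(buffer.Length, count));
		if (read == 0)
			throw new EndOfStreamException("Stream ended before the requested region was hashed.");
		md5.TransformBlock(buffer, 0, read, null, 0); // accumulate without finalizing the hash
		count -= read;
	}
}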
private void WriteHeader(MD5 md5, Stream stream, ChunkHeader chunkHeader) {
	var chunkHeaderBytes = chunkHeader.AsByteArray();
	md5.TransformBlock(chunkHeaderBytes, 0, ChunkHeader.Size, null, 0);
	stream.Write(chunkHeaderBytes, 0, ChunkHeader.Size);
}
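// Note that WriteHeader only folds the header bytes into the running MD5 via TransformBlock;
// nothing is finalized here. A hedged sketch (hypothetical helper, not from the source) of how the
// incremental hash would eventually be closed to yield the checksum stored in the ChunkFooter:
private static byte[] FinalizeChunkHashSketch(MD5 md5) {
	md5.TransformFinalBlock(Array.Empty<byte>(), 0, 0); // complete the incremental hash computation
	return md5.Hash; // 16-byte MD5 digest, written into the footer's checksum field
}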
public static void CreateSingleChunk(TFChunkDbConfig config, int chunkNum, string filename,
	int? actualDataSize = null, bool isScavenged = false, byte[] contents = null) {
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, config.ChunkSize, chunkNum, chunkNum,
		isScavenged, Guid.NewGuid());
	var chunkBytes = chunkHeader.AsByteArray();
	var dataSize = actualDataSize ?? config.ChunkSize;
	var buf = new byte[ChunkHeader.Size + dataSize + ChunkFooter.Size];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	var chunkFooter = new ChunkFooter(true, true, dataSize, dataSize, 0, new byte[ChunkFooter.ChecksumSize]);
	chunkBytes = chunkFooter.AsByteArray();
	Buffer.BlockCopy(chunkBytes, 0, buf, buf.Length - ChunkFooter.Size, chunkBytes.Length);
	if (contents != null) {
		if (contents.Length != dataSize)
			throw new Exception("Wrong contents size.");
		Buffer.BlockCopy(contents, 0, buf, ChunkHeader.Size, contents.Length);
	}
	File.WriteAllBytes(filename, buf);
}
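// Usage sketch (illustrative, not from the original tests): write a completed chunk #0 to disk,
// reusing the TFChunkHelper.CreateDbConfig helper and the file naming seen in the tests below.
private void CreateCompletedChunkZero() {
	var config = TFChunkHelper.CreateDbConfig(PathName, new InMemoryCheckpoint(0), new InMemoryCheckpoint(),
		chunkSize: 10000);
	CreateSingleChunk(config, chunkNum: 0, filename: GetFilePathFor("chunk-000000.000000"));
}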
private void CreateOngoingChunk(string filename, int actualSize, int chunkSize) {
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, chunkSize, 0, 0, 0);
	var chunkBytes = chunkHeader.AsByteArray();
	// an ongoing (incomplete) chunk has a header and reserved space, but no footer written yet
	var buf = new byte[ChunkHeader.Size + actualSize + ChunkFooter.Size];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);
}
public void a_record_can_be_written() {
	var filename = GetFilePathFor("chunk-000000.000000");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
	var chunkBytes = chunkHeader.AsByteArray();
	var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint(),
		chunkSize: chunkHeader.ChunkSize));
	db.Open();

	var bytes = new byte[3994]; // this gives a record size of exactly 4097; with 3993 (record size 4096) everything works fine!
	new Random().NextBytes(bytes);

	var writer = new TFChunkWriter(db);
	var logFormat = LogFormatHelper<TLogFormat, TStreamId>.LogFormat;
	logFormat.StreamNameIndex.GetOrAddId("WorldEnding", out var streamId, out _, out _);
	var record = LogRecord.Prepare(
		factory: logFormat.RecordFactory,
		logPosition: 137,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPos: 789,
		transactionOffset: 543,
		eventStreamId: streamId,
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.SingleWrite,
		eventType: "type",
		data: bytes,
		metadata: new byte[] { 0x07, 0x17 });
	long pos;
	Assert.IsTrue(writer.Write(record, out pos));
	writer.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		// skip the chunk header, the 137 bytes of pre-existing data, and the record's length prefix
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
		Assert.AreEqual(record, read);
	}
}
public void a_record_can_be_written() {
	var filename = Path.Combine(PathName, "prefix.tf0");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
	var chunkBytes = chunkHeader.AsByteArray();
	var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		chunkHeader.ChunkSize,
		0,
		_checkpoint,
		new ICheckpoint[0]));
	db.OpenVerifyAndClean();

	var writer = new TFChunkWriter(db);
	var record = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[8000],
		metadata: new byte[] { 7, 17 });
	Console.WriteLine(record.GetSizeWithLengthPrefixAndSuffix());
	Console.WriteLine(record.GetSizeWithLengthPrefixAndSuffix() + 137);
	long pos;
	Assert.IsTrue(writer.Write(record, out pos));
	writer.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader);
		Console.WriteLine(string.Join("\n", Directory.EnumerateFiles(PathName)));
		Assert.AreEqual(record, read);
	}
}
public void a_record_can_be_written() {
	var filename = Path.Combine(PathName, "prefix.tf0");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
	var chunkBytes = chunkHeader.AsByteArray();
	var buf = new byte[ChunkHeader.Size + ChunkFooter.Size + chunkHeader.ChunkSize];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		chunkHeader.ChunkSize,
		0,
		_checkpoint,
		new InMemoryCheckpoint(),
		new ICheckpoint[0]));
	db.OpenVerifyAndClean();

	var bytes = new byte[3994]; // this gives a record size of exactly 4097; with 3993 (record size 4096) everything works fine!
	new Random().NextBytes(bytes);

	var writer = new TFChunkWriter(db);
	var record = new PrepareLogRecord(logPosition: 123,
		correlationId: _correlationId,
		eventId: _eventId,
		transactionPosition: 789,
		transactionOffset: 543,
		eventStreamId: "WorldEnding",
		expectedVersion: 1234,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.SingleWrite,
		eventType: "type",
		data: bytes,
		metadata: new byte[] { 0x07, 0x17 });
	long pos;
	Assert.IsTrue(writer.Write(record, out pos));
	writer.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137, _checkpoint.Read());
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader);
		Assert.AreEqual(record, read);
	}
}
private void CreateChunk(string filename, int actualSize, int chunkSize) {
	var chunkHeader = new ChunkHeader(1, chunkSize, 0, 0, 0);
	var chunkBytes = chunkHeader.AsByteArray();
	var buf = new byte[ChunkHeader.Size + actualSize + ChunkFooter.Size];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	var chunkFooter = new ChunkFooter(true, actualSize, actualSize, 0, new byte[ChunkFooter.ChecksumSize]);
	chunkBytes = chunkFooter.AsByteArray();
	Buffer.BlockCopy(chunkBytes, 0, buf, buf.Length - ChunkFooter.Size, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);
}
public void a_record_can_be_written() {
	var filename = Path.Combine(PathName, "prefix.tf0");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, 0);
	var chunkBytes = chunkHeader.AsByteArray();
	var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
	Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, bytes);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(new TFChunkDbConfig(PathName,
		new PrefixFileNamingStrategy(PathName, "prefix.tf"),
		10000,
		0,
		_checkpoint,
		new InMemoryCheckpoint(),
		new ICheckpoint[0]));
	db.OpenVerifyAndClean();

	var tf = new TFChunkWriter(db);
	var record = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		expectedVersion: 1234,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	long tmp;
	tf.Write(record, out tmp);
	//tf.Flush();
	tf.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137,
		_checkpoint.Read()); // 137 is the fluff assigned to the beginning of the checkpoint
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader);
		Assert.AreEqual(record, read);
	}
}
public void a_record_can_be_written() {
	var filename = GetFilePathFor("chunk-000000.000000");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, chunkId: Guid.NewGuid());
	var chunkBytes = chunkHeader.AsByteArray();
	var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
	Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
	File.WriteAllBytes(filename, bytes);

	_checkpoint = new InMemoryCheckpoint(137);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
	db.Open();

	var recordFactory = LogFormatHelper<TLogFormat, TStreamId>.RecordFactory;
	var streamId = LogFormatHelper<TLogFormat, TStreamId>.StreamId;
	var eventTypeId = LogFormatHelper<TLogFormat, TStreamId>.EventTypeId;

	var tf = new TFChunkWriter(db);
	var record = LogRecord.Prepare(
		factory: recordFactory,
		logPosition: _checkpoint.Read(),
		correlationId: _correlationId,
		eventId: _eventId,
		expectedVersion: 1234,
		transactionPos: 0,
		transactionOffset: 0,
		eventStreamId: streamId,
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: eventTypeId,
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[] { 7, 17 });
	long tmp;
	tf.Write(record, out tmp);
	tf.Close();
	db.Dispose();

	Assert.AreEqual(record.GetSizeWithLengthPrefixAndSuffix() + 137,
		_checkpoint.Read()); // 137 is the fluff assigned to the beginning of the checkpoint
	using (var filestream = File.Open(filename, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + 137 + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader, (int)reader.BaseStream.Length);
		Assert.AreEqual(record, read);
	}
}
public static void CreateMultiChunk(TFChunkDbConfig config, int chunkStartNum, int chunkEndNum, string filename,
	int? physicalSize = null, long? logicalSize = null) {
	if (chunkStartNum > chunkEndNum)
		throw new ArgumentException("chunkStartNum must not be greater than chunkEndNum.", nameof(chunkStartNum));

	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, config.ChunkSize, chunkStartNum, chunkEndNum,
		true, Guid.NewGuid());
	var chunkBytes = chunkHeader.AsByteArray();
	var physicalDataSize = physicalSize ?? config.ChunkSize;
	// the cast avoids int overflow when many chunks are merged with a large chunk size
	var logicalDataSize = logicalSize ?? (long)(chunkEndNum - chunkStartNum + 1) * config.ChunkSize;
	var buf = new byte[ChunkHeader.Size + physicalDataSize + ChunkFooter.Size];
	Buffer.BlockCopy(chunkBytes, 0, buf, 0, chunkBytes.Length);
	var chunkFooter = new ChunkFooter(true, true, physicalDataSize, logicalDataSize, 0,
		new byte[ChunkFooter.ChecksumSize]);
	chunkBytes = chunkFooter.AsByteArray();
	Buffer.BlockCopy(chunkBytes, 0, buf, buf.Length - ChunkFooter.Size, chunkBytes.Length);
	File.WriteAllBytes(filename, buf);
}
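// Usage sketch (illustrative, not from the original tests): a scavenged chunk that merges logical
// chunks 0..2. Its logical data size defaults to 3 * config.ChunkSize, while the physical payload
// on disk can be far smaller after scavenging:
private void CreateMergedChunkSketch(TFChunkDbConfig config) {
	CreateMultiChunk(config, chunkStartNum: 0, chunkEndNum: 2,
		filename: GetFilePathFor("chunk-000000.000001"), // bumped version suffix, as a merged chunk would carry
		physicalSize: 1000); // logicalSize is left to default to (2 - 0 + 1) * config.ChunkSize
}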
public void a_record_is_not_written_at_first_but_written_on_second_try() {
	var filename1 = GetFilePathFor("chunk-000000.000000");
	var filename2 = GetFilePathFor("chunk-000001.000000");
	var chunkHeader = new ChunkHeader(TFChunk.CurrentChunkVersion, 10000, 0, 0, false, Guid.NewGuid());
	var chunkBytes = chunkHeader.AsByteArray();
	var bytes = new byte[ChunkHeader.Size + 10000 + ChunkFooter.Size];
	Buffer.BlockCopy(chunkBytes, 0, bytes, 0, chunkBytes.Length);
	File.WriteAllBytes(filename1, bytes);

	_checkpoint = new InMemoryCheckpoint(0);
	var db = new TFChunkDb(TFChunkHelper.CreateDbConfig(PathName, _checkpoint, new InMemoryCheckpoint()));
	db.Open();

	var tf = new TFChunkWriter(db);
	long pos;

	var record1 = new PrepareLogRecord(logPosition: 0,
		correlationId: _correlationId,
		eventId: _eventId,
		expectedVersion: 1234,
		transactionPosition: 0,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[8000]);
	Assert.IsTrue(tf.Write(record1, out pos)); // almost fills up the first chunk

	var record2 = new PrepareLogRecord(logPosition: pos,
		correlationId: _correlationId,
		eventId: _eventId,
		expectedVersion: 1234,
		transactionPosition: pos,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[8000]);
	Assert.IsFalse(tf.Write(record2, out pos)); // the chunk has too little space left

	var record3 = new PrepareLogRecord(logPosition: pos,
		correlationId: _correlationId,
		eventId: _eventId,
		expectedVersion: 1234,
		transactionPosition: pos,
		transactionOffset: 0,
		eventStreamId: "WorldEnding",
		timeStamp: new DateTime(2012, 12, 21),
		flags: PrepareFlags.None,
		eventType: "type",
		data: new byte[] { 1, 2, 3, 4, 5 },
		metadata: new byte[2000]);
	Assert.IsTrue(tf.Write(record3, out pos)); // the smaller record fits into the next chunk

	tf.Close();
	db.Dispose();

	// record3 lands at the start of the second chunk, i.e. at logical position 10000
	Assert.AreEqual(record3.GetSizeWithLengthPrefixAndSuffix() + 10000, _checkpoint.Read());
	using (var filestream = File.Open(filename2, FileMode.Open, FileAccess.Read)) {
		filestream.Seek(ChunkHeader.Size + sizeof(int), SeekOrigin.Begin);
		var reader = new BinaryReader(filestream);
		var read = LogRecord.ReadFrom(reader);
		Assert.AreEqual(record3, read);
	}
}