/// <summary>
/// Verifies that a fragment written to a new file survives a close/reopen cycle,
/// and that a file opened with FileAccess.Read rejects write opens.
/// </summary>
public void TestCloseAndReopen()
{
    using (TempFile file = new TempFile())
    {
        Guid expected = Guid.NewGuid();
        long id;

        // Write a guid into a brand-new file and remember its fragment id.
        using (FragmentedFile ff = FragmentedFile.CreateNew(file.TempPath, 512))
        {
            using (Stream writer = ff.Create(out id))
                PrimitiveSerializer.Guid.WriteTo(expected, writer);
            Assert.AreEqual(id, ff.FirstIdentity);
        }

        // Reopen read/write and verify the guid round-trips.
        using (FragmentedFile ff = new FragmentedFile(file.TempPath, 512))
        {
            Assert.AreEqual(id, ff.FirstIdentity);
            using (Stream reader = ff.Open(id, FileAccess.Read))
                Assert.AreEqual(expected, PrimitiveSerializer.Guid.ReadFrom(reader));
        }

        // Reopen read-only: reads still succeed, but opening for write must throw.
        using (FragmentedFile ff = new FragmentedFile(file.TempPath, 512, 10, 10, FileAccess.Read, FileShare.None, FileOptions.None))
        {
            Assert.AreEqual(id, ff.FirstIdentity);
            using (Stream reader = ff.Open(id, FileAccess.Read))
                Assert.AreEqual(expected, PrimitiveSerializer.Guid.ReadFrom(reader));
            AssertThrows<InvalidOperationException>(delegate() { ff.Open(id, FileAccess.Write).Dispose(); });
        }
    }
}
/// <summary>
/// Verifies that block handles abandoned by a file instance that was never
/// disposed (and whose finalizer was suppressed) are recovered on reopen.
/// </summary>
public void TestRecoverBlocks()
{
    long idFirst, idSecond, idThird;
    using (TempFile file = new TempFile())
    {
        // Plain scope so the FragmentedFile reference can fall out of reach.
        {
            FragmentedFile abandoned = FragmentedFile.CreateNew(file.TempPath, 512, 100, 2, FragmentedFile.OptionsDefault);
            idFirst = abandoned.Create();
            idSecond = abandoned.Create();
            idThird = abandoned.Create();
            abandoned.Delete(idFirst);
            // Dangerous; only used for testing the case when the file was never disposed, nor GC'd.
            GC.SuppressFinalize(abandoned);
            abandoned = null;
        }
        GC.Collect(0, GCCollectionMode.Forced);
        GC.WaitForPendingFinalizers();

        using (FragmentedFile f2 = new FragmentedFile(file.TempPath, 512))
        {
            // The deleted first block's id is reused, then allocation continues past the last id.
            Assert.IsTrue(f2.Create() < idSecond);
            Assert.IsTrue(f2.Create() > idThird);
        }
    }
}
/// <summary>
/// Verifies the capabilities of a read-only fragment stream: readable,
/// non-writable, non-seekable, and throwing NotSupportedException for
/// every unsupported operation.
/// </summary>
public void TestReaderStream()
{
    using (SharedMemoryStream shared = new SharedMemoryStream())
    using (FragmentedFile ff = FragmentedFile.CreateNew(shared, 512, 100, 2))
    {
        long id;
        using (Stream writer = ff.Create(out id))
            PrimitiveSerializer.Int64.WriteTo(id, writer);

        using (Stream reader = ff.Open(id, FileAccess.Read))
        {
            Assert.IsTrue(reader.CanRead);
            Assert.IsFalse(reader.CanWrite);
            Assert.IsFalse(reader.CanSeek);
            Assert.AreEqual(id, PrimitiveSerializer.Int64.ReadFrom(reader));
            reader.Flush(); // no-op on a read-only stream

            AssertThrows<NotSupportedException>(delegate() { reader.Position = 0; });
            AssertThrows<NotSupportedException>(delegate() { GC.KeepAlive(reader.Position); });
            AssertThrows<NotSupportedException>(delegate() { GC.KeepAlive(reader.Length); });
            AssertThrows<NotSupportedException>(delegate() { reader.SetLength(1); });
            AssertThrows<NotSupportedException>(delegate() { reader.Seek(1, SeekOrigin.Begin); });
            AssertThrows<NotSupportedException>(delegate() { reader.WriteByte(1); });
        }
    }
}
/// <summary>
/// Verifies that Clear() removes every fragment from the file.
/// </summary>
/// <remarks>
/// The original version accumulated the created ids in a Dictionary that was
/// never read; that dead local collection has been removed.
/// </remarks>
public void TestClear()
{
    using (TempFile file = new TempFile())
    using (FragmentedFile ff = FragmentedFile.CreateNew(file.TempPath, 512, 100, 2, FragmentedFile.OptionsDefault))
    {
        // Create 256 empty fragments; the returned ids are not needed afterwards.
        for (int i = 0; i < 256; i++)
        {
            ff.Create();
        }

        // Enumerate: all 256 fragments must be visible.
        int count = 0;
        foreach (KeyValuePair<long, Stream> fragment in ff.ForeachBlock(true, true, null))
        {
            count++;
        }
        Assert.AreEqual(256, count);

        ff.Clear();

        // Empty? After Clear() enumeration must yield nothing.
        foreach (KeyValuePair<long, Stream> fragment in ff.ForeachBlock(true, true, null))
        {
            Assert.Fail();
        }
    }
}
/// <summary>
/// Verifies transactional write semantics: uncommitted writes are invisible
/// to readers, Commit() publishes them, Rollback() undoes them even after a
/// commit, and disposing without rollback implies commit.
/// </summary>
public void TestTransactBlock()
{
    SharedMemoryStream shared = new SharedMemoryStream();
    FragmentedFile.CreateNew(shared, 512, 100, 2).Dispose();
    using (FragmentedFile ff = new FragmentedFile(shared, 512, 100, 2))
    {
        long id;
        byte[] original = MakeBytes(255);
        using (Stream writer = ff.Create(out id))
            writer.Write(original, 0, original.Length);
        Assert.AreEqual(original, IOStream.ReadAllBytes(ff.Open(id, FileAccess.Read)));

        byte[] replacement = MakeBytes(800);
        using (Stream writer = ff.Open(id, FileAccess.Write))
        using (ITransactable trans = (ITransactable)writer) // the Fragmented File Streams are ITransactable
        {
            writer.Write(replacement, 0, replacement.Length);
            // Not yet committed: readers still see the original bytes.
            Assert.AreEqual(original, IOStream.ReadAllBytes(ff.Open(id, FileAccess.Read)));
            trans.Commit(); // commit changes so that readers can read
            Assert.AreEqual(replacement, IOStream.ReadAllBytes(ff.Open(id, FileAccess.Read)));
            trans.Rollback(); // rollback even after commit to 'undo' the changes
            Assert.AreEqual(original, IOStream.ReadAllBytes(ff.Open(id, FileAccess.Read)));
        }
        // Once disposed you can no longer rollback; if rollback has not been called commit is implied.
        Assert.AreEqual(original, IOStream.ReadAllBytes(ff.Open(id, FileAccess.Read)));
    }
}
/// <summary>
/// Creates an empty file store in the path specified.
/// </summary>
public static BTreeFileStore CreateNew(string filepath, int blockSize, int growthRate, int concurrentWriters, FileOptions options)
{
    // Seed the backing file with a root node before handing it to the store.
    using (FragmentedFile file = FragmentedFile.CreateNew(filepath, blockSize))
        CreateRoot(file);

    return new BTreeFileStore(filepath, blockSize, growthRate, concurrentWriters, options, false);
}
/// <summary>
/// Allocates the root block in a freshly created file; its handle must equal
/// the file's first identity, otherwise the file layout is invalid.
/// </summary>
private static void CreateRoot(FragmentedFile file)
{
    long rootId;
    using (file.Create(out rootId))
    {
    }

    if (rootId == file.FirstIdentity)
        return;
    throw new InvalidNodeHandleException();
}
/// <summary>
/// Runs the CRUD battery against a freshly created file, then again after
/// reopening the same file.
/// </summary>
public void TestSingleBlockCrud()
{
    using (TempFile file = new TempFile())
    {
        using (FragmentedFile created = FragmentedFile.CreateNew(file.TempPath, 512, 100, 2, FragmentedFile.OptionsDefault))
            TestCrud(created, 256, 256);
        using (FragmentedFile reopened = new FragmentedFile(file.TempPath, 512, 100, 2, FragmentedFile.OptionsDefault))
            TestCrud(reopened, 256, 256);
    }
}
/// <summary>
/// Exercises create, write, read, enumerate, update (grow and shrink), and
/// delete against the supplied file.
/// </summary>
void TestCrud(FragmentedFile ff, int blockCount, int blockSize)
{
    Dictionary<long, byte[]> contents = new Dictionary<long, byte[]>();

    // Create:
    for (int i = 0; i < blockCount; i++)
        contents.Add(ff.Create(), null);

    // Write:
    foreach (long id in new List<long>(contents.Keys))
    {
        using (Stream io = ff.Open(id, FileAccess.Write))
            io.Write(contents[id] = MakeBytes(blockSize), 0, blockSize);
    }

    // Read:
    foreach (KeyValuePair<long, byte[]> pair in contents)
    {
        using (Stream io = ff.Open(pair.Key, FileAccess.Read))
            Assert.AreEqual(pair.Value, IOStream.ReadAllBytes(io));
    }

    // Enumerate: every stored fragment appears exactly once with its bytes intact.
    Dictionary<long, byte[]> remaining = new Dictionary<long, byte[]>(contents);
    foreach (KeyValuePair<long, Stream> fragment in ff.ForeachBlock(true, true, null))
    {
        Assert.AreEqual(remaining[fragment.Key], IOStream.ReadAllBytes(fragment.Value));
        Assert.IsTrue(remaining.Remove(fragment.Key));
    }

    // Update: grow each fragment to double size, then shrink to half size.
    foreach (long id in new List<long>(contents.Keys))
    {
        Assert.AreEqual(contents[id], IOStream.ReadAllBytes(ff.Open(id, FileAccess.Read)));
        using (Stream io = ff.Open(id, FileAccess.Write))
            io.Write(contents[id] = MakeBytes(blockSize * 2), 0, blockSize * 2);
        Assert.AreEqual(contents[id], IOStream.ReadAllBytes(ff.Open(id, FileAccess.Read)));
        using (Stream io = ff.Open(id, FileAccess.Write))
            io.Write(contents[id] = MakeBytes(blockSize / 2), 0, blockSize / 2);
        Assert.AreEqual(contents[id], IOStream.ReadAllBytes(ff.Open(id, FileAccess.Read)));
    }

    // Delete:
    foreach (long id in new List<long>(contents.Keys))
        ff.Delete(id);

    // Empty? Nothing should remain after deleting every fragment.
    foreach (KeyValuePair<long, Stream> fragment in ff.ForeachBlock(true, true, null))
        Assert.Fail();
}
/// <summary>
/// Verifies a guid written to the first fragment can be read back, and that
/// the first fragment created takes the file's FirstIdentity.
/// </summary>
public void TestCreateAndRead()
{
    using (TempFile file = new TempFile())
    using (FragmentedFile ff = FragmentedFile.CreateNew(file.TempPath, 512))
    {
        Guid expected = Guid.NewGuid();
        long id;
        using (Stream writer = ff.Create(out id))
            PrimitiveSerializer.Guid.WriteTo(expected, writer);
        Assert.AreEqual(id, ff.FirstIdentity);
        using (Stream reader = ff.Open(id, FileAccess.Read))
            Assert.AreEqual(expected, PrimitiveSerializer.Guid.ReadFrom(reader));
    }
}
/// <summary>
/// Verifies a transacted fragment stream still accepts writes after Commit()
/// has been called.
/// </summary>
public void TestTransactWriteAfterCommit()
{
    SharedMemoryStream shared = new SharedMemoryStream();
    FragmentedFile.CreateNew(shared, 512, 100, 2).Dispose();
    using (FragmentedFile ff = new FragmentedFile(shared, 512, 100, 2))
    {
        long id;
        byte[] payload = MakeBytes(255);
        using (Stream writer = ff.Create(out id))
        using (ITransactable trans = (ITransactable)writer)
        {
            writer.Write(payload, 0, payload.Length);
            trans.Commit();
            // Writing after the commit must not throw.
            writer.Write(payload, 0, payload.Length);
        }
    }
}
/// <summary>
/// Performs a low-level scan of the storage file to yield all Key/Value pairs it was able to read from the file.
/// </summary>
/// <param name="options"> The options normally used to create the <see cref="BPlusTree{TKey, TValue}"/> instance </param>
/// <param name="sharing"> <see cref="FileShare"/> options used to open the file </param>
/// <returns> Yields the Key/Value pairs found in the file </returns>
public static IEnumerable<KeyValuePair<TKey, TValue>> RecoveryScan(Options options, FileShare sharing)
{
    // Work on a clone so the caller's options are not mutated; recovery must never create the file.
    options = options.Clone();
    options.CreateFile = CreatePolicy.Never;
    string filename = options.FileName;
    if (String.IsNullOrEmpty(filename))
    {
        throw new InvalidConfigurationValueException("FileName", "The FileName property was not specified.");
    }
    if (!File.Exists(filename))
    {
        throw new InvalidConfigurationValueException("FileName", "The FileName specified does not exist.");
    }
    if (options.StorageType != StorageType.Disk)
    {
        throw new InvalidConfigurationValueException("StorageType", "The storage type is not set to 'Disk'.");
    }

    // Open the raw fragmented file read-only; invalid blocks are skipped via the
    // IngoreDataInvalid handler rather than aborting the scan.
    using (FragmentedFile file = new FragmentedFile(filename, options.FileBlockSize, 1, 1, FileAccess.Read, sharing, FileOptions.None))
    {
        NodeSerializer nodeReader = new NodeSerializer(options, new NodeHandleSerializer(new Storage.BTreeFileStore.HandleSerializer()));
        foreach (KeyValuePair<long, Stream> block in file.ForeachBlock(true, false, IngoreDataInvalid))
        {
            // Buffer each block's entries first: C# does not allow 'yield return'
            // inside a try block that has a catch clause.
            List<KeyValuePair<TKey, TValue>> found = new List<KeyValuePair<TKey, TValue>>();
            try
            {
                foreach (KeyValuePair<TKey, TValue> entry in nodeReader.RecoverLeaf(block.Value))
                {
                    found.Add(entry);
                }
            }
            catch
            { /* Serialization error: Ignore and continue */ }

            foreach (KeyValuePair<TKey, TValue> entry in found)
            {
                yield return (entry);
            }
        }
    }
}
/// <summary>
/// Verifies that rolling back a newly created fragment (even after a commit)
/// removes it, so a subsequent read fails.
/// </summary>
public void TestRollbackCreate()
{
    SharedMemoryStream shared = new SharedMemoryStream();
    FragmentedFile.CreateNew(shared, 512, 100, 2).Dispose();
    using (FragmentedFile ff = new FragmentedFile(shared, 512, 100, 2))
    {
        long id;
        byte[] payload = MakeBytes(255);
        using (Stream writer = ff.Create(out id))
        using (ITransactable trans = (ITransactable)writer)
        {
            writer.Write(payload, 0, payload.Length);
            trans.Commit();
            Assert.AreEqual(payload, IOStream.ReadAllBytes(ff.Open(id, FileAccess.Read)));
            trans.Rollback();
        }
        // The rolled-back fragment can no longer be opened.
        AssertThrows<InvalidDataException>(delegate() { ff.Open(id, FileAccess.Read); });
    }
}
/// <summary>
/// Verifies fragments round-trip correctly when the file is created with the
/// write-through option set.
/// </summary>
public void TestOptionsWriteThrough()
{
    long id;
    byte[] bytes;
    Dictionary<long, byte[]> stored = new Dictionary<long, byte[]>();
    using (TempFile file = new TempFile())
    using (FragmentedFile ff = FragmentedFile.CreateNew(file.TempPath, 512, 10, 10, FragmentedFile.OptionsWriteThrough))
    {
        // Write 256 fragments of 256 bytes each.
        for (int i = 0; i < 256; i++)
        {
            using (Stream io = ff.Create(out id))
                io.Write(bytes = MakeBytes(256), 0, bytes.Length);
            stored.Add(id, bytes);
        }
        // Read each fragment back and compare.
        foreach (KeyValuePair<long, byte[]> pair in stored)
        {
            using (Stream io = ff.Open(pair.Key, FileAccess.Read))
                Assert.AreEqual(pair.Value, IOStream.ReadAllBytes(io));
        }
    }
}
/// <summary>
/// Verifies fragments round-trip correctly when the file is created with the
/// no-buffering (unbuffered I/O) option set.
/// </summary>
public void TestOptionsNoBuffering()
{
    // If you have issues with this test, your hardware may not support it, or your sector size is larger than 4096.
    long id;
    byte[] bytes;
    Dictionary<long, byte[]> stored = new Dictionary<long, byte[]>();
    using (TempFile file = new TempFile())
    using (FragmentedFile ff = FragmentedFile.CreateNew(file.TempPath, 4096, 10, 10, FragmentedFile.OptionsNoBuffering))
    {
        // Write 256 fragments of 256 bytes each.
        for (int i = 0; i < 256; i++)
        {
            using (Stream io = ff.Create(out id))
                io.Write(bytes = MakeBytes(256), 0, bytes.Length);
            stored.Add(id, bytes);
        }
        // Read each fragment back and compare.
        foreach (KeyValuePair<long, byte[]> pair in stored)
        {
            using (Stream io = ff.Open(pair.Key, FileAccess.Read))
                Assert.AreEqual(pair.Value, IOStream.ReadAllBytes(io));
        }
    }
}
/// <summary>
/// Runs the CRUD battery with 2000-byte fragments against a 512-byte block
/// size, so each fragment spans multiple blocks.
/// </summary>
public void TestMultiBlocks()
{
    using (TempFile file = new TempFile())
    using (FragmentedFile ff = FragmentedFile.CreateNew(file.TempPath, 512, 100, 2, FragmentedFile.OptionsDefault))
    {
        TestCrud(ff, 50, 2000);
    }
}
/// <summary>
/// Wraps an existing fragmented file as a BTree file store; the root node id
/// is taken from the file's first identity.
/// </summary>
private BTreeFileStore(FragmentedFile filestore)
{
    _file = filestore;
    _rootId = new FileId(filestore.FirstIdentity);
}