// Verifies that Commit()/Rollback() preserve or discard pending changes,
// both on an empty tree and around already-committed entries.
public void TestCommitRollback()
{
    using (BPlusTree<int, string> btree = Create(Options))
    {
        btree.EnableCount();

        // Rollback/commit are no-ops on an empty tree.
        Assert.AreEqual(0, btree.Count);
        btree.Rollback();
        Assert.AreEqual(0, btree.Count);
        btree.Commit();
        Assert.AreEqual(0, btree.Count);

        // An uncommitted add is undone by rollback.
        btree.Add(1, "A");
        btree.Rollback();
        Assert.AreEqual(0, btree.Count);
        btree.Commit();
        Assert.AreEqual(0, btree.Count);

        // A committed add survives a subsequent rollback.
        btree.Add(1, "A");
        btree.Commit();
        Assert.AreEqual(1, btree.Count);
        btree.Rollback();
        Assert.AreEqual(1, btree.Count);

        // Rollback discards only the uncommitted second key.
        btree.Add(2, "B");
        btree.Rollback();
        Assert.AreEqual(1, btree.Count);

        // A committed update is retained through rollback; key 2 stays gone.
        btree[1] = "abc";
        btree.Commit();
        Assert.AreEqual(1, btree.Count);
        btree.Rollback();
        Assert.AreEqual("abc", btree[1]);
        Assert.IsFalse(btree.ContainsKey(2));
    }
}
/// <summary>
/// Records a sale in the CMS index, keyed by the sale timestamp.
/// </summary>
/// <param name="saleEvent">The sale event to persist; must not be null.</param>
public void Add(SaleEvent saleEvent)
{
    if (saleEvent == null)
    {
        throw new ArgumentNullException(nameof(saleEvent));
    }

    var entry = new IndexValue()
    {
        Article = saleEvent.Article,
        Store = saleEvent.StoreName,
        Count = saleEvent.Count
    };

    // Insert and commit immediately so each sale is durable on its own.
    bTree.Add(saleEvent.DateTime, entry);
    bTree.Commit();
}
// A read-only clone of the options must only ever observe data that the
// writable tree has committed to its file.
public void TestReadOnlyCopy()
{
    using (var file = new TempFile())
    {
        var writerOptions = new BPlusTree<int, string>.OptionsV2(
            new PrimitiveSerializer(), new PrimitiveSerializer())
        {
            CreateFile = CreatePolicy.Always,
            FileName = file.TempPath,
        }.CalcBTreeOrder(4, 10);

        var readerOptions = writerOptions.Clone();
        readerOptions.CreateFile = CreatePolicy.Never;
        readerOptions.ReadOnly = true;

        using (var writer = new BPlusTree<int, string>(writerOptions))
        {
            // Nothing written yet: the reader sees an empty tree.
            using (var reader = new BPlusTree<int, string>(readerOptions))
            {
                reader.EnableCount();
                Assert.AreEqual(0, reader.Count);
            }

            // Uncommitted inserts remain invisible to the reader.
            writer.AddRange(MakeValues(0, 100));
            using (var reader = new BPlusTree<int, string>(readerOptions))
            {
                reader.EnableCount();
                Assert.AreEqual(0, reader.Count);
            }
            writer.Commit();

            // Remove the committed rows and stage new uncommitted data:
            // the reader still sees only the 100 committed records.
            for (int i = 0; i < 100; i++)
            {
                writer.Remove(i);
            }
            writer.AddRange(MakeValues(1000, 1000));
            using (var reader = new BPlusTree<int, string>(readerOptions))
            {
                reader.EnableCount();
                Assert.AreEqual(100, reader.Count);
                Assert.AreEqual(0, reader.First().Key);
                Assert.AreEqual(99, reader.Last().Key);
            }
            writer.Commit();
        }
    }
}
/// <summary>
/// Stores <paramref name="value"/> under <paramref name="key"/>. An existing
/// key is deleted and re-inserted; a new key is appended at the current write
/// offset as a fixed-size header followed by the raw value bytes.
/// </summary>
/// <param name="key">Object key; also written into the on-disk header.</param>
/// <param name="value">Raw object payload.</param>
public override void Put(byte[] key, byte[] value)
{
    // Best-effort debug trace; never let logging break the write path.
    try
    {
        Console.WriteLine("{0}:", name);
        Console.Write("{0}:", key.ToHexadecimal());
        try
        {
            Console.WriteLine("{0}:", System.Text.Encoding.Unicode.GetString(value));
        }
        catch
        {
            Console.WriteLine("{0}", value.ToHexadecimal());
        }
    }
    catch
    {
        Console.WriteLine("XXX");
    }

    long OO;
    lock (index)
    {
        if (index.TryGetValue(key, out OO))
        {
            // TODO: resize the object in place when the new value fits.
            Delete(key);
            Put(key, value); // Monitor is reentrant, so the nested call is safe.
        }
        else
        {
            ObjectHeader objectHeader = new ObjectHeader();
            objectHeader.Deleted = false;
            objectHeader.Length = value.Length;
            objectHeader.Key = key;

            int objectSize = value.Length;
            const int ObjectHeaderSize = 40;

            // Reserve space at the end of the block and grow the file if needed.
            OO = mh.WriteOffset;
            mh.WriteOffset += objectSize + ObjectHeaderSize;
            EnsureCapacity(mh.WriteOffset);

            byte[] headerBytes = new byte[ObjectHeaderSize];

            // FIX: dispose the view stream and the writer deterministically —
            // the original leaked both when an exception occurred mid-write
            // (accessor.Dispose() was only reached on the success path).
            using (MemoryMappedViewStream accessor = mmfBlock.CreateViewStream(
                reservedBytes + OO, objectSize + ObjectHeaderSize, MemoryMappedFileAccess.Write))
            using (BinaryWriter bw = new BinaryWriter(new MemoryStream(headerBytes, true)))
            {
                bw.Write(objectHeader.Deleted);
                bw.BaseStream.Seek(3, SeekOrigin.Current); // pad the bool out to 4 bytes
                bw.Write(objectHeader.Length);
                bw.Write(objectHeader.Key);

                accessor.Write(headerBytes, 0, ObjectHeaderSize);
                accessor.Write(value, 0, value.Length);
                accessor.Flush();
            }

            // Persist the advanced write offset, then index the new object.
            maMeta.Write(0, ref mh);
            maMeta.Flush();
            index.Add(key, OO);
            index.Commit();
        }
    }
}
/// <summary>
/// Smoke-tests the string tree: sets every (i, j, k) key/value pair,
/// committing and dumping the tree after each write, then reads it back.
/// </summary>
public static void TestStringTree()
{
    Stream treefile = null, blockfile = null;
    BPlusTree tree = GetStringTree(null, ref treefile, ref blockfile, false);
    Debug.WriteLine(tree.ToText());

    // FIX: removed the unused 'allmaps' and 'record' Hashtables — both were
    // allocated/populated but never read, so they only added noise.
    for (int i = 0; i < 2; i++)
    {
        for (int j = 0; j < 2; j++)
        {
            for (int k = 0; k < 2; k++)
            {
                string thiskey = MakeKey(i, j, k);
                string thisvalue = MakeValue(j, k, i);
                Debug.WriteLine("Set Pair: [" + thiskey + " : " + thisvalue + "]");
                tree[thiskey] = thisvalue;
                tree.Commit();
                Debug.WriteLine("Get Pair: [" + thiskey + " : " + tree.Get(thiskey).ToString() + "]");
                Debug.WriteLine(tree.ToText());
            }
        }
    }
}
/// <summary>
/// Applies every buffered index operation to the main tree (removing each
/// from the transition tree first), then commits the tree. On commit failure
/// the work is rolled back, the error is logged when a logger is available,
/// and an IndexException is raised.
/// </summary>
public void PersistAndCommitAll()
{
    // Snapshot the keys so opsToCommit can be mutated while iterating.
    var operationIds = new long[opsToCommit.Count];
    opsToCommit.Keys.CopyTo(operationIds, 0);

    foreach (var operationId in operationIds)
    {
        IList<IndexOp<long>> opToCommit;
        if (opsToCommit.TryGetValue(operationId, out opToCommit))
        {
            foreach (var indexOp in opToCommit)
            {
                // Each op leaves the transition tree and lands in the main tree.
                _transitionTree.TryRemove(indexOp.Key, indexOp);
                switch (indexOp.OperationType)
                {
                    case OpType.Insert:
                        _tree.Add(indexOp.Key, indexOp.RowId);
                        break;
                    case OpType.Remove:
                        _tree.TryRemove(indexOp.Key, indexOp.RowId);
                        break;
                }
            }
            opsToCommit.Remove(operationId);
        }
    }

    try
    {
        if (!_tree.IsDisposed)
        {
            _tree.Commit();
        }
    }
    catch (Exception ex)
    {
        Rollback(null);
        if (LoggerManager.Instance.IndexLogger != null)
        {
            LoggerManager.Instance.IndexLogger.Error("BPlusIndex",
                "Error Code: " + ErrorCodes.Indexes.TREE_COMMIT_FAILURE + " - " + ex.ToString());
        }
        // FIX: throw unconditionally — the original threw only when a logger
        // was configured, silently swallowing commit failures otherwise.
        throw new IndexException(ErrorCodes.Indexes.TREE_COMMIT_FAILURE);
    }
}
// Ends a write "transaction": either rolls the pending generation back or
// publishes the buffered changes, then releases the write lock per lockInfo.
private void Release(WriteLockInfo lockInfo, bool commit = true)
{
    if (commit == false)
    {
        // Rollback path: abandon the generation that was being written.
        var rtaken = false;
        try
        {
            Monitor.Enter(_rlocko, ref rtaken);
            // Empty try with the mutations in finally: the finally block is
            // protected from interruption, so _nextGen/_liveGen are always
            // updated together once we hold the read lock.
            try { }
            finally
            {
                _nextGen = false;
                _liveGen -= 1;
            }
        }
        finally
        {
            if (rtaken)
            {
                Monitor.Exit(_rlocko);
            }
        }

        // Discard uncommitted entries from each store.
        Rollback(_contentNodes);
        Rollback(_contentRootNodes);
        Rollback(_contentTypesById);
        Rollback(_contentTypesByAlias);
    }
    else if (_localDb != null && _wchanges != null)
    {
        // Commit path: replay buffered changes into the local db.
        // A null value marks a deletion; anything else is an upsert.
        foreach (var change in _wchanges)
        {
            if (change.Value.IsNull)
            {
                _localDb.TryRemove(change.Key, out ContentNodeKit unused);
            }
            else
            {
                _localDb[change.Key] = change.Value;
            }
        }
        _wchanges = null;
        _localDb.Commit();
    }

    // Release the write lock only to the extent this call acquired it.
    if (lockInfo.Count)
    {
        _wlocked--;
    }
    if (lockInfo.Taken)
    {
        Monitor.Exit(_wlocko);
    }
}
// Ensures a ReadOnly clone opened over the same file observes only the
// committed state of the writable tree.
public void TestReadOnlyCopy()
{
    using (var tempFile = new TempFile())
    {
        var options = new BPlusTree<int, string>.OptionsV2(
            new PrimitiveSerializer(), new PrimitiveSerializer())
        {
            CreateFile = CreatePolicy.Always,
            FileName = tempFile.TempPath,
        }.CalcBTreeOrder(4, 10);

        var readcopy = options.Clone();
        readcopy.CreateFile = CreatePolicy.Never;
        readcopy.ReadOnly = true;

        using (var tree = new BPlusTree<int, string>(options))
        {
            // Opens a fresh read-only view with counting enabled.
            BPlusTree<int, string> OpenReadCopy()
            {
                var view = new BPlusTree<int, string>(readcopy);
                view.EnableCount();
                return view;
            }

            // Empty file, empty view.
            using (var copy = OpenReadCopy())
            {
                Assert.AreEqual(0, copy.Count);
            }

            // Staged but uncommitted inserts are not visible.
            tree.AddRange(MakeValues(0, 100));
            using (var copy = OpenReadCopy())
            {
                Assert.AreEqual(0, copy.Count);
            }
            tree.Commit();

            // Delete the committed rows and stage new uncommitted ones;
            // the view still reflects the last commit only.
            for (int i = 0; i < 100; i++)
            {
                tree.Remove(i);
            }
            tree.AddRange(MakeValues(1000, 1000));
            using (var copy = OpenReadCopy())
            {
                Assert.AreEqual(100, copy.Count);
                Assert.AreEqual(0, copy.First().Key);
                Assert.AreEqual(99, copy.Last().Key);
            }
            tree.Commit();
        }
    }
}
// Verifies that a transaction log saved before a rollback can be copied back
// and replayed so the tree recovers the rolled-back state.
public void TestRestoreLargeLog()
{
    using (TempFile savedLog = new TempFile())
    using (TempFile dataFile = new TempFile())
    {
        var options = GetOptions(dataFile);
        options.FileBlockSize = 512;
        options.StoragePerformance = StoragePerformance.Fastest;
        options.CalcBTreeOrder(Marshal.SizeOf(typeof(Guid)), Marshal.SizeOf(typeof(TestInfo)));
        options.TransactionLog = new TransactionLog<Guid, TestInfo>(
            new TransactionLogOptions<Guid, TestInfo>(
                options.TransactionLogFileName,
                options.KeySerializer,
                options.ValueSerializer
            )
        );

        Dictionary<Guid, TestInfo> committed = new Dictionary<Guid, TestInfo>();
        Dictionary<Guid, TestInfo> expected;

        using (var tree = new BPlusTree<Guid, TestInfo>(options))
        {
            tree.EnableCount();

            // Commit an initial batch of 100 records.
            Insert(tree, committed, 1, 100, TimeSpan.FromMinutes(1));
            tree.Commit();
            Assert.AreEqual(100, tree.Count);

            // Pile on 35,000 uncommitted records.
            expected = new Dictionary<Guid, TestInfo>(committed);
            Insert(tree, expected, 7, 5000, TimeSpan.FromMinutes(1));
            Assert.AreEqual(35100, tree.Count);

            // Touch every record once so the log grows further.
            for (int pass = 0; pass < 1; pass++)
            {
                foreach (var rec in tree)
                {
                    var value = rec.Value;
                    value.UpdateCount++;
                    value.ReadCount++;
                    tree[rec.Key] = value;
                }
            }

            // Preserve the log, then roll back to the committed snapshot.
            File.Copy(options.TransactionLog.FileName, savedLog.TempPath, true);
            tree.Rollback();
            TestInfo.AssertEquals(committed, tree);
        }

        // The data file on disk holds only the committed batch.
        TestInfo.AssertEquals(committed, BPlusTree<Guid, TestInfo>.EnumerateFile(options));

        // Restoring the saved log replays everything that was rolled back.
        File.Copy(savedLog.TempPath, options.TransactionLog.FileName, true);
        using (var tree = new BPlusTree<Guid, TestInfo>(options))
        {
            TestInfo.AssertEquals(expected, tree);
        }

        // The replayed state is now persisted in the data file as well.
        TestInfo.AssertEquals(expected, BPlusTree<Guid, TestInfo>.EnumerateFile(options));
    }
}
// Replays a live transaction log into a second tree and checks that the
// replica tracks each batch of changes made to the source.
public void TestSyncFromLogging()
{
    using (var tempFile = new TempFile())
    using (var logfile = new TempFile())
    using (var tempCopy = new TempFile())
    {
        var options = new BPlusTree<int, string>.OptionsV2(
            new PrimitiveSerializer(), new PrimitiveSerializer())
        {
            CreateFile = CreatePolicy.Always,
            FileName = tempFile.TempPath,
            TransactionLogFileName = logfile.TempPath,
        }.CalcBTreeOrder(4, 10);

        var replicaOptions = options.Clone();
        replicaOptions.FileName = tempCopy.TempPath;
        replicaOptions.StoragePerformance = StoragePerformance.Fastest;

        using (var source = new BPlusTree<int, string>(options))
        using (var replica = new BPlusTree<int, string>(replicaOptions))
        using (var logReader = new TransactionLog<int, string>(
            new TransactionLogOptions<int, string>(
                logfile.TempPath, PrimitiveSerializer.Int32, PrimitiveSerializer.String)
            { ReadOnly = true }))
        {
            source.Add(0, "0");
            source.Commit();

            long position = 0;
            replica.EnableCount();

            // Seed the replica from the source's committed data file.
            replica.BulkInsert(
                BPlusTree<int, string>.EnumerateFile(options),
                new BulkInsertOptions
                {
                    InputIsSorted = true,
                    CommitOnCompletion = false,
                    ReplaceContents = true
                }
            );
            Assert.AreEqual(1, replica.Count);
            Assert.AreEqual("0", replica[0]);

            // Nothing new in the log yet.
            logReader.ReplayLog(replica, ref position);
            Assert.AreEqual(1, replica.Count);

            // 99 inserts -> replica reaches 100 records.
            source.AddRange(MakeValues(1, 99));
            logReader.ReplayLog(replica, ref position);
            Assert.AreEqual(100, replica.Count);

            // Remove all 100 -> replica drains to empty.
            for (int i = 0; i < 100; i++)
            {
                source.Remove(i);
            }
            logReader.ReplayLog(replica, ref position);
            Assert.AreEqual(0, replica.Count);

            // 1000 fresh inserts -> replica matches again.
            source.AddRange(MakeValues(1000, 1000));
            logReader.ReplayLog(replica, ref position);
            Assert.AreEqual(1000, replica.Count);
        }
    }
}
// Saves the transaction log prior to Rollback(), then restores it and
// reopens the tree to confirm the logged changes are replayed in full.
public void TestRestoreLargeLog()
{
    using (TempFile logBackup = new TempFile())
    using (TempFile storeFile = new TempFile())
    {
        var options = GetOptions(storeFile);
        options.FileBlockSize = 512;
        options.StoragePerformance = StoragePerformance.Fastest;
        options.CalcBTreeOrder(Marshal.SizeOf(typeof(Guid)), Marshal.SizeOf(typeof(TestInfo)));
        options.TransactionLog = new TransactionLog<Guid, TestInfo>(
            new TransactionLogOptions<Guid, TestInfo>(
                options.TransactionLogFileName,
                options.KeySerializer,
                options.ValueSerializer
            )
        );

        Dictionary<Guid, TestInfo> baseline = new Dictionary<Guid, TestInfo>();
        Dictionary<Guid, TestInfo> fullSet;

        using (var tree = new BPlusTree<Guid, TestInfo>(options))
        {
            tree.EnableCount();

            // First 100 records are committed and form the baseline.
            Insert(tree, baseline, 1, 100, TimeSpan.FromMinutes(1));
            tree.Commit();
            Assert.AreEqual(100, tree.Count);

            // Add 7 * 5000 more records without committing.
            fullSet = new Dictionary<Guid, TestInfo>(baseline);
            Insert(tree, fullSet, 7, 5000, TimeSpan.FromMinutes(1));
            Assert.AreEqual(35100, tree.Count);

            // One update pass over every record inflates the log.
            for (int round = 0; round < 1; round++)
            {
                foreach (var rec in tree)
                {
                    var info = rec.Value;
                    info.UpdateCount++;
                    info.ReadCount++;
                    tree[rec.Key] = info;
                }
            }

            // Snapshot the log, then discard everything uncommitted.
            File.Copy(options.TransactionLog.FileName, logBackup.TempPath, true);
            tree.Rollback();
            TestInfo.AssertEquals(baseline, tree);
        }

        // On disk: only the baseline survived the rollback.
        TestInfo.AssertEquals(baseline, BPlusTree<Guid, TestInfo>.EnumerateFile(options));

        // Put the saved log back; reopening replays the lost operations.
        File.Copy(logBackup.TempPath, options.TransactionLog.FileName, true);
        using (var tree = new BPlusTree<Guid, TestInfo>(options))
        {
            TestInfo.AssertEquals(fullSet, tree);
        }

        // After replay the full set is persisted in the data file.
        TestInfo.AssertEquals(fullSet, BPlusTree<Guid, TestInfo>.EnumerateFile(options));
    }
}
// Keeps a mirror tree in sync with a writer purely by replaying the writer's
// transaction log, asserting the mirror's count after every replay.
public void TestSyncFromLogging()
{
    using (var tempFile = new TempFile())
    using (var logfile = new TempFile())
    using (var tempCopy = new TempFile())
    {
        var options = new BPlusTree<int, string>.OptionsV2(
            new PrimitiveSerializer(), new PrimitiveSerializer())
        {
            CreateFile = CreatePolicy.Always,
            FileName = tempFile.TempPath,
            TransactionLogFileName = logfile.TempPath,
        }.CalcBTreeOrder(4, 10);

        var mirrorOptions = options.Clone();
        mirrorOptions.FileName = tempCopy.TempPath;
        mirrorOptions.StoragePerformance = StoragePerformance.Fastest;

        using (var writer = new BPlusTree<int, string>(options))
        using (var mirror = new BPlusTree<int, string>(mirrorOptions))
        using (var tlog = new TransactionLog<int, string>(
            new TransactionLogOptions<int, string>(
                logfile.TempPath, PrimitiveSerializer.Int32, PrimitiveSerializer.String)
            { ReadOnly = true }))
        {
            writer.Add(0, "0");
            writer.Commit();

            long logOffset = 0;
            mirror.EnableCount();

            // Bootstrap the mirror from the writer's committed file contents.
            var bulkOptions = new BulkInsertOptions
            {
                InputIsSorted = true,
                CommitOnCompletion = false,
                ReplaceContents = true
            };
            mirror.BulkInsert(BPlusTree<int, string>.EnumerateFile(options), bulkOptions);
            Assert.AreEqual(1, mirror.Count);
            Assert.AreEqual("0", mirror[0]);

            // An immediate replay finds no new operations.
            tlog.ReplayLog(mirror, ref logOffset);
            Assert.AreEqual(1, mirror.Count);

            // Batch of 99 inserts flows through the log.
            writer.AddRange(MakeValues(1, 99));
            tlog.ReplayLog(mirror, ref logOffset);
            Assert.AreEqual(100, mirror.Count);

            // Batch of 100 removals empties the mirror.
            for (int i = 0; i < 100; i++)
            {
                writer.Remove(i);
            }
            tlog.ReplayLog(mirror, ref logOffset);
            Assert.AreEqual(0, mirror.Count);

            // Final batch of 1000 inserts is mirrored as well.
            writer.AddRange(MakeValues(1000, 1000));
            tlog.ReplayLog(mirror, ref logOffset);
            Assert.AreEqual(1000, mirror.Count);
        }
    }
}
/// <summary>
/// Commits any pending changes and releases the underlying tree.
/// </summary>
public void Dispose()
{
    try
    {
        // Persist uncommitted changes before tearing down.
        tree.Commit();
    }
    finally
    {
        // FIX: always dispose the tree, even when the final commit throws,
        // so the backing file/handles are not leaked (original skipped
        // Dispose() if Commit() failed).
        tree.Dispose();
    }
}