public void Delete_Query()
{
    using (var file = new TempFile())
    {
        var initial = new DateTime(2000, 01, 01);

        using (var db = new LiteEngine(file.Filename))
        {
            for (var i = 0; i < 5000; i++)
            {
                db.Insert("col", new BsonDocument { { "dt", initial.AddDays(i) } });
            }

            db.EnsureIndex("col", "dt");

            Assert.AreEqual(5000, db.Count("col"));

            // "dd" is a field no document contains, so the query matches nothing
            Assert.AreEqual(0, db.Count("col", Query.GT("dd", initial)));

            var del = db.Delete("col", Query.GT("dd", initial));

            Assert.AreEqual(0, del);
            Assert.AreEqual(0, db.Count("col", Query.GT("dd", initial)));
        }
    }
}
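// The tests in this excerpt rely on a TempFile helper that is not shown here.
// Below is a minimal sketch of what it could look like: the member names
// (Filename, Size, Disk(), LoremIpsum) come from the call sites in these
// tests, but every implementation detail is an assumption, not the real
// helper. Assumes the usual usings (System, System.IO, System.Text).
public class TempFile : IDisposable
{
    public string Filename { get; private set; }

    // current size of the datafile on disk, in bytes
    public long Size
    {
        get { return new FileInfo(this.Filename).Length; }
    }

    public TempFile()
    {
        this.Filename = Path.Combine(Path.GetTempPath(), "litedb-test-" + Guid.NewGuid().ToString("n") + ".db");
    }

    // wraps the datafile in a disk service (see Multikey_Count_Test)
    public FileDiskService Disk()
    {
        return new FileDiskService(this.Filename);
    }

    // generates pseudo-random filler text; the parameters are assumed to be
    // word/sentence/paragraph ranges, as suggested by LoremIpsum(5, 10, 1, 5, 1)
    public static string LoremIpsum(int minWords, int maxWords, int minSentences, int maxSentences, int paragraphs)
    {
        var words = new[] { "lorem", "ipsum", "dolor", "sit", "amet", "consectetur", "adipiscing", "elit" };
        var rnd = new Random();
        var sb = new StringBuilder();

        for (var p = 0; p < paragraphs; p++)
        {
            for (var s = rnd.Next(minSentences, maxSentences + 1); s > 0; s--)
            {
                for (var w = rnd.Next(minWords, maxWords + 1); w > 0; w--)
                {
                    sb.Append(words[rnd.Next(words.Length)]).Append(' ');
                }
            }
            sb.AppendLine();
        }

        return sb.ToString().Trim();
    }

    public void Dispose()
    {
        if (File.Exists(this.Filename)) File.Delete(this.Filename);
    }
}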
public void Thread_Insert_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.EnsureIndex("col", "thread");

        // insert 1000 x thread=1
        var ta = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < 1000; i++)
            {
                db.Insert("col", new BsonDocument { { "thread", 1 } });
            }
        });

        // insert 700 x thread=2
        var tb = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < 700; i++)
            {
                db.Insert("col", new BsonDocument { { "thread", 2 } });
            }
        });

        Task.WaitAll(ta, tb);

        Assert.AreEqual(1000, db.Count("col", Query.EQ("thread", 1)));
        Assert.AreEqual(700, db.Count("col", Query.EQ("thread", 2)));
    }
}
public void MultiKey_InsertUpdate_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.Insert("col", GetDocs(1, 1, 1, 2, 3));
        db.Insert("col", GetDocs(2, 2, 2, 2, 4));
        db.Insert("col", GetDocs(3, 3, 3));

        // create indexes after the documents are already in the collection
        db.EnsureIndex("col", "list");
        db.EnsureIndex("col", "rnd");

        // change doc 2's list to [9, 9]
        db.Update("col", GetDocs(2, 2, 9, 9));

        // after the update, only doc 1 still contains key 2
        var r = string.Join(",", db.Find("col", Query.EQ("list", 2)).Select(x => x["_id"].ToString()));

        Assert.AreEqual("1", r);

        Assert.AreEqual(3, db.Count("col", null));
        Assert.AreEqual(3, db.Count("col", Query.All()));

        // 5 index keys = [1, 2, 3], [3], [9]
        var l = string.Join(",", db.FindIndex("col", Query.All("list")));

        Assert.AreEqual("1,2,3,3,9", l);

        // count should count only documents - not index nodes
        Assert.AreEqual(3, db.Count("col", Query.All("list")));
    }
}
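// GetDocs is another helper this excerpt does not define. From the call
// sites above (GetDocs(1, 1, 1, 2, 3) yields document _id=1 with list
// [1, 2, 3]; GetDocs(1, 1000, 1, 2, 3) in Multikey_Count_Test yields 1000
// such documents), a plausible sketch is: the first two arguments are an
// inclusive _id range and the remaining values become the multikey "list"
// array. The "rnd" field is included only because an index is created on it
// above. This is a hedged reconstruction, not the real helper.
private IEnumerable<BsonDocument> GetDocs(int start, int end, params int[] list)
{
    var rnd = new Random();

    for (var i = start; i <= end; i++)
    {
        yield return new BsonDocument
        {
            { "_id", i },
            { "list", new BsonArray(list.Select(x => new BsonValue(x))) },
            { "rnd", rnd.Next(0, 1000) } // hypothetical random field
        };
    }
}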
public void Checkpoint_TransactionRecovery_Test()
{
    using (var file = new TempFile())
    {
        using (var db = new LiteEngine(new FileDiskService(file.Filename), autocommit: false))
        {
            var log = new StringBuilder();
            db.Log.Level = Logger.CACHE;
            db.Log.Logging += (s) => log.AppendLine(s);

            // initialize my "col" with 1000 docs without a transaction
            db.Insert("col", GetDocs(1, 1000));

            // commit now to initialize a new transaction
            db.Commit();

            // insert a lot of docs inside a single collection (will checkpoint to disk)
            db.Insert("col", GetDocs(1001, N));

            // update all documents
            db.Update("col", GetDocs(1, N));

            // create new index
            db.EnsureIndex("col", "type");

            // check if the cache did a checkpoint
            Assert.IsTrue(log.ToString().Contains("checkpoint"));

            // datafile must be big (because the checkpoint expands the file)
            Assert.IsTrue(file.Size > 30 * 1024 * 1024); // > 30 MB

            // delete all docs with _id > 1000
            db.Delete("col", Query.GT("_id", 1000));

            db.DropIndex("col", "type");

            // let's rollback everything
            db.Rollback();

            // be sure the cache is empty
            Assert.AreEqual(0, db.CacheUsed);

            // datafile must return to its original size (less than 1.5 MB for 1000 docs)
            Assert.IsTrue(file.Size < 1.5 * 1024 * 1024);

            // test that only my original 1000 docs exist
            Assert.AreEqual(1000, db.Count("col", Query.All()));
            Assert.AreEqual(1000, db.Count("col", null));

            // test indexes (must have only the _id index)
            Assert.AreEqual(1, db.GetIndexes("col").Count());
        }
    }
}
public void Multikey_Count_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Disk(), cacheSize: 10, autocommit: true))
    {
        // create the index before inserting documents
        db.EnsureIndex("col", "list");

        db.Insert("col", GetDocs(1, 1000, 1, 2, 3));
        db.Insert("col", GetDocs(1001, 2000, 2, 3));
        db.Insert("col", GetDocs(2001, 2500, 4));

        Assert.AreEqual(1000, db.Count("col", Query.All("list", 1)));
        Assert.AreEqual(2000, db.Count("col", Query.All("list", 2)));
        Assert.AreEqual(2000, db.Count("col", Query.All("list", 3)));
        Assert.AreEqual(500, db.Count("col", Query.All("list", 4)));

        // drop index
        db.DropIndex("col", "list");

        // re-create index
        db.EnsureIndex("col", "list");

        // count again
        Assert.AreEqual(1000, db.Count("col", Query.All("list", 1)));
        Assert.AreEqual(2000, db.Count("col", Query.All("list", 2)));
        Assert.AreEqual(2000, db.Count("col", Query.All("list", 3)));
        Assert.AreEqual(500, db.Count("col", Query.All("list", 4)));
    }
}
public void Thread_InsertUpdate_Test()
{
    const int N = 3000;

    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.EnsureIndex("col", "updated");

        Assert.AreEqual(0, db.Count("col", Query.EQ("updated", true)));

        // insert N basic documents
        var ta = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < N; i++)
            {
                var doc = new BsonDocument { { "_id", i } };
                db.Insert("col", doc);
            }
        });

        // update each document while inserts are running, retrying until each _id exists
        var tb = Task.Factory.StartNew(() =>
        {
            var i = 0;
            while (i < N)
            {
                var doc = new BsonDocument
                {
                    { "_id", i },
                    { "updated", true },
                    { "name", TempFile.LoremIpsum(5, 10, 1, 5, 1) }
                };

                if (db.Update("col", doc)) { i++; }
            }
        });

        Task.WaitAll(ta, tb);

        Assert.AreEqual(N, db.Count("col", Query.EQ("updated", true)));
    }
}
public void Checkpoint_Recovery_Test()
{
    using (var file = new TempFile())
    {
        // init with N docs with type=1
        using (var db = new LiteEngine(file.Filename))
        {
            db.EnsureIndex("col", "type");
            db.Insert("col", GetDocs(1, N, type: 1));

            Assert.AreEqual(N, db.Count("col", Query.EQ("type", 1)));
        }

        // re-open and try to update all docs to type=2
        using (var db = new LiteEngine(file.Filename))
        {
            var log = new StringBuilder();
            db.Log.Level = Logger.CACHE;
            db.Log.Logging += (s) => log.AppendLine(s);

            try
            {
                // try to update all docs to type=2,
                // but an exception is thrown before the batch finishes
                db.Update("col", GetDocs(1, N, type: 2, throwAtEnd: true));
            }
            catch (Exception ex)
            {
                if (!ex.Message.Contains("Try Recovery!")) Assert.Fail(ex.Message);
            }

            // check if the cache did a checkpoint
            Assert.IsTrue(log.ToString().Contains("checkpoint"));

            // re-check that all docs are still type=1
            Assert.AreEqual(N, db.Count("col", Query.EQ("type", 1)));
            Assert.AreEqual(0, db.Count("col", Query.EQ("type", 2)));
        }

        // re-open the datafile to be sure it contains only type=1
        using (var db = new LiteEngine(file.Filename))
        {
            Assert.AreEqual(N, db.Count("col", Query.EQ("type", 1)));
            Assert.AreEqual(0, db.Count("col", Query.EQ("type", 2)));
        }
    }
}
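// The checkpoint tests use a second GetDocs shape with named parameters
// (type, throwAtEnd) plus a class-level constant N, none of which appear in
// this excerpt. A hedged sketch follows: N's value is hypothetical, and
// throwAtEnd is assumed to raise the "Try Recovery!" exception (matched in
// the catch block above) after the last document is yielded, simulating a
// crash mid-batch. In the real suite this overload most likely lives in a
// different test class than the multikey GetDocs, so the two do not collide.
private const int N = 60000; // hypothetical value; the real constant is not shown

private IEnumerable<BsonDocument> GetDocs(int start, int end, int type = 1, bool throwAtEnd = false)
{
    for (var i = start; i <= end; i++)
    {
        yield return new BsonDocument
        {
            { "_id", i },
            { "name", Guid.NewGuid().ToString() },
            { "type", type }
        };
    }

    // fail only after the whole batch was fed to the engine, so the
    // transaction must be rolled back via the recovery journal
    if (throwAtEnd) throw new Exception("Try Recovery!");
}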
public void BulkInsert_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        // bulk insert 500,000 documents
        db.InsertBulk("col", GetDocs(1, 500000));

        // assert that all were inserted (based on the collection header only)
        Assert.AreEqual(500000, db.Count("col"));

        // and now count all documents by query
        Assert.AreEqual(500000, db.Count("col", Query.All()));
    }
}
public void Execute(LiteEngine engine, StringScanner s, Display display, InputCommand input, Env env)
{
    var col = this.ReadCollection(engine, s);
    var query = this.ReadQuery(s);

    display.WriteResult(engine.Count(col, query));
}
public void Shell_Commands()
{
    using (var db = new LiteEngine(new MemoryStream()))
    {
        db.Run("db.col1.insert {a: 1}");
        db.Run("db.col1.insert {a: 2}");
        db.Run("db.col1.insert {a: 3}");
        db.Run("db.col1.ensureIndex a");

        Assert.AreEqual(1, db.Run("db.col1.find a = 1").First().AsDocument["a"].AsInt32);

        db.Run("db.col1.update a = $.a + 10, b = 2 where a = 1");

        Assert.AreEqual(11, db.Run("db.col1.find a = 11").First().AsDocument["a"].AsInt32);
        Assert.AreEqual(3, db.Count("col1"));

        // insert new data with an int auto id
        db.Run("db.data.insert {Text: \"Anything\", Number: 10} id:int");
        db.Run("db.data.ensureIndex Text");

        var doc = db.Run("db.data.find Text like \"A\"").First() as BsonDocument;

        Assert.AreEqual(1, doc["_id"].AsInt32);
        Assert.AreEqual("Anything", doc["Text"].AsString);
        Assert.AreEqual(10, doc["Number"].AsInt32);
    }
}
public void Engine_QueryUpdate_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.EnsureIndex("col", "name");

        // insert 4 documents
        db.Insert("col", new BsonDocument { { "_id", 1 } });
        db.Insert("col", new BsonDocument { { "_id", 2 } });
        db.Insert("col", new BsonDocument { { "_id", 3 } });
        db.Insert("col", new BsonDocument { { "_id", 4 } });

        // query all documents and update the name field
        foreach (var d in db.Find("col", Query.All()))
        {
            d["name"] = "john";
            db.Update("col", d);
        }

        // verifies that the same thread can hold a read lock and then acquire the write lock
        Assert.AreEqual(4, db.Count("col", Query.EQ("name", "john")));
    }
}
public IEnumerable<BsonValue> Execute(StringScanner s, LiteEngine engine)
{
    var col = this.ReadCollection(engine, s);
    var query = this.ReadQuery(s, false);

    s.ThrowIfNotFinish();

    yield return engine.Count(col, query);
}
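// The two Execute methods above implement the shell "count" command (one
// writing to the console Display, one yielding BsonValue results). A hedged
// usage sketch, inferring the "db.<collection>.count <query>" grammar from
// the insert/find commands in Shell_Commands above; the exact syntax is an
// assumption:
public void Shell_Count_Sketch()
{
    using (var db = new LiteEngine(new MemoryStream()))
    {
        db.Run("db.col1.insert {a: 1}");
        db.Run("db.col1.insert {a: 2}");
        db.Run("db.col1.insert {a: 3}");

        // count documents matching a query (assumed shell grammar)
        Assert.AreEqual(2, db.Run("db.col1.count a > 1").First().AsInt64);
    }
}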
public void Process_Insert_Delete()
{
    using (var file = new TempFile())
    {
        using (var dbA = new LiteEngine(file.Filename))
        using (var dbB = new LiteEngine(file.Filename))
        {
            dbA.EnsureIndex("col", "process", false);

            // instance A inserts 1000 x process=1
            var ta = Task.Factory.StartNew(() =>
            {
                for (var i = 0; i < 1000; i++)
                {
                    dbA.Insert("col", new BsonDocument { { "process", 1 } });
                }
            });

            // instance B keeps deleting everything
            var tb = Task.Factory.StartNew(() =>
            {
                // wait until the inserts start
                while (dbB.Count("col", Query.EQ("process", 1)) == 0)
                {
                    Task.Delay(50).Wait();
                }

                // delete while there are still docs
                while (dbB.Count("col", Query.EQ("process", 1)) > 0)
                {
                    dbB.Delete("col", Query.All());
                    Task.Delay(50).Wait();
                }
            });

            Task.WaitAll(ta, tb);

            Assert.AreEqual(0, dbA.Count("col", Query.EQ("process", 1)));
            Assert.AreEqual(0, dbB.Count("col", Query.EQ("process", 1)));
        }
    }
}
public void Process_Multi_Insert()
{
    using (var file = new TempFile())
    {
        using (var dbA = new LiteEngine(file.Filename))
        using (var dbB = new LiteEngine(file.Filename))
        {
            dbA.EnsureIndex("col", "process", false);

            // instance A inserts 1000 x process=1
            var ta = Task.Factory.StartNew(() =>
            {
                for (var i = 0; i < 1000; i++)
                {
                    dbA.Insert("col", new BsonDocument { { "process", 1 } });
                }
            });

            // instance B inserts 700 x process=2
            var tb = Task.Factory.StartNew(() =>
            {
                for (var i = 0; i < 700; i++)
                {
                    dbB.Insert("col", new BsonDocument { { "process", 2 } });
                }
            });

            Task.WaitAll(ta, tb);

            // both instances must see all documents
            Assert.AreEqual(1000, dbA.Count("col", Query.EQ("process", 1)));
            Assert.AreEqual(700, dbA.Count("col", Query.EQ("process", 2)));
            Assert.AreEqual(1000, dbB.Count("col", Query.EQ("process", 1)));
            Assert.AreEqual(700, dbB.Count("col", Query.EQ("process", 2)));
        }
    }
}
public void Process_Insert_Count()
{
    using (var file = new TempFile())
    {
        using (var dbA = new LiteEngine(file.Filename))
        using (var dbB = new LiteEngine(file.Filename))
        {
            dbA.EnsureIndex("col", "process", false);

            // instance A inserts 1000 x process=1
            var ta = Task.Factory.StartNew(() =>
            {
                for (var i = 0; i < 1000; i++)
                {
                    dbA.Insert("col", new BsonDocument { { "process", 1 } });
                }
            });

            // instance B keeps querying until it sees all 1000 docs
            var tb = Task.Factory.StartNew(() =>
            {
                var count = 0L;
                while (count < 1000)
                {
                    // force a full count over all rows
                    count = dbB.Count("col", Query.EQ("process", 1));
                    Task.Delay(50).Wait();
                }
            });

            Task.WaitAll(ta, tb);

            Assert.AreEqual(1000, dbA.Count("col", Query.EQ("process", 1)));
            Assert.AreEqual(1000, dbB.Count("col", Query.EQ("process", 1)));
        }
    }
}
public void Checkpoint_Index_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        // insert N basic documents
        db.Insert("col", GetDocs(1, N));

        var log = new StringBuilder();
        db.Log.Level = Logger.CACHE;
        db.Log.Logging += (s) => log.AppendLine(s);

        // creating an index over "col" must trigger a checkpoint
        db.EnsureIndex("col", "name");

        Assert.IsTrue(log.ToString().Contains("checkpoint"));

        Assert.AreEqual(N, db.Count("col", Query.All()));
    }
}
public void Thread_InsertQuery_Test()
{
    const int N = 3000;
    var running = true;

    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.Insert("col", new BsonDocument());

        // insert N basic documents
        var ta = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < N; i++)
            {
                var doc = new BsonDocument { { "_id", i } };
                db.Insert("col", doc);
            }
            running = false;
        });

        // query while inserting
        var tb = Task.Factory.StartNew(() =>
        {
            while (running)
            {
                db.Find("col", Query.All()).ToList();
            }
        });

        Task.WaitAll(ta, tb);

        Assert.AreEqual(N + 1, db.Count("col", Query.All()));
    }
}
public void ShrinkTest_Test()
{
    // assertions shared by all stages of the test
    Action<LiteEngine> DoTest = (db) =>
    {
        Assert.AreEqual(1, db.Count("col", null));
        Assert.AreEqual(99, db.UserVersion);
        Assert.IsNotNull(db.GetIndexes("col").FirstOrDefault(x => x.Field == "name"));
        Assert.IsTrue(db.GetIndexes("col").FirstOrDefault(x => x.Field == "name").Unique);
    };

    using (var file = new TempFile())
    {
        using (var db = new LiteEngine(file.Filename))
        {
            db.UserVersion = 99;
            db.EnsureIndex("col", "name", true);
            db.Insert("col", GetDocs(1, 30000));
            db.Delete("col", Query.GT("_id", 1)); // delete 29,999 docs

            Assert.AreEqual(1, db.Count("col", null));

            // the file is still larger than 20 MB (even with only 1 document)
            Assert.IsTrue(file.Size > 20 * 1024 * 1024);

            // reduce the datafile
            db.Shrink();

            // now the file is smaller than 50 KB
            Assert.IsTrue(file.Size < 50 * 1024);

            DoTest(db);
        }

        // re-open the datafile to check that it is ok
        using (var db = new LiteEngine(file.Filename))
        {
            // still 1 doc and 1 unique "name" index
            DoTest(db);

            // shrink again, but now with a password
            var reduced = db.Shrink("abc123");

            // the file stays the same size (but is now encrypted)
            Assert.AreEqual(0, reduced);

            // still 1 doc and 1 unique "name" index
            DoTest(db);
        }

        // re-open again, but now with the password
        using (var db = new LiteEngine(file.Filename, "abc123"))
        {
            DoTest(db);

            // now remove the password
            db.Shrink();

            // test again
            DoTest(db);
        }
    }
}
public void Concurrency_Insert_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.EnsureIndex("col", "thread");

        // insert 5000 x thread=1
        var ta = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < 5000; i++)
            {
                db.Insert("col", new BsonDocument { { "thread", 1 } });
            }
        });

        // insert 4000 x thread=2
        var tb = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < 4000; i++)
            {
                db.Insert("col", new BsonDocument { { "thread", 2 } });
            }
        });

        Task.WaitAll(ta, tb);

        Assert.AreEqual(5000, db.Count("col", Query.EQ("thread", 1)));
        Assert.AreEqual(4000, db.Count("col", Query.EQ("thread", 2)));
    }
}