public void Update_IndexNodes()
{
    using (var db = new LiteEngine())
    {
        var doc = new BsonDocument
        {
            ["_id"] = 1,
            ["name"] = "Mauricio",
            ["phones"] = new BsonArray { "51", "11" }
        };

        db.Insert("col1", doc);

        db.EnsureIndex("col1", "idx_name", "name", false);
        db.EnsureIndex("col1", "idx_phones", "phones[*]", false);

        doc["name"] = "David";
        doc["phones"] = new BsonArray { "11", "25" };

        db.Update("col1", doc);

        doc["name"] = "John";

        db.Update("col1", doc);
    }
}
public void Simple_Performance_Runner()
{
    // a simple sanity benchmark for insert/index/update/delete speed
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        var ti = new Stopwatch();
        var tx = new Stopwatch();
        var tu = new Stopwatch();
        var td = new Stopwatch();

        ti.Start();
        db.Insert("col", GetDocs(N1));
        ti.Stop();

        tx.Start();
        db.EnsureIndex("col", "name");
        tx.Stop();

        tu.Start();
        db.Update("col", GetDocs(N1));
        tu.Stop();

        db.EnsureIndex("col", "name");

        td.Start();
        db.Delete("col", Query.All());
        td.Stop();

        Debug.WriteLine("Insert time: " + ti.ElapsedMilliseconds);
        Debug.WriteLine("EnsureIndex time: " + tx.ElapsedMilliseconds);
        Debug.WriteLine("Update time: " + tu.ElapsedMilliseconds);
        Debug.WriteLine("Delete time: " + td.ElapsedMilliseconds);
    }
}
public void Multikey_Count_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Disk(), cacheSize: 10, autocommit: true))
    {
        // create the index before inserting documents
        db.EnsureIndex("col", "list");

        db.Insert("col", GetDocs(1, 1000, 1, 2, 3));
        db.Insert("col", GetDocs(1001, 2000, 2, 3));
        db.Insert("col", GetDocs(2001, 2500, 4));

        // count documents whose "list" array contains each value
        Assert.AreEqual(1000, db.Count("col", Query.EQ("list", 1)));
        Assert.AreEqual(2000, db.Count("col", Query.EQ("list", 2)));
        Assert.AreEqual(2000, db.Count("col", Query.EQ("list", 3)));
        Assert.AreEqual(500, db.Count("col", Query.EQ("list", 4)));

        // drop the index
        db.DropIndex("col", "list");

        // re-create the index
        db.EnsureIndex("col", "list");

        // count again
        Assert.AreEqual(1000, db.Count("col", Query.EQ("list", 1)));
        Assert.AreEqual(2000, db.Count("col", Query.EQ("list", 2)));
        Assert.AreEqual(2000, db.Count("col", Query.EQ("list", 3)));
        Assert.AreEqual(500, db.Count("col", Query.EQ("list", 4)));
    }
}
public void MultiKey_InsertUpdate_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.Insert("col", GetDocs(1, 1, 1, 2, 3));
        db.Insert("col", GetDocs(2, 2, 2, 2, 4));
        db.Insert("col", GetDocs(3, 3, 3));

        // create indexes after the documents are already in the collection
        db.EnsureIndex("col", "list");
        db.EnsureIndex("col", "rnd");

        db.Update("col", GetDocs(2, 2, 9, 9));

        // find documents where "list" contains 2 (only _id 1 remains after the update)
        var r = string.Join(",", db.Find("col", Query.EQ("list", 2)).Select(x => x["_id"].ToString()));

        Assert.AreEqual("1", r);

        Assert.AreEqual(3, db.Count("col", null));
        Assert.AreEqual(3, db.Count("col", Query.All()));

        // 5 index keys: [1, 2, 3], [3], [9]
        var l = string.Join(",", db.FindIndex("col", Query.All("list")));

        Assert.AreEqual("1,2,3,3,9", l);

        // Count must count documents only - not index nodes
        Assert.AreEqual(3, db.Count("col", Query.All("list")));
    }
}
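// The multikey tests above rely on a GetDocs helper that is not shown in this section.
// A minimal sketch, with the signature inferred from calls like GetDocs(1, 1000, 1, 2, 3)
// (hypothetical - the real helper may differ): it yields documents with sequential _id
// values and a "list" array holding the given keys.
private static IEnumerable<BsonDocument> GetDocs(int first, int last, params int[] list)
{
    for (var i = first; i <= last; i++)
    {
        yield return new BsonDocument
        {
            ["_id"] = i,
            // multikey field: the index creates one node per distinct key in this array
            ["list"] = new BsonArray(list.Select(x => new BsonValue(x))),
            // "rnd" is indexed by MultiKey_InsertUpdate_Test; any random string works here
            ["rnd"] = Guid.NewGuid().ToString()
        };
    }
}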
static void ExecuteTest(string name, Action test)
{
    Console.WriteLine("{0} (N = {1})", name, TASKS);

    // delete the datafile before starting
    File.Delete(filename);

    // create an empty database and a collection with an index on "name"
    using (var db = new LiteEngine(filename))
    {
        db.EnsureIndex("collection", "name");
    }

    var s = new Stopwatch();
    s.Start();

    // execute the test
    test();

    s.Stop();

    Console.WriteLine("Time Elapsed (ms): " + s.ElapsedMilliseconds);
    Console.WriteLine();

    // assert that the database is still consistent
    Assert();
}
public void Find_Index_Keys()
{
    using (var db = new LiteEngine(new MemoryStream()))
    {
        db.Insert("col", new BsonDocument { { "Number", 1 } }, BsonType.Int32);
        db.Insert("col", new BsonDocument { { "Number", 2 } }, BsonType.Int32);
        db.Insert("col", new BsonDocument { { "Number", 3 } }, BsonType.Int32);
        db.Insert("col", new BsonDocument { { "Number", 4 } }, BsonType.Int32);
        db.Insert("col", new BsonDocument { { "Number", 5 } }, BsonType.Int32);

        db.EnsureIndex("col", "Number");

        Assert.AreEqual(5, db.FindIndex("col", Query.EQ("Number", 5)).First().AsInt32);
    }
}
public void Query_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.Insert("col", new BsonDocument[]
        {
            new BsonDocument { { "_id", 1 }, { "name", "e" } },
            new BsonDocument { { "_id", 2 }, { "name", "d" } },
            new BsonDocument { { "_id", 3 }, { "name", "c" } },
            new BsonDocument { { "_id", 4 }, { "name", "b" } },
            new BsonDocument { { "_id", 5 }, { "name", "a" } }
        });

        db.EnsureIndex("col", "name");

        Func<Query, string> result = (q) => string.Join(",", db.FindIndex("col", q).Select(x => x.ToString()));

        Assert.AreEqual("1", result(Query.EQ("_id", 1)));
        Assert.AreEqual("4,5", result(Query.GTE("_id", 4)));
        Assert.AreEqual("1", result(Query.LT("_id", 2)));
        Assert.AreEqual("a,b,d,e", result(Query.Not("name", "c")));
        Assert.AreEqual("2,4", result(Query.Where("_id", (v) => v.AsInt32 % 2 == 0)));
    }
}
public void Thread_Insert_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.EnsureIndex("col", "thread");

        // insert 1000 docs with thread=1
        var ta = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < 1000; i++)
            {
                db.Insert("col", new BsonDocument { { "thread", 1 } });
            }
        });

        // insert 700 docs with thread=2
        var tb = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < 700; i++)
            {
                db.Insert("col", new BsonDocument { { "thread", 2 } });
            }
        });

        Task.WaitAll(ta, tb);

        Assert.AreEqual(1000, db.Count("col", Query.EQ("thread", 1)));
        Assert.AreEqual(700, db.Count("col", Query.EQ("thread", 2)));
    }
}
public void Loop_Test()
{
    using (var tmp = new TempFile())
    {
        using (var db = new LiteEngine(tmp.Filename))
        {
            db.Insert("col", new BsonDocument { { "Number", 1 } });
            db.Insert("col", new BsonDocument { { "Number", 2 } });
            db.Insert("col", new BsonDocument { { "Number", 3 } });
            db.Insert("col", new BsonDocument { { "Number", 4 } });
        }

        using (var db = new LiteEngine(tmp.Filename))
        {
            foreach (var doc in db.Find("col", Query.All()))
            {
                doc["Name"] = "John";
                db.Update("col", doc);
            }

            db.EnsureIndex("col", "Name");

            var all = db.Find("col", Query.EQ("Name", "John"));

            Assert.AreEqual(4, all.Count());
        }
    }
}
public void Index_Order()
{
    using (var tmp = new TempFile())
    using (var db = new LiteEngine(tmp.Filename))
    {
        db.Insert("col", new BsonDocument { { "text", "D" } });
        db.Insert("col", new BsonDocument { { "text", "A" } });
        db.Insert("col", new BsonDocument { { "text", "E" } });
        db.Insert("col", new BsonDocument { { "text", "C" } });
        db.Insert("col", new BsonDocument { { "text", "B" } });

        db.EnsureIndex("col", "text");

        var asc = string.Join("", db.Find("col", Query.All("text"))
            .Select(x => x["text"].AsString)
            .ToArray());

        var desc = string.Join("", db.Find("col", Query.All("text", Query.Descending))
            .Select(x => x["text"].AsString)
            .ToArray());

        Assert.AreEqual("ABCDE", asc);
        Assert.AreEqual("EDCBA", desc);

        var indexes = db.GetIndexes("col");

        Assert.AreEqual(1, indexes.Count(x => x.Field == "text"));
    }
}
public void Delete_Query()
{
    using (var file = new TempFile())
    {
        var initial = new DateTime(2000, 01, 01);

        using (var db = new LiteEngine(file.Filename))
        {
            for (var i = 0; i < 5000; i++)
            {
                db.Insert("col", new BsonDocument { { "dt", initial.AddDays(i) } });
            }

            db.EnsureIndex("col", "dt");

            Assert.AreEqual(5000, db.Count("col"));

            // "dd" is a nonexistent field, so the query must match no documents
            Assert.AreEqual(0, db.Count("col", Query.GT("dd", initial)));

            // delete with the same non-matching query must remove nothing
            var del = db.Delete("col", Query.GT("dd", initial));

            Assert.AreEqual(0, del);

            Assert.AreEqual(0, db.Count("col", Query.GT("dd", initial)));
        }
    }
}
public void Engine_QueryUpdate_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.EnsureIndex("col", "name");

        // insert 4 documents
        db.Insert("col", new BsonDocument { { "_id", 1 } });
        db.Insert("col", new BsonDocument { { "_id", 2 } });
        db.Insert("col", new BsonDocument { { "_id", 3 } });
        db.Insert("col", new BsonDocument { { "_id", 4 } });

        // query all documents and update the name
        foreach (var d in db.Find("col", Query.All()))
        {
            d["name"] = "john";
            db.Update("col", d);
        }

        // this tests that the same thread can hold a read lock and then upgrade to a write lock
        Assert.AreEqual(4, db.Count("col", Query.EQ("name", "john")));
    }
}
public void Populate(IEnumerable<BsonDocument> docs)
{
    // create the index before inserting
    _engine.EnsureIndex("col", "age");

    // bulk data insert
    _engine.Insert("col", docs);
}
public void Checkpoint_TransactionRecovery_Test()
{
    using (var file = new TempFile())
    {
        using (var db = new LiteEngine(new FileDiskService(file.Filename), autocommit: false))
        {
            var log = new StringBuilder();
            db.Log.Level = Logger.CACHE;
            db.Log.Logging += (s) => log.AppendLine(s);

            // initialize "col" with 1000 docs without a transaction
            db.Insert("col", GetDocs(1, 1000));

            // commit now to start a new transaction
            db.Commit();

            // insert a lot of docs into a single collection (this checkpoints to disk)
            db.Insert("col", GetDocs(1001, N));

            // update all documents
            db.Update("col", GetDocs(1, N));

            // create a new index
            db.EnsureIndex("col", "type");

            // check that the cache did a checkpoint
            Assert.IsTrue(log.ToString().Contains("checkpoint"));

            // the datafile must be big (the checkpoint expands the file)
            Assert.IsTrue(file.Size > 30 * 1024 * 1024); // 30 MB

            // delete all docs with _id > 1000
            db.Delete("col", Query.GT("_id", 1000));

            db.DropIndex("col", "type");

            // now roll back everything
            db.Rollback();

            // make sure the cache is empty
            Assert.AreEqual(0, db.CacheUsed);

            // the datafile must return to its original size (less than 1.5 MB for 1000 docs)
            Assert.IsTrue(file.Size < 1.5 * 1024 * 1024); // 1.5 MB

            // test that only the original 1000 docs exist
            Assert.AreEqual(1000, db.Count("col", Query.All()));
            Assert.AreEqual(1000, db.Count("col", null));

            // test indexes (must have only the _id index)
            Assert.AreEqual(1, db.GetIndexes("col").Count());
        }
    }
}
public void Performance_Test()
{
    // a simple sanity benchmark with an explicit commit after each operation
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        var ti = new Stopwatch();
        var tx = new Stopwatch();
        var tu = new Stopwatch();
        var td = new Stopwatch();

        ti.Start();
        db.Insert("col", GetDocs(N1));
        db.Commit();
        ti.Stop();

        tx.Start();
        db.EnsureIndex("col", "name");
        db.Commit();
        tx.Stop();

        tu.Start();
        db.Update("col", GetDocs(N1));
        db.Commit();
        tu.Stop();

        db.EnsureIndex("col", "name");
        db.Commit();

        td.Start();
        db.Delete("col", Query.All());
        db.Commit();
        td.Stop();

        Debug.Print("Insert time: " + ti.ElapsedMilliseconds);
        Debug.Print("EnsureIndex time: " + tx.ElapsedMilliseconds);
        Debug.Print("Update time: " + tu.ElapsedMilliseconds);
        Debug.Print("Delete time: " + td.ElapsedMilliseconds);
    }
}
public void Checkpoint_Recovery_Test()
{
    using (var file = new TempFile())
    {
        // init with N docs with type=1
        using (var db = new LiteEngine(file.Filename))
        {
            db.EnsureIndex("col", "type");
            db.Insert("col", GetDocs(1, N, type: 1));

            Assert.AreEqual(N, db.Count("col", Query.EQ("type", 1)));
        }

        // re-open and try to update all docs to type=2
        using (var db = new LiteEngine(file.Filename))
        {
            var log = new StringBuilder();
            db.Log.Level = Logger.CACHE;
            db.Log.Logging += (s) => log.AppendLine(s);

            try
            {
                // try to update all docs to type=2,
                // but an exception is thrown before the operation finishes
                db.Update("col", GetDocs(1, N, type: 2, throwAtEnd: true));
            }
            catch (Exception ex)
            {
                if (!ex.Message.Contains("Try Recovery!")) Assert.Fail(ex.Message);
            }

            // check that the cache did a checkpoint
            Assert.IsTrue(log.ToString().Contains("checkpoint"));

            // re-check that all docs are still type=1
            Assert.AreEqual(N, db.Count("col", Query.EQ("type", 1)));
            Assert.AreEqual(0, db.Count("col", Query.EQ("type", 2)));
        }

        // re-open the datafile to be sure it contains only type=1 docs
        using (var db = new LiteEngine(file.Filename))
        {
            Assert.AreEqual(N, db.Count("col", Query.EQ("type", 1)));
            Assert.AreEqual(0, db.Count("col", Query.EQ("type", 2)));
        }
    }
}
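// The checkpoint tests use another GetDocs overload with "type" and "throwAtEnd"
// parameters. A minimal sketch, assuming sequential _id docs and a failure injected at
// the end of the enumeration (hypothetical - the real helper probably emits larger
// documents so the cache actually reaches its checkpoint limit):
private static IEnumerable<BsonDocument> GetDocs(int first, int last, int type = 1, bool throwAtEnd = false)
{
    for (var i = first; i <= last; i++)
    {
        yield return new BsonDocument
        {
            ["_id"] = i,
            ["type"] = type,
            ["name"] = Guid.NewGuid().ToString()
        };
    }

    // simulates a crash in the middle of a bulk update (see Checkpoint_Recovery_Test)
    if (throwAtEnd) throw new Exception("Try Recovery!");
}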
public void Thread_InsertUpdate_Test()
{
    const int N = 3000;

    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.EnsureIndex("col", "updated");

        Assert.AreEqual(0, db.Count("col", Query.EQ("updated", true)));

        // task A: insert N basic documents
        var ta = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < N; i++)
            {
                var doc = new BsonDocument { { "_id", i } };
                db.Insert("col", doc);
            }
        });

        // task B: update each document as soon as it exists (retry until all N are updated)
        var tb = Task.Factory.StartNew(() =>
        {
            var i = 0;
            while (i < N)
            {
                var doc = new BsonDocument
                {
                    { "_id", i },
                    { "updated", true },
                    { "name", TempFile.LoremIpsum(5, 10, 1, 5, 1) }
                };

                if (db.Update("col", doc)) i++;
            }
        });

        Task.WaitAll(ta, tb);

        Assert.AreEqual(N, db.Count("col", Query.EQ("updated", true)));
    }
}
public void Concurrency_InsertUpdate_Test()
{
    const int N = 3000;

    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.EnsureIndex("col", "updated");

        Assert.AreEqual(0, db.Count("col", Query.EQ("updated", true)));

        // task A: insert N basic documents
        var ta = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < N; i++)
            {
                var doc = new BsonDocument { { "_id", i } };
                db.Insert("col", doc);
            }
        });

        // task B: update each document as soon as it exists (retry until all N are updated)
        var tb = Task.Factory.StartNew(() =>
        {
            var i = 0;
            while (i < N)
            {
                var doc = new BsonDocument
                {
                    { "_id", i },
                    { "updated", true },
                    { "name", TempFile.LoremIpsum(5, 10, 1, 5, 1) }
                };

                if (db.Update("col", doc)) i++;
            }
        });

        Task.WaitAll(ta, tb);

        Assert.AreEqual(N, db.Count("col", Query.EQ("updated", true)));
    }
}
public void Process_Insert_Delete()
{
    using (var file = new TempFile())
    {
        using (var dbA = new LiteEngine(file.Filename))
        using (var dbB = new LiteEngine(file.Filename))
        {
            dbA.EnsureIndex("col", "process", false);

            // instance A inserts 1000 docs with process=1
            var ta = Task.Factory.StartNew(() =>
            {
                for (var i = 0; i < 1000; i++)
                {
                    dbA.Insert("col", new BsonDocument { { "process", 1 } });
                }
            });

            // instance B keeps deleting all docs
            var tb = Task.Factory.StartNew(() =>
            {
                // wait until the inserts have started
                while (dbB.Count("col", Query.EQ("process", 1)) == 0)
                {
                    Task.Delay(50).Wait();
                }

                // keep deleting while matching docs still exist
                while (dbB.Count("col", Query.EQ("process", 1)) > 0)
                {
                    dbB.Delete("col", Query.All());
                    Task.Delay(50).Wait();
                }
            });

            Task.WaitAll(ta, tb);

            Assert.AreEqual(0, dbA.Count("col", Query.EQ("process", 1)));
            Assert.AreEqual(0, dbB.Count("col", Query.EQ("process", 1)));
        }
    }
}
public void Loop_With_Update()
{
    using (var tmp = new TempFile())
    {
        // initialize the database with 5 documents
        using (var db = new LiteEngine(tmp.Filename))
        {
            db.Insert("col", new BsonDocument { { "Number", 1 } }, BsonType.Int32);
            db.Insert("col", new BsonDocument { { "Number", 2 } }, BsonType.Int32);
            db.Insert("col", new BsonDocument { { "Number", 3 } }, BsonType.Int32);
            db.Insert("col", new BsonDocument { { "Number", 4 } }, BsonType.Int32);
            db.Insert("col", new BsonDocument { { "Number", 5 } }, BsonType.Int32);
        }

        using (var db = new LiteEngine(tmp.Filename))
        {
            foreach (var doc in db.Find("col", Query.All(), 0, 1000))
            {
                var id = doc["_id"];

                doc["Name"] = "John";

                // inside this update, the locker must be upgraded to write mode
                db.Update("col", doc);
            }

            db.EnsureIndex("col", "Name");

            var all = db.Find("col", Query.EQ("Name", "John"));

            Assert.AreEqual(5, all.Count());
        }
    }
}
public void Checkpoint_Index_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        // insert N basic documents
        db.Insert("col", GetDocs(1, N));

        var log = new StringBuilder();
        db.Log.Level = Logger.CACHE;
        db.Log.Logging += (s) => log.AppendLine(s);

        // creating an index over N documents must trigger a checkpoint
        db.EnsureIndex("col", "name");

        Assert.IsTrue(log.ToString().Contains("checkpoint"));

        Assert.AreEqual(N, db.Count("col", Query.All()));
    }
}
public void Process_Multi_Insert()
{
    using (var file = new TempFile())
    {
        using (var dbA = new LiteEngine(file.Filename))
        using (var dbB = new LiteEngine(file.Filename))
        {
            dbA.EnsureIndex("col", "process", false);

            // instance A inserts 1000 docs with process=1
            var ta = Task.Factory.StartNew(() =>
            {
                for (var i = 0; i < 1000; i++)
                {
                    dbA.Insert("col", new BsonDocument { { "process", 1 } });
                }
            });

            // instance B inserts 700 docs with process=2
            var tb = Task.Factory.StartNew(() =>
            {
                for (var i = 0; i < 700; i++)
                {
                    dbB.Insert("col", new BsonDocument { { "process", 2 } });
                }
            });

            Task.WaitAll(ta, tb);

            // both instances must see all documents
            Assert.AreEqual(1000, dbA.Count("col", Query.EQ("process", 1)));
            Assert.AreEqual(700, dbA.Count("col", Query.EQ("process", 2)));
            Assert.AreEqual(1000, dbB.Count("col", Query.EQ("process", 1)));
            Assert.AreEqual(700, dbB.Count("col", Query.EQ("process", 2)));
        }
    }
}
public void Process_Insert_Count()
{
    using (var file = new TempFile())
    {
        using (var dbA = new LiteEngine(file.Filename))
        using (var dbB = new LiteEngine(file.Filename))
        {
            dbA.EnsureIndex("col", "process", false);

            // instance A inserts 1000 docs with process=1
            var ta = Task.Factory.StartNew(() =>
            {
                for (var i = 0; i < 1000; i++)
                {
                    dbA.Insert("col", new BsonDocument { { "process", 1 } });
                }
            });

            // instance B keeps querying until all 1000 docs are found
            var tb = Task.Factory.StartNew(() =>
            {
                var count = 0L;

                while (count < 1000)
                {
                    // force a query over all rows
                    count = dbB.Count("col", Query.EQ("process", 1));
                    Task.Delay(50).Wait();
                }
            });

            Task.WaitAll(ta, tb);

            Assert.AreEqual(1000, dbA.Count("col", Query.EQ("process", 1)));
            Assert.AreEqual(1000, dbB.Count("col", Query.EQ("process", 1)));
        }
    }
}
public IEnumerable<BsonValue> Execute(StringScanner s, LiteEngine engine)
{
    var col = this.ReadCollection(engine, s);
    var field = s.Scan(this.FieldPattern).Trim().ThrowIfEmpty("Invalid field/index name", s);
    var unique = false;
    string expression = null;

    s.Scan(@"\s*");

    if (s.HasTerminated == false)
    {
        unique = s.Scan(@"unique\s*").Length > 0;

        if (s.Scan(@"\s*using\s+(.+)").Length > 0)
        {
            expression = BsonExpression.ReadExpression(s, true, false)?.Source;
        }
    }

    s.ThrowIfNotFinish();

    yield return engine.EnsureIndex(col, field, unique, expression);
}
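// For reference, the grammar accepted by the parser above, as inferred from its scans
// (the exact shell command prefix is an assumption, not confirmed by this section):
//
//   db.<collection>.ensureIndex <field>                         -- non-unique index
//   db.<collection>.ensureIndex <field> unique                  -- unique index
//   db.<collection>.ensureIndex <field> using <expression>      -- index over an expression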
public void Execute(LiteEngine engine, StringScanner s, Display display, InputCommand input, Env env)
{
    var col = this.ReadCollection(engine, s);
    var field = s.Scan(this.FieldPattern).Trim().ThrowIfEmpty("Invalid field name");
    var unique = false;

    s.Scan(@"\s*");

    if (s.HasTerminated == false)
    {
        var options = JsonSerializer.Deserialize(s.ToString());

        if (options.IsBoolean)
        {
            unique = options.AsBoolean;
        }
        else if (options.IsDocument) // support index definitions from older versions
        {
            unique = options.AsDocument["unique"].AsBoolean;
        }
    }

    display.WriteResult(engine.EnsureIndex(col, field, unique));
}
public void ShrinkTest_Test()
{
    // shared assertions: 1 doc, UserVersion and the unique "name" index must all survive
    Action<LiteEngine> DoTest = (db) =>
    {
        Assert.AreEqual(1, db.Count("col", null));
        Assert.AreEqual(99, db.UserVersion);
        Assert.IsNotNull(db.GetIndexes("col").FirstOrDefault(x => x.Field == "name"));
        Assert.IsTrue(db.GetIndexes("col").FirstOrDefault(x => x.Field == "name").Unique);
    };

    using (var file = new TempFile())
    {
        using (var db = new LiteEngine(file.Filename))
        {
            db.UserVersion = 99;
            db.EnsureIndex("col", "name", true);
            db.Insert("col", GetDocs(1, 30000));
            db.Delete("col", Query.GT("_id", 1)); // delete 29,999 docs

            Assert.AreEqual(1, db.Count("col", null));

            // the file is still larger than 20 MB (even with only 1 document)
            Assert.IsTrue(file.Size > 20 * 1024 * 1024);

            // reduce the datafile
            db.Shrink();

            // now the file is smaller than 50 KB
            Assert.IsTrue(file.Size < 50 * 1024);

            DoTest(db);
        }

        // re-open the datafile to check that it is still ok
        using (var db = new LiteEngine(file.Filename))
        {
            // still 1 doc and 1 unique "name" index
            DoTest(db);

            // shrink again, but now with a password
            var reduced = db.Shrink("abc123");

            // the file stays the same size (but is now encrypted)
            Assert.AreEqual(0, reduced);

            // still 1 doc and 1 unique "name" index
            DoTest(db);
        }

        // re-open again, but now with the password
        using (var db = new LiteEngine(file.Filename, "abc123"))
        {
            DoTest(db);

            // now remove the password
            db.Shrink();

            // test again
            DoTest(db);
        }
    }
}
public void AutoId_No_Duplicate_After_Delete()
{
    // using strongly typed documents
    using (var db = new LiteDatabase(new MemoryStream()))
    {
        var col = db.GetCollection<EntityInt>("col1");

        col.EnsureIndex(x => x.Name);

        col.Insert(new EntityInt { Name = "One" });
        col.Insert(new EntityInt { Name = "Two" });

        var one = col.FindOne(x => x.Name == "One");
        var two = col.FindOne(x => x.Name == "Two");

        Assert.AreEqual(1, one.Id);
        Assert.AreEqual(2, two.Id);

        // now delete the first 2 rows
        col.Delete(one.Id);
        col.Delete(two.Id);

        // and insert new documents
        col.Insert(new EntityInt { Name = "Three" });
        col.Insert(new EntityInt { Name = "Four" });

        var three = col.FindOne(x => x.Name == "Three");
        var four = col.FindOne(x => x.Name == "Four");

        Assert.AreEqual(3, three.Id);
        Assert.AreEqual(4, four.Id);
    }

    // using BsonDocument via the engine
    using (var db = new LiteEngine(new MemoryStream()))
    {
        db.EnsureIndex("col", "Name");

        db.Insert("col", new BsonDocument { ["Name"] = "One" }, BsonType.Int32);
        db.Insert("col", new BsonDocument { ["Name"] = "Two" }, BsonType.Int32);

        var one = db.FindOne("col", Query.EQ("Name", "One"));
        var two = db.FindOne("col", Query.EQ("Name", "Two"));

        Assert.AreEqual(1, one["_id"].AsInt32);
        Assert.AreEqual(2, two["_id"].AsInt32);

        // now delete the first 2 rows
        db.Delete("col", one["_id"].AsInt32);
        db.Delete("col", two["_id"].AsInt32);

        // and insert new documents
        db.Insert("col", new BsonDocument { ["Name"] = "Three" }, BsonType.Int32);
        db.Insert("col", new BsonDocument { ["Name"] = "Four" }, BsonType.Int32);

        var three = db.FindOne("col", Query.EQ("Name", "Three"));
        var four = db.FindOne("col", Query.EQ("Name", "Four"));

        Assert.AreEqual(3, three["_id"].AsInt32);
        Assert.AreEqual(4, four["_id"].AsInt32);
    }
}
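// EntityInt is not defined in this section. A minimal POCO sketch matching what the
// test exercises (hypothetical - only Id and Name are used): an Int32 auto-id plus a name.
public class EntityInt
{
    public int Id { get; set; }       // auto-id: assigned 1, 2, 3... on insert
    public string Name { get; set; }
}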
public void Concurrency_Insert_Test()
{
    using (var file = new TempFile())
    using (var db = new LiteEngine(file.Filename))
    {
        db.EnsureIndex("col", "thread");

        // insert 5000 docs with thread=1
        var ta = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < 5000; i++)
            {
                db.Insert("col", new BsonDocument { { "thread", 1 } });
            }
        });

        // insert 4000 docs with thread=2
        var tb = Task.Factory.StartNew(() =>
        {
            for (var i = 0; i < 4000; i++)
            {
                db.Insert("col", new BsonDocument { { "thread", 2 } });
            }
        });

        Task.WaitAll(ta, tb);

        Assert.AreEqual(5000, db.Count("col", Query.EQ("thread", 1)));
        Assert.AreEqual(4000, db.Count("col", Query.EQ("thread", 2)));
    }
}
public void Culture_Ordinal_Sort()
{
    // 1046 = pt-BR
    var collation = new Collation(1046, CompareOptions.IgnoreCase);
    var s = new EngineSettings { DataStream = new MemoryStream() };

    var names = new string[] { "Ze", "Ana", "Ána", "Ánã", "Ana Paula", "ana lucia" };

    // expected results computed with LINQ using the same collation
    var sortByLinq = names.OrderBy(x => x, collation).ToArray();
    var findByLinq = names.Where(x => collation.Compare(x, "ANA") == 0).ToArray();

    using (var e = new LiteEngine(s))
    {
        //e.Rebuild(new RebuildOptions { Collation = collation });

        e.Insert("col1", names.Select(x => new BsonDocument { ["name"] = x }), BsonAutoId.Int32);

        // sort by merge sort (no index yet)
        var sortByOrderByName = e.Query("col1", new Query { OrderBy = "name" })
            .ToEnumerable()
            .Select(x => x["name"].AsString)
            .ToArray();

        var query = new Query();
        query.Where.Add("name = 'ANA'");

        // find by expression
        var findByExpr = e.Query("col1", query)
            .ToEnumerable()
            .Select(x => x["name"].AsString)
            .ToArray();

        sortByOrderByName.Should().BeEquivalentTo(sortByLinq);
        findByExpr.Should().BeEquivalentTo(findByLinq);

        // index test
        e.EnsureIndex("col1", "idx_name", "name", false);

        // sort by index
        var sortByIndexName = e.Query("col1", new Query { OrderBy = "name" })
            .ToEnumerable()
            .Select(x => x["name"].AsString)
            .ToArray();

        // find by index
        var findByIndex = e.Query("col1", query)
            .ToEnumerable()
            .Select(x => x["name"].AsString)
            .ToArray();

        sortByIndexName.Should().BeEquivalentTo(sortByLinq);
        findByIndex.Should().BeEquivalentTo(findByLinq);
    }
}
public void CreateIndex()
{
    _db.EnsureIndex("col", "name", false);
}
public void ExecuteQuery(bool createIndex)
{
    using (var db = new LiteEngine(new MemoryStream()))
    {
        db.Insert("col", new BsonDocument[]
        {
            new BsonDocument { ["age"] = 1, ["name"] = "a" },
            new BsonDocument { ["age"] = 2, ["name"] = "b" },
            new BsonDocument { ["age"] = 3, ["name"] = "c" },
            new BsonDocument { ["age"] = 4, ["name"] = "d" },
            new BsonDocument { ["age"] = 5, ["name"] = "e" },
            new BsonDocument { ["age"] = 6, ["name"] = "f" },
            new BsonDocument { ["age"] = 7, ["name"] = "g" },
            new BsonDocument { ["age"] = 8, ["name"] = "h" },
            new BsonDocument { ["age"] = 9, ["name"] = "i" },
            new BsonDocument { ["age"] = 9, ["name"] = "j" }
        });

        if (createIndex)
        {
            db.EnsureIndex("col", "age");
            db.EnsureIndex("col", "name");
        }

        Func<Query, string> result = (q) => string.Join(",", db.Find("col", q).Select(x => x["name"].AsString));

        Assert.AreEqual("a,b,c,d,e,f,g,h,i,j", result(Query.All()));
        Assert.AreEqual("a", result(Query.EQ("age", 1)));
        Assert.AreEqual("g", result(Query.EQ("age", 7)));
        Assert.AreEqual("h,i,j", result(Query.GT("age", 7)));
        Assert.AreEqual("g,h,i,j", result(Query.GTE("age", 7)));
        Assert.AreEqual("", result(Query.LT("age", 1)));
        Assert.AreEqual("a", result(Query.LTE("age", 1)));
        Assert.AreEqual("g,h,i,j", result(Query.Between("age", 7, 9)));
        Assert.AreEqual("a,b,c,d,e,f,g,h", result(Query.Not("age", 9)));
        Assert.AreEqual("a", result(Query.Not(Query.GTE("age", 2))));
        Assert.AreEqual("a,g,i,j", result(Query.In("age", 1, 7, 9)));
        Assert.AreEqual("a", result(Query.StartsWith("name", "a")));
        Assert.AreEqual("j", result(Query.And(Query.EQ("age", 9), Query.EQ("name", "j"))));
        Assert.AreEqual("j", result(Query.And(Query.GTE("age", 1), Query.And(Query.LTE("age", 9), Query.EQ("name", "j")))));
        Assert.AreEqual("j", result(Query.And(Query.GTE("age", 1), Query.LTE("age", 9), Query.EQ("name", "j"))));
        Assert.AreEqual("a,i,j", result(Query.Or(Query.EQ("age", 1), Query.EQ("age", 9))));
        Assert.AreEqual("b,d,f,h", result(Query.Where("age", (v) => v.AsInt32 % 2 == 0)));
    }
}