public void AutoIndexEngine_Test()
{
    using (var db = new LiteEngine(new MemoryStream()))
    {
        var doc = new BsonDocument
        {
            ["name"] = "john doe",
            ["age"] = 40
        };

        db.Insert("people", doc);

        var result = db.FindOne("people",
            Query.And(
                Query.EQ("name", "john doe"),
                Query.EQ("age", 40)));

        Assert.AreEqual(doc["name"], result["name"]);

        var indexName = db.GetIndexes("people").FirstOrDefault(x => x.Field == "name");
        var indexAge = db.GetIndexes("people").FirstOrDefault(x => x.Field == "age");

        // indexes are not unique by default when auto-created by LiteEngine
        Assert.AreEqual(false, indexName.Unique);
        Assert.AreEqual(false, indexAge.Unique);
    }
}
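// For contrast with the auto-created (non-unique) indexes above, a minimal sketch of
// declaring a unique index explicitly. This assumes the LiteEngine.EnsureIndex overload
// that takes a unique flag; the exact signature varies between LiteDB versions.
public void UniqueIndexEngine_Sketch()
{
    using (var db = new LiteEngine(new MemoryStream()))
    {
        // assumption: the third parameter marks the index as unique
        db.EnsureIndex("people", "email", true);

        db.Insert("people", new BsonDocument { ["email"] = "john@example.com" });

        var indexEmail = db.GetIndexes("people").FirstOrDefault(x => x.Field == "email");

        // unlike the auto-created indexes in the test above, this one reports Unique == true
        Assert.AreEqual(true, indexEmail.Unique);
    }
}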
/// <summary>
/// Read all collections, indexes and documents inside the current datafile.
/// Drop each index and each collection, then shrink.
/// These steps check/validate all file data.
/// </summary>
private void CheckIntegrity()
{
    using (var db = new LiteEngine(this.Filename))
    {
        var cols = db.GetCollectionNames().ToArray();

        foreach (var col in cols)
        {
            var indexes = db.GetIndexes(col).ToArray();

            foreach (var idx in indexes)
            {
                var q = db.Find(col, Query.All(idx.Field));

                foreach (var doc in q)
                {
                    // document is ok!
                }

                // let's drop this index (unless it's _id)
                if (idx.Field != "_id")
                {
                    db.DropIndex(col, idx.Field);
                }
            }

            // and drop the collection
            db.DropCollection(col);
        }

        // and now shrink
        db.Shrink();
    }
}
public void Index_Order()
{
    using (var tmp = new TempFile())
    using (var db = new LiteEngine(tmp.Filename))
    {
        db.Insert("col", new BsonDocument { { "text", "D" } });
        db.Insert("col", new BsonDocument { { "text", "A" } });
        db.Insert("col", new BsonDocument { { "text", "E" } });
        db.Insert("col", new BsonDocument { { "text", "C" } });
        db.Insert("col", new BsonDocument { { "text", "B" } });

        db.EnsureIndex("col", "text");

        var asc = string.Join("",
            db.Find("col", Query.All("text"))
              .Select(x => x["text"].AsString)
              .ToArray());

        var desc = string.Join("",
            db.Find("col", Query.All("text", Query.Descending))
              .Select(x => x["text"].AsString)
              .ToArray());

        Assert.AreEqual("ABCDE", asc);
        Assert.AreEqual("EDCBA", desc);

        var indexes = db.GetIndexes("col");

        Assert.AreEqual(1, indexes.Count(x => x.Field == "text"));
    }
}
public void Execute(LiteEngine engine, StringScanner s, Display display, InputCommand input, Env env)
{
    var col = this.ReadCollection(engine, s);

    display.WriteResult(new BsonArray(engine.GetIndexes(col).Select(x => new BsonDocument
    {
        { "slot", x.Slot },
        { "field", x.Field },
        { "unique", x.Unique }
    })));
}
public void Checkpoint_TransactionRecovery_Test()
{
    using (var file = new TempFile())
    {
        using (var db = new LiteEngine(new FileDiskService(file.Filename), autocommit: false))
        {
            var log = new StringBuilder();
            db.Log.Level = Logger.CACHE;
            db.Log.Logging += (s) => log.AppendLine(s);

            // initialize "col" with 1000 docs without a transaction
            db.Insert("col", GetDocs(1, 1000));

            // commit now to initialize a new transaction
            db.Commit();

            // insert a lot of docs inside a single collection (will do a checkpoint on disk)
            db.Insert("col", GetDocs(1001, N));

            // update all documents
            db.Update("col", GetDocs(1, N));

            // create a new index
            db.EnsureIndex("col", "type");

            // check that the cache hit a checkpoint
            Assert.IsTrue(log.ToString().Contains("checkpoint"));

            // datafile must be big (because the checkpoint expands the file)
            Assert.IsTrue(file.Size > 30 * 1024 * 1024); // 30 MB

            // delete all docs with _id > 1000
            db.Delete("col", Query.GT("_id", 1000));

            db.DropIndex("col", "type");

            // let's rollback everything
            db.Rollback();

            // make sure the cache is empty
            Assert.AreEqual(0, db.CacheUsed);

            // datafile must return to its original size (less than 1.5 MB for 1000 docs)
            Assert.IsTrue(file.Size < 1.5 * 1024 * 1024); // 1.5 MB

            // check that only the original 1000 docs exist
            Assert.AreEqual(1000, db.Count("col", Query.All()));
            Assert.AreEqual(1000, db.Count("col", null));

            // test indexes (must have only the _id index)
            Assert.AreEqual(1, db.GetIndexes("col").Count());
        }
    }
}
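// The checkpoint test above depends on a GetDocs helper and an N constant defined elsewhere
// in the original test class. A minimal sketch of what they might look like; the field names,
// the value of N and the (first, last) id semantics are assumptions, not the original fixture.
private const int N = 60000;

private IEnumerable<BsonDocument> GetDocs(int first, int last)
{
    for (var i = first; i <= last; i++)
    {
        yield return new BsonDocument
        {
            ["_id"] = i,
            ["type"] = i % 2 == 0 ? "even" : "odd", // "type" is the field the test indexes
            ["content"] = Guid.NewGuid().ToString() // payload; must be large enough overall to pass the 30 MB assertion
        };
    }
}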
public IEnumerable<BsonValue> Execute(StringScanner s, LiteEngine engine)
{
    var col = this.ReadCollection(engine, s);

    s.ThrowIfNotFinish();

    var indexes = engine.GetIndexes(col);

    foreach (var index in indexes)
    {
        yield return new BsonDocument
        {
            { "slot", index.Slot },
            { "field", index.Field },
            { "expression", index.Expression },
            { "unique", index.Unique }
        };
    }
}
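// A minimal sketch of how a shell host might consume the yield-based command above,
// printing each index document as JSON. The IndexesCommand class name and the
// StringScanner usage are assumptions; JsonSerializer is LiteDB's own JSON serializer.
var command = new IndexesCommand(); // assumed name of the class defining Execute above

using (var engine = new LiteEngine(new MemoryStream()))
{
    engine.Insert("col", new BsonDocument { ["name"] = "john" });
    engine.EnsureIndex("col", "name");

    // the scanner carries the remainder of the shell line: here, just the collection name
    foreach (var doc in command.Execute(new StringScanner("col"), engine))
    {
        Console.WriteLine(JsonSerializer.Serialize(doc));
    }
}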