Example #1
        public void Multikey_Count_Test()
        {
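            // verify that multikey Query.All("list", value) counts stay the same
            // before and after dropping and re-creating the index on "list"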
            using (var file = new TempFile())
                using (var db = new LiteEngine(file.Disk(), cacheSize: 10, autocommit: true))
                {
                    // create the index before inserting the documents
                    db.EnsureIndex("col", "list");

                    db.Insert("col", GetDocs(1, 1000, 1, 2, 3));
                    db.Insert("col", GetDocs(1001, 2000, 2, 3));
                    db.Insert("col", GetDocs(2001, 2500, 4));

                    Assert.AreEqual(1000, db.Count("col", Query.All("list", 1)));
                    Assert.AreEqual(2000, db.Count("col", Query.All("list", 2)));
                    Assert.AreEqual(2000, db.Count("col", Query.All("list", 3)));
                    Assert.AreEqual(500, db.Count("col", Query.All("list", 4)));

                    // drop index
                    db.DropIndex("col", "list");

                    // re-create index
                    db.EnsureIndex("col", "list");

                    // count again
                    Assert.AreEqual(1000, db.Count("col", Query.All("list", 1)));
                    Assert.AreEqual(2000, db.Count("col", Query.All("list", 2)));
                    Assert.AreEqual(2000, db.Count("col", Query.All("list", 3)));
                    Assert.AreEqual(500, db.Count("col", Query.All("list", 4)));
                }
        }
Example #2
        public void Execute(LiteEngine engine, StringScanner s, Display display, InputCommand input, Env env)
        {
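            // console command handler: parse the collection name and the index field
            // from the input, then drop that index and print the value DropIndex returns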
            var col = this.ReadCollection(engine, s);
            var index = s.Scan(this.FieldPattern).Trim();

            display.WriteResult(engine.DropIndex(col, index));
        }
Example #3
        /// <summary>
        /// Read all collections, indexes and documents inside the current datafile.
        /// Drop per index, per collection and shrink.
        /// These steps will check/validate all file data.
        /// </summary>
        private void CheckIntegrity()
        {
            using (var db = new LiteEngine(this.Filename))
            {
                var cols = db.GetCollectionNames().ToArray();

                foreach (var col in cols)
                {
                    var indexes = db.GetIndexes(col).ToArray();

                    foreach (var idx in indexes)
                    {
                        var q = db.Find(col, Query.All(idx.Field));

                        foreach (var doc in q)
                        {
                            // document is ok!
                        }

                        // let's drop this index (if it's not _id)
                        if (idx.Field != "_id")
                        {
                            db.DropIndex(col, idx.Field);
                        }
                    }

                    // and drop the collection
                    db.DropCollection(col);
                }

                // and now shrink
                db.Shrink();
            }
        }
Example #5
        public IEnumerable<BsonValue> Execute(StringScanner s, LiteEngine engine)
        {
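            // parse the collection and index field name, validating that the field name is
            // present and that no extra input remains, then yield the result of DropIndex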
            var col   = this.ReadCollection(engine, s);
            var index = s.Scan(this.FieldPattern).Trim().ThrowIfEmpty("Missing field index name", s);

            s.ThrowIfNotFinish();

            yield return engine.DropIndex(col, index);
        }
Example #6
        public void Checkpoint_TransactionRecovery_Test()
        {
            using (var file = new TempFile())
            {
                using (var db = new LiteEngine(new FileDiskService(file.Filename), autocommit: false))
                {
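                    // N is assumed to be a test-class constant holding the total document count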
                    var log = new StringBuilder();
                    db.Log.Level    = Logger.CACHE;
                    db.Log.Logging += (s) => log.AppendLine(s);

                    // initialize "col" with 1000 docs without a transaction
                    db.Insert("col", GetDocs(1, 1000));

                    // commit now to initialize a new transaction
                    db.Commit();

                    // insert a lot of docs inside a single collection (this will trigger a checkpoint to disk)
                    db.Insert("col", GetDocs(1001, N));

                    // update all documents
                    db.Update("col", GetDocs(1, N));

                    // create new index
                    db.EnsureIndex("col", "type");

                    // check that the cache performed a checkpoint
                    Assert.IsTrue(log.ToString().Contains("checkpoint"));

                    // datafile must be big (because the checkpoint expands the file)
                    Assert.IsTrue(file.Size > 30 * 1024 * 1024); // 30 MB

                    // delete all docs > 1000
                    db.Delete("col", Query.GT("_id", 1000));

                    db.DropIndex("col", "type");

                    // let's rollback everything
                    db.Rollback();

                    // be sure the cache is empty
                    Assert.AreEqual(0, db.CacheUsed);

                    // datafile must return to its original size (less than 1.5 MB for 1000 docs)
                    Assert.IsTrue(file.Size < 1.5 * 1024 * 1024); // 1.5 MB

                    // test that only the original 1000 docs exist
                    Assert.AreEqual(1000, db.Count("col", Query.All()));
                    Assert.AreEqual(1000, db.Count("col", null));

                    // test indexes (must have only the _id index)
                    Assert.AreEqual(1, db.GetIndexes("col").Count());
                }
            }
        }