/// <summary>
/// Drop a collection - remove all data pages + index pages
/// </summary>
public void Drop(CollectionPage col)
{
    // collect all pages to delete
    var pages = new HashSet<uint>();

    // search for all data pages and index pages
    foreach (var index in col.GetIndexes(true))
    {
        // get all nodes from index
        var nodes = _indexer.FindAll(index, Query.Ascending);

        foreach (var node in nodes)
        {
            // if this is the PK index, add data pages
            if (index.Slot == 0)
            {
                pages.Add(node.DataBlock.PageID);

                // read data block to check if there is any extended page
                var block = _data.GetBlock(node.DataBlock);

                if (block.ExtendPageID != uint.MaxValue)
                {
                    _pager.DeletePage(block.ExtendPageID, true);
                }
            }

            // memory checkpoint
            _trans.CheckPoint();

            // add index page to delete list
            pages.Add(node.Position.PageID);
        }

        // remove head+tail nodes in all indexes
        pages.Add(index.HeadNode.PageID);
        pages.Add(index.TailNode.PageID);
    }

    // and now, let's delete all these pages
    foreach (var pageID in pages)
    {
        // delete page
        _pager.DeletePage(pageID);

        // memory checkpoint
        _trans.CheckPoint();
    }

    // get header page to remove collection from list of links
    var header = _pager.GetPage<HeaderPage>(0);

    header.CollectionPages.Remove(col.CollectionName);

    // set header as dirty after remove
    _pager.SetDirty(header);

    _pager.DeletePage(col.PageID);
}
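// Usage sketch (assumption, not part of this file): this internal service is normally
// reached through LiteDB's public API rather than called directly. The database file
// name and collection name below are placeholders.
//
//     using (var db = new LiteDatabase(@"MyData.db"))
//     {
//         // removes the collection header page plus all of its data and index pages
//         var dropped = db.DropCollection("customers");
//     }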
/// <summary>
/// Fetch documents from the enumerator and add them to the buffer. If the cache recycles, stop reading and resume in another read
/// </summary>
public void Fetch(TransactionService trans, DataService data, BsonReader bsonReader)
{
    // empty document buffer
    this.Documents.Clear();

    // loop until the cache must recycle
    while (trans.CheckPoint() == false)
    {
        // read next node
        this.HasMore = _nodes.MoveNext();

        // if finished, exit loop
        if (this.HasMore == false)
        {
            return;
        }

        // if running ONLY over the index, apply skip/limit before deserialize
        if (_query.UseIndex && _query.UseFilter == false)
        {
            if (--_skip >= 0)
            {
                continue;
            }

            if (--_limit <= -1)
            {
                this.HasMore = false;
                return;
            }
        }

        // get current node
        var node = _nodes.Current;

        // read document from data block
        var buffer = data.Read(node.DataBlock);
        var doc = bsonReader.Deserialize(buffer).AsDocument;

        // if a full scan is required, execute the filter and test the result
        if (_query.UseFilter)
        {
            // execute query condition here - if false, do not add to final results
            if (_query.FilterDocument(doc) == false)
            {
                continue;
            }

            // apply skip/limit after deserialize in full scan
            if (--_skip >= 0)
            {
                continue;
            }

            if (--_limit <= -1)
            {
                this.HasMore = false;
                return;
            }
        }

        // increment position cursor
        _position++;

        // avoid locking again just to check limit
        if (_limit == 0)
        {
            this.HasMore = false;
        }

        this.Documents.Add(doc);
    }
}
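// Usage sketch (assumption, not part of this file): this cursor backs the public Find()
// pipeline. Skip/limit are applied on index nodes before deserialization when the query
// runs purely over an index, and after the filter when a full document scan is needed.
// File, collection and field names below are placeholders.
//
//     using (var db = new LiteDatabase(@"MyData.db"))
//     {
//         var col = db.GetCollection("customers");
//
//         // index-backed query with paging: skip 20 documents, then take 10
//         var page = col.Find(Query.GT("Age", 18), 20, 10);
//     }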