/// <summary>
/// Find which index will be used and run Execute method
/// </summary>
public virtual async IAsyncEnumerable<IndexNode> Run(CollectionPage col, IndexService indexer)
{
    // get index for this query
    var index = col.GetCollectionIndex(this.Name);

    if (index == null) throw LiteException.IndexNotFound(this.Name);

    var distinct = new HashSet<PageAddress>();

    await foreach (var node in this.Execute(indexer, index))
    {
        // distinct by dataBlock
        if (distinct.Contains(node.DataBlock) == false)
        {
            distinct.Add(node.DataBlock);

            yield return node;
        }
    }
}
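For reference, a minimal self-contained sketch of the distinct-by-key pattern used above: a HashSet filters an IAsyncEnumerable so each data block address is yielded only once. The Node record and string key below are hypothetical stand-ins for IndexNode and PageAddress, used only for illustration.

using System.Collections.Generic;
using System.Threading.Tasks;

public record Node(string DataBlock, string Key);

public static class DistinctByExample
{
    // yield each node whose DataBlock has not been seen before,
    // mirroring the HashSet<PageAddress> filter in Run()
    public static async IAsyncEnumerable<Node> DistinctByDataBlock(IAsyncEnumerable<Node> source)
    {
        var seen = new HashSet<string>();

        await foreach (var node in source)
        {
            // HashSet<T>.Add returns false when the key was already present
            if (seen.Add(node.DataBlock))
            {
                yield return node;
            }
        }
    }
}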
/// <summary>
/// Update document using same page position as reference
/// </summary>
public void Update(CollectionPage col, PageAddress blockAddress, BsonDocument doc)
{
    var bytesLeft = doc.GetBytesCount(true);

    if (bytesLeft > MAX_DOCUMENT_SIZE) throw new LiteException(0, "Document size exceeds {0} limit", MAX_DOCUMENT_SIZE);

    DataBlock lastBlock = null;
    var updateAddress = blockAddress;

    IEnumerable<BufferSlice> source()
    {
        var bytesToCopy = 0;

        while (bytesLeft > 0)
        {
            // if last block contains new block sequence, continue updating
            if (updateAddress.IsEmpty == false)
            {
                var dataPage = _snapshot.GetPage<DataPage>(updateAddress.PageID);
                var currentBlock = dataPage.GetBlock(updateAddress.Index);

                // try get full page size content
                bytesToCopy = Math.Min(bytesLeft, dataPage.FreeBytes + currentBlock.Buffer.Count);

                // get current free slot linked list
                var slot = BasePage.FreeIndexSlot(dataPage.FreeBytes);

                var updateBlock = dataPage.UpdateBlock(currentBlock, bytesToCopy);

                _snapshot.AddOrRemoveFreeList(dataPage, slot);

                yield return updateBlock.Buffer;

                lastBlock = updateBlock;

                // go to next address (if exists)
                updateAddress = updateBlock.NextBlock;
            }
            else
            {
                bytesToCopy = Math.Min(bytesLeft, MAX_DATA_BYTES_PER_PAGE);

                var dataPage = _snapshot.GetFreePage<DataPage>(bytesToCopy + DataBlock.DATA_BLOCK_FIXED_SIZE);
                var insertBlock = dataPage.InsertBlock(bytesToCopy, true);

                if (lastBlock != null)
                {
                    lastBlock.SetNextBlock(insertBlock.Position);
                }

                yield return insertBlock.Buffer;

                lastBlock = insertBlock;
            }

            bytesLeft -= bytesToCopy;
        }

        // old document was bigger than current, must delete extended blocks
        if (lastBlock.NextBlock.IsEmpty == false)
        {
            this.Delete(lastBlock.NextBlock);
        }
    }

    // consume all source bytes to write BsonDocument direct into PageBuffer
    // must be as fast as possible
    using (var w = new BufferWriter(source()))
    {
        // byte count was already calculated at method start
        w.WriteDocument(doc, false);
        w.Consume();
    }
}
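The update strategy above is: reuse the existing block chain while it lasts, allocate new blocks once it is exhausted, then delete any leftover tail. A simplified, self-contained model of that reuse-then-extend-then-trim idea is sketched below; the Block class and its fields are hypothetical and much coarser than LiteDB's DataPage/DataBlock.

using System;
using System.Collections.Generic;

// hypothetical, simplified stand-in for one block in a document chain
public class Block
{
    public int Capacity;
    public int Used;
}

public static class ChainUpdateExample
{
    // rewrite 'chain' so it holds exactly 'bytesLeft' bytes:
    // 1) reuse existing blocks in order, 2) append new blocks when the
    // chain runs out, 3) trim blocks that are no longer needed
    public static void Update(List<Block> chain, int bytesLeft, int maxBytesPerBlock)
    {
        var i = 0;

        while (bytesLeft > 0)
        {
            if (i < chain.Count)
            {
                // reuse an existing block up to its capacity
                var block = chain[i];
                block.Used = Math.Min(bytesLeft, block.Capacity);
                bytesLeft -= block.Used;
            }
            else
            {
                // chain exhausted: allocate a new block (GetFreePage + InsertBlock in LiteDB)
                var used = Math.Min(bytesLeft, maxBytesPerBlock);
                chain.Add(new Block { Capacity = maxBytesPerBlock, Used = used });
                bytesLeft -= used;
            }

            i++;
        }

        // old document was bigger: drop the unused tail (Delete(lastBlock.NextBlock) in LiteDB)
        if (i < chain.Count)
        {
            chain.RemoveRange(i, chain.Count - i);
        }
    }
}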
/// <summary>
/// Implement internal update document
/// </summary>
private bool UpdateDocument(Snapshot snapshot, CollectionPage col, BsonDocument doc, IndexService indexer, DataService data)
{
    // normalize id before find
    var id = doc["_id"];

    // validate id for null, min/max values
    if (id.IsNull || id.IsMinValue || id.IsMaxValue)
    {
        throw LiteException.InvalidDataType("_id", id);
    }

    // find indexNode from pk index
    var pkNode = indexer.Find(col.PK, id, false, LiteDB.Query.Ascending);

    // if document was not found, there is nothing to update
    if (pkNode == null) return false;

    // update data storage
    data.Update(col, pkNode.DataBlock, doc);

    // get all current non-pk index nodes from this data block (slot, key, nodePosition)
    var oldKeys = indexer.GetNodeList(pkNode.NextNode)
        .Select(x => new Tuple<byte, BsonValue, PageAddress>(x.Slot, x.Key, x.Position))
        .ToArray();

    // build a list of all new index keys
    var newKeys = new List<Tuple<byte, BsonValue, string>>();

    foreach (var index in col.GetCollectionIndexes().Where(x => x.Name != "_id"))
    {
        // getting all keys from expression over document
        var keys = index.BsonExpr.Execute(doc, _header.Pragmas.Collation);

        foreach (var key in keys)
        {
            newKeys.Add(new Tuple<byte, BsonValue, string>(index.Slot, key, index.Name));
        }
    }

    if (oldKeys.Length == 0 && newKeys.Count == 0) return true;

    // get a list of all nodes that are in oldKeys but not in newKeys (must delete)
    var toDelete = new HashSet<PageAddress>(oldKeys
        .Where(x => newKeys.Any(n => n.Item1 == x.Item1 && n.Item2 == x.Item2) == false)
        .Select(x => x.Item3));

    // get a list of all keys that are not in oldKeys (must insert)
    var toInsert = newKeys
        .Where(x => oldKeys.Any(o => o.Item1 == x.Item1 && o.Item2 == x.Item2) == false)
        .ToArray();

    // if nothing to change, just exit
    if (toDelete.Count == 0 && toInsert.Length == 0) return true;

    // delete nodes and return last kept node in list
    var last = indexer.DeleteList(pkNode.Position, toDelete);

    // now, insert all new nodes
    foreach (var elem in toInsert)
    {
        var index = col.GetCollectionIndex(elem.Item3);

        last = indexer.AddNode(index, elem.Item2, pkNode.DataBlock, last);
    }

    return true;
}
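The core of the index maintenance above is a set difference over (slot, key) pairs: nodes whose pair disappeared are deleted, keys whose pair is new are inserted. A stand-alone sketch of that diff, with node addresses reduced to int and keys to string purely for illustration:

using System.Collections.Generic;
using System.Linq;

public static class IndexDiffExample
{
    // compute which index nodes must be deleted and which keys must be inserted,
    // comparing only (slot, key) pairs, as UpdateDocument does
    public static (HashSet<int> ToDelete, List<(byte Slot, string Key, string IndexName)> ToInsert) Diff(
        (byte Slot, string Key, int NodeAddress)[] oldKeys,
        List<(byte Slot, string Key, string IndexName)> newKeys)
    {
        // nodes whose (slot, key) no longer appears in the new key set
        var toDelete = new HashSet<int>(oldKeys
            .Where(o => newKeys.Any(n => n.Slot == o.Slot && n.Key == o.Key) == false)
            .Select(o => o.NodeAddress));

        // keys that did not exist before and must be added
        var toInsert = newKeys
            .Where(n => oldKeys.Any(o => o.Slot == n.Slot && o.Key == n.Key) == false)
            .ToList();

        return (toDelete, toInsert);
    }
}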
private IEnumerable<BsonDocument> SysDump(FileOrigin origin)
{
    var collections = _header.GetCollections().ToDictionary(x => x.Value, x => x.Key);

    foreach (var buffer in _disk.ReadFull(origin))
    {
        var page = new BasePage(buffer);
        var pageID = page.PageID;

        if (origin == FileOrigin.Data && buffer.Position > 0 && pageID == 0)
        {
            // fix printed PageID for "bubble" (pre-allocated, never-written) pages in the data file
            pageID = (uint)(buffer.Position / PAGE_SIZE);
        }

        var doc = new BsonDocument();

        doc["_position"] = (int)buffer.Position;
        doc["pageID"] = (int)pageID;
        doc["pageType"] = page.PageType.ToString();
        doc["nextPageID"] = dumpPageID(page.NextPageID);
        doc["prevPageID"] = dumpPageID(page.PrevPageID);
        doc["collection"] = collections.GetOrDefault(page.ColID, "-");
        doc["transactionID"] = (int)page.TransactionID;
        doc["isConfirmed"] = page.IsConfirmed;
        doc["itemsCount"] = (int)page.ItemsCount;
        doc["freeBytes"] = page.FreeBytes;
        doc["usedBytes"] = (int)page.UsedBytes;
        doc["fragmentedBytes"] = (int)page.FragmentedBytes;
        doc["nextFreePosition"] = (int)page.NextFreePosition;
        doc["highestIndex"] = (int)page.HighestIndex;

        if (page.PageType == PageType.Header)
        {
            var header = new HeaderPage(buffer);

            doc["freeEmptyPageID"] = dumpPageID(header.FreeEmptyPageID);
            doc["lastPageID"] = (int)header.LastPageID;
            doc["creationTime"] = header.CreationTime;
            doc["userVersion"] = header.UserVersion;
            doc["collections"] = new BsonDocument(header.GetCollections().ToDictionary(x => x.Key, x => new BsonValue((int)x.Value)));
        }
        else if (page.PageType == PageType.Collection)
        {
            var collection = new CollectionPage(buffer);

            doc["lastAnalyzed"] = collection.LastAnalyzed;
            doc["creationTime"] = collection.CreationTime;
            doc["freeDataPageID"] = new BsonArray(collection.FreeDataPageID.Select(x => dumpPageID(x)));
            doc["freeIndexPageID"] = new BsonArray(collection.FreeIndexPageID.Select(x => dumpPageID(x)));
            doc["indexes"] = new BsonArray(collection.GetCollectionIndexes().Select(x => new BsonDocument
            {
                ["name"] = x.Name,
                ["expression"] = x.Expression,
                ["unique"] = x.Unique,
                ["headPageID"] = dumpPageID(x.Head.PageID),
                ["tailPageID"] = dumpPageID(x.Tail.PageID),
                ["maxLevel"] = (int)x.MaxLevel,
                ["keyCount"] = (int)x.KeyCount,
                ["uniqueKeyCount"] = (int)x.UniqueKeyCount
            }));
        }

        yield return doc;
    }

    BsonValue dumpPageID(uint pageID)
    {
        return pageID == uint.MaxValue ? BsonValue.Null : new BsonValue((int)pageID);
    }
}
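As a usage sketch only (the helper name is hypothetical and assumes LiteDB's BsonValue.AsString/AsInt32 accessors), the documents yielded by SysDump can be aggregated with plain LINQ, for example to total free bytes per page type:

using System.Collections.Generic;
using System.Linq;
using LiteDB;

public static class DumpReportExample
{
    // summarize free space per page type from the documents produced by SysDump
    public static Dictionary<string, int> FreeBytesByPageType(IEnumerable<BsonDocument> dump)
    {
        return dump
            .GroupBy(doc => doc["pageType"].AsString)
            .ToDictionary(g => g.Key, g => g.Sum(doc => doc["freeBytes"].AsInt32));
    }
}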
private IEnumerable<BsonDocument> DumpPages(uint? pageID)
{
    var collections = _header.GetCollections().ToDictionary(x => x.Value, x => x.Key);

    // get any transaction from current thread ID
    var transaction = _monitor.GetThreadTransaction();
    var snapshot = transaction.CreateSnapshot(LockMode.Read, "$", false);

    var start = pageID.HasValue ? pageID.Value : 0;
    var end = pageID.HasValue ? pageID.Value : _header.LastPageID;

    for (uint i = start; i <= Math.Min(end, _header.LastPageID); i++)
    {
        var page = snapshot.GetPage<BasePage>(i, out var origin, out var position, out var walVersion);

        var doc = new BsonDocument
        {
            ["pageID"] = (int)page.PageID,
            ["pageType"] = page.PageType.ToString(),
            ["_position"] = position,
            ["_origin"] = origin.ToString(),
            ["_version"] = walVersion,
            ["prevPageID"] = (int)page.PrevPageID,
            ["nextPageID"] = (int)page.NextPageID,
            ["slot"] = (int)page.PageListSlot,
            ["collection"] = collections.GetOrDefault(page.ColID, "-"),
            ["itemsCount"] = (int)page.ItemsCount,
            ["freeBytes"] = page.FreeBytes,
            ["usedBytes"] = (int)page.UsedBytes,
            ["fragmentedBytes"] = (int)page.FragmentedBytes,
            ["nextFreePosition"] = (int)page.NextFreePosition,
            ["highestIndex"] = (int)page.HighestIndex
        };

        if (page.PageType == PageType.Collection)
        {
            var collectionPage = new CollectionPage(page.Buffer);

            doc["dataPageList"] = new BsonArray(collectionPage.FreeDataPageList.Select(x => new BsonValue((int)x)));
            doc["indexes"] = new BsonArray(collectionPage.GetCollectionIndexes().Select(x => new BsonDocument
            {
                ["slot"] = (int)x.Slot,
                ["empty"] = x.IsEmpty,
                ["indexType"] = (int)x.IndexType,
                ["name"] = x.Name,
                ["expression"] = x.Expression,
                ["unique"] = x.Unique,
                ["head"] = x.Head.ToBsonValue(),
                ["tail"] = x.Tail.ToBsonValue(),
                ["maxLevel"] = (int)x.MaxLevel,
                ["freeIndexPageList"] = (int)x.FreeIndexPageList,
            }));
        }

        if (pageID.HasValue)
        {
            doc["buffer"] = page.Buffer.ToArray();
        }

        yield return doc;

        transaction.Safepoint();
    }
}
/// <summary>
/// Update document using same page position as reference
/// </summary>
public async Task Update(CollectionPage col, PageAddress blockAddress, BsonDocument doc)
{
    var bytesLeft = doc.GetBytesCount(true);

    if (bytesLeft > MAX_DOCUMENT_SIZE) throw new LiteException(0, "Document size exceeds {0} limit", MAX_DOCUMENT_SIZE);

    DataBlock lastBlock = null;
    var updateAddress = blockAddress;

    async IAsyncEnumerable<BufferSlice> source()
    {
        var bytesToCopy = 0;

        while (bytesLeft > 0)
        {
            // if last block contains new block sequence, continue updating
            if (updateAddress.IsEmpty == false)
            {
                var dataPage = await _snapshot.GetPage<DataPage>(updateAddress.PageID);
                var currentBlock = dataPage.GetBlock(updateAddress.Index);

                // try get full page size content (do not add DATA_BLOCK_FIXED_SIZE because it will be added in UpdateBlock)
                bytesToCopy = Math.Min(bytesLeft, dataPage.FreeBytes + currentBlock.Buffer.Count);

                var updateBlock = dataPage.UpdateBlock(currentBlock, bytesToCopy);

                await _snapshot.AddOrRemoveFreeDataList(dataPage);

                yield return updateBlock.Buffer;

                lastBlock = updateBlock;

                // go to next address (if exists)
                updateAddress = updateBlock.NextBlock;
            }
            else
            {
                bytesToCopy = Math.Min(bytesLeft, MAX_DATA_BYTES_PER_PAGE);

                var dataPage = await _snapshot.GetFreeDataPage(bytesToCopy + DataBlock.DATA_BLOCK_FIXED_SIZE);
                var insertBlock = dataPage.InsertBlock(bytesToCopy, true);

                if (lastBlock != null)
                {
                    lastBlock.SetNextBlock(insertBlock.Position);
                }

                await _snapshot.AddOrRemoveFreeDataList(dataPage);

                yield return insertBlock.Buffer;

                lastBlock = insertBlock;
            }

            bytesLeft -= bytesToCopy;
        }

        // old document was bigger than current, must delete extended blocks
        if (lastBlock.NextBlock.IsEmpty == false)
        {
            var nextBlockAddress = lastBlock.NextBlock;

            lastBlock.SetNextBlock(PageAddress.Empty);

            await this.Delete(nextBlockAddress);
        }
    }

    // consume all source bytes to write BsonDocument direct into PageBuffer
    await using (var writer = await BufferWriterAsync.CreateAsync(source()))
    {
        // byte count was already calculated at method start
        await writer.WriteDocument(doc, false);
        await writer.Consume();
    }
}
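The async source() iterator above hands out buffer slices on demand, and the writer fills them in order. A minimal sketch of that pull-based contract, with ArraySegment<byte> standing in for BufferSlice and a hypothetical WriteAsync helper in place of BufferWriterAsync:

using System;
using System.Collections.Generic;
using System.Threading.Tasks;

public static class StreamingWriteExample
{
    // copy 'payload' across a lazily produced sequence of destination segments,
    // pulling a new segment only when the previous one is full -- the same
    // contract the async source() iterator fulfils for its writer
    public static async Task WriteAsync(byte[] payload, IAsyncEnumerable<ArraySegment<byte>> segments)
    {
        var offset = 0;

        await foreach (var segment in segments)
        {
            var count = Math.Min(segment.Count, payload.Length - offset);

            Buffer.BlockCopy(payload, offset, segment.Array, segment.Offset, count);

            offset += count;

            if (offset == payload.Length) break;
        }

        if (offset < payload.Length) throw new InvalidOperationException("source ran out of segments");
    }
}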