/// <summary>
/// Read page bytes from disk. When the stream is not initialized and the header
/// page (pageID 0) is requested, returns a freshly rendered in-memory header instead.
/// </summary>
/// <param name="pageID">Zero-based page id to read.</param>
/// <returns>A PAGE_SIZE byte array with the page content (zero-padded past EOF).</returns>
public virtual byte[] ReadPage(uint pageID)
{
    // if stream is not initialized but caller needs the header, create a new header
    if (_stream == null && pageID == 0)
    {
        var header = new HeaderPage { LastPageID = 1 };

        return header.WritePage();
    }
    else if (_stream == null)
    {
        this.InternalInitialize();
    }

    var buffer = new byte[BasePage.PAGE_SIZE];
    var position = BasePage.GetSizeOfPages(pageID);

    // position cursor only when not already at the page offset
    if (_stream.Position != position)
    {
        _stream.Seek(position, SeekOrigin.Begin);
    }

    // Stream.Read may return fewer bytes than requested (CA2022) - loop until a
    // full page has been read or EOF is reached (remaining bytes stay zeroed)
    var read = 0;

    while (read < BasePage.PAGE_SIZE)
    {
        var count = _stream.Read(buffer, read, BasePage.PAGE_SIZE - read);

        if (count == 0) break;

        read += count;
    }

    return buffer;
}
/// <summary>
/// Returns the size (in bytes) occupied by the specified number of pages.
/// </summary>
/// <param name="pageCount">The page count; must be non-negative.</param>
/// <returns>Total byte size for <paramref name="pageCount"/> pages.</returns>
/// <exception cref="ArgumentOutOfRangeException">Thrown when <paramref name="pageCount"/> is negative.</exception>
public static long GetSizeOfPages(int pageCount)
{
    if (pageCount < 0)
    {
        // use nameof so the parameter name in the exception survives renames
        throw new ArgumentOutOfRangeException(nameof(pageCount), "Could not be less than 0.");
    }

    // delegate to the uint overload that performs the actual size computation
    return BasePage.GetSizeOfPages((uint)pageCount);
}
/// <summary>
/// Persist single page bytes to disk
/// </summary>
public virtual void WritePage(uint pageID, byte[] buffer)
{
    var target = BasePage.GetSizeOfPages(pageID);

    _log.Write(Logger.DISK, "write page #{0:0000} :: {1}", pageID, (PageType)buffer[PAGE_TYPE_POSITION]);

    // seek only when the cursor is not already at the target page offset
    if (_stream.Position != target)
    {
        _stream.Seek(target, SeekOrigin.Begin);
    }

    // write exactly one page worth of bytes
    _stream.Write(buffer, 0, BasePage.PAGE_SIZE);
}
/// <summary>
/// Persist single page bytes to disk
/// </summary>
public virtual void WritePage(uint pageID, byte[] buffer)
{
    // lazy-open the underlying stream on first write
    if (_stream == null)
    {
        this.InternalInitialize();
    }

    var target = BasePage.GetSizeOfPages(pageID);

    // avoid a redundant seek when the cursor already sits at the page offset
    if (_stream.Position != target)
    {
        _stream.Seek(target, SeekOrigin.Begin);
    }

    _stream.Write(buffer, 0, BasePage.PAGE_SIZE);
}
/// <summary>
/// Read page bytes from disk.
/// </summary>
/// <param name="pageID">Zero-based page id to read.</param>
/// <returns>A PAGE_SIZE byte array with the page content (zero-padded past EOF).</returns>
public virtual byte[] ReadPage(uint pageID)
{
    var buffer = new byte[BasePage.PAGE_SIZE];
    var position = BasePage.GetSizeOfPages(pageID);

    // position cursor only when not already at the page offset
    if (_stream.Position != position)
    {
        _stream.Seek(position, SeekOrigin.Begin);
    }

    // Stream.Read may return fewer bytes than requested (CA2022) - loop until a
    // full page has been read or EOF is reached (remaining bytes stay zeroed)
    var read = 0;

    while (read < BasePage.PAGE_SIZE)
    {
        var count = _stream.Read(buffer, read, BasePage.PAGE_SIZE - read);

        if (count == 0) break;

        read += count;
    }

    _log.Write(Logger.DISK, "read page #{0:0000} :: {1}", pageID, (PageType)buffer[PAGE_TYPE_POSITION]);

    return buffer;
}
/// <summary>
/// Get internal information about database. Can filter collections
/// </summary>
/// <returns>A BsonDocument with header metadata and a per-collection summary array.</returns>
public BsonDocument Info()
{
    // hold a read lock so header/collection pages are stable while building the report
    using (_locker.Read())
    {
        // header page is always page 0
        var header = _pager.GetPage<HeaderPage>(0);
        var collections = new BsonArray();

        // build one summary document per collection registered in the header
        foreach (var colName in header.CollectionPages.Keys)
        {
            // second arg false — presumably "do not create if missing"; TODO confirm against GetCollectionPage
            var col = this.GetCollectionPage(colName, false);

            var colDoc = new BsonDocument
            {
                { "name", col.CollectionName },
                { "pageID", (double)col.PageID },
                { "count", col.DocumentCount },
                { "sequence", col.Sequence },
                // only non-empty index slots are reported
                { "indexes", new BsonArray(
                    col.Indexes.Where(x => !x.IsEmpty).Select(i => new BsonDocument
                    {
                        { "slot", i.Slot },
                        { "field", i.Field },
                        { "expression", i.Expression },
                        { "unique", i.Unique }
                    })) }
            };

            collections.Add(colDoc);
        }

        return (new BsonDocument
        {
            { "userVersion", (int)header.UserVersion },
            // an all-zero password buffer means the datafile is not encrypted
            { "encrypted", header.Password.Any(x => x > 0) },
            { "changeID", (int)header.ChangeID },
            { "lastPageID", (int)header.LastPageID },
            // file size = all pages up to and including the last one
            { "fileSize", BasePage.GetSizeOfPages(header.LastPageID + 1) },
            { "collections", collections }
        });
    }
}
/// <summary>
/// Read journal file returning IEnumerable of pages. Journal pages are stored
/// sequentially after the last data page.
/// </summary>
/// <param name="lastPageID">Last data page id; the journal area starts right after it.</param>
/// <returns>Lazy sequence of PAGE_SIZE buffers, one independent array per journal page.</returns>
public IEnumerable<byte[]> ReadJournal(uint lastPageID)
{
    // position stream at begin journal area (just past the last data page)
    var pos = BasePage.GetSizeOfPages(lastPageID + 1);

    _stream.Seek(pos, SeekOrigin.Begin);

    while (_stream.Position < _stream.Length)
    {
        // allocate a fresh buffer for each page: the previous implementation reused a
        // single array across yields, so callers that retained the references
        // (e.g. ToList) saw every element overwritten by the last page read
        var buffer = new byte[BasePage.PAGE_SIZE];

        // Stream.Read may return fewer bytes than requested - loop to fill the page
        var read = 0;

        while (read < BasePage.PAGE_SIZE)
        {
            var count = _stream.Read(buffer, read, BasePage.PAGE_SIZE - read);

            if (count == 0) break;

            read += count;
        }

        yield return buffer;

        // now set position to next journal page (consumer may have moved the stream)
        pos += BasePage.PAGE_SIZE;

        _stream.Seek(pos, SeekOrigin.Begin);
    }
}
/// <summary>
/// Write original bytes page in a journal file (in sequence) - if journal not exists, create.
/// </summary>
public void WriteJournal(ICollection<byte[]> pages, uint lastPageID)
{
    // journal disabled: nothing to do
    if (_options.Journal == false) return;

    // total length = data area (lastPageID + 1 pages) plus one page per journal entry
    var journalStart = BasePage.GetSizeOfPages(lastPageID + 1);
    var size = journalStart + BasePage.GetSizeOfPages(pages.Count);

    _log.Write(Logger.JOURNAL, "extend datafile to journal - {0} pages", pages.Count);

    // set journal file length before write
    _stream.SetLength(size);

    // go to initial file position (after lastPageID)
    _stream.Seek(journalStart, SeekOrigin.Begin);

    foreach (var page in pages)
    {
        // read pageID and pageType from buffer (pageID lives in the first 4 bytes)
        var id = BitConverter.ToUInt32(page, 0);
        var type = (PageType)page[PAGE_TYPE_POSITION];

        _log.Write(Logger.JOURNAL, "write page #{0:0000} :: {1}", id, type);

        // write page bytes sequentially
        _stream.Write(page, 0, BasePage.PAGE_SIZE);
    }

    _log.Write(Logger.JOURNAL, "flush journal to disk");

    // ensure all data are persisted in disk
    this.Flush();
}
/// <summary>
/// Save all dirty pages to disk. Write order is crash-consistency critical:
/// journal first, then header (page 0, with Recovery = true), then data pages,
/// then header again with Recovery = false, flush, and finally clear the journal.
/// </summary>
public void PersistDirtyPages()
{
    // get header page
    var header = _pager.GetPage<HeaderPage>(0);

    // increase file changeID (back to 0 when overflow)
    header.ChangeID = header.ChangeID == ushort.MaxValue ? (ushort)0 : (ushort)(header.ChangeID + (ushort)1);

    // mark header as dirty so it is included in the dirty-page write below
    _pager.SetDirty(header);

    _log.Write(Logger.DISK, "begin disk operations - changeID: {0}", header.ChangeID);

    // write journal file in desc order so the header ends up as the last page in the journal
    if (_disk.IsJournalEnabled)
    {
        // only pages that already have rendered disk data (Length > 0) go to the journal
        _disk.WriteJournal(_cache.GetDirtyPages()
            .OrderByDescending(x => x.PageID)
            .Select(x => x.DiskData)
            .Where(x => x.Length > 0)
            .ToList(), header.LastPageID);

        // mark header as recovery before start writing (in journal, must keep recovery = false)
        header.Recovery = true;
    }
    else
    {
        // if no journal extend, resize file here for faster writes
        _disk.SetLength(BasePage.GetSizeOfPages(header.LastPageID + 1));
    }

    // get all dirty pages starting from the header page (SortedList);
    // header page (id=0) always must be the first page written to disk because it marks the disk as "in recovery"
    foreach (var page in _cache.GetDirtyPages())
    {
        // page.WritePage() updates DiskData with the newly rendered buffer;
        // page 0 (header) is never encrypted so recovery can always read it
        var buffer = _crypto == null || page.PageID == 0 ? page.WritePage() : _crypto.Encrypt(page.WritePage());

        _disk.WritePage(page.PageID, buffer);
    }

    if (_disk.IsJournalEnabled)
    {
        // re-write header page but now with recovery=false
        header.Recovery = false;

        _log.Write(Logger.DISK, "re-write header page now with recovery = false");

        _disk.WritePage(0, header.WritePage());
    }

    // mark all dirty pages as clean pages (all are persisted in disk and are valid pages)
    _cache.MarkDirtyAsClean();

    // flush all data direct to disk
    _disk.Flush();

    // discard journal file
    _disk.ClearJournal(header.LastPageID);
}
/// <summary>
/// Shrink datafile to crop journal area
/// </summary>
public void ClearJournal(uint lastPageID)
{
    _log.Write(Logger.JOURNAL, "shrink datafile to remove journal area");

    // truncate the file back to the data area only (lastPageID + 1 pages)
    var dataSize = BasePage.GetSizeOfPages(lastPageID + 1);

    this.SetLength(dataSize);
}