public CollectionPage(PageBuffer buffer)
    : base(buffer)
{
    ENSURE(this.PageType == PageType.Collection, "page type must be collection page");

    if (this.PageType != PageType.Collection) throw LiteException.InvalidPageType(PageType.Collection, this);

    // create new buffer area to store BsonDocument indexes
    var area = _buffer.Slice(PAGE_HEADER_SIZE, PAGE_SIZE - PAGE_HEADER_SIZE);

    using (var r = new BufferReader(new[] { area }, false))
    {
        // read free list slots for FreeDataPage
        for (var i = 0; i < PAGE_FREE_LIST_SLOTS; i++)
        {
            this.FreeDataPageList[i] = r.ReadUInt32();
        }

        // skip reserved area
        r.Skip(P_INDEXES - PAGE_HEADER_SIZE - r.Position);

        // read indexes count (max 255 indexes per collection)
        var count = r.ReadByte(); // 1 byte

        for (var i = 0; i < count; i++)
        {
            var index = new CollectionIndex(r);

            _indexes[index.Name] = index;
        }
    }
}
/// <summary>
/// Move a writable page to the readable list - if it already exists, override the content.
/// Used after a write operation that must mark the page as readable because the page content was changed.
/// This method runs BEFORE the page is sent to the disk write queue - but any new page request must read this new content.
/// Returns the readable page.
/// </summary>
public PageBuffer MoveToReadable(PageBuffer page)
{
    ENSURE(page.Position != long.MaxValue, "page must have position to be readable");
    ENSURE(page.ShareCounter == BUFFER_WRITABLE, "page must be writable before moving to readable dict");

    var added = true;

    // no concurrency in writable page
    page.ShareCounter = 1;

    var readable = _readable.AddOrUpdate(page.Position, page, (newKey, current) =>
    {
        // if page already exists inside readable list, it should never be in use (this is guaranteed by lock control)
        ENSURE(current.ShareCounter == 0, "user must ensure this page is not in use when marked as read only");

        current.ShareCounter = 1;

        // if page is already in cache, this is a duplicate page in memory
        // must update cached page with new page content
        Buffer.BlockCopy(page.Array, page.Offset, current.Array, current.Offset, PAGE_SIZE);

        added = false;

        return current;
    });

    // if page was not added (an instance already existed in readable list), move this writable page to free list
    if (added == false)
    {
        this.DiscardPage(page);
    }

    // return the page that is in the _readable list
    return readable;
}
/// <summary>
/// Move a writable page to the readable list - if it already exists, override the content.
/// Used after a write operation that must mark the page as readable because the page content was changed.
/// This method runs BEFORE the page is sent to the disk write queue - but any new page request must read this new content.
/// Returns the readable page.
/// </summary>
public PageBuffer MoveToReadable(PageBuffer page)
{
    ENSURE(page.Position != long.MaxValue, "page must have position to be readable");
    ENSURE(page.IsWritable, "page must be writable before moving to readable dict");

    // mark page as readable
    page.IsWritable = false;

    if (_readable.TryAdd(page.Position, page))
    {
        return page;
    }
    else
    {
        // if page already exists, update content
        var current = _readable[page.Position];

        // if page is already in cache, this is a duplicate page in memory
        // must update cached page with new page content
        Buffer.BlockCopy(page.Array, page.Offset, current.Array, current.Offset, PAGE_SIZE);

        // discard the duplicated writable page into the free list
        this.DiscardPage(page);

        // return the page that is in the _readable list
        return current;
    }
}
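// Illustrative sketch (not LiteDB source): both MoveToReadable variants above resolve the same
// race - a readable copy of the page may already be cached - either via AddOrUpdate or via
// TryAdd plus an indexer lookup. The simplified Slot/ReadableCacheSketch types below are
// assumptions used only to show the AddOrUpdate "insert or overwrite in place" pattern in isolation.
using System;
using System.Collections.Concurrent;

public class Slot
{
    public long Position { get; set; }
    public byte[] Data { get; } = new byte[8192];
    public int ShareCounter { get; set; }
}

public class ReadableCacheSketch
{
    private readonly ConcurrentDictionary<long, Slot> _readable = new ConcurrentDictionary<long, Slot>();

    // returns the instance that ends up cached (the new slot, or the pre-existing one updated in place)
    public Slot MoveToReadable(Slot slot)
    {
        var added = true;

        slot.ShareCounter = 1;

        var readable = _readable.AddOrUpdate(slot.Position, slot, (key, current) =>
        {
            // an older copy is already cached: overwrite its bytes with the new content
            Buffer.BlockCopy(slot.Data, 0, current.Data, 0, slot.Data.Length);

            current.ShareCounter = 1;
            added = false;

            return current;
        });

        // if the incoming instance was not stored, it would be recycled into a free list here
        if (!added)
        {
            slot.ShareCounter = 0;
        }

        return readable;
    }
}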
/// <summary>
/// Load HeaderPage from buffer page
/// </summary>
public HeaderPage(PageBuffer buffer)
    : base(buffer)
{
    this.LoadPage();

    this.CreationTime = _buffer.ReadDateTime(P_CREATION_TIME);
}
/// <summary>
/// Create new Page based on pre-defined PageID and PageType
/// </summary>
public BasePage(PageBuffer buffer, uint pageID, PageType pageType)
{
    _buffer = buffer;

    ENSURE(buffer.Slice(PAGE_HEADER_SIZE, PAGE_SIZE - PAGE_HEADER_SIZE - 1).All(0), "new page buffer must be empty before use in a new page");

    // page information
    this.PageID = pageID;
    this.PageType = pageType;
    this.PrevPageID = uint.MaxValue;
    this.NextPageID = uint.MaxValue;

    // transaction information
    this.ColID = uint.MaxValue;
    this.TransactionID = uint.MaxValue;
    this.IsConfirmed = false;

    // block information
    this.ItemsCount = 0;
    this.UsedBytes = 0;
    this.FragmentedBytes = 0;
    this.NextFreePosition = PAGE_HEADER_SIZE; // 32
    this.HighestIndex = byte.MaxValue; // empty - not used yet

    // default values
    this.IsDirty = false;

    // write directly into buffer in the constructor because these values never change later (write once)
    _buffer.Write(this.PageID, P_PAGE_ID);
    _buffer.Write((byte)this.PageType, P_PAGE_TYPE);
}
/// <summary>
/// Create new page instance based on buffer (READ)
/// </summary>
public static T ReadPage<T>(PageBuffer buffer)
    where T : BasePage
{
    if (typeof(T) == typeof(BasePage)) return (T)(object)new BasePage(buffer);
    if (typeof(T) == typeof(HeaderPage)) return (T)(object)new HeaderPage(buffer);
    if (typeof(T) == typeof(CollectionPage)) return (T)(object)new CollectionPage(buffer);
    if (typeof(T) == typeof(IndexPage)) return (T)(object)new IndexPage(buffer);
    if (typeof(T) == typeof(DataPage)) return (T)(object)new DataPage(buffer);

    throw new InvalidCastException();
}
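// Illustrative sketch (not LiteDB source): ReadPage<T> avoids reflection by branching on typeof(T),
// and the (T)(object) double cast is needed because C# cannot cast a concrete class directly to a
// generic parameter that is only constrained to the base type. The toy hierarchy below is an
// assumption used only to show that dispatch pattern and its fail-fast InvalidCastException.
using System;

public abstract class PageBaseSketch
{
    public byte[] Buffer { get; }

    protected PageBaseSketch(byte[] buffer)
    {
        this.Buffer = buffer;
    }
}

public class HeaderPageSketch : PageBaseSketch
{
    public HeaderPageSketch(byte[] buffer) : base(buffer) { }
}

public class DataPageSketch : PageBaseSketch
{
    public DataPageSketch(byte[] buffer) : base(buffer) { }
}

public static class PageFactorySketch
{
    public static T ReadPage<T>(byte[] buffer)
        where T : PageBaseSketch
    {
        if (typeof(T) == typeof(HeaderPageSketch)) return (T)(object)new HeaderPageSketch(buffer);
        if (typeof(T) == typeof(DataPageSketch)) return (T)(object)new DataPageSketch(buffer);

        // unknown page type: fail fast instead of returning null
        throw new InvalidCastException();
    }
}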
/// <summary>
/// Create a new empty database (use synced mode)
/// </summary>
private void Initialize(Stream stream, Collation collation, long initialSize)
{
    var buffer = new PageBuffer(new byte[PAGE_SIZE], 0, 0);
    var header = new HeaderPage(buffer, 0);

    // update collation
    header.Pragmas.Set(Pragmas.COLLATION, (collation ?? Collation.Default).ToString(), false);

    // update buffer
    header.UpdateBuffer();

    stream.Write(buffer.Array, buffer.Offset, PAGE_SIZE);

    if (initialSize > 0)
    {
        if (stream is AesStream) throw LiteException.InitialSizeCryptoNotSupported();
        if (initialSize % PAGE_SIZE != 0) throw LiteException.InvalidInitialSize();

        stream.SetLength(initialSize);
    }

    stream.FlushToDisk();
}
/// <summary>
/// Add a page into the writer queue; it will be saved to disk by another thread. If page.Position = MaxValue, store at end of file (will get final Position)
/// After this method, this page will be available to readers as a clean page
/// </summary>
public void EnqueuePage(PageBuffer page)
{
    ENSURE(page.Origin == FileOrigin.Log, "async writer must be used only for the log file");
    ENSURE(_running, "should not add new page in shutdown process [to-review]"); //TODO: review this

    _queue.Enqueue(page);
}
/// <summary>
/// Read existing DataPage in buffer
/// </summary>
public DataPage(PageBuffer buffer)
    : base(buffer)
{
    if (this.PageType != PageType.Data) throw new LiteException(0, $"Invalid DataPage buffer on {PageID}");
}
// private readonly Dictionary<byte, IndexNode> _cache = new Dictionary<byte, IndexNode>();

/// <summary>
/// Read existing IndexPage in buffer
/// </summary>
public IndexPage(PageBuffer buffer)
    : base(buffer)
{
    if (this.PageType != PageType.Index) throw new LiteException(0, $"Invalid IndexPage buffer on {PageID}");
}
public CollectionPage(PageBuffer buffer, uint pageID)
    : base(buffer, pageID, PageType.Collection)
{
    for (var i = 0; i < PAGE_FREE_LIST_SLOTS; i++)
    {
        this.FreeDataPageList[i] = uint.MaxValue;
    }
}
/// <summary>
/// Read existing DataPage in buffer
/// </summary>
public DataPage(PageBuffer buffer)
    : base(buffer)
{
    ENSURE(this.PageType == PageType.Data, "page type must be data page");

    if (this.PageType != PageType.Data) throw new LiteException(0, $"Invalid DataPage buffer on {PageID}");
}
/// <summary>
/// Read existing IndexPage in buffer
/// </summary>
public IndexPage(PageBuffer buffer)
    : base(buffer)
{
    ENSURE(this.PageType == PageType.Index, "page type must be index page");

    if (this.PageType != PageType.Index) throw new LiteException(0, $"Invalid IndexPage buffer on {PageID}");
}
/// <summary>
/// Read existing IndexPage in buffer
/// </summary>
public IndexPage(PageBuffer buffer)
    : base(buffer)
{
    ENSURE(this.PageType == PageType.Index, "page type must be index page");

    if (this.PageType != PageType.Index) throw LiteException.InvalidPageType(PageType.Index, this);
}
/// <summary>
/// Read existing DataPage in buffer
/// </summary>
public DataPage(PageBuffer buffer)
    : base(buffer)
{
    ENSURE(this.PageType == PageType.Data, "page type must be data page");

    if (this.PageType != PageType.Data) throw LiteException.InvalidPageType(PageType.Data, this);
}
/// <summary>
/// Create a savepoint before doing any change on the header page (executes UpdateBuffer())
/// </summary>
public PageBuffer Savepoint()
{
    this.UpdateBuffer();

    var savepoint = new PageBuffer(new byte[PAGE_SIZE], 0, 0);

    System.Buffer.BlockCopy(_buffer.Array, _buffer.Offset, savepoint.Array, savepoint.Offset, PAGE_SIZE);

    return savepoint;
}
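// Illustrative sketch (not LiteDB source): Savepoint() copies the serialized header into a detached
// buffer so a caller can put those bytes back if a multi-step header change fails. The
// HeaderSnapshotSketch class below is an assumption showing the same snapshot/rollback idea.
using System;

public class HeaderSnapshotSketch
{
    public const int PAGE_SIZE = 8192;

    private readonly byte[] _buffer = new byte[PAGE_SIZE];

    // take a full, independent copy of the current header bytes
    public byte[] Savepoint()
    {
        var savepoint = new byte[PAGE_SIZE];

        Buffer.BlockCopy(_buffer, 0, savepoint, 0, PAGE_SIZE);

        return savepoint;
    }

    // copy the saved bytes back over the live header (rollback)
    public void Restore(byte[] savepoint)
    {
        Buffer.BlockCopy(savepoint, 0, _buffer, 0, PAGE_SIZE);
    }

    public void WriteByte(int position, byte value) => _buffer[position] = value;

    public byte ReadByte(int position) => _buffer[position];
}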
/// <summary>
/// Read page from stream - do not use cache system
/// </summary>
private T ReadPage<T>(uint pageID)
    where T : BasePage
{
    var position = BasePage.GetPagePosition(pageID);

    _stream.Position = position;
    _stream.Read(_buffer, 0, PAGE_SIZE);

    var buffer = new PageBuffer(_buffer, 0, 0);

    return BasePage.ReadPage<T>(buffer);
}
/// <summary>
/// Completely discard a writable page - clear content and move to free list
/// </summary>
public void DiscardPage(PageBuffer page)
{
    // clear page controls
    page.IsWritable = false;
    page.Position = long.MaxValue;

    // clear content
    page.Fill(0);

    // add into free list
    _free.Enqueue(page);
}
public CollectionPage(PageBuffer buffer, uint pageID)
    : base(buffer, pageID, PageType.Collection)
{
    // initialize page version
    this.CreationTime = DateTime.Now;
    this.LastAnalyzed = DateTime.MinValue;

    for (var i = 0; i < PAGE_FREE_LIST_SLOTS; i++)
    {
        this.FreeDataPageID[i] = uint.MaxValue;
        this.FreeIndexPageID[i] = uint.MaxValue;
    }
}
/// <summary>
/// Try to move this page to the readable list (if not already in the readable list)
/// Returns true if it was moved
/// </summary>
public bool TryMoveToReadable(PageBuffer page)
{
    ENSURE(page.Position != long.MaxValue, "page must have a position");
    ENSURE(page.IsWritable, "page must be writable");

    var added = _readable.TryAdd(page.Position, page);

    if (added)
    {
        page.IsWritable = false;
    }

    return added;
}
public CollectionPage(PageBuffer buffer)
    : base(buffer)
{
    if (this.PageType != PageType.Collection) throw new LiteException(0, $"Invalid CollectionPage buffer on {PageID}");

    // create new buffer area to store BsonDocument indexes
    var area = _buffer.Slice(PAGE_HEADER_SIZE, PAGE_SIZE - PAGE_HEADER_SIZE);

    using (var r = new BufferReader(new[] { area }, false))
    {
        // read free list page IDs for FreeDataPage and FreeIndexPage
        for (var i = 0; i < PAGE_FREE_LIST_SLOTS; i++)
        {
            this.FreeDataPageID[i] = r.ReadUInt32();
            this.FreeIndexPageID[i] = r.ReadUInt32();
        }

        // read creation/last analyzed dates (16 bytes)
        this.CreationTime = r.ReadDateTime();
        this.LastAnalyzed = r.ReadDateTime();

        // skip reserved area
        r.Skip(P_INDEXES - r.Position);

        // read indexes count (max 255 indexes per collection)
        var count = r.ReadByte(); // 1 byte

        for (var i = 0; i < count; i++)
        {
            var index = new CollectionIndex(
                slot: r.ReadByte(),
                name: r.ReadCString(),
                expr: r.ReadCString(),
                unique: r.ReadBoolean())
            {
                Head = r.ReadPageAddress(),      // 5 bytes
                Tail = r.ReadPageAddress(),      // 5 bytes
                MaxLevel = r.ReadByte(),         // 1 byte
                KeyCount = r.ReadUInt32(),       // 4 bytes
                UniqueKeyCount = r.ReadUInt32()  // 4 bytes
            };

            _indexes[index.Name] = index;
        }
    }
}
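// Illustrative sketch (not LiteDB source): the constructor above walks a fixed on-page layout
// (free-list slots, two DateTimes, a reserved gap, a count byte, then one record per index).
// The standalone example below reads a similar but simplified layout with System.IO.BinaryReader;
// the slot count, field order and length-prefixed strings are assumptions, not LiteDB's real format.
using System;
using System.Collections.Generic;
using System.IO;

public static class LayoutReaderSketch
{
    public const int FREE_LIST_SLOTS = 5;

    public static Dictionary<string, bool> ReadIndexes(byte[] page)
    {
        using (var r = new BinaryReader(new MemoryStream(page)))
        {
            // fixed-size slot array comes first
            var freeDataPageID = new uint[FREE_LIST_SLOTS];

            for (var i = 0; i < FREE_LIST_SLOTS; i++)
            {
                freeDataPageID[i] = r.ReadUInt32();
            }

            // an 8-byte tick value stands in for the stored creation DateTime
            var creationTime = new DateTime(r.ReadInt64(), DateTimeKind.Utc);

            // a single count byte caps the number of variable-length index records at 255
            var count = r.ReadByte();

            var indexes = new Dictionary<string, bool>();

            for (var i = 0; i < count; i++)
            {
                var name = r.ReadString();     // length-prefixed, unlike LiteDB's CString
                var unique = r.ReadBoolean();

                indexes[name] = unique;
            }

            return indexes;
        }
    }
}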
/// <summary>
/// Completely discard a writable page - clear controls and move to free list
/// </summary>
public void DiscardPage(PageBuffer page)
{
    ENSURE(page.ShareCounter == BUFFER_WRITABLE, "discarded page must be writable");

    // clear page controls
    page.ShareCounter = 0;
    page.Position = long.MaxValue;

    // DO NOT CLEAR CONTENT
    // when this page is requested from the free list it will be cleared if the request came from NewPage()
    // or it will be overwritten by ReadPage

    // add into free list
    _free.Enqueue(page);
}
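// Illustrative sketch (not LiteDB source): this DiscardPage variant intentionally skips zeroing the
// page body because the next consumer either clears it (a brand-new page) or overwrites it (a page
// read from disk). The FreeListSketch class below is an assumption showing that
// "clear on reuse, not on discard" pattern with a ConcurrentQueue as the free list.
using System;
using System.Collections.Concurrent;

public class BufferSlotSketch
{
    public long Position { get; set; }
    public byte[] Data { get; } = new byte[8192];
}

public class FreeListSketch
{
    private readonly ConcurrentQueue<BufferSlotSketch> _free = new ConcurrentQueue<BufferSlotSketch>();

    public void Discard(BufferSlotSketch slot)
    {
        // reset controls only; the stale bytes stay in place
        slot.Position = long.MaxValue;

        _free.Enqueue(slot);
    }

    public BufferSlotSketch GetNewPage()
    {
        if (!_free.TryDequeue(out var slot)) slot = new BufferSlotSketch();

        // a brand-new page must start zeroed, so pay the clearing cost only here
        Array.Clear(slot.Data, 0, slot.Data.Length);

        return slot;
    }

    public BufferSlotSketch GetPageForRead()
    {
        if (!_free.TryDequeue(out var slot)) slot = new BufferSlotSketch();

        // no clear needed: the caller overwrites the whole buffer with bytes read from disk
        return slot;
    }
}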
/// <summary>
/// Create new Header Page
/// </summary>
public HeaderPage(PageBuffer buffer, uint pageID)
    : base(buffer, pageID, PageType.Header)
{
    // initialize page version
    this.CreationTime = DateTime.UtcNow;
    this.FreeEmptyPageID = uint.MaxValue;
    this.LastPageID = 0;

    // write directly into buffer in the constructor because these values never change later (write once)
    _buffer.Write(HEADER_INFO, P_HEADER_INFO);
    _buffer.Write(FILE_VERSION, P_FILE_VERSION);
    _buffer.Write(this.CreationTime, P_CREATION_TIME);

    // initialize collections
    _collections = new BsonDocument();
}
private void WritePageToStream(PageBuffer page)
{
    if (page == null) return;

    ENSURE(page.ShareCounter > 0, "page must be shared at least 1");

    // set stream position according to page
    _stream.Position = page.Position;

    _stream.Write(page.Array, page.Offset, PAGE_SIZE);

    // release page here (no page use after this)
    page.Release();
}
/// <summary>
/// Read page from stream - do not use the memory cache system (only the last read page is kept)
/// </summary>
private T ReadPage<T>(uint pageID)
    where T : BasePage
{
    var position = BasePage.GetPagePosition(pageID);

    // return the last read page if it is the one requested
    if (_cachedPage?.PageID == pageID) return (T)_cachedPage;

    _stream.Position = position;
    _stream.Read(_buffer, 0, PAGE_SIZE);

    var buffer = new PageBuffer(_buffer, 0, 0);

    return (T)(_cachedPage = BasePage.ReadPage<T>(buffer));
}
/// <summary>
/// Create a new empty database (use synced mode)
/// </summary>
private void Initialize(Stream stream, long initialSize)
{
    var buffer = new PageBuffer(new byte[PAGE_SIZE], 0, 0);
    var header = new HeaderPage(buffer, 0);

    // update buffer
    header.UpdateBuffer();

    stream.Write(buffer.Array, 0, PAGE_SIZE);

    if (initialSize > 0)
    {
        stream.SetLength(initialSize);
    }

    stream.FlushToDisk();
}
/// <summary>
/// Try to move this page to the readable list (if not already in the readable list)
/// Returns true if it was moved
/// </summary>
public bool TryMoveToReadable(PageBuffer page)
{
    ENSURE(page.Position != long.MaxValue, "page must have a position");
    ENSURE(page.ShareCounter == BUFFER_WRITABLE, "page must be writable");

    // set page as not in use
    page.ShareCounter = 0;

    var added = _readable.TryAdd(page.Position, page);

    // if not added, restore ShareCounter to the writable state
    if (!added)
    {
        page.ShareCounter = BUFFER_WRITABLE;
    }

    return added;
}
/// <summary>
/// Create new page instance with new PageID and passed buffer (NEW)
/// </summary>
public static T CreatePage<T>(PageBuffer buffer, uint pageID)
    where T : BasePage
{
    if (typeof(T) == typeof(CollectionPage)) return (T)(object)new CollectionPage(buffer, pageID);
    if (typeof(T) == typeof(IndexPage)) return (T)(object)new IndexPage(buffer, pageID);
    if (typeof(T) == typeof(DataPage)) return (T)(object)new DataPage(buffer, pageID);

    throw new InvalidCastException();
}
/// <summary>
/// Create a new empty database (use synced mode)
/// </summary>
private void Initialize(Stream stream, Collation collation, long initialSize)
{
    var buffer = new PageBuffer(new byte[PAGE_SIZE], 0, 0);
    var header = new HeaderPage(buffer, 0);

    // update collation
    header.Pragmas.Set(Pragmas.COLLATION, (collation ?? Collation.Default).ToString(), false);

    // update buffer
    header.UpdateBuffer();

    stream.Write(buffer.Array, buffer.Offset, PAGE_SIZE);

    if (initialSize > 0)
    {
        stream.SetLength(initialSize);
    }

    stream.FlushToDisk();
}
/// <summary>
/// Try to move this page to readable list (if not already in readable list)
/// Returns true if it was moved
/// </summary>
public bool TryMoveToReadable(PageBuffer page)
{
    ENSURE(page.Position != long.MaxValue, "page must have a position");
    ENSURE(page.ShareCounter == BUFFER_WRITABLE, "page must be writable");
    ENSURE(page.Origin != FileOrigin.None, "page must have origin defined");

    var key = this.GetReadableKey(page.Position, page.Origin);

    // set page as not in use
    page.ShareCounter = 0;

    var added = _readable.TryAdd(key, page);

    // if not added, let's get ShareCounter back to writable state
    if (!added)
    {
        page.ShareCounter = BUFFER_WRITABLE;
    }

    return added;
}
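// Illustrative sketch (not LiteDB source): the origin-aware variant above keys the readable cache by
// (position, origin) so a data-file page and a log-file page at the same byte position never collide,
// and it rolls ShareCounter back if TryAdd loses the race. The types below are assumptions used only
// to show that composite-key TryAdd / rollback pattern.
using System.Collections.Concurrent;

public enum OriginSketch : byte { None = 0, Data = 1, Log = 2 }

public class CachedPageSketch
{
    public const int WRITABLE = -1;

    public long Position { get; set; } = long.MaxValue;
    public OriginSketch Origin { get; set; }
    public int ShareCounter { get; set; } = WRITABLE;
}

public class OriginAwareCacheSketch
{
    private readonly ConcurrentDictionary<(long, OriginSketch), CachedPageSketch> _readable =
        new ConcurrentDictionary<(long, OriginSketch), CachedPageSketch>();

    public bool TryMoveToReadable(CachedPageSketch page)
    {
        // composite key: the same position can be cached once per file origin
        var key = (page.Position, page.Origin);

        // optimistically mark the page as "not in use"
        page.ShareCounter = 0;

        var added = _readable.TryAdd(key, page);

        // another copy already won the race: restore the writable marker
        if (!added) page.ShareCounter = CachedPageSketch.WRITABLE;

        return added;
    }
}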