/// <summary>
/// Read page from disk (dirty, wal or data)
/// </summary>
private T ReadPage<T>(uint pageID, out FileOrigin origin, out long position, out int walVersion)
    where T : BasePage
{
    // if the page is not in local pages, it may be a dirty page saved in the log file
    if (_transPages.DirtyPages.TryGetValue(pageID, out var walPosition))
    {
        // read page from log file
        var buffer = _reader.ReadPage(walPosition.Position, _mode == LockMode.Write, FileOrigin.Log);
        var dirty = BasePage.ReadPage<T>(buffer);

        origin = FileOrigin.Log;
        position = walPosition.Position;
        walVersion = _readVersion;

        ENSURE(dirty.TransactionID == _transactionID, "this page must come from the same transaction");

        return dirty;
    }

    // now, look inside wal-index
    var pos = _walIndex.GetPageIndex(pageID, _readVersion, out walVersion);

    if (pos != long.MaxValue)
    {
        // read page from log file
        var buffer = _reader.ReadPage(pos, _mode == LockMode.Write, FileOrigin.Log);
        var logPage = BasePage.ReadPage<T>(buffer);

        // clear some data inside this page (will be overridden when written to the log file)
        logPage.TransactionID = 0;
        logPage.IsConfirmed = false;

        origin = FileOrigin.Log;
        position = pos;

        return logPage;
    }
    else
    {
        // as a last resort, look inside the original data file on disk
        var pagePosition = BasePage.GetPagePosition(pageID);

        // read page from data file
        var buffer = _reader.ReadPage(pagePosition, _mode == LockMode.Write, FileOrigin.Data);
        var diskpage = BasePage.ReadPage<T>(buffer);

        origin = FileOrigin.Data;
        position = pagePosition;

        ENSURE(diskpage.IsConfirmed == false || diskpage.TransactionID != 0, "page is not header-clear in data file");

        return diskpage;
    }
}
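The lookup order above is fixed: a transaction's own dirty pages first, then the shared wal-index, then the data file. The wal-index resolves a pageID against a read version so each snapshot sees the newest log copy at or below its version. A minimal sketch of that versioned lookup, assuming a simplified dictionary of (version, position) slots per page (the type and field names are illustrative, not LiteDB's actual ones):

using System.Collections.Generic;

// Minimal sketch of a versioned wal-index lookup (illustrative only; not LiteDB's actual type).
public class WalIndexSketch
{
    // each page keeps its log copies as (version, log position) pairs, sorted by ascending version
    private readonly Dictionary<uint, List<(int Version, long Position)>> _index = new();

    public long GetPageIndex(uint pageID, int readVersion, out int walVersion)
    {
        walVersion = 0;

        if (_index.TryGetValue(pageID, out var slots))
        {
            // walk newest-to-oldest and return the first copy visible to this reader's snapshot
            for (var i = slots.Count - 1; i >= 0; i--)
            {
                if (slots[i].Version <= readVersion)
                {
                    walVersion = slots[i].Version;
                    return slots[i].Position;
                }
            }
        }

        // long.MaxValue signals "not in log - fall through to the data file"
        return long.MaxValue;
    }
}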
/// <summary>
/// Read page from stream - do not use cache system
/// </summary>
private T ReadPage<T>(uint pageID)
    where T : BasePage
{
    var position = BasePage.GetPagePosition(pageID);

    _stream.Position = position;
    _stream.Read(_buffer, 0, PAGE_SIZE);

    var buffer = new PageBuffer(_buffer, 0, 0);

    return BasePage.ReadPage<T>(buffer);
}
/// <summary>
/// Shrink the data file by rebuilding the entire database. Returns how many bytes were reclaimed
/// </summary>
public long Shrink()
{
    _walIndex.Checkpoint(false);

    if (_disk.GetLength(FileOrigin.Log) > 0)
    {
        throw new LiteException(0, "Shrink operation requires no log file - run Checkpoint before continue");
    }

    _locker.EnterReserved(true);

    var originalLength = _disk.GetLength(FileOrigin.Data);

    // create a savepoint in header page - restore if any error occurs
    var savepoint = _header.Savepoint();

    // must clear all cache pages because all of them will change
    _disk.Cache.Clear();

    try
    {
        // initialize V8 file reader
        using (var reader = new FileReaderV8(_header, _disk))
        {
            // clear current header
            _header.FreeEmptyPageID = uint.MaxValue;
            _header.LastPageID = 0;
            _header.GetCollections().ToList().ForEach(c => _header.DeleteCollection(c.Key));

            // rebuild entire database using FileReader
            this.Rebuild(reader);

            // crop data file one full page past the last used page
            // (page N occupies bytes [N * PAGE_SIZE, (N + 1) * PAGE_SIZE), so the
            // new length must be the position of page LastPageID + 1)
            var newLength = BasePage.GetPagePosition(_header.LastPageID + 1);

            _disk.SetLength(newLength, FileOrigin.Data);

            return originalLength - newLength;
        }
    }
    catch (Exception)
    {
        _header.Restore(savepoint);
        throw;
    }
    finally
    {
        _locker.ExitReserved(true);
        _walIndex.Checkpoint(false);
    }
}
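A minimal usage sketch, assuming Shrink is exposed on a LiteEngine-style instance (the engine type, settings and file name here are illustrative, not taken from the snippet above):

using System;
using LiteDB.Engine;

// Hypothetical usage - engine type, settings and file name are assumptions.
using (var engine = new LiteEngine(new EngineSettings { Filename = "app.db" }))
{
    var reclaimed = engine.Shrink();

    Console.WriteLine($"shrink reclaimed {reclaimed} bytes");
}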
/// <summary>
/// Do checkpoint operation to copy log pages into data file. Returns how many pages were copied into the data file
/// Checkpoint requires an exclusive lock on the database
/// </summary>
private int CheckpointInternal()
{
    LOG($"checkpoint", "WAL");

    // wait for all pages to be written to disk
    _disk.Queue.Wait();

    var counter = 0;

    ENSURE(_disk.Queue.Length == 0, "no pages can be in queue during checkpoint");

    // get all "good" pages from log file to be copied into data file
    IEnumerable<PageBuffer> source()
    {
        foreach (var buffer in _disk.ReadFull(FileOrigin.Log))
        {
            if (buffer.IsBlank())
            {
                // this should not happen, but if it does, it means there's a zeroed page in the file
                // just skip it
                continue;
            }

            // read direct from buffer to avoid creating a BasePage structure
            var transactionID = buffer.ReadUInt32(BasePage.P_TRANSACTION_ID);

            // only confirmed pages can be written to the data disk
            if (_confirmTransactions.Contains(transactionID))
            {
                var pageID = buffer.ReadUInt32(BasePage.P_PAGE_ID);

                // clear isConfirmed/transactionID
                buffer.Write(uint.MaxValue, BasePage.P_TRANSACTION_ID);
                buffer.Write(false, BasePage.P_IS_CONFIRMED);

                buffer.Position = BasePage.GetPagePosition(pageID);

                counter++;

                yield return buffer;
            }
        }
    }

    // write all log pages into data file (sync)
    _disk.Write(source(), FileOrigin.Data);

    // clear log file, wal index and memory cache
    this.Clear();

    return counter;
}
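The source() iterator reads transactionID and pageID straight from fixed offsets in the raw page buffer instead of materializing a BasePage. A small sketch of that direct-offset read; the offset values below are assumptions for illustration, not LiteDB's actual P_* constants:

using System;

// Illustrative header offsets - LiteDB's real P_* constants may differ.
const int P_PAGE_ID = 0;          // uint - first 4 bytes of the page (assumed)
const int P_TRANSACTION_ID = 12;  // uint (assumed offset)
const int P_IS_CONFIRMED = 16;    // bool - 1 byte (assumed offset)

var page = new byte[8192];

// read header fields straight from the raw buffer - no page object allocated
var pageID = BitConverter.ToUInt32(page, P_PAGE_ID);
var transactionID = BitConverter.ToUInt32(page, P_TRANSACTION_ID);
var isConfirmed = BitConverter.ToBoolean(page, P_IS_CONFIRMED);

Console.WriteLine($"page {pageID}: tx={transactionID} confirmed={isConfirmed}");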
/// <summary>
/// Read page from stream - keeps a single-page cache to avoid re-reading the same page
/// </summary>
private T ReadPage<T>(uint pageID)
    where T : BasePage
{
    var position = BasePage.GetPagePosition(pageID);

    if (_cachedPage?.PageID == pageID)
    {
        return (T)_cachedPage;
    }

    _stream.Position = position;
    _stream.Read(_buffer, 0, PAGE_SIZE);

    var buffer = new PageBuffer(_buffer, 0, 0);

    return (T)(_cachedPage = BasePage.ReadPage<T>(buffer));
}
/// <summary>
/// Returns the byte position of a page inside the file
/// </summary>
public static long GetPagePosition(int pageID)
{
    ENSURE(pageID >= 0, "pageID must not be less than 0");

    return BasePage.GetPagePosition((uint)pageID);
}
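The uint overload this forwards to is assumed to be a plain page-size multiplication: pages are fixed-size (8192 bytes in LiteDB v5) and laid out back to back, so page N starts at byte N * PAGE_SIZE:

using System;

// Assumed implementation of the uint overload - a sketch, not LiteDB's verified source.
const int PAGE_SIZE = 8192;

long GetPagePosition(uint pageID)
{
    // checked guards the multiplication (defensive; (long)uint * 8192 cannot overflow a long)
    return checked((long)pageID * PAGE_SIZE);
}

Console.WriteLine(GetPagePosition(3)); // 24576 - page 3 starts 3 pages into the file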
/// <summary>
/// Do checkpoint operation to copy confirmed log pages into the data file
/// If soft = true, just try to enter exclusive mode - if not possible, just exit (don't execute checkpoint)
/// </summary>
public void Checkpoint(bool soft)
{
    // get original log length
    var logLength = _disk.GetLength(FileOrigin.Log);

    // no log file or no confirmed transaction, just exit
    if (logLength == 0 || _confirmTransactions.Count == 0)
    {
        return;
    }

    // to be safe, lock the whole database (read/write) before running the checkpoint operation
    // future versions can be smarter and avoid the lock (more like SQLite's checkpoint)
    if (soft)
    {
        // shutdown mode only tries to enter exclusive mode... if not possible, exit without checkpoint
        if (_locker.TryEnterExclusive() == false)
        {
            return;
        }
    }
    else
    {
        _locker.EnterReserved(true);
    }

    LOG($"checkpoint", "WAL");

    // wait for all pages to be written to disk
    _disk.Queue.Wait();

    ENSURE(_disk.Queue.Length == 0, "no pages can be in queue during checkpoint");

    // get all "good" pages from log file to be copied into data file
    IEnumerable<PageBuffer> source()
    {
        foreach (var buffer in _disk.ReadFull(FileOrigin.Log))
        {
            // read direct from buffer to avoid creating a BasePage structure
            var transactionID = buffer.ReadUInt32(BasePage.P_TRANSACTION_ID);

            // only confirmed pages can be written to the data disk
            if (_confirmTransactions.Contains(transactionID))
            {
                var pageID = buffer.ReadUInt32(BasePage.P_PAGE_ID);

                // clear isConfirmed/transactionID
                buffer.Write(uint.MaxValue, BasePage.P_TRANSACTION_ID);
                buffer.Write(false, BasePage.P_IS_CONFIRMED);

                buffer.Position = BasePage.GetPagePosition(pageID);

                yield return buffer;
            }
        }
    }

    // write all log pages into data file (sync)
    _disk.Write(source(), FileOrigin.Data);

    // reset wal-index state
    _confirmTransactions.Clear();
    _index.Clear();

    _currentReadVersion = 0;

    // clear cache
    _disk.Cache.Clear();

    // clear log file (sync)
    _disk.SetLength(0, FileOrigin.Log);

    // remove exclusive lock
    _locker.ExitReserved(true);
}
/// <summary>
/// Do checkpoint operation to copy log pages into data file. Returns how many pages were saved into the data file
/// A soft checkpoint runs only if no one else is using the database (tries the exclusive lock - if not possible, just exits)
/// If crop = true, shrink the data file by removing the log area. If not, keep the file the same size and zero all isConfirmed pages
/// </summary>
public int Checkpoint(bool soft, bool crop)
{
    LOG($"checkpoint", "WAL");

    bool lockWasTaken;

    if (soft)
    {
        if (_locker.TryEnterExclusive(out lockWasTaken) == false)
        {
            return 0;
        }
    }
    else
    {
        lockWasTaken = _locker.EnterExclusive();
    }

    try
    {
        // wait for all pages to be written to disk
        _disk.Queue.Wait();

        var counter = 0;

        ENSURE(_disk.Queue.Length == 0, "no pages can be in queue during checkpoint");

        // get all "good" pages from log file to be copied into data file
        IEnumerable<PageBuffer> source()
        {
            // collect all isConfirmed page positions
            var confirmedPages = new List<long>();
            var finalDataPosition = (_disk.Header.LastPageID + 1) * PAGE_SIZE;

            foreach (var buffer in _disk.ReadLog(false))
            {
                // read direct from buffer to avoid creating a BasePage structure
                var transactionID = buffer.ReadUInt32(BasePage.P_TRANSACTION_ID);

                // only confirmed pages can be written to the data disk
                if (_confirmTransactions.Contains(transactionID))
                {
                    // if page is a confirmed page and is located after the data area, add to list
                    var isConfirmed = buffer.ReadBool(BasePage.P_IS_CONFIRMED);

                    if (isConfirmed && buffer.Position >= finalDataPosition)
                    {
                        confirmedPages.Add(buffer.Position);
                    }

                    // clear isConfirmed/transactionID
                    buffer.Write(uint.MaxValue, BasePage.P_TRANSACTION_ID);
                    buffer.Write(false, BasePage.P_IS_CONFIRMED);

                    // update buffer position to data area position
                    var pageID = buffer.ReadUInt32(BasePage.P_PAGE_ID);

                    buffer.Position = BasePage.GetPagePosition(pageID);

                    counter++;

                    yield return buffer;
                }
            }

            // if not cropping the data file, zero-fill all confirmed pages left after the data area
            if (crop == false)
            {
                var buffer = new PageBuffer(new byte[PAGE_SIZE], 0, 0);

                foreach (var position in confirmedPages)
                {
                    buffer.Position = position;

                    yield return buffer;
                }
            }
        }

        // write all log pages into data file (sync)
        _disk.Write(source());

        // clear log file, wal index and memory cache
        this.Clear(crop);

        return counter;
    }
    finally
    {
        if (lockWasTaken)
        {
            _locker.ExitExclusive();
        }
    }
}
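A hedged sketch of the two calling modes, assuming the service is reachable as _walIndex inside the engine (the member names are illustrative):

// Illustrative call sites - the surrounding engine member names are assumptions.

// shutdown path: soft checkpoint only runs if nobody else holds the database,
// and crop removes the log area from the end of the file
var pagesOnShutdown = _walIndex.Checkpoint(soft: true, crop: true);

// on-demand checkpoint: block for the exclusive lock, keep the file size,
// and zero the now-dead confirmed pages left in the log area
var pagesOnDemand = _walIndex.Checkpoint(soft: false, crop: false);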
/// <summary>
/// Do checkpoint operation to copy log pages into data file. Returns how many pages were saved into the data file
/// </summary>
public async Task<int> Checkpoint()
{
    LOG($"checkpoint", "WAL");

    var counter = 0;

    // get all "good" pages from log file to be copied into data file
    async IAsyncEnumerable<PageBuffer> source()
    {
        // collect all isConfirmed page positions
        var confirmedPages = new List<long>();
        var finalDataPosition = (_disk.Header.LastPageID + 1) * PAGE_SIZE;

        await foreach (var buffer in _disk.ReadLog(false))
        {
            // read direct from buffer to avoid creating a BasePage structure
            var transactionID = buffer.ReadUInt32(BasePage.P_TRANSACTION_ID);

            // only confirmed pages can be written to the data disk
            if (_confirmTransactions.Contains(transactionID))
            {
                // if page is a confirmed page and is located after the data area, add to list
                var isConfirmed = buffer.ReadBool(BasePage.P_IS_CONFIRMED);

                if (isConfirmed && buffer.Position >= finalDataPosition)
                {
                    confirmedPages.Add(buffer.Position);
                }

                // clear isConfirmed/transactionID
                buffer.Write(uint.MaxValue, BasePage.P_TRANSACTION_ID);
                buffer.Write(false, BasePage.P_IS_CONFIRMED);

                // update buffer position to data area position
                var pageID = buffer.ReadUInt32(BasePage.P_PAGE_ID);

                buffer.Position = BasePage.GetPagePosition(pageID);

                counter++;

                yield return buffer;
            }
        }
    }

    // write all log pages into data file (async)
    await _disk.WriteDataPages(source());

    // clear wal index, confirmed transactions and memory cache
    _confirmTransactions.Clear();
    _index.Clear();

    _lastTransactionID = 0;
    _currentReadVersion = 0;

    // clear cache
    _disk.Cache.Clear();

    _disk.ResetLogPosition(true);

    return counter;
}
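And a short usage sketch for the awaitable variant (again, the _walIndex member name is an assumption):

using System;

// Illustrative usage - assumes the WAL service is exposed as _walIndex.
var pages = await _walIndex.Checkpoint();

Console.WriteLine($"checkpoint copied {pages} pages into the data file");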