/// <summary>
/// Do checkpoint operation: copy confirmed log pages into the data file.
/// Returns how many pages were written into the data file.
/// Caller must already hold exclusive access to the database - this method
/// performs no locking of its own.
/// </summary>
private int CheckpointInternal()
{
    LOG($"checkpoint", "WAL");

    // wait until every queued page has been flushed to disk
    _disk.Queue.Wait();

    var counter = 0;

    ENSURE(_disk.Queue.Length == 0, "no pages on queue when checkpoint");

    // enumerate all "good" pages from the log file to be copied into the data file
    IEnumerable<PageBuffer> source()
    {
        foreach (var buffer in _disk.ReadFull(FileOrigin.Log))
        {
            if (buffer.IsBlank())
            {
                // this should not happen, but if it does, it means there's a
                // zeroed page in the file - just skip it
                continue;
            }

            // read direct from buffer to avoid creating a BasePage structure
            var transactionID = buffer.ReadUInt32(BasePage.P_TRANSACTION_ID);

            // only pages from confirmed transactions can be written to the data file
            if (_confirmTransactions.Contains(transactionID))
            {
                var pageID = buffer.ReadUInt32(BasePage.P_PAGE_ID);

                // clear isConfirmed/transactionID before the page lands in the data area
                buffer.Write(uint.MaxValue, BasePage.P_TRANSACTION_ID);
                buffer.Write(false, BasePage.P_IS_CONFIRMED);

                // retarget the buffer to this page's position inside the data file
                buffer.Position = BasePage.GetPagePosition(pageID);

                counter++;

                yield return buffer;
            }
        }
    }

    // write all log pages into data file (sync)
    _disk.Write(source(), FileOrigin.Data);

    // clear log file, wal index and memory cache
    this.Clear();

    return counter;
}
/// <summary>
/// Do checkpoint operation: copy confirmed log pages into the data file, then
/// reset the WAL index, cache and log file.
/// If soft = true, only try to enter exclusive mode - if not possible, just exit
/// without executing the checkpoint.
/// </summary>
public void Checkpoint(bool soft)
{
    // get original log length
    var logLength = _disk.GetLength(FileOrigin.Log);

    // no log file or no confirmed transaction, just exit
    if (logLength == 0 || _confirmTransactions.Count == 0) return;

    // for safety, lock the whole database (read/write) before running checkpoint
    // future versions can be smarter and avoid the lock (be more like SQLite checkpoint)
    if (soft)
    {
        // shutdown mode only tries to enter exclusive mode... if not possible, exit without checkpoint
        if (_locker.TryEnterExclusive() == false) return;
    }
    else
    {
        _locker.EnterReserved(true);
    }

    // BUGFIX: the checkpoint body must run inside try/finally - without it, any
    // exception thrown below (disk write, SetLength, queue wait) leaves the
    // database permanently locked because ExitReserved is never reached.
    try
    {
        LOG($"checkpoint", "WAL");

        // wait until every queued page has been flushed to disk
        _disk.Queue.Wait();

        ENSURE(_disk.Queue.Length == 0, "no pages on queue when checkpoint");

        // enumerate all "good" pages from the log file to be copied into the data file
        IEnumerable<PageBuffer> source()
        {
            foreach (var buffer in _disk.ReadFull(FileOrigin.Log))
            {
                // read direct from buffer to avoid creating a BasePage structure
                var transactionID = buffer.ReadUInt32(BasePage.P_TRANSACTION_ID);

                // only pages from confirmed transactions can be written to the data file
                if (_confirmTransactions.Contains(transactionID))
                {
                    var pageID = buffer.ReadUInt32(BasePage.P_PAGE_ID);

                    // clear isConfirmed/transactionID before the page lands in the data area
                    buffer.Write(uint.MaxValue, BasePage.P_TRANSACTION_ID);
                    buffer.Write(false, BasePage.P_IS_CONFIRMED);

                    // retarget the buffer to this page's position inside the data file
                    buffer.Position = BasePage.GetPagePosition(pageID);

                    yield return buffer;
                }
            }
        }

        // write all log pages into data file (sync)
        _disk.Write(source(), FileOrigin.Data);

        // reset wal-index state
        _confirmTransactions.Clear();
        _index.Clear();
        _currentReadVersion = 0;

        // clear cache
        _disk.Cache.Clear();

        // clear log file (sync)
        _disk.SetLength(0, FileOrigin.Log);
    }
    finally
    {
        // always release the exclusive lock, even when the checkpoint fails
        _locker.ExitReserved(true);
    }
}
/// <summary>
/// Do checkpoint operation: copy confirmed log pages into the data file.
/// Returns how many pages were saved into the data file.
/// Soft checkpoint executes only if no one else is using the database (tries an
/// exclusive lock - if not possible, just exits).
/// If crop = true, shrink the data file by removing the whole log area. If not,
/// keep the file at the same size and zero-fill all isConfirmed pages instead.
/// </summary>
public int Checkpoint(bool soft, bool crop)
{
    LOG($"checkpoint", "WAL");

    bool lockWasTaken;

    if (soft)
    {
        // soft mode: give up immediately if the exclusive lock is unavailable
        if (_locker.TryEnterExclusive(out lockWasTaken) == false) return (0);
    }
    else
    {
        lockWasTaken = _locker.EnterExclusive();
    }

    try
    {
        // wait until every queued page has been flushed to disk
        _disk.Queue.Wait();

        var counter = 0;

        ENSURE(_disk.Queue.Length == 0, "no pages on queue when checkpoint");

        // enumerate all "good" pages from the log file to be copied into the data file
        IEnumerable<PageBuffer> source()
        {
            // collect positions of all isConfirmed pages that live past the data
            // area, so they can be zero-filled afterwards when crop == false
            var confirmedPages = new List<long>();

            // first byte after the last data page - everything at/after this
            // position belongs to the log area
            var finalDataPosition = (_disk.Header.LastPageID + 1) * PAGE_SIZE;

            foreach (var buffer in _disk.ReadLog(false))
            {
                // read direct from buffer to avoid creating a BasePage structure
                var transactionID = buffer.ReadUInt32(BasePage.P_TRANSACTION_ID);

                // only confirmed pages can be written to the data disk
                if (_confirmTransactions.Contains(transactionID))
                {
                    // if page is a confirmed page and sits after the data area, remember it
                    var isConfirmed = buffer.ReadBool(BasePage.P_IS_CONFIRMED);

                    if (isConfirmed && buffer.Position >= finalDataPosition)
                    {
                        confirmedPages.Add(buffer.Position);
                    }

                    // clear isConfirmed/transactionID before the page lands in the data area
                    buffer.Write(uint.MaxValue, BasePage.P_TRANSACTION_ID);
                    buffer.Write(false, BasePage.P_IS_CONFIRMED);

                    // update buffer position to the page's data-area position
                    var pageID = buffer.ReadUInt32(BasePage.P_PAGE_ID);

                    buffer.Position = BasePage.GetPagePosition(pageID);

                    counter++;

                    yield return (buffer);
                }
            }

            // if not cropping the data file, zero-fill all confirmed pages that
            // were left behind in the log area (reuses one blank buffer)
            if (crop == false)
            {
                var buffer = new PageBuffer(new byte[PAGE_SIZE], 0, 0);

                foreach (var position in confirmedPages)
                {
                    buffer.Position = position;

                    yield return (buffer);
                }
            }
        }

        // write all log pages into data file (sync)
        _disk.Write(source());

        // clear log file, clear wal index, memory cache
        this.Clear(crop);

        return (counter);
    }
    finally
    {
        // only release the lock when this call actually acquired it
        if (lockWasTaken)
        {
            _locker.ExitExclusive();
        }
    }
}