/// <summary>
/// Create a brand-new empty database file: writes the header as page 0
/// (synced write mode), optionally pre-sizing the file.
/// </summary>
private void Initialize(Stream stream, Collation collation, long initialSize)
{
    var pageBuffer = new PageBuffer(new byte[PAGE_SIZE], 0, 0);
    var headerPage = new HeaderPage(pageBuffer, 0);

    // persist the requested collation (fall back to default when none is given)
    headerPage.Pragmas.Set(Pragmas.COLLATION, (collation ?? Collation.Default).ToString(), false);

    // serialize the header into its buffer and write it as page 0
    headerPage.UpdateBuffer();

    stream.Write(pageBuffer.Array, pageBuffer.Offset, PAGE_SIZE);

    if (initialSize > 0)
    {
        // pre-sizing is not supported on encrypted streams
        if (stream is AesStream) throw LiteException.InitialSizeCryptoNotSupported();

        // the pre-size must be a whole number of pages
        if (initialSize % PAGE_SIZE != 0) throw LiteException.InvalidInitialSize();

        stream.SetLength(initialSize);
    }

    stream.FlushToDisk();
}
/// <summary>
/// Initialize LiteEngine using initial engine settings.
/// Services are constructed in strict dependency order: locker, disk,
/// header, wal-index, sort disk, transaction monitor. On any failure the
/// partially-built engine is disposed before rethrowing.
/// </summary>
public LiteEngine(EngineSettings settings)
{
    _settings = settings ?? throw new ArgumentNullException(nameof(settings));

    // a readonly database can never run a checkpoint, so force it to 0
    if (_settings.ReadOnly)
    {
        _settings.Checkpoint = 0;
    }

    LOG($"start initializing{(_settings.ReadOnly ? " (readonly)" : "")}", "ENGINE");

    try
    {
        // initialize locker service (no dependency on other services)
        _locker = new LockService(settings.Timeout, settings.ReadOnly);

        // initialize disk service (will create the database file if needed)
        _disk = new DiskService(settings);

        // read first page with no cache ref (owns its PageBuffer) - Release() not supported
        var buffer = _disk.ReadFull(FileOrigin.Data).First();

        // first byte == 1 means the datafile is encrypted but no password was supplied
        if (buffer[0] == 1)
        {
            throw new LiteException(0, "This data file is encrypted and needs a password to open");
        }

        _header = new HeaderPage(buffer);

        // initialize wal-index service
        _walIndex = new WalIndexService(_disk, _locker);

        // if a log file exists, replay it to rebuild wal index references
        // (may replace the entire _header instance - hence the ref parameter)
        if (_disk.GetLength(FileOrigin.Log) > 0)
        {
            _walIndex.RestoreIndex(ref _header);
        }

        // initialize temp disk used by external-merge sorting (ORDER BY)
        _sortDisk = new SortDisk(settings.CreateTempFactory(), CONTAINER_SORT_SIZE, settings.UtcDate);

        // initialize transaction monitor as the last service (depends on all others)
        _monitor = new TransactionMonitor(_header, _locker, _disk, _walIndex, _settings);

        // register system collections ($dump, $page_list, ...)
        this.InitializeSystemCollections();

        LOG("initialization completed", "ENGINE");
    }
    catch (Exception ex)
    {
        LOG(ex.Message, "ERROR");

        // explicit dispose of partially-constructed services
        // (but do not run the normal shutdown/checkpoint operation)
        this.Dispose(true);
        throw;
    }
}
public FileReaderV8(HeaderPage header, DiskService disk)
{
    // take a snapshot of the collection name -> pageID map from the header
    _collections = header.GetCollections().ToDictionary(kv => kv.Key, kv => kv.Value);

    // borrow the pooled writer stream; it is never returned to the pool
    var pool = disk.GetPool(FileOrigin.Data);

    _stream = pool.Writer;
}
public FileReaderV8(HeaderPage header, DiskService disk)
{
    // take a snapshot of the collection name -> pageID map from the header
    _collections = header.GetCollections().ToDictionary(kv => kv.Key, kv => kv.Value);

    // wrap the shared writer stream so concurrent reads stay safe
    // (writer comes from pool - no need to return it)
    var writer = disk.Writer;

    _stream = new ConcurrentStream(writer);
}
/// <summary>
/// Create a new snapshot over a single collection: acquires the collection
/// lock (read or reserved), captures the current wal-index read version and
/// loads (optionally creating) the collection page.
/// </summary>
public Snapshot(LockMode mode, string collectionName, HeaderPage header, uint transactionID, TransactionPages transPages, LockService locker, WalIndexService walIndex, DiskReader reader, bool addIfNotExists)
{
    _mode = mode;
    _collectionName = collectionName;
    _header = header;
    _transactionID = transactionID;
    _transPages = transPages;
    _locker = locker;
    _walIndex = walIndex;
    _reader = reader;

    // enter lock according to initial mode - must happen BEFORE reading the
    // wal version so no writer can commit between lock and version capture
    if (mode == LockMode.Read)
    {
        _locker.EnterRead(_collectionName);
    }
    else
    {
        _locker.EnterReserved(_collectionName);
    }

    // get latest read version from wal-index (freezes what this snapshot sees)
    _readVersion = _walIndex.CurrentReadVersion;

    var srv = new CollectionService(_header, this, _transPages);

    // read collection page (create if new - loads virtual collections too)
    srv.Get(_collectionName, addIfNotExists, ref _collectionPage);

    // remove collection page from local page set so it is not treated as a
    // regular dirty page (also clears the _collectionPage link reference)
    if (_collectionPage != null)
    {
        _localPages.Remove(_collectionPage.PageID);
    }
}
/// <summary>
/// Build pragmas from an on-disk header buffer: registers all pragma
/// definitions (via the chained ctor) then hydrates each value from the buffer.
/// </summary>
public EnginePragmas(BufferSlice buffer, HeaderPage headerPage) : this(headerPage)
{
    foreach (var item in _pragmas.Values)
    {
        item.Read(buffer);
    }

    // values came straight from disk - nothing to persist yet
    _isDirty = false;
}
/// <summary>
/// Load all confirmed transactions from the log file (used only when opening
/// the datafile). Scans log page headers sequentially, grouping page positions
/// by transaction; when a transaction's confirm page is found, the whole group
/// is committed into the wal-index. No lock needed - called from LiteEngine ctor.
/// </summary>
public void RestoreIndex(ref HeaderPage header)
{
    // page positions accumulated per transactionID (confirmed or not yet)
    var positions = new Dictionary <long, List <PagePosition> >();
    // byte offset of the page currently being read within the log file
    var current = 0L;

    // read all pages to get confirmed transactions (only page header fields are read)
    foreach (var buffer in _disk.ReadFull(FileOrigin.Log))
    {
        // read directly from the raw buffer to avoid allocating a BasePage
        var pageID = buffer.ReadUInt32(BasePage.P_PAGE_ID);
        var isConfirmed = buffer.ReadBool(BasePage.P_IS_CONFIRMED);
        var transactionID = buffer.ReadUInt32(BasePage.P_TRANSACTION_ID);

        var position = new PagePosition(pageID, current);

        if (positions.TryGetValue(transactionID, out var list))
        {
            list.Add(position);
        }
        else
        {
            positions[transactionID] = new List <PagePosition> { position };
        }

        if (isConfirmed)
        {
            // confirm page found: publish all of this transaction's pages to the wal-index
            this.ConfirmTransaction(transactionID, positions[transactionID]);

            var pageType = (PageType)buffer.ReadByte(BasePage.P_PAGE_TYPE);

            // when the header was modified in a transaction, it must always be
            // the last page inside the log file (per transaction)
            if (pageType == PageType.Header)
            {
                // the header's PageBuffer instance can't change - reuse it
                var headerBuffer = header.Buffer;

                // copy this log page's content over the original header block
                Buffer.BlockCopy(buffer.Array, buffer.Offset, headerBuffer.Array, headerBuffer.Offset, PAGE_SIZE);

                // re-load header from the new buffer data (replaces caller's instance)
                header = new HeaderPage(headerBuffer);
                // reset transient transaction markers on the restored header
                header.TransactionID = uint.MaxValue;
                header.IsConfirmed = false;
            }
        }

        // track last transaction ID seen so new transactions continue the sequence
        _lastTransactionID = (int)transactionID;

        current += PAGE_SIZE;
    }
}
/// <summary>
/// Start a new transaction: captures service references, draws the next
/// transaction ID from the wal-index and stamps the start time.
/// </summary>
public TransactionService(HeaderPage header, DiskService disk, WalIndexService walIndex)
{
    // keep service references
    _walIndex = walIndex;
    _disk = disk;
    _header = header;

    // stamp start time and allocate a fresh transactionID
    _startTime = DateTime.UtcNow;
    _transactionID = walIndex.NextTransactionID();
}
public FileReaderV8(HeaderPage header, DiskService disk)
{
    // capture the datafile's user version
    this.UserVersion = header.UserVersion;

    // take a snapshot of the collection name -> pageID map from the header
    _collections = header.GetCollections().ToDictionary(kv => kv.Key, kv => kv.Value);

    // borrow the pooled writer stream (never returned to the pool)
    var pool = disk.GetPool(FileOrigin.Data);

    _stream = pool.Writer;

    // rent a reusable page-sized scratch buffer
    _buffer = BufferPool.Rent(PAGE_SIZE);
}
public FileReaderV8(HeaderPage header, DiskService disk)
{
    // materialize a copy of the pragmas (UpdateBuffer serializes current header state)
    this.Pragmas = new EnginePragmas(header.UpdateBuffer(), header);

    // take a snapshot of the collection name -> pageID map from the header
    _collections = header.GetCollections().ToDictionary(kv => kv.Key, kv => kv.Value);

    // borrow the pooled writer stream (never returned to the pool)
    var pool = disk.GetPool(FileOrigin.Data);

    _stream = pool.Writer;

    // rent a reusable page-sized scratch buffer
    _buffer = BufferPool.Rent(PAGE_SIZE);
}
public TransactionMonitor(HeaderPage header, LockService locker, DiskService disk, WalIndexService walIndex)
{
    // keep service references
    _walIndex = walIndex;
    _disk = disk;
    _locker = locker;
    _header = header;

    // all in-memory pages start out available to transactions
    _freePages = MAX_TRANSACTION_SIZE;

    // each open transaction initially receives an equal share of the page budget
    _initialSize = MAX_TRANSACTION_SIZE / MAX_OPEN_TRANSACTIONS;
}
public TransactionMonitor(HeaderPage header, LockService locker, DiskService disk, WalIndexService walIndex, EngineSettings settings)
{
    // keep service references
    _settings = settings;
    _walIndex = walIndex;
    _disk = disk;
    _locker = locker;
    _header = header;

    // the configured transaction page budget starts out fully available
    _freePages = settings.MaxTransactionSize;

    // each open transaction initially receives an equal share of the budget
    _initialSize = settings.MaxTransactionSize / MAX_OPEN_TRANSACTIONS;
}
/// <summary>
/// Initialize LiteEngine using initial engine settings.
/// Services are constructed in strict dependency order: disk (loads header),
/// locker, wal-index, sort disk, transaction monitor. On any failure the
/// partially-built engine is disposed before rethrowing.
/// </summary>
public LiteEngine(EngineSettings settings)
{
    _settings = settings ?? throw new ArgumentNullException(nameof(settings));

    LOG($"start initializing{(_settings.ReadOnly ? " (readonly)" : "")}", "ENGINE");

    try
    {
        // initialize disk service (will create the database file if needed)
        _disk = new DiskService(settings, MEMORY_SEGMENT_SIZES);

        // header page was loaded (or created) by the disk service
        _header = _disk.Header;

        // fail fast when engine settings request a collation different from the datafile's
        // (collation is fixed at file creation - only Rebuild can change it)
        if (settings.Collation != null && settings.Collation.ToString() != _header.Pragmas.Collation.ToString())
        {
            throw new LiteException(0, $"Datafile collation '{_header.Pragmas.Collation}' is different from engine settings. Use Rebuild database to change collation.");
        }

        // initialize locker service (timeout comes from datafile pragmas)
        _locker = new LockService(_header.Pragmas);

        // initialize wal-index service
        _walIndex = new WalIndexService(_disk, _locker);

        // replay existing log file (if any) to rebuild wal index references
        _walIndex.RestoreIndex(_header);

        // initialize temp disk used by external-merge sorting (ORDER BY)
        _sortDisk = new SortDisk(settings.CreateTempFactory(), CONTAINER_SORT_SIZE, _header.Pragmas);

        // initialize transaction monitor as the last service (depends on all others)
        _monitor = new TransactionMonitor(_header, _settings, _locker, _disk, _walIndex);

        // register system collections ($dump, $page_list, ...)
        this.InitializeSystemCollections();

        LOG("initialization completed", "ENGINE");
    }
    catch (Exception ex)
    {
        LOG(ex.Message, "ERROR");

        // explicit dispose of partially-constructed services
        // (but do not run the normal shutdown/checkpoint operation)
        this.Dispose(true);
        throw;
    }
}
/// <summary>
/// Check whether a collection name is valid (and fits in the header page).
/// Throws LiteException with a specific message when the name does not fit
/// in the remaining header space, contains invalid characters, or uses the
/// reserved `$` prefix.
/// </summary>
/// <param name="name">Candidate collection name (UTF-8 byte length is what counts against header space)</param>
/// <param name="header">Header page used to measure remaining collection-name space</param>
public static void CheckName(string name, HeaderPage header)
{
    // the header page stores all collection names - reject names that no longer fit
    if (Encoding.UTF8.GetByteCount(name) > header.GetAvaiableCollectionSpace())
    {
        throw LiteException.InvalidCollectionName(name, "There is no space in header this collection name");
    }

    // only word characters are allowed (project extension - presumably [a-zA-Z$_] plus digits; verify IsWord)
    if (!name.IsWord())
    {
        throw LiteException.InvalidCollectionName(name, "Use only [a-Z$_]");
    }

    // ordinal comparison: "$" prefix check must not be culture-sensitive (CA1310)
    if (name.StartsWith("$", StringComparison.Ordinal))
    {
        throw LiteException.InvalidCollectionName(name, "Collection can't starts with `$` (reserved for system collections)");
    }
}
/// <summary>
/// Create a new empty database file: writes the header as page 0 (synced
/// write mode), optionally pre-sizing the file.
/// </summary>
/// <param name="stream">Destination datafile stream (positioned at start)</param>
/// <param name="initialSize">Optional initial file length in bytes (0 = no pre-sizing)</param>
private void Initialize(Stream stream, long initialSize)
{
    var buffer = new PageBuffer(new byte[PAGE_SIZE], 0, 0);
    var header = new HeaderPage(buffer, 0);

    // serialize the header page into its buffer
    header.UpdateBuffer();

    // use buffer.Offset (not a hard-coded 0) for consistency with the other
    // Initialize overloads - the buffer's own offset is the correct start
    stream.Write(buffer.Array, buffer.Offset, PAGE_SIZE);

    if (initialSize > 0)
    {
        stream.SetLength(initialSize);
    }

    stream.FlushToDisk();
}
/// <summary>
/// Start a new transaction: captures service references, draws the next
/// transaction ID from the wal-index and opens a disk reader.
/// </summary>
public TransactionService(HeaderPage header, LockService locker, DiskService disk, WalIndexService walIndex, int maxTransactionSize, TransactionMonitor monitor, bool queryOnly)
{
    // keep service references
    _monitor = monitor;
    _walIndex = walIndex;
    _disk = disk;
    _locker = locker;
    _header = header;

    this.MaxTransactionSize = maxTransactionSize;
    this.QueryOnly = queryOnly;

    // stamp start time and allocate a fresh transactionID
    _startTime = DateTime.UtcNow;
    _transactionID = walIndex.NextTransactionID();

    // open a reader for this transaction's page reads
    _reader = disk.GetReader();
}
/// <summary>
/// Initialize the database asynchronously: prepares the underlying stream,
/// builds the disk service (creating the datafile if needed), validates the
/// collation and replays the wal index. On any failure the partially-built
/// engine is disposed before rethrowing.
/// </summary>
public async Task OpenAsync()
{
    LOG("start initializing", "ENGINE");

    try
    {
        // open async stream first, when the stream type needs async setup
        if (_stream is IAsyncInitialize s)
        {
            await s.InitializeAsync();
        }

        // initialize disk service (will create the database file if needed)
        _disk = await DiskService.CreateAsync(_stream, _collation, MEMORY_SEGMENT_SIZES, MAX_EXTENDS);

        // header page was loaded (or created) by the disk service
        _header = _disk.Header;

        // fail fast when the requested collation differs from the datafile's
        // (collation is fixed at file creation - only Rebuild can change it)
        if (_collation.ToString() != _header.Pragmas.Collation.ToString())
        {
            throw new LiteException(0, $"Datafile collation '{_header.Pragmas.Collation}' is different from engine settings. Use Rebuild database to change collation.");
        }

        // initialize wal-index service
        _walIndex = new WalIndexService(_disk);

        // replay existing log (if any) to rebuild wal index references
        await _walIndex.RestoreIndex(_header);

        // register system collections
        // this.InitializeSystemCollections();

        _disposed = false;

        LOG("initialization completed", "ENGINE");
    }
    catch (Exception ex)
    {
        LOG(ex.Message, "ERROR");

        // explicit dispose of partially-constructed services
        // (but do not run the normal shutdown operation)
        await this.DisposeAsync();
        throw;
    }
}
/// <summary>
/// Start a new transaction: captures service references, draws the next
/// transaction ID, opens a disk reader and enters the per-thread transaction
/// lock so a single thread cannot run two transactions at once.
/// </summary>
public TransactionService(HeaderPage header, LockService locker, DiskService disk, WalIndexService walIndex, int maxTransactionSize, Action <uint> done)
{
    // keep service references and the completion callback
    _done = done;
    _maxTransactionSize = maxTransactionSize;
    _walIndex = walIndex;
    _disk = disk;
    _locker = locker;
    _header = header;

    // stamp start time and allocate a fresh transactionID
    _startTime = DateTime.UtcNow;
    _transactionID = walIndex.NextTransactionID();

    // open a reader for this transaction's page reads
    _reader = disk.GetReader();

    // enter transaction lock to avoid 2 transactions on the same thread
    _locker.EnterTransaction();
}
/// <summary>
/// Create a brand-new empty database file: writes the header as page 0
/// (synced write mode), optionally pre-sizing the file.
/// </summary>
private void Initialize(Stream stream, Collation collation, long initialSize)
{
    var pageBuffer = new PageBuffer(new byte[PAGE_SIZE], 0, 0);
    var headerPage = new HeaderPage(pageBuffer, 0);

    // persist the requested collation (fall back to default when none is given)
    headerPage.Pragmas.Set(Pragmas.COLLATION, (collation ?? Collation.Default).ToString(), false);

    // serialize the header into its buffer and write it as page 0
    headerPage.UpdateBuffer();

    stream.Write(pageBuffer.Array, pageBuffer.Offset, PAGE_SIZE);

    // optionally pre-size the file
    if (initialSize > 0)
    {
        stream.SetLength(initialSize);
    }

    stream.FlushToDisk();
}
/// <summary>
/// Register all engine pragma definitions. Each pragma carries delegates for
/// getting/setting its in-memory value, reading/writing the header buffer,
/// and validating new values (the HeaderPage argument `h` may be null during
/// validation when no header context is available).
/// </summary>
public EnginePragmas(HeaderPage headerPage)
{
    _headerPage = headerPage;

    // pragma names are case-insensitive
    _pragmas = new Dictionary <string, Pragma>(StringComparer.OrdinalIgnoreCase)
    {
        // free-form application version number; no validation
        ["USER_VERSION"] = new Pragma
        {
            Name = "USER_VERSION",
            Get = () => this.UserVersion,
            Set = (v) => this.UserVersion = v.AsInt32,
            Read = (b) => this.UserVersion = b.ReadInt32(P_USER_VERSION),
            Validate = (v, h) => { },
            Write = (b) => b.Write(this.UserVersion, P_USER_VERSION)
        },
        // string comparison rules; fixed at file creation - any write attempt throws
        ["COLLATION"] = new Pragma
        {
            Name = "COLLATION",
            Get = () => this.Collation.ToString(),
            Set = (v) => this.Collation = new Collation(v.AsString),
            // stored on disk as two int32: LCID + CompareOptions
            Read = (b) => this.Collation = new Collation(b.ReadInt32(P_COLLATION_LCID), (CompareOptions)b.ReadInt32(P_COLLATION_SORT)),
            Validate = (v, h) => { throw new LiteException(0, "Pragma COLLATION is read only"); },
            Write = (b) => { b.Write(this.Collation.LCID, P_COLLATION_LCID); b.Write((int)this.Collation.SortOptions, P_COLLATION_SORT); }
        },
        // lock timeout; exposed in seconds, kept as TimeSpan in memory
        ["TIMEOUT"] = new Pragma
        {
            Name = "TIMEOUT",
            Get = () => (int)this.Timeout.TotalSeconds,
            Set = (v) => this.Timeout = TimeSpan.FromSeconds(v.AsInt32),
            Read = (b) => this.Timeout = TimeSpan.FromSeconds(b.ReadInt32(P_TIMEOUT)),
            Validate = (v, h) => { if (v <= 0) { throw new LiteException(0, "Pragma TIMEOUT must be greater than zero"); } },
            Write = (b) => b.Write((int)this.Timeout.TotalSeconds, P_TIMEOUT)
        },
        // maximum datafile size in bytes; must cover at least 4 pages and never
        // shrink below the file's current size
        ["LIMIT_SIZE"] = new Pragma
        {
            Name = "LIMIT_SIZE",
            Get = () => this.LimitSize,
            Set = (v) => this.LimitSize = v.AsInt64,
            Read = (b) => this.LimitSize = b.ReadInt64(P_LIMIT_SIZE),
            Validate = (v, h) =>
            {
                if (v < 4 * PAGE_SIZE) { throw new LiteException(0, "Pragma LIMIT_SIZE must be at least 4 pages (32768 bytes)"); }
                // h may be null when no header is available for context
                if (h != null && v.AsInt64 < (h.LastPageID + 1) * Constants.PAGE_SIZE) { throw new LiteException(0, "Pragma LIMIT_SIZE must be greater or equal to the current file size"); }
            },
            Write = (b) => b.Write(this.LimitSize, P_LIMIT_SIZE)
        },
        // whether DateTime values are returned as UTC; no validation
        ["UTC_DATE"] = new Pragma
        {
            Name = "UTC_DATE",
            Get = () => this.UtcDate,
            Set = (v) => this.UtcDate = v.AsBoolean,
            Read = (b) => this.UtcDate = b.ReadBool(P_UTC_DATE),
            Validate = (v, h) => { },
            Write = (b) => b.Write(this.UtcDate, P_UTC_DATE)
        },
        // log size (pages) that triggers a checkpoint; 0 disables auto-checkpoint
        ["CHECKPOINT"] = new Pragma
        {
            Name = "CHECKPOINT",
            Get = () => this.Checkpoint,
            Set = (v) => this.Checkpoint = v.AsInt32,
            Read = (b) => this.Checkpoint = b.ReadInt32(P_CHECKPOINT),
            Validate = (v, h) => { if (v < 0) { throw new LiteException(0, "Pragma CHECKPOINT must be greater or equal to zero"); } },
            Write = (b) => b.Write(this.Checkpoint, P_CHECKPOINT)
        }
    };

    // defaults have not been persisted yet - mark dirty so they get written
    _isDirty = true;
}
/// <summary>
/// Initialize the disk service: builds the memory cache, stream factory/pool
/// and async writer queue, then either creates a brand-new datafile or loads
/// the header page from an existing one. Finally computes where log content
/// begins (immediately after the last data page).
/// </summary>
public DiskService(EngineSettings settings, int[] memorySegmentSizes)
{
    _cache = new MemoryCache(memorySegmentSizes);

    // get new stream factory based on settings (file, memory, temp...)
    _streamFactory = settings.CreateDataFactory();

    // create stream pool (single writer, pooled readers)
    _streamPool = new StreamPool(_streamFactory, settings.ReadOnly);

    // create async writer queue for log file
    _queue = new DiskWriterQueue(_streamPool.Writer);

    // a zero-length writable file means a brand-new database
    var isNew = settings.ReadOnly == false && _streamPool.Writer.Length == 0;

    // create new database if it does not exist yet
    if (isNew)
    {
        LOG($"creating new database: '{Path.GetFileName(_streamFactory.Name)}'", "DISK");

        _header = this.Initialize(_streamPool.Writer, settings.Collation, settings.InitialSize);
    }
    else
    {
        // load header page from position 0 of the existing file
        var stream = _streamPool.Rent();
        var buffer = new PageBuffer(new byte[PAGE_SIZE], 0, 0) { Position = 0 };

        try
        {
            stream.Position = 0;
            stream.Read(buffer.Array, 0, PAGE_SIZE);

            // first byte == 1 means the datafile is encrypted but no password was supplied
            if (buffer[0] == 1) throw new LiteException(0, "This data file is encrypted and needs a password to open");

            _header = new HeaderPage(buffer);

            _streamPool.Return(stream);
        }
        catch
        {
            // return the rented stream to the pool before disposing the service,
            // then rethrow the original exception
            _streamPool.Return(stream);

            this.Dispose();
            throw;
        }
    }

    // define start/end position for log content (log lives after last data page)
    _logStartPosition = (_header.LastPageID + 1) * PAGE_SIZE;
    _logEndPosition = _logStartPosition; // will be updated by RestoreIndex
}
/// <summary>
/// Create a collection service bound to one snapshot's view of the data.
/// </summary>
public CollectionService(HeaderPage header, Snapshot snapshot, TransactionPages transPages)
{
    // keep references only - no work is done here
    _header = header;
    _transPages = transPages;
    _snapshot = snapshot;
}
/// <summary>
/// Load all confirmed transactions from the log file (used only when opening
/// the datafile). Scans log page headers, grouping page positions per
/// transaction; when a transaction's confirm page is found, the whole group is
/// committed into the wal-index. Also updates _disk with the last log position.
/// No lock needed - called from LiteEngine ctor.
/// </summary>
public void RestoreIndex(HeaderPage header)
{
    // page positions accumulated per transactionID (confirmed or not yet)
    var positions = new Dictionary <long, List <PagePosition> >();
    // position of the last confirmed page seen (0 = none yet)
    var last = 0L;

    // read all pages to get confirmed transactions (only page header fields are read)
    foreach (var buffer in _disk.ReadLog(true))
    {
        // read directly from the raw buffer to avoid allocating a BasePage
        var pageID = buffer.ReadUInt32(BasePage.P_PAGE_ID);
        var isConfirmed = buffer.ReadBool(BasePage.P_IS_CONFIRMED);
        var transactionID = buffer.ReadUInt32(BasePage.P_TRANSACTION_ID);

        // skip pages that carry no real transaction (0 and MaxValue are sentinels)
        if (transactionID == 0 || transactionID == uint.MaxValue) { continue; }

        var position = new PagePosition(pageID, buffer.Position);

        if (positions.TryGetValue(transactionID, out var list))
        {
            list.Add(position);
        }
        else
        {
            positions[transactionID] = new List <PagePosition> { position };
        }

        if (isConfirmed)
        {
            // remember position of the last confirmed page
            last = buffer.Position;

            // publish all of this transaction's pages to the wal-index
            this.ConfirmTransaction(transactionID, positions[transactionID]);

            var pageType = (PageType)buffer.ReadByte(BasePage.P_PAGE_TYPE);

            // when the header was modified in a transaction, it must always be
            // the last page inside the log file (per transaction)
            if (pageType == PageType.Header)
            {
                // copy this log page's content over the original header block
                Buffer.BlockCopy(buffer.Array, buffer.Offset, header.Buffer.Array, header.Buffer.Offset, PAGE_SIZE);

                // re-parse header fields from the updated buffer (same instance)
                header.LoadPage();
            }
        }

        // track last transaction ID seen so new transactions continue the sequence
        _lastTransactionID = (int)transactionID;
    }

    // update last log position according to the last confirmed page in the log
    if (last > 0)
    {
        _disk.LogEndPosition = last + PAGE_SIZE;
    }
}
/// <summary>
/// System collection "$dump" - exposes raw page information for diagnostics.
/// </summary>
public SysDump(HeaderPage header, TransactionMonitor monitor, DiskService disk) : base("$dump")
{
    // keep references only - no work is done here
    _disk = disk;
    _monitor = monitor;
    _header = header;
}
/// <summary>
/// Raise the Commit event with the committed header page (no-op when
/// nothing is subscribed).
/// </summary>
public void OnCommit(HeaderPage header)
{
    // snapshot the delegate once so the null-check and invoke are race-safe
    var handler = this.Commit;

    if (handler != null)
    {
        handler.Invoke(header);
    }
}
/// <summary>
/// System collection "$dump" - exposes raw page information for diagnostics.
/// </summary>
public SysDump(HeaderPage header, TransactionMonitor monitor) : base("$dump")
{
    // keep references only - no work is done here
    _monitor = monitor;
    _header = header;
}
/// <summary>
/// Dump every page of the given file (data or log) as one BsonDocument per
/// page, with extra fields for header and collection pages. Lazily evaluated.
/// </summary>
private IEnumerable <BsonDocument> SysDump(FileOrigin origin)
{
    // reverse map: collection pageID -> collection name (for the "collection" field)
    var collections = _header.GetCollections().ToDictionary(x => x.Value, x => x.Key);

    foreach (var buffer in _disk.ReadFull(origin))
    {
        var page = new BasePage(buffer);
        var pageID = page.PageID;

        // data-file "bubble" pages report PageID 0 - derive the real ID from
        // the page's byte position so the dump prints a useful value
        if (origin == FileOrigin.Data && buffer.Position > 0 && pageID == 0)
        {
            pageID = (uint)(buffer.Position / PAGE_SIZE);
        }

        var doc = new BsonDocument();

        // fields common to every page type
        doc["_position"] = (int)buffer.Position;
        doc["pageID"] = (int)pageID;
        doc["pageType"] = page.PageType.ToString();
        doc["nextPageID"] = dumpPageID(page.NextPageID);
        doc["prevPageID"] = dumpPageID(page.PrevPageID);
        doc["collection"] = collections.GetOrDefault(page.ColID, "-");
        doc["transactionID"] = (int)page.TransactionID;
        doc["isConfirmed"] = page.IsConfirmed;
        doc["itemsCount"] = (int)page.ItemsCount;
        doc["freeBytes"] = page.FreeBytes;
        doc["usedBytes"] = (int)page.UsedBytes;
        doc["fragmentedBytes"] = (int)page.FragmentedBytes;
        doc["nextFreePosition"] = (int)page.NextFreePosition;
        doc["highestIndex"] = (int)page.HighestIndex;

        if (page.PageType == PageType.Header)
        {
            // re-parse the buffer as a header page for header-only fields
            var header = new HeaderPage(buffer);

            doc["freeEmptyPageID"] = dumpPageID(header.FreeEmptyPageID);
            doc["lastPageID"] = (int)header.LastPageID;
            doc["creationTime"] = header.CreationTime;
            doc["userVersion"] = header.UserVersion;
            doc["collections"] = new BsonDocument(header.GetCollections().ToDictionary(x => x.Key, x => new BsonValue((int)x.Value)));
        }
        else if (page.PageType == PageType.Collection)
        {
            // re-parse the buffer as a collection page for collection-only fields
            var collection = new CollectionPage(buffer);

            doc["lastAnalyzed"] = collection.LastAnalyzed;
            doc["creationTime"] = collection.CreationTime;
            doc["freeDataPageID"] = new BsonArray(collection.FreeDataPageID.Select(x => dumpPageID(x)));
            doc["freeIndexPageID"] = new BsonArray(collection.FreeIndexPageID.Select(x => dumpPageID(x)));
            doc["indexes"] = new BsonArray(collection.GetCollectionIndexes().Select(x => new BsonDocument
            {
                ["name"] = x.Name,
                ["expression"] = x.Expression,
                ["unique"] = x.Unique,
                ["headPageID"] = dumpPageID(x.Head.PageID),
                ["tailPageID"] = dumpPageID(x.Tail.PageID),
                ["maxLevel"] = (int)x.MaxLevel,
                ["keyCount"] = (int)x.KeyCount,
                ["uniqueKeyCount"] = (int)x.UniqueKeyCount
            }));
        }

        yield return(doc);
    }

    // uint.MaxValue is the "no page" sentinel - render it as BSON null
    BsonValue dumpPageID(uint pageID)
    {
        return(pageID == uint.MaxValue ? BsonValue.Null : new BsonValue((int)pageID));
    }
}
/// <summary>
/// System collection "$page_list" - exposes page listing for diagnostics.
/// </summary>
public SysPageList(HeaderPage header, TransactionMonitor monitor) : base("$page_list")
{
    // keep references only - no work is done here
    _monitor = monitor;
    _header = header;
}
/// <summary>
/// System collection "$dump" - exposes raw page information for diagnostics.
/// </summary>
public SysDump(HeaderPage header, LiteEngine engine) : base("$dump")
{
    // keep references only - no work is done here
    _engine = engine;
    _header = header;
}
/// <summary>
/// System collection "$page_list" - exposes page listing for diagnostics.
/// </summary>
public SysPageList(HeaderPage header, LiteEngine engine) : base("$page_list")
{
    // keep references only - no work is done here
    _engine = engine;
    _header = header;
}