/// <summary>
/// Rebuild a file-info entry from its stored BSON metadata document.
/// Reads each well-known field ("_id", "filename", "mimeType", "length",
/// "chunks", "uploadDate", "metadata") straight into the matching property.
/// </summary>
internal LiteFileInfo(DbEngine engine, BsonDocument doc)
{
    _engine = engine;

    Id = doc["_id"].AsString;
    Filename = doc["filename"].AsString;
    MimeType = doc["mimeType"].AsString;
    Length = doc["length"].AsInt64;
    Chunks = doc["chunks"].AsInt32;
    UploadDate = doc["uploadDate"].AsDateTime;
    Metadata = doc["metadata"].AsDocument;
}
/// <summary>
/// Open a read stream over a stored file, positioned at its first chunk.
/// A zero-length entry is treated as corrupted storage and rejected.
/// </summary>
internal LiteFileStream(DbEngine engine, LiteFileInfo file)
{
    _engine = engine;
    _file = file;

    // a stored file must have at least one chunk of data
    if (file.Length == 0) throw LiteException.FileCorrupted(file);

    _positionInChunk = 0;
    _currentChunkIndex = 0;

    // eagerly load the first chunk so the stream is immediately readable
    _currentChunkData = GetChunkData(_currentChunkIndex);
}
/// <summary>
/// Run every registered upgrade action whose version number is greater than
/// the database's stored version, advancing the stored version after each
/// one, then persist the final version if anything ran.
/// </summary>
internal void Apply(LiteDatabase db, DbEngine engine, Logger log)
{
    if (_versions.Count == 0) return;

    var dbparams = engine.GetDbParam();
    var updated = false;

    // filter inside the loop: each applied action advances dbparams.DbVersion,
    // and the check must see that updated value on every iteration (this is
    // exactly what the deferred LINQ Where query did in the original form)
    foreach (var version in _versions)
    {
        if (version.Key <= dbparams.DbVersion) continue;

        log.Write(Logger.COMMAND, "update database version to {0}", version.Key);

        version.Value(db);

        dbparams.DbVersion = version.Key;
        updated = true;
    }

    // persist the new version number only when at least one action ran
    if (updated)
    {
        engine.SetParam(dbparams);
    }
}
/// <summary>
/// Bind this file-storage facade to the engine that performs all
/// underlying read/write operations.
/// </summary>
internal LiteFileStorage(DbEngine engine)
{
    _engine = engine;
}
/// <summary>
/// Copy database to another disk, compacting it in the process: rewrites
/// every collection (indexes first, then documents) into a temporary
/// database, then copies the compacted pages back over this data file
/// under journal protection so a crash mid-copy is recoverable.
/// </summary>
/// <returns>Number of bytes reclaimed (initial size minus final size).</returns>
public long Shrink()
{
    // begin a write exclusive access
    using (var trans = _transaction.Begin(false))
    {
        try
        {
            // create a temporary disk
            var tempDisk = _disk.GetTempDisk();

            // get initial disk size
            var header = _pager.GetPage<HeaderPage>(0);
            var diff = 0L;

            // create temp engine instance to copy all documents
            using (var tempEngine = new DbEngine(tempDisk, new Logger()))
            {
                tempDisk.Open(false);

                // read all collections
                foreach (var col in _collections.GetAll())
                {
                    // first copy all indexes
                    foreach (var index in col.GetIndexes(false))
                    {
                        tempEngine.EnsureIndex(col.CollectionName, index.Field, index.Options);
                    }

                    // then, read all documents and copy to new engine
                    var nodes = _indexer.FindAll(col.PK, Query.Ascending);

                    tempEngine.Insert(col.CollectionName, nodes.Select(node => BsonSerializer.Deserialize(_data.Read(node.DataBlock))));

                    // then re-open the disk service as the previous Insert's auto-transaction just closed it.
                    tempDisk.Open(false);
                }

                // get final header from temp engine
                var tempHeader = tempEngine._pager.GetPage<HeaderPage>(0, true);

                // copy info from initial header to final header
                tempHeader.ChangeID = header.ChangeID;

                // lets create journal file before re-write
                for (uint pageID = 0; pageID <= header.LastPageID; pageID++)
                {
                    _disk.WriteJournal(pageID, _disk.ReadPage(pageID));
                }

                // commit journal + shrink data file
                _disk.SetLength(BasePage.GetSizeOfPages(tempHeader.LastPageID + 1));

                // lets re-write all pages copying from new database
                for (uint pageID = 0; pageID <= tempHeader.LastPageID; pageID++)
                {
                    _disk.WritePage(pageID, tempDisk.ReadPage(pageID));
                }

                // now delete journal
                _disk.DeleteJournal();

                // get diff from initial and final last pageID
                // NOTE(review): assumes header.LastPageID >= tempHeader.LastPageID (a
                // shrink never grows the file); uint subtraction would wrap otherwise — confirm
                diff = BasePage.GetSizeOfPages(header.LastPageID - tempHeader.LastPageID);

                tempDisk.Close();
            }

            // unlock disk and clear cache to continue
            trans.Commit();

            // delete temporary disk
            _disk.DeleteTempDisk();

            return(diff);
        }
        catch (Exception ex)
        {
            // log, undo any partial work, and rethrow preserving the stack trace
            _log.Write(Logger.ERROR, ex.Message);
            trans.Rollback();
            throw;
        }
    }
}
/// <summary>
/// Copy database to another disk, compacting it in the process: rewrites
/// every collection (indexes first, then documents) into a temporary
/// database, then copies the compacted pages back over this data file
/// under journal protection so a crash mid-copy is recoverable.
/// </summary>
/// <returns>Number of bytes reclaimed (initial size minus final size).</returns>
public int Shrink()
{
    lock (_locker)
    {
        // lock and clear cache - no changes during shrink
        // NOTE(review): if an exception escapes below, _disk stays locked (no
        // try/finally around Unlock) — confirm whether that is intentional
        _disk.Lock();
        _cache.Clear();

        // create a temporary disk
        var tempDisk = _disk.GetTempDisk();

        // get initial disk size
        var header = _pager.GetPage<HeaderPage>(0);
        var diff = 0;

        // create temp engine instance to copy all documents
        using (var tempEngine = new DbEngine(tempDisk, new Logger()))
        {
            // read all collections
            foreach (var col in _collections.GetAll())
            {
                // first copy all indexes
                foreach (var index in col.GetIndexes(false))
                {
                    tempEngine.EnsureIndex(col.CollectionName, index.Field, index.Options);
                }

                // then, read all documents and copy to new engine
                var nodes = _indexer.FindAll(col.PK, Query.Ascending);

                tempEngine.Insert(col.CollectionName, nodes.Select(node => BsonSerializer.Deserialize(_data.Read(node.DataBlock, true).Buffer)));
            }

            // get final header from temp engine
            var tempHeader = tempEngine._pager.GetPage<HeaderPage>(0, true);

            // copy info from initial header to final header
            tempHeader.ChangeID = header.ChangeID;

            // lets create journal file before re-write
            for (uint pageID = 0; pageID <= header.LastPageID; pageID++)
            {
                _disk.WriteJournal(pageID, _disk.ReadPage(pageID));
            }

            // commit journal + shrink data file
            _disk.SetLength((tempHeader.LastPageID + 1) * BasePage.PAGE_SIZE);

            // lets re-write all pages copying from new database
            for (uint pageID = 0; pageID <= tempHeader.LastPageID; pageID++)
            {
                _disk.WritePage(pageID, tempDisk.ReadPage(pageID));
            }

            // now delete journal
            _disk.DeleteJournal();

            // get diff from initial and final last pageID
            // NOTE(review): the (int) cast can overflow when more than ~2 GB is
            // reclaimed; a long return (see the other Shrink overload) avoids this — confirm
            diff = (int)((header.LastPageID - tempHeader.LastPageID) * BasePage.PAGE_SIZE);
        }

        // unlock disk and clear cache to continue
        _disk.Unlock();
        _cache.Clear();

        // delete temporary disk
        _disk.DeleteTempDisk();

        return diff;
    }
}
/// <summary>
/// Copy database to another disk, compacting it in the process: rewrites
/// every collection (indexes first, then documents) into a temporary
/// database, then copies the compacted pages back over this data file
/// under journal protection so a crash mid-copy is recoverable.
/// </summary>
/// <returns>Number of bytes reclaimed (initial size minus final size).</returns>
public long Shrink()
{
    lock (_locker)
    {
        // lock and clear cache - no changes during shrink
        // NOTE(review): if an exception escapes below, _disk stays locked (no
        // try/finally around Unlock) — confirm whether that is intentional
        _disk.Lock();
        _cache.Clear();

        // create a temporary disk
        var tempDisk = _disk.GetTempDisk();

        // get initial disk size
        var header = _pager.GetPage<HeaderPage>(0);
        var diff = 0L;

        // create temp engine instance to copy all documents
        using (var tempEngine = new DbEngine(tempDisk, new Logger()))
        {
            // read all collections
            foreach (var col in _collections.GetAll())
            {
                // first copy all indexes
                foreach (var index in col.GetIndexes(false))
                {
                    tempEngine.EnsureIndex(col.CollectionName, index.Field, index.Options);
                }

                // then, read all documents and copy to new engine
                var nodes = _indexer.FindAll(col.PK, Query.Ascending);

                tempEngine.Insert(col.CollectionName, nodes.Select(node => BsonSerializer.Deserialize(_data.Read(node.DataBlock))));
            }

            // get final header from temp engine
            var tempHeader = tempEngine._pager.GetPage<HeaderPage>(0, true);

            // copy info from initial header to final header
            tempHeader.ChangeID = header.ChangeID;

            // lets create journal file before re-write
            for (uint pageID = 0; pageID <= header.LastPageID; pageID++)
            {
                _disk.WriteJournal(pageID, _disk.ReadPage(pageID));
            }

            // commit journal + shrink data file
            _disk.SetLength(BasePage.GetSizeOfPages(tempHeader.LastPageID + 1));

            // lets re-write all pages copying from new database
            for (uint pageID = 0; pageID <= tempHeader.LastPageID; pageID++)
            {
                _disk.WritePage(pageID, tempDisk.ReadPage(pageID));
            }

            // now delete journal
            _disk.DeleteJournal();

            // get diff from initial and final last pageID
            // NOTE(review): assumes header.LastPageID >= tempHeader.LastPageID (a
            // shrink never grows the file); uint subtraction would wrap otherwise — confirm
            diff = BasePage.GetSizeOfPages(header.LastPageID - tempHeader.LastPageID);
        }

        // unlock disk and clear cache to continue
        _disk.Unlock();
        _cache.Clear();

        // delete temporary disk
        _disk.DeleteTempDisk();

        return(diff);
    }
}
/// <summary>
/// Bind this file-storage facade to the engine that performs all
/// underlying read/write operations.
/// </summary>
public LiteFileStorage(DbEngine engine)
{
    _engine = engine;
}