/// <summary>
/// Upgrade datafile from v6 to new v7 format used in LiteDB 3
/// </summary>
public static bool Upgrade(string filename, string password = null, bool backup = true, int batchSize = 5000)
{
    // nothing to upgrade when the source file does not exist
    if (!File.Exists(filename)) return false;

    // converted data is written into a temp file first
    var tempFile = FileHelper.GetTempFile(filename);

    // open the original file read-only and probe it for the v6 format
    using (var stream = new FileStream(filename, FileMode.Open, FileAccess.Read))
    {
        IDbReader reader = new LiteDB_V6.DbReader();

        if (reader.Initialize(stream, password) == false) return false;

        // create the new-format datafile and copy everything across
        using (var engine = new LiteEngine(tempFile, false))
        {
            foreach (var col in reader.GetCollections())
            {
                // re-create every index before inserting documents
                foreach (var index in reader.GetIndexes(col))
                {
                    engine.EnsureIndex(col, index.Key, index.Value);
                }

                // insert documents in batches; the rollback after each
                // batch just clears cached pages
                foreach (var batch in reader.GetDocuments(col).Batch(batchSize))
                {
                    engine.Insert(col, batch);

                    engine.Rollback();
                }
            }
        }
    }

    // keep the original as a "-bkp" file, or discard it
    if (backup)
    {
        File.Move(filename, FileHelper.GetTempFile(filename, "-bkp"));
    }
    else
    {
        File.Delete(filename);
    }

    // promote the converted temp file to the original filename
    File.Move(tempFile, filename);

    return true;
}
/// <summary>
/// Open/Create new file storage and returns linked Stream to write operations
/// </summary>
public LiteFileStream OpenWrite(string id, string filename, BsonDocument metadata = null)
{
    var file = this.FindById(id);

    // create and persist the file entry when it does not exist yet
    if (file == null)
    {
        file = new LiteFileInfo(_engine, id, filename ?? id);

        _engine.Insert(FILES, file.AsDocument);
    }

    // replace metadata on the in-memory entry when the caller supplied one
    if (metadata != null) file.Metadata = metadata;

    return file.OpenWrite();
}
/// <summary>
/// Insert documents into a collection, keeping the database open only
/// for the duration of the call.
/// </summary>
public int Insert(string collection, IEnumerable<BsonDocument> docs, BsonAutoId autoId)
{
    this.OpenDatabase();

    try
    {
        // delegate to the underlying engine while the database is open
        return _engine.Insert(collection, docs, autoId);
    }
    finally
    {
        // always close again, even when the insert throws
        this.CloseDatabase();
    }
}
/// <summary>
/// Reduce disk size re-arranging unused spaces. Can change password. If temporary disk was not provided, use MemoryStream temp disk
/// </summary>
public long Shrink(string password = null, IDiskService temp = null)
{
    // remember the starting size so the caller gets back the bytes reclaimed
    var originalSize = _disk.FileLength;

    // if temp disk are not passed, use memory stream disk
    temp = temp ?? new StreamDiskService(new MemoryStream());

    // take Reserved then Exclusive lock — the names indicate no other
    // reader/writer may touch the datafile while pages are rewritten in place
    using (_locker.Reserved())
    using (_locker.Exclusive())
    using (var engine = new LiteEngine(temp, password))
    {
        // copy every collection into the temp engine; inserting into a fresh
        // engine re-packs pages, which is what reclaims the unused space
        foreach (var collectionName in this.GetCollectionNames())
        {
            // first create all user indexes (exclude _id index)
            foreach (var index in this.GetIndexes(collectionName).Where(x => x.Field != "_id"))
            {
                engine.EnsureIndex(collectionName, index.Field, index.Unique);
            }

            // copy all docs
            engine.Insert(collectionName, this.Find(collectionName, Query.All()));
        }

        // copy user version
        engine.UserVersion = this.UserVersion;

        // set current disk size to exact new disk usage (must happen before
        // the page copy below so the original file is truncated to fit)
        _disk.SetLength(temp.FileLength);

        // read new header page to start copy
        var header = BasePage.ReadPage(temp.ReadPage(0)) as HeaderPage;

        // copy (as is) all pages from temp disk to original disk
        for (uint i = 0; i <= header.LastPageID; i++)
        {
            var page = temp.ReadPage(i);

            _disk.WritePage(i, page);
        }

        // create/destroy crypto class: a null password removes encryption,
        // a non-null one re-encrypts with the new header salt
        _crypto = password == null ? null : new AesEncryption(password, header.Salt);

        // initialize all services again (crypto can be changed)
        this.InitializeServices();
    }

    // return how many bytes are reduced
    return(originalSize - temp.FileLength);
}
/// <summary>
/// Reduce disk size re-arranging unused spaces. Can change password. If temporary disk was not provided, use MemoryStream temp disk
/// </summary>
public long Shrink(string password = null, IDiskService temp = null)
{
    // remember the starting size so the caller gets back the bytes reclaimed
    var originalSize = _disk.FileLength;

    // if temp disk are not passed, use memory stream disk
    temp = temp ?? new StreamDiskService(new MemoryStream());

    // hold the write lock for the whole operation — the original datafile
    // is rewritten in place below
    using(_locker.Write())
    using (var engine = new LiteEngine(temp, password))
    {
        // copy every collection into the temp engine; inserting into a fresh
        // engine re-packs pages, which is what reclaims the unused space
        foreach (var collectionName in this.GetCollectionNames())
        {
            // first create all user indexes (exclude _id index)
            foreach (var index in this.GetIndexes(collectionName).Where(x => x.Field != "_id"))
            {
                engine.EnsureIndex(collectionName, index.Field, index.Unique);
            }

            // copy all docs
            engine.Insert(collectionName, this.Find(collectionName, Query.All()));
        }

        // copy user version
        engine.UserVersion = this.UserVersion;

        // set current disk size to exact new disk usage (must happen before
        // the page copy below so the original file is truncated to fit)
        _disk.SetLength(temp.FileLength);

        // read new header page to start copy
        var header = BasePage.ReadPage(temp.ReadPage(0)) as HeaderPage;

        // copy (as is) all pages from temp disk to original disk
        for (uint i = 0; i <= header.LastPageID; i++)
        {
            var page = temp.ReadPage(i);

            _disk.WritePage(i, page);
        }

        // create/destroy crypto class: a null password removes encryption,
        // a non-null one re-encrypts with the new header salt
        _crypto = password == null ? null : new AesEncryption(password, header.Salt);

        // initialize all services again (crypto can be changed)
        this.InitializeServices();
    }

    // return how many bytes are reduced
    return originalSize - temp.FileLength;
}
/// <summary>
/// Upgrade datafile from v6 to new v7 format used in LiteDB 3
/// </summary>
public static bool Upgrade(string filename, string password = null, bool backup = true, int batchSize = 5000)
{
    // nothing to upgrade when the source file does not exist
    if (!File.Exists(filename))
    {
        return(false);
    }

    // converted data is written into a temp file first
    var tempFile = FileHelper.GetTempFile(filename);

    // open the original file read-only and probe it for the v6 format;
    // stacked usings dispose reader first, then stream (same as nesting)
    using (var stream = new FileStream(filename, System.IO.FileMode.Open, FileAccess.Read))
    using (IDbReader reader = new LiteDB_V6.DbReader())
    {
        if (reader.Initialize(stream, password) == false)
        {
            return(false);
        }

        // create the new-format datafile and copy everything across
        using (var engine = new LiteEngine(tempFile, false))
        {
            foreach (var col in reader.GetCollections())
            {
                // recreate every unique index before copying documents
                foreach (var field in reader.GetUniqueIndexes(col))
                {
                    engine.EnsureIndex(col, field, true);
                }

                // insert documents in batches; the rollback after each
                // batch just clears cached pages
                foreach (var batch in reader.GetDocuments(col).Batch(batchSize))
                {
                    engine.Insert(col, batch);

                    engine.Rollback();
                }
            }
        }
    }

    // keep the original as a "-bkp" file, or discard it
    if (backup)
    {
        File.Move(filename, FileHelper.GetTempFile(filename, "-bkp"));
    }
    else
    {
        File.Delete(filename);
    }

    // promote the converted temp file to the original filename
    File.Move(tempFile, filename);

    return(true);
}
/// <summary>
/// Try recovery data from current datafile into a new datafile.
/// </summary>
public static string Recovery(string filename)
{
    // if not exists, just exit
    if (!File.Exists(filename))
    {
        return("");
    }

    // log collects one line per page/document that could not be recovered
    var log = new StringBuilder();
    var newfilename = FileHelper.GetTempFile(filename, "-recovery", true);
    var count = 0;

    using (var olddb = new LiteEngine(filename))
    using (var newdb = new LiteEngine(newfilename, false))
    {
        // get header from old database (this must must be possible to read)
        var header = olddb._pager.GetPage<HeaderPage>(0);

        // map pageID -> collection name, used to route recovered documents
        var collections = RecoveryCollectionPages(olddb, header, log);

        // try recovery all data pages
        // NOTE(review): loop uses `i < header.LastPageID`, so the last page
        // itself is never scanned — looks like a possible off-by-one; confirm
        // against the page layout before changing
        for (uint i = 1; i < header.LastPageID; i++)
        {
            DataPage dataPage = null;

            try
            {
                var buffer = olddb._disk.ReadPage(i);

                // searching only for DataPage (PageType == 4, stored at byte offset 4)
                if (buffer[4] != 4)
                {
                    continue;
                }

                dataPage = BasePage.ReadPage(buffer) as DataPage;
            }
            catch (Exception ex)
            {
                // unreadable page: log and keep scanning the rest of the file
                log.AppendLine($"Page {i} (DataPage) Error: {ex.Message}");
                continue;
            }

            // try find collectionName using pageID map (use fixed name if not found)
            if (collections.TryGetValue(i, out var colname) == false)
            {
                colname = "_recovery";
            }

            foreach (var block in dataPage.DataBlocks)
            {
                try
                {
                    // read bytes
                    var bson = olddb._data.Read(block.Value.Position);

                    // deserialize as document
                    var doc = BsonSerializer.Deserialize(bson);

                    // and insert into new database
                    newdb.Insert(colname, doc);

                    count++;
                }
                catch (Exception ex)
                {
                    // corrupt document: log it and move on to the next block
                    log.AppendLine($"Document {block.Value.Position} Error: {ex.Message}");
                    continue;
                }
            }
        }
    }

    // summary line goes first so the caller sees the total immediately
    log.Insert(0, $"Document recovery count: {count}\n");

    return(log.ToString());
}