protected override void Configure(StorageEnvironmentOptions options)
{
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
    options.OnRecoveryError += (sender, args) => { }; // just shut it up
    options.ManualFlushing = true;
    options.MaxScratchBufferSize = 1 * 1024 * 1024 * 1024;
}
public TableStorage(StorageEnvironmentOptions options, IBufferPool bufferPool) { if (options == null) throw new ArgumentNullException("options"); _options = options; this.bufferPool = bufferPool; Debug.Assert(options != null); //#if DEBUG // var directoryOptions = options as StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions; // // string debugJournalName; // if (directoryOptions != null) // debugJournalName = directoryOptions.TempPath.Replace(Path.DirectorySeparatorChar, '_').Replace(':','_'); // else // debugJournalName = "InMemoryDebugJournal-" + Interlocked.Increment(ref debugJournalCount); // // env = new StorageEnvironment(options, debugJournalName) {IsDebugRecording = true}; //#else env = new StorageEnvironment(options); //#endif Initialize(); }
public static void Execute(StorageEnvironmentOptions srcOptions,
    StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions compactOptions,
    Action<CompactionProgress> progressReport = null)
{
    if (srcOptions.IncrementalBackupEnabled)
        throw new InvalidOperationException(CannotCompactBecauseOfIncrementalBackup);

    long minimalCompactedDataFileSize;

    srcOptions.ManualFlushing = true; // prevent flushing during compaction - we shouldn't touch any source files
    compactOptions.ManualFlushing = true; // let us flush manually during data copy

    using (var existingEnv = new StorageEnvironment(srcOptions))
    using (var compactedEnv = new StorageEnvironment(compactOptions))
    {
        CopyTrees(existingEnv, compactedEnv, progressReport);

        compactedEnv.FlushLogToDataFile(allowToFlushOverwrittenPages: true);

        compactedEnv.Journal.Applicator.SyncDataFile(compactedEnv.OldestTransaction);
        compactedEnv.Journal.Applicator.DeleteCurrentAlreadyFlushedJournal();

        minimalCompactedDataFileSize = compactedEnv.NextPageNumber * AbstractPager.PageSize;
    }

    using (var compactedDataFile = new FileStream(Path.Combine(compactOptions.BasePath, Constants.DatabaseFilename), FileMode.Open, FileAccess.ReadWrite))
    {
        compactedDataFile.SetLength(minimalCompactedDataFileSize);
    }
}
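// A minimal sketch of how Execute might be invoked, mirroring the compaction call
// sites later in this collection. The directory names and the progress handler are
// illustrative, not part of the original API surface.
var srcOptions = StorageEnvironmentOptions.ForPath("SourceData");
var dstOptions = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath("CompactedData");
StorageCompaction.Execute(srcOptions, dstOptions,
    x => Console.WriteLine("Copied {0} of {1} records in '{2}' tree. Copied {3} of {4} trees.",
        x.CopiedTreeRecords, x.TotalTreeRecordsCount, x.TreeName, x.CopiedTrees, x.TotalTreeCount));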
protected StorageEnvironmentOptions ModifyOptions(StorageEnvironmentOptions options) { options.MaxLogFileSize = 1000 * AbstractPager.PageSize; options.IncrementalBackupEnabled = true; options.ManualFlushing = true; return options; }
protected void RequireFileBasedPager() { if (_storageEnvironment != null) throw new InvalidOperationException("Too late"); if (_options is StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions) return; DeleteDirectory("test.data"); _options = StorageEnvironmentOptions.ForPath("test.data"); Configure(_options); }
public TableStorage(StorageEnvironmentOptions options, IBufferPool bufferPool) { if (options == null) throw new ArgumentNullException("options"); _options = options; this.bufferPool = bufferPool; Debug.Assert(options != null); env = new StorageEnvironment(options); Initialize(); }
public void Restore(StorageEnvironmentOptions options, IEnumerable<string> backupPaths) { var ownsPagers = options.OwnsPagers; options.OwnsPagers = false; using (var env = new StorageEnvironment(options)) { foreach (var backupPath in backupPaths) { Restore(env, backupPath); } } options.OwnsPagers = ownsPagers; }
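// A hedged usage sketch for the Restore method above: restore a chain of backups,
// oldest first. The BackupMethods.Incremental entry point mirrors the incremental
// backup test later in this collection; the file names here are illustrative.
var restoreOptions = StorageEnvironmentOptions.ForPath("RestoredData");
BackupMethods.Incremental.Restore(restoreOptions, new[]
{
    "backup.0.voron-incremental",
    "backup.1.voron-incremental"
});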
internal static void CopyHeaders(CompressionLevel compression, ZipArchive package, DataCopier copier, StorageEnvironmentOptions storageEnvironmentOptions) { foreach (var headerFileName in HeaderAccessor.HeaderFileNames) { var header = stackalloc FileHeader[1]; if (!storageEnvironmentOptions.ReadHeader(headerFileName, header)) continue; var headerPart = package.CreateEntry(headerFileName, compression); Debug.Assert(headerPart != null); using (var headerStream = headerPart.Open()) { copier.ToStream((byte*)header, sizeof(FileHeader), headerStream); } } }
protected override void Configure(StorageEnvironmentOptions options) { options.MaxScratchBufferSize = 1024*1024*1; options.ManualFlushing = true; }
protected override void Configure(StorageEnvironmentOptions options) { options.MaxLogFileSize = 1000 * AbstractPager.PageSize; options.IncrementalBackupEnabled = true; options.ManualFlushing = true; }
public void Initialize()
{
    _shutdownNotification = new CancellationTokenSource();

    AbstractLowMemoryNotification.Initialize(ServerShutdown, Configuration);

    if (_logger.IsInfoEnabled)
    {
        _logger.Info("Starting to open server store for " + (Configuration.Core.RunInMemory ? "<memory>" : Configuration.Core.DataDirectory));
    }

    var options = Configuration.Core.RunInMemory
        ? StorageEnvironmentOptions.CreateMemoryOnly(Configuration.Core.DataDirectory)
        : StorageEnvironmentOptions.ForPath(System.IO.Path.Combine(Configuration.Core.DataDirectory, "System"));

    options.SchemaVersion = 2;

    try
    {
        StorageEnvironment.MaxConcurrentFlushes = Configuration.Storage.MaxConcurrentFlushes;
        _env = new StorageEnvironment(options);
        using (var tx = _env.WriteTransaction())
        {
            tx.DeleteTree("items"); // note the different casing, we remove the old items tree
            _itemsSchema.Create(tx, "Items", 16);
            tx.Commit();
        }
        using (var tx = _env.ReadTransaction())
        {
            var table = tx.OpenTable(_itemsSchema, "Items");
            var itemsFromBackwards = table.SeekBackwardFrom(_itemsSchema.FixedSizeIndexes[EtagIndexName], long.MaxValue);
            var reader = itemsFromBackwards.FirstOrDefault();
            if (reader == null)
            {
                _lastEtag = 0;
            }
            else
            {
                int size;
                _lastEtag = Bits.SwapBytes(*(long*)reader.Read(3, out size));
            }
        }
    }
    catch (Exception e)
    {
        if (_logger.IsOperationsEnabled)
        {
            _logger.Operations("Could not open server store for " + (Configuration.Core.RunInMemory ? "<memory>" : Configuration.Core.DataDirectory), e);
        }
        options.Dispose();
        throw;
    }

    ContextPool = new TransactionContextPool(_env);
    _timer = new Timer(IdleOperations, null, _frequencyToCheckForIdleDatabases, TimeSpan.FromDays(7));
    Alerts.Initialize(_env, ContextPool);
    DatabaseInfoCache.Initialize(_env, ContextPool);
    LicenseStorage.Initialize(_env, ContextPool);
}
protected StorageTest() { DeleteDirectory("test.data"); _options = StorageEnvironmentOptions.CreateMemoryOnly(); Configure(_options); }
protected override void Configure(StorageEnvironmentOptions options)
{
    options.MaxScratchBufferSize = 1024 * 1024 * 12; // 12 MB
    options.MaxNumberOfPagesInJournalBeforeFlush = 96;
    options.ManualFlushing = true;
}
public void RecoverAndValidate(StorageEnvironmentOptions options) { while (ReadOneTransaction(options)) { } }
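// A sketch of how this recovery loop is typically driven (mirroring the journal
// recovery and restore code later in this collection). The pager variables are
// hypothetical stand-ins for an opened journal pager and a scratch recovery pager.
var reader = new JournalReader(journalPager, recoveryPager, 0, null);
reader.RecoverAndValidate(env.Options); // equivalent to draining ReadOneTransaction
var lastTxHeader = reader.LastTransactionHeader; // last committed transaction seen, if any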
// all tests here rely on the fact that one log file can contain at most 10 pages
protected override void Configure(StorageEnvironmentOptions options)
{
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
}
public void Record_debug_journal_and_replay_it()
{
    var structSchema = new StructureSchema<SampleStruct>()
        .Add<int>(SampleStruct.Foo)
        .Add<string>(SampleStruct.Bar);

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = new DebugJournal(debugJouralName, env, true);
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "test-tree");
            tx.Commit();
        }

        using (var writeBatch = new WriteBatch())
        {
            var valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("{ \"title\": \"foo\",\"name\":\"bar\"}"));
            writeBatch.Add("foo", valueBuffer, "test-tree");

            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2!"));
            writeBatch.Add("bar", valueBuffer, "test-tree");

            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2 3!"));
            writeBatch.Add("foo-bar", valueBuffer, "test-tree");

            writeBatch.MultiAdd("multi-foo", "AA", "test-tree");
            env.Writer.Write(writeBatch);
        }

        using (var writeBatch = new WriteBatch())
        {
            writeBatch.Increment("incr-key", 5, "test-tree");
            env.Writer.Write(writeBatch);
        }

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            Assert.Equal(5, tx.ReadTree("test-tree").Read("incr-key").Reader.ReadLittleEndianInt64());

            using (var writeBatch = new WriteBatch())
            {
                writeBatch.Increment("incr-key", 5, "test-tree");
                env.Writer.Write(writeBatch);
            }

            Assert.Equal(5, tx.ReadTree("test-tree").Read("incr-key").Reader.ReadLittleEndianInt64());
        }

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            Assert.Equal(10, tx.ReadTree("test-tree").Read("incr-key").Reader.ReadLittleEndianInt64());
        }

        using (var writeBatch = new WriteBatch())
        {
            writeBatch.MultiAdd("multi-foo", "BB", "test-tree");
            writeBatch.MultiAdd("multi-foo", "CC", "test-tree");
            writeBatch.Delete("foo-bar", "test-tree");
            env.Writer.Write(writeBatch);
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "test-tree2");
            tx.Commit();
        }

        using (var writeBatch = new WriteBatch())
        {
            var valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1!"));
            writeBatch.Add("foo", valueBuffer, "test-tree2");

            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2!"));
            writeBatch.Add("bar", valueBuffer, "test-tree2");

            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2 3!"));
            writeBatch.Add("foo-bar", valueBuffer, "test-tree2");

            env.Writer.Write(writeBatch);
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "structures-tree");
            tx.Commit();
        }

        using (var writeBatch = new WriteBatch())
        {
            writeBatch.AddStruct("structs/1",
                new Structure<SampleStruct>(structSchema)
                    .Set(SampleStruct.Foo, 13)
                    .Set(SampleStruct.Bar, "debug journal testing"),
                "structures-tree");
            env.Writer.Write(writeBatch);
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "rename-me");
            tx.Commit();
        }

        using (var writeBatch = new WriteBatch())
        {
            writeBatch.Add("item", "renaming tree test", "rename-me");
            env.Writer.Write(writeBatch);
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.RenameTree(tx, "rename-me", "renamed");
            tx.Commit();
        }
    }

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = DebugJournal.FromFile(debugJouralName, env);
        env.DebugJournal.Replay();

        using (var snapshot = env.CreateSnapshot())
        {
            Assert.Equal("{ \"title\": \"foo\",\"name\":\"bar\"}", snapshot.Read("test-tree", "foo").Reader.ToStringValue());
            Assert.Equal("testing testing 1 2!", snapshot.Read("test-tree", "bar").Reader.ToStringValue());
            Assert.Equal("testing testing 1!", snapshot.Read("test-tree2", "foo").Reader.ToStringValue());
            Assert.Equal("testing testing 1 2!", snapshot.Read("test-tree2", "bar").Reader.ToStringValue());
            Assert.Equal("testing testing 1 2 3!", snapshot.Read("test-tree2", "foo-bar").Reader.ToStringValue());
            Assert.Equal(10, snapshot.Read("test-tree", "incr-key").Reader.ReadLittleEndianInt64());
            Assert.Equal(0, snapshot.ReadVersion("test-tree", "foo-bar"));

            using (var iter = snapshot.MultiRead("test-tree", "multi-foo"))
            {
                iter.Seek(Slice.BeforeAllKeys);
                Assert.Equal("AA", iter.CurrentKey.ToString());
                Assert.DoesNotThrow(() => iter.MoveNext());
                Assert.Equal("BB", iter.CurrentKey.ToString());
                Assert.DoesNotThrow(() => iter.MoveNext());
                Assert.Equal("CC", iter.CurrentKey.ToString());
            }

            var structReader = snapshot.ReadStruct("structures-tree", "structs/1", structSchema).Reader;
            Assert.Equal(13, structReader.ReadInt(SampleStruct.Foo));
            Assert.Equal("debug journal testing", structReader.ReadString(SampleStruct.Bar));

            Assert.Equal("renaming tree test", snapshot.Read("renamed", "item").Reader.ToStringValue());
        }
    }
}
public void Record_debug_journal_and_replay_it_size_only()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = new DebugJournal(debugJouralName, env, true) { RecordOnlyValueLength = true };
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "test-tree");
            tx.Commit();
        }

        using (var writeBatch = new WriteBatch())
        {
            var valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("{ \"title\": \"foo\",\"name\":\"bar\"}"));
            writeBatch.Add("foo", valueBuffer, "test-tree");

            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2!"));
            writeBatch.Add("bar", valueBuffer, "test-tree");

            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2 3!"));
            writeBatch.Add("foo-bar", valueBuffer, "test-tree");

            writeBatch.MultiAdd("multi-foo", "AA", "test-tree");
            env.Writer.Write(writeBatch);
        }

        using (var writeBatch = new WriteBatch())
        {
            writeBatch.Increment("incr-key", 5, "test-tree");
            env.Writer.Write(writeBatch);
        }

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            Assert.Equal(5, tx.ReadTree("test-tree").Read("incr-key").Reader.ReadLittleEndianInt64());

            using (var writeBatch = new WriteBatch())
            {
                writeBatch.Increment("incr-key", 5, "test-tree");
                env.Writer.Write(writeBatch);
            }

            Assert.Equal(5, tx.ReadTree("test-tree").Read("incr-key").Reader.ReadLittleEndianInt64());
        }

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            Assert.Equal(10, tx.ReadTree("test-tree").Read("incr-key").Reader.ReadLittleEndianInt64());
        }

        using (var writeBatch = new WriteBatch())
        {
            writeBatch.MultiAdd("multi-foo", "BB", "test-tree");
            writeBatch.MultiAdd("multi-foo", "CC", "test-tree");
            writeBatch.Delete("foo-bar", "test-tree");
            env.Writer.Write(writeBatch);
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "test-tree2");
            tx.Commit();
        }

        using (var writeBatch = new WriteBatch())
        {
            var valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1!"));
            writeBatch.Add("foo", valueBuffer, "test-tree2");

            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2!"));
            writeBatch.Add("bar", valueBuffer, "test-tree2");

            valueBuffer = new MemoryStream(Encoding.UTF8.GetBytes("testing testing 1 2 3!"));
            writeBatch.Add("foo-bar", valueBuffer, "test-tree2");

            env.Writer.Write(writeBatch);
        }
    }

    using (var env = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        env.DebugJournal = DebugJournal.FromFile(debugJouralName, env, true);
        env.DebugJournal.Replay();

        using (var snapshot = env.CreateSnapshot())
        {
            Assert.Equal("{ \"title\": \"foo\",\"name\":\"bar\"}".Length, snapshot.Read("test-tree", "foo").Reader.Length);
            Assert.Equal("testing testing 1 2!".Length, snapshot.Read("test-tree", "bar").Reader.Length);
            Assert.Equal("testing testing 1!".Length, snapshot.Read("test-tree2", "foo").Reader.Length);
            Assert.Equal("testing testing 1 2!".Length, snapshot.Read("test-tree2", "bar").Reader.Length);
            Assert.Equal("testing testing 1 2 3!".Length, snapshot.Read("test-tree2", "foo-bar").Reader.Length);
            Assert.Equal(10, snapshot.Read("test-tree", "incr-key").Reader.ReadLittleEndianInt64());
            Assert.Equal(0, snapshot.ReadVersion("test-tree", "foo-bar"));

            using (var iter = snapshot.MultiRead("test-tree", "multi-foo"))
            {
                iter.Seek(Slice.BeforeAllKeys);
                Assert.Equal("AA", iter.CurrentKey.ToString());
                Assert.DoesNotThrow(() => iter.MoveNext());
                Assert.Equal("BB", iter.CurrentKey.ToString());
                Assert.DoesNotThrow(() => iter.MoveNext());
                Assert.Equal("CC", iter.CurrentKey.ToString());
            }
        }
    }
}
public void StorageEnvironment_Two_Different_Tx_Should_be_shipped_properly1()
{
    var transactionsToShip = new ConcurrentQueue<TransactionToShip>();
    Env.Journal.OnTransactionCommit += tx =>
    {
        tx.CreatePagesSnapshot();
        transactionsToShip.Enqueue(tx);
    };

    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        var tree = Env.CreateTree(tx, "TestTree");
        tree.Add("ABC", "Foo");
        tx.Commit();
    }

    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        var tree = Env.CreateTree(tx, "TestTree2");
        tree.Add("ABC", "Foo");
        tx.Commit();
    }

    using (var shippingDestinationEnv = new StorageEnvironment(StorageEnvironmentOptions.CreateMemoryOnly()))
    {
        TransactionToShip tx;
        transactionsToShip.TryDequeue(out tx);
        shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot);

        using (var snapshot = shippingDestinationEnv.CreateSnapshot())
        {
            Assert.DoesNotThrow(() => // if tree doesn't exist --> throws InvalidOperationException
            {
                var result = snapshot.Read("TestTree", "ABC");
                Assert.Equal(1, result.Version);
                Assert.Equal("Foo", result.Reader.ToStringValue());
            });
        }

        transactionsToShip.TryDequeue(out tx);
        shippingDestinationEnv.Journal.Shipper.ApplyShippedLog(tx.PagesSnapshot);

        using (var snapshot = shippingDestinationEnv.CreateSnapshot())
        {
            Assert.DoesNotThrow(() => // if tree doesn't exist --> throws InvalidOperationException
            {
                var result = snapshot.Read("TestTree", "ABC");
                Assert.Equal(1, result.Version);
                Assert.Equal("Foo", result.Reader.ToStringValue());
            });

            Assert.DoesNotThrow(() => // if tree doesn't exist --> throws InvalidOperationException
            {
                var result = snapshot.Read("TestTree2", "ABC");
                Assert.Equal(1, result.Version);
                Assert.Equal("Foo", result.Reader.ToStringValue());
            });
        }
    }
}
public LogShipping() : base(StorageEnvironmentOptions.CreateMemoryOnly()) { }
public Windows32BitsMemoryMapPager(StorageEnvironmentOptions options, VoronPathSetting file, long? initialFileSize = null,
    Win32NativeFileAttributes fileAttributes = Win32NativeFileAttributes.Normal,
    Win32NativeFileAccess access = Win32NativeFileAccess.GenericRead | Win32NativeFileAccess.GenericWrite,
    bool usePageProtection = false)
    : base(options, canPrefetchAhead: false, usePageProtection: usePageProtection)
{
    _memoryMappedFileAccess = access == Win32NativeFileAccess.GenericRead
        ? MemoryMappedFileAccess.Read
        : MemoryMappedFileAccess.ReadWrite;

    _mmFileAccessType = access == Win32NativeFileAccess.GenericRead
        ? NativeFileMapAccessType.Read
        : NativeFileMapAccessType.Read | NativeFileMapAccessType.Write;

    FileName = file;

    if (Options.CopyOnWriteMode)
    {
        ThrowNotSupportedOption(file.FullPath);
    }

    _fileAttributes = fileAttributes;
    _handle = CreateFile(file.FullPath, access,
        Win32NativeFileShare.Read | Win32NativeFileShare.Write | Win32NativeFileShare.Delete, IntPtr.Zero,
        Win32NativeFileCreationDisposition.OpenAlways, fileAttributes, IntPtr.Zero);

    if (_handle.IsInvalid)
    {
        var lastWin32ErrorCode = Marshal.GetLastWin32Error();
        throw new IOException("Failed to open file storage of Windows32BitsMemoryMapPager for " + file,
            new Win32Exception(lastWin32ErrorCode));
    }

    _fileInfo = new FileInfo(file.FullPath);

    var streamAccessType = access == Win32NativeFileAccess.GenericRead ? FileAccess.Read : FileAccess.ReadWrite;
    _fileStream = SafeFileStream.Create(_handle, streamAccessType);

    _totalAllocationSize = _fileInfo.Length;

    if ((access & Win32NativeFileAccess.GenericWrite) == Win32NativeFileAccess.GenericWrite ||
        (access & Win32NativeFileAccess.GenericAll) == Win32NativeFileAccess.GenericAll ||
        (access & Win32NativeFileAccess.FILE_GENERIC_WRITE) == Win32NativeFileAccess.FILE_GENERIC_WRITE)
    {
        var fileLength = _fileStream.Length;
        if ((fileLength == 0) && initialFileSize.HasValue)
        {
            fileLength = initialFileSize.Value;
        }

        if ((_fileStream.Length == 0) || (fileLength % AllocationGranularity != 0))
        {
            fileLength = NearestSizeToAllocationGranularity(fileLength);
            SetFileLength(_handle, fileLength, file.FullPath);
        }

        _totalAllocationSize = fileLength;
    }

    NumberOfAllocatedPages = _totalAllocationSize / Constants.Storage.PageSize;

    SetPagerState(CreatePagerState());
}
protected override void Configure(StorageEnvironmentOptions options) { options.MaxStorageSize = 1024 * 1024 * 1; // 1MB }
private unsafe bool TryDecompressTransactionPages(StorageEnvironmentOptions options, TransactionHeader* current, byte* dataPage) { try { LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true); } catch (Exception e) { options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e); RequireHeaderUpdate = true; return false; } return true; }
private static void Backup(StorageEnvironment env, CompressionLevel compression, Action<string> infoNotify,
    Action backupStarted, AbstractPager dataPager, ZipArchive package, string basePath, DataCopier copier)
{
    var usedJournals = new List<JournalFile>();
    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;
    LowLevelTransaction txr = null;
    var backupSuccess = false;
    try
    {
        long allocatedPages;
        var writePersistentContext = new TransactionPersistentContext(true);
        var readPersistentContext = new TransactionPersistentContext(true);
        using (var txw = env.NewLowLevelTransaction(writePersistentContext, TransactionFlags.ReadWrite)) // so we can snapshot the headers safely
        {
            txr = env.NewLowLevelTransaction(readPersistentContext, TransactionFlags.Read); // now have snapshot view
            allocatedPages = dataPager.NumberOfAllocatedPages;

            Debug.Assert(HeaderAccessor.HeaderFileNames.Length == 2);
            infoNotify("Voron copy headers for " + basePath);
            VoronBackupUtil.CopyHeaders(compression, package, copier, env.Options, basePath);

            // journal files snapshot
            var files = env.Journal.Files; // thread safety copy

            JournalInfo journalInfo = env.HeaderAccessor.Get(ptr => ptr->Journal);
            for (var journalNum = journalInfo.CurrentJournal - journalInfo.JournalFilesCount + 1; journalNum <= journalInfo.CurrentJournal; journalNum++)
            {
                var journalFile = files.FirstOrDefault(x => x.Number == journalNum); // first check journal files currently being in use
                if (journalFile == null)
                {
                    long journalSize;
                    using (var pager = env.Options.OpenJournalPager(journalNum))
                    {
                        journalSize = Bits.NextPowerOf2(pager.NumberOfAllocatedPages * Constants.Storage.PageSize);
                    }

                    journalFile = new JournalFile(env, env.Options.CreateJournalWriter(journalNum, journalSize), journalNum);
                }

                journalFile.AddRef();
                usedJournals.Add(journalFile);
            }

            if (env.Journal.CurrentFile != null)
            {
                lastWrittenLogFile = env.Journal.CurrentFile.Number;
                lastWrittenLogPage = env.Journal.CurrentFile.WritePosIn4KbPosition - 1;
            }

            // txw.Commit(); intentionally not committing
        }

        backupStarted?.Invoke();

        // data file backup
        var dataPart = package.CreateEntry(Path.Combine(basePath, Constants.DatabaseFilename), compression);
        Debug.Assert(dataPart != null);

        if (allocatedPages > 0) // false only when the data file is still empty at backup start
        {
            using (var dataStream = dataPart.Open())
            {
                // now can copy everything else
                copier.ToStream(dataPager, 0, allocatedPages, dataStream);
            }
        }

        try
        {
            long lastBackedupJournal = 0;
            foreach (var journalFile in usedJournals)
            {
                var entryName = StorageEnvironmentOptions.JournalName(journalFile.Number);
                var journalPart = package.CreateEntry(Path.Combine(basePath, entryName), compression);

                Debug.Assert(journalPart != null);

                long pagesToCopy = journalFile.JournalWriter.NumberOfAllocated4Kb;
                if (journalFile.Number == lastWrittenLogFile)
                {
                    pagesToCopy = lastWrittenLogPage + 1;
                }

                using (var stream = journalPart.Open())
                {
                    copier.ToStream(env, journalFile, 0, pagesToCopy, stream);
                    infoNotify(string.Format("Voron copy journal file {0}", entryName));
                }

                lastBackedupJournal = journalFile.Number;
            }

            if (env.Options.IncrementalBackupEnabled)
            {
                env.HeaderAccessor.Modify(header =>
                {
                    header->IncrementalBackup.LastBackedUpJournal = lastBackedupJournal;

                    // since we backed-up everything, no need to start next incremental backup from the middle
                    header->IncrementalBackup.LastBackedUpJournalPage = -1;
                });
            }
            backupSuccess = true;
        }
        catch (Exception)
        {
            backupSuccess = false;
            throw;
        }
        finally
        {
            var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;
            foreach (var journalFile in usedJournals)
            {
                if (backupSuccess) // if backup succeeded we can remove journals
                {
                    if (journalFile.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                        journalFile.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file
                    {
                        journalFile.DeleteOnClose = true;
                    }
                }

                journalFile.Release();
            }
        }
    }
    finally
    {
        txr?.Dispose();
    }
}
private bool ValidatePagesCrc(StorageEnvironmentOptions options, int compressedPages, TransactionHeader* current) { uint crc = Crc.Value(_pager.AcquirePagePointer(_readingPage), 0, compressedPages * AbstractPager.PageSize); if (crc != current->Crc) { RequireHeaderUpdate = true; options.InvokeRecoveryError(this, "Invalid CRC signature for transaction " + current->TransactionId, null); return false; } return true; }
public override void Persist(TransactionOperationContext context, StorageEnvironmentOptions options) { throw new NotImplementedException(); }
public virtual void Dispose() { if (_storageEnvironment != null) _storageEnvironment.Dispose(); _options.Dispose(); DeleteDirectory("test.data"); _storageEnvironment = null; _options = null; GC.Collect(GC.MaxGeneration); GC.WaitForPendingFinalizers(); }
public void ByDefaultAllFilesShouldBeStoredInOneDirectory() { var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(DataDir); Assert.Equal(DataDir, options.BasePath.FullPath); Assert.True(options.TempPath.FullPath.StartsWith(options.BasePath.FullPath)); }
public void Compact(InMemoryRavenConfiguration ravenConfiguration, Action <string> output) { if (ravenConfiguration.RunInMemory) { throw new InvalidOperationException("Cannot compact in-memory running Voron storage"); } tableStorage.Dispose(); var sourcePath = ravenConfiguration.DataDirectory; var compactPath = Path.Combine(ravenConfiguration.DataDirectory, "Voron.Compaction"); if (Directory.Exists(compactPath)) { Directory.Delete(compactPath, true); } RecoverFromFailedCompact(sourcePath); var sourceOptions = CreateStorageOptionsFromConfiguration(ravenConfiguration); var compactOptions = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(compactPath); output("Executing storage compaction"); StorageCompaction.Execute(sourceOptions, compactOptions, x => output(string.Format("Copied {0} of {1} records in '{2}' tree. Copied {3} of {4} trees.", x.CopiedTreeRecords, x.TotalTreeRecordsCount, x.TreeName, x.CopiedTrees, x.TotalTreeCount))); var sourceDir = new DirectoryInfo(sourcePath); var sourceFiles = new List <FileInfo>(); foreach (var pattern in new [] { "*.journal", "headers.one", "headers.two", VoronConstants.DatabaseFilename }) { sourceFiles.AddRange(sourceDir.GetFiles(pattern)); } var compactionBackup = Path.Combine(sourcePath, "Voron.Compaction.Backup"); if (Directory.Exists(compactionBackup)) { Directory.Delete(compactionBackup, true); output("Removing existing compaction backup directory"); } Directory.CreateDirectory(compactionBackup); output("Backing up original data files"); foreach (var file in sourceFiles) { File.Move(file.FullName, Path.Combine(compactionBackup, file.Name)); } var compactedFiles = new DirectoryInfo(compactPath).GetFiles(); output("Moving compacted files into target location"); foreach (var file in compactedFiles) { File.Move(file.FullName, Path.Combine(sourcePath, file.Name)); } output("Deleting original data backup"); Directory.Delete(compactionBackup, true); Directory.Delete(compactPath, true); }
public void TemporaryPathTest() { var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(DataDir, DataDir + "Temp", null, null, null); Assert.Equal(DataDir, options.BasePath.FullPath); Assert.Equal(DataDir + "Temp", options.TempPath.FullPath); }
public void IncrementalBackupShouldCopyJustNewPagesSinceLastBackup() { RequireFileBasedPager(); var random = new Random(); var buffer = new byte[100]; random.NextBytes(buffer); using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { for (int i = 0; i < 5; i++) { tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); } tx.Commit(); } var usedPagesInJournal = Env.Journal.CurrentFile.WritePagePosition; var backedUpPages = BackupMethods.Incremental.ToFile(Env, _incrementalBackupFile(0)); Assert.Equal(usedPagesInJournal, backedUpPages); var writePos = Env.Journal.CurrentFile.WritePagePosition; using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite)) { for (int i = 5; i < 10; i++) { tx.State.Root.Add(tx, "items/" + i, new MemoryStream(buffer)); } tx.Commit(); } var usedByLastTransaction = Env.Journal.CurrentFile.WritePagePosition - writePos; backedUpPages = BackupMethods.Incremental.ToFile(Env, _incrementalBackupFile(1)); Assert.Equal(usedByLastTransaction, backedUpPages); var options = StorageEnvironmentOptions.ForPath(_restoredStoragePath); options.MaxLogFileSize = Env.Options.MaxLogFileSize; BackupMethods.Incremental.Restore(options, new[] { _incrementalBackupFile(0), _incrementalBackupFile(1) }); using (var env = new StorageEnvironment(options)) { using (var tx = env.NewTransaction(TransactionFlags.Read)) { for (int i = 0; i < 10; i++) { var readResult = tx.State.Root.Read(tx, "items/" + i); Assert.NotNull(readResult); var memoryStream = new MemoryStream(); readResult.Reader.CopyTo(memoryStream); Assert.Equal(memoryStream.ToArray(), buffer); } } } }
public void DefaultScratchLocation() { var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(DataDir); using (var env = new StorageEnvironment(options)) { var scratchFile = Path.Combine(env.Options.TempPath.FullPath, StorageEnvironmentOptions.ScratchBufferName(0)); Assert.True(File.Exists(scratchFile)); } }
public void ShouldReportProgress() { using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData))) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { var tree = env.CreateTree(tx, "fruits"); tree.Add("apple", new byte[123]); tree.Add("orange", new byte[99]); var tree2 = env.CreateTree(tx, "vegetables"); tree2.Add("carrot", new byte[123]); tree2.Add("potato", new byte[99]); var tree3 = env.CreateTree(tx, "multi"); tree3.MultiAdd("fruits", "apple"); tree3.MultiAdd("fruits", "orange"); tree3.MultiAdd("vegetables", "carrot"); tree3.MultiAdd("vegetables", "carrot"); tx.Commit(); } } var progressReport = new List <string>(); StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData), (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData), x => progressReport.Add(string.Format("Copied {0} of {1} records in '{2}' tree. Copied {3} of {4} trees.", x.CopiedTreeRecords, x.TotalTreeRecordsCount, x.TreeName, x.CopiedTrees, x.TotalTreeCount))); Assert.NotEmpty(progressReport); Assert.Contains("Copied 0 of 2 records in 'fruits' tree. Copied 0 of 3 trees.", progressReport); Assert.Contains("Copied 2 of 2 records in 'fruits' tree. Copied 1 of 3 trees.", progressReport); Assert.Contains("Copied 0 of 2 records in 'multi' tree. Copied 1 of 3 trees.", progressReport); Assert.Contains("Copied 2 of 2 records in 'multi' tree. Copied 2 of 3 trees.", progressReport); Assert.Contains("Copied 0 of 2 records in 'vegetables' tree. Copied 2 of 3 trees.", progressReport); Assert.Contains("Copied 2 of 2 records in 'vegetables' tree. Copied 3 of 3 trees.", progressReport); }
public void ScratchLocationWithTemporaryPathSpecified() { var options = (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(DataDir, DataDir + "Temp", null, null, null); using (var env = new StorageEnvironment(options)) { var scratchFile = Path.Combine(DataDir, StorageEnvironmentOptions.ScratchBufferName(0)); var scratchFileTemp = Path.Combine(DataDir + "Temp", StorageEnvironmentOptions.ScratchBufferName(0)); Assert.False(File.Exists(scratchFile)); Assert.True(File.Exists(scratchFileTemp)); } }
public static void OnDirectoryInitialize(StorageEnvironmentOptions options, DirectoryParameters parameters, Logger log)
{
    Process process = null;
    try
    {
        var journalPath = string.Empty;
        if (options is StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions dirOptions)
        {
            journalPath = dirOptions.JournalPath.FullPath;
        }

        var userArgs = parameters.OnDirectoryInitializeExecArguments ?? string.Empty;
        var args = $"{userArgs} {parameters.Type} {parameters.DatabaseName} " +
                   $"{CommandLineArgumentEscaper.EscapeSingleArg(options.BasePath.ToString())} " +
                   $"{CommandLineArgumentEscaper.EscapeSingleArg(options.TempPath.ToString())} " +
                   $"{CommandLineArgumentEscaper.EscapeSingleArg(journalPath)}";

        var startInfo = new ProcessStartInfo
        {
            FileName = parameters.OnDirectoryInitializeExec,
            Arguments = args,
            UseShellExecute = false,
            RedirectStandardOutput = true,
            RedirectStandardError = true,
            CreateNoWindow = true
        };

        var sw = Stopwatch.StartNew();

        try
        {
            process = Process.Start(startInfo);
        }
        catch (Exception e)
        {
            throw new InvalidOperationException($"Unable to execute '{parameters.OnDirectoryInitializeExec} {args}'. Failed to start process.", e);
        }

        var readStdOut = process.StandardOutput.ReadToEndAsync();
        var readErrors = process.StandardError.ReadToEndAsync();

        string GetStdError()
        {
            try
            {
                return readErrors.Result;
            }
            catch (Exception e)
            {
                return $"Unable to get stderr, got exception: {e}";
            }
        }

        string GetStdOut()
        {
            try
            {
                return readStdOut.Result;
            }
            catch (Exception e)
            {
                return $"Unable to get stdout, got exception: {e}";
            }
        }

        if (process.WaitForExit((int)parameters.OnDirectoryInitializeExecTimeout.TotalMilliseconds) == false)
        {
            process.Kill();
            throw new InvalidOperationException($"Unable to execute '{parameters.OnDirectoryInitializeExec} {args}', waited for {(int)parameters.OnDirectoryInitializeExecTimeout.TotalMilliseconds} ms but the process didn't exit. Output: {GetStdOut()}{Environment.NewLine}Errors: {GetStdError()}");
        }

        try
        {
            readStdOut.Wait(parameters.OnDirectoryInitializeExecTimeout);
            readErrors.Wait(parameters.OnDirectoryInitializeExecTimeout);
        }
        catch (Exception e)
        {
            throw new InvalidOperationException($"Unable to read redirected stderr and stdout when executing '{parameters.OnDirectoryInitializeExec} {args}'", e);
        }

        // Can have exit code 0 (success) but still get errors. We log the errors anyway.
        if (log.IsOperationsEnabled)
        {
            log.Operations($"Executing '{parameters.OnDirectoryInitializeExec} {args}' took {sw.ElapsedMilliseconds:#,#;;0} ms. Exit code: {process.ExitCode}{Environment.NewLine}Output: {GetStdOut()}{Environment.NewLine}Errors: {GetStdError()}{Environment.NewLine}");
        }

        if (process.ExitCode != 0)
        {
            throw new InvalidOperationException(
                $"Command or executable '{parameters.OnDirectoryInitializeExec} {args}' failed. Exit code: {process.ExitCode}{Environment.NewLine}Output: {GetStdOut()}{Environment.NewLine}Errors: {GetStdError()}{Environment.NewLine}");
        }
    }
    finally
    {
        process?.Dispose();
    }
}
public bool RecoverDatabase(TransactionHeader* txHeader)
{
    // note, we don't need to do any concurrency here, happens as a single threaded
    // fashion on db startup
    var requireHeaderUpdate = false;

    var logInfo = _headerAccessor.Get(ptr => ptr->Journal);

    if (logInfo.JournalFilesCount == 0)
    {
        _journalIndex = logInfo.LastSyncedJournal;
        return false;
    }

    var oldestLogFileStillInUse = logInfo.CurrentJournal - logInfo.JournalFilesCount + 1;
    if (_env.Options.IncrementalBackupEnabled == false)
    {
        // we want to check that we clean up old log files if they aren't needed
        // this is more just to be safe than anything else, they shouldn't be there.
        var unusedFiles = oldestLogFileStillInUse;
        while (true)
        {
            unusedFiles--;
            if (_env.Options.TryDeleteJournal(unusedFiles) == false)
                break;
        }
    }

    var lastSyncedTransactionId = logInfo.LastSyncedTransactionId;

    var journalFiles = new List<JournalFile>();
    long lastSyncedTxId = -1;
    long lastSyncedJournal = logInfo.LastSyncedJournal;
    uint lastShippedTxCrc = 0;
    for (var journalNumber = oldestLogFileStillInUse; journalNumber <= logInfo.CurrentJournal; journalNumber++)
    {
        using (var recoveryPager = _env.Options.CreateScratchPager(StorageEnvironmentOptions.JournalRecoveryName(journalNumber)))
        using (var pager = _env.Options.OpenJournalPager(journalNumber))
        {
            RecoverCurrentJournalSize(pager);

            var transactionHeader = txHeader->TransactionId == 0 ? null : txHeader;
            var journalReader = new JournalReader(pager, recoveryPager, lastSyncedTransactionId, transactionHeader);
            journalReader.RecoverAndValidate(_env.Options);

            var pagesToWrite = journalReader
                .TransactionPageTranslation
                .Select(kvp => recoveryPager.Read(kvp.Value.JournalPos))
                .OrderBy(x => x.PageNumber)
                .ToList();

            var lastReadHeaderPtr = journalReader.LastTransactionHeader;

            if (lastReadHeaderPtr != null)
            {
                if (pagesToWrite.Count > 0)
                    ApplyPagesToDataFileFromJournal(pagesToWrite);

                *txHeader = *lastReadHeaderPtr;
                lastSyncedTxId = txHeader->TransactionId;
                lastShippedTxCrc = txHeader->Crc;
                lastSyncedJournal = journalNumber;
            }

            if (journalReader.RequireHeaderUpdate || journalNumber == logInfo.CurrentJournal)
            {
                var jrnlWriter = _env.Options.CreateJournalWriter(journalNumber, pager.NumberOfAllocatedPages * AbstractPager.PageSize);
                var jrnlFile = new JournalFile(jrnlWriter, journalNumber);
                jrnlFile.InitFrom(journalReader);
                jrnlFile.AddRef(); // creator reference - write ahead log

                journalFiles.Add(jrnlFile);
            }

            if (journalReader.RequireHeaderUpdate) // this should prevent further loading of transactions
            {
                requireHeaderUpdate = true;
                break;
            }
        }
    }

    Shipper.SetPreviousTransaction(lastSyncedTxId, lastShippedTxCrc);

    _files = _files.AppendRange(journalFiles);

    Debug.Assert(lastSyncedTxId >= 0);
    Debug.Assert(lastSyncedJournal >= 0);

    _journalIndex = lastSyncedJournal;

    _headerAccessor.Modify(header =>
    {
        header->Journal.LastSyncedJournal = lastSyncedJournal;
        header->Journal.LastSyncedTransactionId = lastSyncedTxId;
        header->Journal.CurrentJournal = lastSyncedJournal;
        header->Journal.JournalFilesCount = _files.Count;
        header->IncrementalBackup.LastCreatedJournal = _journalIndex;
        header->PreviousTransactionCrc = lastShippedTxCrc;
    });

    CleanupInvalidJournalFiles(lastSyncedJournal);
    CleanupUnusedJournalFiles(oldestLogFileStillInUse, lastSyncedJournal);

    if (_files.Count > 0)
    {
        var lastFile = _files.Last();
        if (lastFile.AvailablePages >= 2) // it must have at least one page for the next transaction header and one page for data
            CurrentFile = lastFile;
    }

    return requireHeaderUpdate;
}
public void CompactionMustNotLooseAnyData() { var treeNames = new List <string>(); var multiValueTreeNames = new List <string>(); var random = new Random(); var value1 = new byte[random.Next(1024 * 1024 * 2)]; var value2 = new byte[random.Next(1024 * 1024 * 2)]; random.NextBytes(value1); random.NextBytes(value2); const int treeCount = 5; const int recordCount = 6; const int multiValueTreeCount = 7; const int multiValueRecordsCount = 4; const int multiValuesCount = 3; using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactionTestsData))) { for (int i = 0; i < treeCount; i++) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { string name = "tree/" + i; treeNames.Add(name); var tree = env.CreateTree(tx, name); for (int j = 0; j < recordCount; j++) { tree.Add(string.Format("{0}/items/{1}", name, j), j % 2 == 0 ? value1 : value2); } tx.Commit(); } } for (int i = 0; i < multiValueTreeCount; i++) { using (var tx = env.NewTransaction(TransactionFlags.ReadWrite)) { var name = "multiValueTree/" + i; multiValueTreeNames.Add(name); var tree = env.CreateTree(tx, name); for (int j = 0; j < multiValueRecordsCount; j++) { for (int k = 0; k < multiValuesCount; k++) { tree.MultiAdd("record/" + j, "value/" + k); } } tx.Commit(); } } } StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(CompactionTestsData), (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(CompactedData)); using (var compacted = new StorageEnvironment(StorageEnvironmentOptions.ForPath(CompactedData))) { using (var tx = compacted.NewTransaction(TransactionFlags.Read)) { foreach (var treeName in treeNames) { var tree = compacted.CreateTree(tx, treeName); for (int i = 0; i < recordCount; i++) { var readResult = tree.Read(string.Format("{0}/items/{1}", treeName, i)); Assert.NotNull(readResult); if (i % 2 == 0) { var readBytes = new byte[value1.Length]; readResult.Reader.Read(readBytes, 0, readBytes.Length); Assert.Equal(value1, readBytes); } else { var readBytes = new byte[value2.Length]; readResult.Reader.Read(readBytes, 0, readBytes.Length); Assert.Equal(value2, readBytes); } } } foreach (var treeName in multiValueTreeNames) { var tree = compacted.CreateTree(tx, treeName); for (int i = 0; i < multiValueRecordsCount; i++) { var multiRead = tree.MultiRead("record/" + i); Assert.True(multiRead.Seek(Slice.BeforeAllKeys)); int count = 0; do { Assert.Equal("value/" + count, multiRead.CurrentKey.ToString()); count++; } while (multiRead.MoveNext()); Assert.Equal(multiValuesCount, count); } } } } }
protected override void Configure(StorageEnvironmentOptions options)
{
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
    options.OnRecoveryError += (sender, args) => { }; // just shut it up
    options.ManualFlushing = true;
}
protected override void Configure(StorageEnvironmentOptions options) { options.MaxScratchBufferSize = 1024*1024*8; }
public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
{
    if (_readingPage >= _pager.NumberOfAllocatedPages)
        return false;

    if (MaxPageToRead != null && _readingPage >= MaxPageToRead.Value)
        return false;

    TransactionHeader* current;
    if (!TryReadAndValidateHeader(options, out current))
        return false;

    var transactionSize = GetNumberOfPagesFromSize(current->Compressed ? current->CompressedSize : current->UncompressedSize);

    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        LastTransactionHeader = current;
        _readingPage += transactionSize;
        return true; // skipping
    }

    if (checkCrc && !ValidatePagesCrc(options, transactionSize, current))
        return false;

    _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
    var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

    UnmanagedMemory.Set(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
    if (current->Compressed)
    {
        if (TryDecompressTransactionPages(options, current, dataPage) == false)
            return false;
    }
    else
    {
        Memory.Copy(dataPage, _pager.AcquirePagePointer(_readingPage), (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
    }

    var tempTransactionPageTranslation = new Dictionary<long, RecoveryPagePosition>();

    for (var i = 0; i < current->PageCount; i++)
    {
        Debug.Assert(_pager.Disposed == false);
        Debug.Assert(_recoveryPager.Disposed == false);

        var page = _recoveryPager.Read(_recoveryPage);

        var pagePosition = new RecoveryPagePosition
        {
            JournalPos = _recoveryPage,
            TransactionId = current->TransactionId
        };

        if (page.IsOverflow)
        {
            var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);

            pagePosition.IsOverflow = true;
            pagePosition.NumberOfOverflowPages = numOfPages;

            _recoveryPage += numOfPages;
        }
        else
        {
            _recoveryPage++;
        }

        tempTransactionPageTranslation[page.PageNumber] = pagePosition;
    }

    _readingPage += transactionSize;

    LastTransactionHeader = current;

    foreach (var pagePosition in tempTransactionPageTranslation)
    {
        _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;

        if (pagePosition.Value.IsOverflow)
        {
            Debug.Assert(pagePosition.Value.NumberOfOverflowPages != -1);

            for (int i = 1; i < pagePosition.Value.NumberOfOverflowPages; i++)
            {
                _transactionPageTranslation.Remove(pagePosition.Key + i);
            }
        }
    }

    return true;
}
private void Restore(StorageEnvironment env, string singleBackupFile)
{
    using (env.Journal.Applicator.TakeFlushingLock())
    {
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            using (env.Options.AllowManualFlushing())
            {
                env.FlushLogToDataFile(txw);
            }

            using (var package = ZipFile.Open(singleBackupFile, ZipArchiveMode.Read, System.Text.Encoding.UTF8))
            {
                if (package.Entries.Count == 0)
                    return;

                var toDispose = new List<IDisposable>();

                var tempDir = Directory.CreateDirectory(Path.GetTempPath() + Guid.NewGuid()).FullName;

                try
                {
                    TransactionHeader* lastTxHeader = null;
                    var pagesToWrite = new Dictionary<long, Page>();

                    long journalNumber = -1;
                    foreach (var entry in package.Entries)
                    {
                        switch (Path.GetExtension(entry.Name))
                        {
                            case ".merged-journal":
                            case ".journal":
                                var journalFileName = Path.Combine(tempDir, entry.Name);
                                using (var output = new FileStream(journalFileName, FileMode.Create))
                                using (var input = entry.Open())
                                {
                                    output.Position = output.Length;
                                    input.CopyTo(output);
                                }

                                var pager = env.Options.OpenPager(journalFileName);
                                toDispose.Add(pager);

                                if (long.TryParse(Path.GetFileNameWithoutExtension(entry.Name), out journalNumber) == false)
                                {
                                    throw new InvalidOperationException("Cannot parse journal file number");
                                }

                                var recoveryPager = env.Options.CreateScratchPager(Path.Combine(tempDir, StorageEnvironmentOptions.JournalRecoveryName(journalNumber)));
                                toDispose.Add(recoveryPager);

                                var reader = new JournalReader(pager, recoveryPager, 0, lastTxHeader);

                                while (reader.ReadOneTransaction(env.Options))
                                {
                                    lastTxHeader = reader.LastTransactionHeader;
                                }

                                foreach (var translation in reader.TransactionPageTranslation)
                                {
                                    var pageInJournal = translation.Value.JournalPos;
                                    var page = recoveryPager.Read(null, pageInJournal);
                                    pagesToWrite[translation.Key] = page;

                                    if (page.IsOverflow)
                                    {
                                        var numberOfOverflowPages = recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);

                                        for (int i = 1; i < numberOfOverflowPages; i++)
                                            pagesToWrite.Remove(translation.Key + i);
                                    }
                                }

                                break;

                            default:
                                throw new InvalidOperationException("Unknown file, cannot restore: " + entry);
                        }
                    }

                    var sortedPages = pagesToWrite.OrderBy(x => x.Key)
                        .Select(x => x.Value)
                        .ToList();

                    if (sortedPages.Count == 0)
                        return;

                    var last = sortedPages.Last();

                    env.Options.DataPager.EnsureContinuous(txw, last.PageNumber,
                        last.IsOverflow ? env.Options.DataPager.GetNumberOfOverflowPages(last.OverflowSize) : 1);

                    foreach (var page in sortedPages)
                    {
                        env.Options.DataPager.Write(page);
                    }

                    env.Options.DataPager.Sync();

                    var root = Tree.Open(txw, &lastTxHeader->Root);
                    var freeSpaceRoot = Tree.Open(txw, &lastTxHeader->FreeSpace);

                    freeSpaceRoot.Name = Constants.FreeSpaceTreeName;
                    freeSpaceRoot.IsFreeSpaceTree = true;
                    root.Name = Constants.RootTreeName;

                    txw.UpdateRootsIfNeeded(root, freeSpaceRoot);

                    txw.State.NextPageNumber = lastTxHeader->LastPageNumber + 1;

                    env.Journal.Clear(txw);

                    txw.Commit();

                    env.HeaderAccessor.Modify(header =>
                    {
                        header->TransactionId = lastTxHeader->TransactionId;
                        header->LastPageNumber = lastTxHeader->LastPageNumber;

                        header->Journal.LastSyncedJournal = journalNumber;
                        header->Journal.LastSyncedTransactionId = lastTxHeader->TransactionId;

                        header->Root = lastTxHeader->Root;
                        header->FreeSpace = lastTxHeader->FreeSpace;

                        header->Journal.CurrentJournal = journalNumber + 1;
                        header->Journal.JournalFilesCount = 0;
                    });
                }
                finally
                {
                    toDispose.ForEach(x => x.Dispose());

                    try
                    {
                        Directory.Delete(tempDir, true);
                    }
                    catch (Exception)
                    {
                        // just temp dir - ignore it
                    }
                }
            }
        }
    }
}
private bool TryReadAndValidateHeader(StorageEnvironmentOptions options, out TransactionHeader* current)
{
    current = (TransactionHeader*)_pager.Read(_readingPage).Base;
    if (current->HeaderMarker != Constants.TransactionHeaderMarker)
    {
        // not a transaction page,
        // if the header marker is zero, we are probably in the area at the end of the log file, and have no additional log records
        // to read from it. This can happen if the next transaction was too big to fit in the current log file. We stop reading
        // this log file and move to the next one.
        RequireHeaderUpdate = current->HeaderMarker != 0;
        if (RequireHeaderUpdate)
        {
            options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " header marker was set to garbage value, file is probably corrupted", null);
        }

        return false;
    }

    ValidateHeader(current, LastTransactionHeader);

    if (current->TxMarker.HasFlag(TransactionMarker.Commit) == false)
    {
        // uncommitted transaction, probably
        RequireHeaderUpdate = true;
        options.InvokeRecoveryError(this, "Transaction " + current->TransactionId + " was not committed", null);
        return false;
    }

    _readingPage++;
    return true;
}
public async Task OnDirectoryInitializeInMemoryTest()
{
    string script;
    IDictionary<string, string> customSettings = new ConcurrentDictionary<string, string>();

    var scriptFile = Path.Combine(Path.GetTempPath(), Path.ChangeExtension(Guid.NewGuid().ToString(), ".ps1"));
    var outputFile = Path.Combine(Path.GetTempPath(), Path.ChangeExtension(Guid.NewGuid().ToString(), ".txt"));

    if (PlatformDetails.RunningOnPosix)
    {
        customSettings[RavenConfiguration.GetKey(x => x.Storage.OnDirectoryInitializeExec)] = "bash";
        customSettings[RavenConfiguration.GetKey(x => x.Storage.OnDirectoryInitializeExecArguments)] = $"{scriptFile} {outputFile}";

        script = "#!/bin/bash\r\necho \"$2 $3 $4 $5 $6\" >> $1";
        File.WriteAllText(scriptFile, script);
        Process.Start("chmod", $"700 {scriptFile}");
    }
    else
    {
        customSettings[RavenConfiguration.GetKey(x => x.Storage.OnDirectoryInitializeExec)] = "powershell";
        customSettings[RavenConfiguration.GetKey(x => x.Storage.OnDirectoryInitializeExecArguments)] = $"{scriptFile} {outputFile}";

        script = @"
param([string]$userArg ,[string]$type, [string]$name, [string]$dataPath, [string]$tempPath, [string]$journalPath)
Add-Content $userArg ""$type $name $dataPath $tempPath $journalPath\r\n""
exit 0";
        File.WriteAllText(scriptFile, script);
    }

    UseNewLocalServer(customSettings: customSettings);

    // Create dummy storage env options so we can resolve all the different paths
    using (var options = StorageEnvironmentOptions.CreateMemoryOnly())
    {
        using (var store = GetDocumentStore())
        {
            store.Maintenance.Send(new CreateSampleDataOperation());

            // the database loads after all indexes are loaded
            var documentDatabase = await Server.ServerStore.DatabasesLandlord.TryGetOrCreateResourceStore(store.Database);

            var lines = File.ReadAllLines(outputFile);
            Assert.True(lines.Length == 6);
            Assert.True(lines[0].Contains($"{DirectoryExecUtils.EnvironmentType.System} {SystemDbName} {options.BasePath} {options.TempPath} {options.JournalPath}"));
            Assert.True(lines[1].Contains($"{DirectoryExecUtils.EnvironmentType.Configuration} {store.Database} {options.BasePath} {options.TempPath} {options.JournalPath}"));
            Assert.True(lines[2].Contains($"{DirectoryExecUtils.EnvironmentType.Database} {store.Database} {options.BasePath} {options.TempPath} {options.JournalPath}"));

            var indexes = documentDatabase.IndexStore.GetIndexes().ToArray();
            Assert.True(indexes.Length == 3);

            // The index order in the IndexStore doesn't match the order of storage env creation, and we need a one-to-one match.
            var matches = lines.ToList().GetRange(3, 3);

            foreach (var index in indexes)
            {
                var expected = $"{DirectoryExecUtils.EnvironmentType.Index} {store.Database} {index._environment.Options.BasePath} {index._environment.Options.TempPath} {index._environment.Options.JournalPath}";
                var indexToRemove = matches.FindIndex(str => str.Contains(expected));
                if (indexToRemove != -1)
                {
                    matches.RemoveAt(indexToRemove);
                }
            }

            Assert.Equal(0, matches.Count);
        }
    }
}
public bool ReadOneTransaction(StorageEnvironmentOptions options, bool checkCrc = true)
{
    if (_readingPage >= _pager.NumberOfAllocatedPages)
        return false;

    TransactionHeader* current;
    if (!TryReadAndValidateHeader(options, out current))
        return false;

    var compressedPages = (current->CompressedSize / AbstractPager.PageSize) + (current->CompressedSize % AbstractPager.PageSize == 0 ? 0 : 1);

    if (current->TransactionId <= _lastSyncedTransactionId)
    {
        LastTransactionHeader = current;
        _readingPage += compressedPages;
        return true; // skipping
    }

    if (checkCrc && !ValidatePagesCrc(options, compressedPages, current))
        return false;

    _recoveryPager.EnsureContinuous(null, _recoveryPage, (current->PageCount + current->OverflowPageCount) + 1);
    var dataPage = _recoveryPager.AcquirePagePointer(_recoveryPage);

    NativeMethods.memset(dataPage, 0, (current->PageCount + current->OverflowPageCount) * AbstractPager.PageSize);
    try
    {
        LZ4.Decode64(_pager.AcquirePagePointer(_readingPage), current->CompressedSize, dataPage, current->UncompressedSize, true);
    }
    catch (Exception e)
    {
        options.InvokeRecoveryError(this, "Could not de-compress, invalid data", e);
        RequireHeaderUpdate = true;
        return false;
    }

    var tempTransactionPageTranslation = new Dictionary<long, JournalFile.PagePosition>();

    for (var i = 0; i < current->PageCount; i++)
    {
        Debug.Assert(_pager.Disposed == false);
        Debug.Assert(_recoveryPager.Disposed == false);

        var page = _recoveryPager.Read(_recoveryPage);

        tempTransactionPageTranslation[page.PageNumber] = new JournalFile.PagePosition
        {
            JournalPos = _recoveryPage,
            TransactionId = current->TransactionId
        };

        if (page.IsOverflow)
        {
            var numOfPages = _recoveryPager.GetNumberOfOverflowPages(page.OverflowSize);
            _recoveryPage += numOfPages;
        }
        else
        {
            _recoveryPage++;
        }
    }

    _readingPage += compressedPages;

    LastTransactionHeader = current;

    foreach (var pagePosition in tempTransactionPageTranslation)
    {
        _transactionPageTranslation[pagePosition.Key] = pagePosition.Value;
    }

    return true;
}
protected virtual void Configure(StorageEnvironmentOptions options) { }
public ScratchBufferPool(StorageEnvironment env) { _options = env.Options; _sizeLimit = env.Options.MaxScratchBufferSize; _current = NextFile(); }
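// A minimal sketch (hypothetical values): the pool's size limit above is simply
// whatever MaxScratchBufferSize the environment was created with, so test overrides
// like the ones in this collection directly cap the scratch pool.
var options = StorageEnvironmentOptions.CreateMemoryOnly();
options.MaxScratchBufferSize = 1024 * 1024; // 1 MB scratch ceiling
using (var env = new StorageEnvironment(options))
{
    // env's ScratchBufferPool now uses a 1 MB size limit
}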
protected StorageTest(StorageEnvironmentOptions options) { _options = options; }
protected override void Configure(StorageEnvironmentOptions options) { options.PageSize = 4 * Constants.Size.Kilobyte; base.Configure(options); }
public long ToFile(StorageEnvironment env, string backupPath, CancellationToken token, CompressionLevel compression = CompressionLevel.Optimal, Action<string> infoNotify = null, Action backupStarted = null)
{
    infoNotify = infoNotify ?? (s => { });

    if (env.Options.IncrementalBackupEnabled == false)
        throw new InvalidOperationException("Incremental backup is disabled for this storage");

    long numberOfBackedUpPages = 0;

    var copier = new DataCopier(AbstractPager.PageSize * 16);
    var backupSuccess = true;

    long lastWrittenLogPage = -1;
    long lastWrittenLogFile = -1;

    using (var file = new FileStream(backupPath, FileMode.Create))
    {
        using (var package = new ZipArchive(file, ZipArchiveMode.Create, leaveOpen: true))
        {
            IncrementalBackupInfo backupInfo;
            using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                backupInfo = env.HeaderAccessor.Get(ptr => ptr->IncrementalBackup);

                if (env.Journal.CurrentFile != null)
                {
                    lastWrittenLogFile = env.Journal.CurrentFile.Number;
                    lastWrittenLogPage = env.Journal.CurrentFile.WritePagePosition;
                }

                // txw.Commit(); intentionally not committing
            }

            using (env.NewTransaction(TransactionFlags.Read))
            {
                if (backupStarted != null)
                    backupStarted(); // we let the caller know that we have started the backup

                var usedJournals = new List<JournalFile>();

                try
                {
                    long lastBackedUpPage = -1;
                    long lastBackedUpFile = -1;

                    var firstJournalToBackup = backupInfo.LastBackedUpJournal;

                    if (firstJournalToBackup == -1)
                        firstJournalToBackup = 0; // first time that we do incremental backup

                    for (var journalNum = firstJournalToBackup; journalNum <= backupInfo.LastCreatedJournal; journalNum++)
                    {
                        token.ThrowIfCancellationRequested();

                        var num = journalNum;

                        var journalFile = GetJournalFile(env, journalNum, backupInfo);

                        journalFile.AddRef();

                        usedJournals.Add(journalFile);

                        var startBackupAt = 0L;
                        var pagesToCopy = journalFile.JournalWriter.NumberOfAllocatedPages;
                        if (journalFile.Number == backupInfo.LastBackedUpJournal)
                        {
                            startBackupAt = backupInfo.LastBackedUpJournalPage + 1;
                            pagesToCopy -= startBackupAt;
                        }

                        if (startBackupAt >= journalFile.JournalWriter.NumberOfAllocatedPages) // nothing to do here
                        {
                            continue;
                        }

                        var part = package.CreateEntry(StorageEnvironmentOptions.JournalName(journalNum), compression);
                        Debug.Assert(part != null);

                        if (journalFile.Number == lastWrittenLogFile)
                        {
                            pagesToCopy -= (journalFile.JournalWriter.NumberOfAllocatedPages - lastWrittenLogPage);
                        }

                        using (var stream = part.Open())
                        {
                            copier.ToStream(journalFile, startBackupAt, pagesToCopy, stream, token);
                            infoNotify(string.Format("Voron Incr copy journal number {0}", num));
                        }

                        lastBackedUpFile = journalFile.Number;
                        if (journalFile.Number == backupInfo.LastCreatedJournal)
                        {
                            lastBackedUpPage = startBackupAt + pagesToCopy - 1;

                            // we used all of this file, so the next backup should start in the next file
                            if (lastBackedUpPage == (journalFile.JournalWriter.NumberOfAllocatedPages - 1))
                            {
                                lastBackedUpPage = -1;
                                lastBackedUpFile++;
                            }
                        }

                        numberOfBackedUpPages += pagesToCopy;
                    }

                    env.HeaderAccessor.Modify(header =>
                    {
                        header->IncrementalBackup.LastBackedUpJournal = lastBackedUpFile;
                        header->IncrementalBackup.LastBackedUpJournalPage = lastBackedUpPage;
                    });
                }
                catch (Exception)
                {
                    backupSuccess = false;
                    throw;
                }
                finally
                {
                    var lastSyncedJournal = env.HeaderAccessor.Get(header => header->Journal).LastSyncedJournal;

                    foreach (var jrnl in usedJournals)
                    {
                        if (backupSuccess) // if backup succeeded we can remove journals
                        {
                            if (jrnl.Number < lastWrittenLogFile && // prevent deletion of the current journal and journals with a greater number
                                jrnl.Number < lastSyncedJournal) // prevent deletion of journals that aren't synced with the data file
                            {
                                jrnl.DeleteOnClose = true;
                            }
                        }

                        jrnl.Release();
                    }
                }

                infoNotify(string.Format("Voron Incr Backup total {0} pages", numberOfBackedUpPages));
            }
        }

        file.Flush(true); // make sure that this is actually persisted fully to disk
        return numberOfBackedUpPages;
    }
}
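// Hypothetical call site for the incremental ToFile above; the backup path is
// illustrative, and IncrementalBackupEnabled must already be set on the options,
// otherwise the method throws. The BackupMethods.Incremental entry point mirrors
// the incremental backup test earlier in this collection.
long pages = BackupMethods.Incremental.ToFile(env, "backup.1.zip", CancellationToken.None,
    CompressionLevel.Optimal, infoNotify: msg => Console.WriteLine(msg));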
public WindowsMemoryMapPager(StorageEnvironmentOptions options, VoronPathSetting file, long? initialFileSize = null,
    Win32NativeFileAttributes fileAttributes = Win32NativeFileAttributes.Normal,
    Win32NativeFileAccess access = Win32NativeFileAccess.GenericRead | Win32NativeFileAccess.GenericWrite,
    bool usePageProtection = false)
    : base(options, usePageProtection)
{
    SYSTEM_INFO systemInfo;
    GetSystemInfo(out systemInfo);
    FileName = file;
    _logger = LoggingSource.Instance.GetLogger<StorageEnvironment>($"Pager-{file}");

    _access = access;
    _copyOnWriteMode = Options.CopyOnWriteMode && FileName.FullPath.EndsWith(Constants.DatabaseFilename);
    if (_copyOnWriteMode)
    {
        _memoryMappedFileAccess = MemoryMappedFileAccess.Read | MemoryMappedFileAccess.CopyOnWrite;
        fileAttributes = Win32NativeFileAttributes.Readonly;
        _access = Win32NativeFileAccess.GenericRead;
    }
    else
    {
        _memoryMappedFileAccess = _access == Win32NativeFileAccess.GenericRead
            ? MemoryMappedFileAccess.Read
            : MemoryMappedFileAccess.ReadWrite;
    }
    _fileAttributes = fileAttributes;

    _handle = Win32NativeFileMethods.CreateFile(file.FullPath, access,
        Win32NativeFileShare.Read | Win32NativeFileShare.Write | Win32NativeFileShare.Delete, IntPtr.Zero,
        Win32NativeFileCreationDisposition.OpenAlways, fileAttributes, IntPtr.Zero);
    if (_handle.IsInvalid)
    {
        int lastWin32ErrorCode = Marshal.GetLastWin32Error();
        throw new IOException("Failed to open file storage of Win32MemoryMapPager for " + file,
            new Win32Exception(lastWin32ErrorCode));
    }

    _fileInfo = new FileInfo(file.FullPath);
    var drive = _fileInfo.Directory.Root.Name.TrimEnd('\\');

    try
    {
        if (PhysicalDrivePerMountCache.TryGetValue(drive, out UniquePhysicalDriveId) == false)
            UniquePhysicalDriveId = GetPhysicalDriveId(drive);

        if (_logger.IsInfoEnabled)
            _logger.Info($"Physical drive '{drive}' unique id = '{UniquePhysicalDriveId}' for file '{file}'");
    }
    catch (Exception ex)
    {
        UniquePhysicalDriveId = 0;
        if (_logger.IsInfoEnabled)
            _logger.Info($"Failed to determine physical drive Id for drive letter '{drive}', file='{file}'", ex);
    }

    var streamAccessType = _access == Win32NativeFileAccess.GenericRead ? FileAccess.Read : FileAccess.ReadWrite;
    _fileStream = SafeFileStream.Create(_handle, streamAccessType);

    _totalAllocationSize = _fileInfo.Length;

    if ((access & Win32NativeFileAccess.GenericWrite) == Win32NativeFileAccess.GenericWrite ||
        (access & Win32NativeFileAccess.GenericAll) == Win32NativeFileAccess.GenericAll ||
        (access & Win32NativeFileAccess.FILE_GENERIC_WRITE) == Win32NativeFileAccess.FILE_GENERIC_WRITE)
    {
        var fileLength = _fileStream.Length;
        if (fileLength == 0 && initialFileSize.HasValue)
            fileLength = initialFileSize.Value;

        if (_fileStream.Length == 0 || (fileLength % AllocationGranularity != 0))
        {
            fileLength = NearestSizeToAllocationGranularity(fileLength);
            Win32NativeFileMethods.SetFileLength(_handle, fileLength);
        }

        _totalAllocationSize = fileLength;
    }

    NumberOfAllocatedPages = _totalAllocationSize / Constants.Storage.PageSize;

    SetPagerState(CreatePagerState());
}
protected override void Configure(StorageEnvironmentOptions options) { options.MaxScratchBufferSize = _64KB * 4; }
protected override void Configure(StorageEnvironmentOptions options) { options.ManualFlushing = true; options.MaxLogFileSize = 3 * AbstractPager.PageSize; }
protected override void Configure(StorageEnvironmentOptions options) { options.ManualFlushing = true; }
public void ScratchPagesShouldNotBeReleasedUntilNotUsed()
{
    var directory = "Test2";
    if (Directory.Exists(directory))
    {
        Directory.Delete(directory, true);
    }

    var options = StorageEnvironmentOptions.ForPath(directory);
    options.ManualFlushing = true;
    using (var env = new StorageEnvironment(options))
    {
        CreateTrees(env, 2, "tree");
        for (int a = 0; a < 3; a++)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                tx.Environment.State.GetTree(tx, "tree0").Add(string.Format("key/{0}/{1}/1", new string('0', 1000), a), new MemoryStream());
                tx.Environment.State.GetTree(tx, "tree0").Add(string.Format("key/{0}/{1}/2", new string('0', 1000), a), new MemoryStream());
                tx.Commit();
            }
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            tx.Environment.State.GetTree(tx, "tree1").Add("yek/1", new MemoryStream());
            tx.Commit();
        }

        using (var txr = env.NewTransaction(TransactionFlags.Read))
        {
            using (var iterator = txr.Environment.State.GetTree(txr, "tree0").Iterate())
            {
                Assert.True(iterator.Seek(Slice.BeforeAllKeys)); // all pages are from scratch (one from position 11)

                var currentKey = iterator.CurrentKey.ToString();

                env.FlushLogToDataFile(); // frees pages from scratch (including the one at position 11)

                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var tree = txw.Environment.State.GetTree(txw, "tree1");
                    tree.Add(string.Format("yek/{0}/0/0", new string('0', 1000)), new MemoryStream()); // allocates new page from scratch (position 11)

                    txw.Commit();
                }

                Assert.Equal(currentKey, iterator.CurrentKey.ToString());

                using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var tree = txw.Environment.State.GetTree(txw, "tree1");
                    tree.Add("fake", new MemoryStream());

                    txw.Commit();
                }

                Assert.Equal(currentKey, iterator.CurrentKey.ToString());

                var count = 0;

                do
                {
                    currentKey = iterator.CurrentKey.ToString();
                    count++;

                    Assert.Contains("key/", currentKey);
                } while (iterator.MoveNext());

                Assert.Equal(6, count);
            }
        }
    }
}