/// <summary>
/// Compacts a storage environment: copies every tree from the source environment described by
/// <paramref name="srcOptions"/> into a fresh environment at <paramref name="compactOptions"/>,
/// syncs the new data file to disk (retrying up to 100 times), and finally truncates the
/// compacted data file down to the pages it actually uses.
/// </summary>
/// <param name="srcOptions">Options of the environment being compacted; must not have incremental backup enabled.</param>
/// <param name="compactOptions">Directory options describing where the compacted copy is written.</param>
/// <param name="progressReport">Optional callback receiving copy-progress notifications.</param>
/// <param name="token">Checked before each sync attempt; cancellation throws <see cref="OperationCanceledException"/>.</param>
/// <exception cref="InvalidOperationException">Incremental backup is enabled on the source options.</exception>
public static void Execute(StorageEnvironmentOptions srcOptions, StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions compactOptions, Action<StorageCompactionProgress> progressReport = null, CancellationToken token = default(CancellationToken))
{
    if (srcOptions.IncrementalBackupEnabled)
    {
        throw new InvalidOperationException(CannotCompactBecauseOfIncrementalBackup);
    }

    // Take over flushing on both sides: the source files must stay untouched while we
    // read them, and the destination is flushed explicitly once the copy completes.
    srcOptions.ManualFlushing = true;
    compactOptions.ManualFlushing = true;

    long compactedSizeInBytes;

    using (var sourceEnv = new StorageEnvironment(srcOptions))
    using (var destEnv = new StorageEnvironment(compactOptions))
    {
        CopyTrees(sourceEnv, destEnv, progressReport, token);

        destEnv.FlushLogToDataFile();

        const int maxSyncAttempts = 100;
        bool synced; // assigned on every path that reaches a loop exit below

        for (var attempt = 1; ; attempt++)
        {
            token.ThrowIfCancellationRequested();

            using (var syncOp = new WriteAheadJournal.JournalApplicator.SyncOperation(destEnv.Journal.Applicator))
            {
                try
                {
                    synced = syncOp.SyncDataFile();

                    // Stop on success, or give up after the last allowed attempt.
                    if (synced || attempt >= maxSyncAttempts)
                        break;

                    Thread.Sleep(100);
                }
                catch (Exception e)
                {
                    // NOTE(review): the failure is recorded against the SOURCE environment's
                    // options even though the exception came from syncing the destination —
                    // confirm this is intentional.
                    sourceEnv.Options.SetCatastrophicFailure(ExceptionDispatchInfo.Capture(e));
                    throw;
                }
            }
        }

        // Only drop the current journal once we know its contents reached the data file.
        if (synced)
        {
            destEnv.Journal.Applicator.DeleteCurrentAlreadyFlushedJournal();
        }

        destEnv.Cleanup();

        compactedSizeInBytes = destEnv.NextPageNumber * Constants.Storage.PageSize;
    }

    // Shrink the compacted data file to the space actually occupied by its pages.
    using (var compactedDataFile = SafeFileStream.Create(compactOptions.BasePath.Combine(Constants.DatabaseFilename).FullPath, FileMode.Open, FileAccess.ReadWrite))
    {
        compactedDataFile.SetLength(compactedSizeInBytes);
    }
}