/// <summary>
/// Per-benchmark setup hook: when DeleteBeforeEachBenchmark is set, wipes the
/// storage and opens a fresh environment; otherwise leaves Env untouched.
/// </summary>
public virtual void Setup()
{
    if (!DeleteBeforeEachBenchmark)
        return;

    DeleteStorage();
    Env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(Path));
}
// Verifies that a full backup taken while the journal still holds unsynced
// transactions can be restored into a fresh directory with all 1000 items intact.
public void CanBackupAndRestore()
{
    RequireFileBasedPager();

    var random = new Random();
    var buffer = new byte[8192];
    random.NextBytes(buffer);

    // First batch: 500 x 8 KB items, enough to span more than one journal file.
    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        for (int i = 0; i < 500; i++)
        {
            tx.Root.Add("items/" + i, new MemoryStream(buffer));
        }
        tx.Commit();
    }

    Assert.True(Env.Journal.Files.Count > 1);

    Env.FlushLogToDataFile(); // force writing data to the data file

    // add more data to journal files
    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        for (int i = 500; i < 1000; i++)
        {
            tx.Root.Add("items/" + i, new MemoryStream(buffer));
        }
        tx.Commit();
    }

    Env.FlushLogToDataFile(); // force writing data to the data file - this won't sync data to disk because there was another sync within last minute

    BackupMethods.Full.ToFile(Env, _backupFile);

    BackupMethods.Full.Restore(_backupFile, _recoveredStoragePath);

    // Reopen the restored copy with the same journal size and verify every item
    // round-trips byte-for-byte.
    var options = StorageEnvironmentOptions.ForPath(_recoveredStoragePath);
    options.MaxLogFileSize = Env.Options.MaxLogFileSize;

    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            for (int i = 0; i < 1000; i++)
            {
                var readResult = tx.Root.Read("items/" + i);
                Assert.NotNull(readResult);
                var memoryStream = new MemoryStream();
                readResult.Reader.CopyTo(memoryStream);
                Assert.Equal(memoryStream.ToArray(), buffer);
            }
        }
    }
}
// Regression test for RavenDB-2806: an overflow value stored on pages that were
// freed earlier in the same transaction must survive an incremental
// backup/restore cycle intact.
public void IncorrectWriteOfOverflowPagesFromJournalsToDataFile_RavenDB_2806()
{
    RequireFileBasedPager();

    const int testedOverflowSize = 20000;

    var overflowValue = new byte[testedOverflowSize];
    new Random(1).NextBytes(overflowValue);

    using (var tx = Env.NewTransaction(TransactionFlags.ReadWrite))
    {
        var tree = Env.CreateTree(tx, "test");

        // Add and then delete two large items so the subsequent overflow value
        // lands on pages freed within this very transaction — the condition the
        // original bug mishandled when journals were applied to the data file.
        var itemBytes = new byte[16000];

        new Random(2).NextBytes(itemBytes);
        tree.Add("items/1", itemBytes);

        new Random(3).NextBytes(itemBytes);
        tree.Add("items/2", itemBytes);

        tree.Delete("items/1");
        tree.Delete("items/2");

        tree.Add("items/3", overflowValue);

        tx.Commit();
    }

    BackupMethods.Incremental.ToFile(Env, IncrementalBackupTestUtils.IncrementalBackupFile(0));

    var options = StorageEnvironmentOptions.ForPath(IncrementalBackupTestUtils.RestoredStoragePath);
    options.MaxLogFileSize = Env.Options.MaxLogFileSize;

    BackupMethods.Incremental.Restore(options, new[] { IncrementalBackupTestUtils.IncrementalBackupFile(0) });

    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.ReadTree("test");

            var readResult = tree.Read("items/3");

            var readBytes = new byte[testedOverflowSize];
            readResult.Reader.Read(readBytes, 0, testedOverflowSize);

            Assert.Equal(overflowValue, readBytes);
        }
    }
}
/// <summary>
/// Benchmark: preallocates data-file space, writes Transactions * ItemsPerTransaction
/// items in a single write transaction, then times reading all of them back across
/// <paramref name="concurrency"/> thread-pool readers.
/// </summary>
/// <param name="sw">Stopwatch started just before the read phase and stopped after it.</param>
/// <param name="concurrency">Number of concurrent read transactions.</param>
private static void ReadAndWriteOneTransaction(Stopwatch sw, int concurrency)
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(Path)))
    {
        var value = new byte[100];
        new Random().NextBytes(value);

        // Preallocate pages up front so allocation cost doesn't skew the timing.
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.Options.DataPager.AllocateMorePages(tx, 1024 * 1024 * 768);
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var ms = new MemoryStream(value);
            for (long i = 0; i < Transactions * ItemsPerTransaction; i++)
            {
                ms.Position = 0;
                tx.State.Root.Add(tx, i.ToString("0000000000000000"), ms);
            }
            tx.Commit();
        }

        var countdownEvent = new CountdownEvent(concurrency);
        var itemsPerThread = (ItemsPerTransaction * Transactions) / concurrency;

        sw.Start();
        for (int i = 0; i < concurrency; i++)
        {
            var currentBase = i;
            ThreadPool.QueueUserWorkItem(state =>
            {
                using (var tx = env.NewTransaction(TransactionFlags.Read))
                {
                    var ms = new byte[100];
                    for (int j = 0; j < itemsPerThread; j++)
                    {
                        // BUGFIX: was `var current = j * currentBase;`, which made
                        // thread 0 re-read key 0 repeatedly and left most keys
                        // unread. Partition the key space into contiguous
                        // per-thread ranges so every written key is read once.
                        var current = currentBase * itemsPerThread + j;
                        var key = current.ToString("0000000000000000");
                        var stream = tx.State.Root.Read(tx, key).Reader;
                        {
                            // Drain the value; we only care about read throughput.
                            while (stream.Read(ms, 0, ms.Length) != 0)
                            {
                            }
                        }
                    }
                    tx.Commit();
                }

                countdownEvent.Signal();
            });
        }
        countdownEvent.Wait();
        sw.Stop();
    }
}
// Writes a fixed 13-filled payload across two transactions, reopens the
// environment from disk, and verifies recovery reads everything back without
// raising a checksum mismatch.
public void ShouldNotThrowChecksumMismatch()
{
    var random = new Random(1);
    var buffer = new byte[100];
    random.NextBytes(buffer);

    // Overwrite the random bytes with a constant so expected content is deterministic.
    for (int i = 0; i < 100; i++)
    {
        buffer[i] = 13;
    }

    var options = StorageEnvironmentOptions.ForPath(DataDir);
    using (var env = new StorageEnvironment(options))
    {
        // Two separate commits so recovery has multiple transactions to replay.
        using (var tx = env.WriteTransaction())
        {
            var tree = tx.CreateTree("foo");
            for (int i = 0; i < 50; i++)
            {
                tree.Add("items/" + i, new MemoryStream(buffer));
            }
            tx.Commit();
        }

        using (var tx = env.WriteTransaction())
        {
            var tree = tx.CreateTree("foo");
            for (int i = 50; i < 100; i++)
            {
                tree.Add("items/" + i, new MemoryStream(buffer));
            }
            tx.Commit();
        }
    }

    // Fresh options instance: reopening forces journal recovery and checksum validation.
    options = StorageEnvironmentOptions.ForPath(DataDir);
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.ReadTransaction())
        {
            var tree = tx.CreateTree("foo");
            for (int i = 0; i < 100; i++)
            {
                var readResult = tree.Read("items/" + i);
                Assert.NotNull(readResult);
                var memoryStream = new MemoryStream();
                readResult.Reader.CopyTo(memoryStream);
                Assert.Equal(memoryStream.ToArray(), buffer);
            }
        }
    }
}
/// <summary>
/// Recovery must replay multiple committed transactions touching two different
/// trees ("atree" with 1000 keys, "btree" with 5) after the environment is
/// disposed and reopened from the same directory.
/// </summary>
public void StorageRecoveryShouldWorkWhenThereAreMultipleCommitedTransactions2()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir)))
    {
        using (var tx = env.WriteTransaction())
        {
            var aTree = tx.CreateTree("atree");
            for (var k = 0; k < 1000; k++)
                aTree.Add("key" + k, new MemoryStream());
            tx.Commit();
        }

        using (var tx = env.WriteTransaction())
        {
            var bTree = tx.CreateTree("btree");
            for (var k = 0; k < 5; k++)
                bTree.Add("key" + k, new MemoryStream());
            tx.Commit();
        }
    }

    // Reopen: everything written above must come back via journal recovery.
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir)))
    {
        using (var tx = env.WriteTransaction())
        {
            tx.CreateTree("atree");
            tx.CreateTree("btree");
            tx.Commit();
        }

        using (var tx = env.ReadTransaction())
        {
            var aTree = tx.CreateTree("atree");
            var bTree = tx.CreateTree("btree");

            for (var k = 0; k < 1000; k++)
                Assert.NotNull(aTree.Read("key" + k));

            for (var k = 0; k < 5; k++)
                Assert.NotNull(bTree.Read("key" + k));
        }
    }
}
/// <summary>
/// With the 32-bits pager forced, a scratch-buffer limit below the 32-bit cap
/// must be accepted verbatim rather than clamped.
/// </summary>
public void SettingMaxScratchBufferSizeCanBeLimitedOn32Bits()
{
    using (var options = StorageEnvironmentOptions.ForPath(NewDataPath()))
    {
        options.ForceUsing32BitsPager = true;

        var requested = 4 * Constants.Size.Megabyte;
        options.MaxScratchBufferSize = requested;

        Assert.Equal(requested, options.MaxScratchBufferSize);
    }
}
/// <summary>
/// Open and immediately dispose the environment twice: the second open must
/// recover cleanly from a journal that contains no committed transactions.
/// </summary>
public void StorageRecoveryShouldWorkWhenThereAreNoTransactionsToRecoverFromLog()
{
    using (new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir)))
    {
    }

    using (new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir)))
    {
    }
}
// Takes five minimal incremental backups (two committed transactions each) while
// writing 5000 users, then restores the snapshots in order and verifies them all.
public void Can_make_multiple_min_inc_backups_and_then_restore()
{
    const int UserCount = 5000;
    _tempDir = Guid.NewGuid().ToString();
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(_tempDir);
    storageEnvironmentOptions.IncrementalBackupEnabled = true;

    using (var envToSnapshot = new StorageEnvironment(storageEnvironmentOptions))
    {
        int index = 0;
        // 5 snapshots x 2 transactions x (UserCount / 10) adds = UserCount users total.
        for (int xi = 0; xi < 5; xi++)
        {
            for (int yi = 0; yi < 2; yi++)
            {
                using (var tx = envToSnapshot.NewTransaction(TransactionFlags.ReadWrite))
                {
                    var tree = envToSnapshot.CreateTree(tx, "test");
                    for (int i = 0; i < UserCount / 10; i++)
                    {
                        tree.Add("users/" + index, "john doe/" + index);
                        index++;
                    }
                    tx.Commit();
                }
            }

            // One snapshot per outer iteration captures the delta since the last one.
            var snapshotWriter = new MinimalIncrementalBackup();
            snapshotWriter.ToFile(envToSnapshot, Path.Combine(_tempDir, xi + ".snapshot"));
        }
    }

    var incremental = new IncrementalBackup();

    var restoredOptions = StorageEnvironmentOptions.ForPath(Path.Combine(_tempDir, "restored"));
    // Snapshots must be applied in creation order (0..4) for a consistent restore.
    incremental.Restore(restoredOptions, Enumerable.Range(0, 5).Select(i => Path.Combine(_tempDir, i + ".snapshot")));

    using (var snapshotRestoreEnv = new StorageEnvironment(restoredOptions))
    {
        using (var tx = snapshotRestoreEnv.NewTransaction(TransactionFlags.Read))
        {
            var tree = tx.ReadTree("test");
            Assert.NotNull(tree);

            for (int i = 0; i < UserCount; i++)
            {
                var readResult = tree.Read("users/" + i);
                Assert.NotNull(readResult);
                Assert.Equal("john doe/" + i, readResult.Reader.ToStringValue());
            }
        }
    }
}
// Writes a 512 KB value while the max journal file size is only 10 pages, then
// reopens the environment and verifies the value is recovered intact.
// Uses a local "test2.data" directory, cleaned up before and after.
public void ShouldBeAbleToWriteValuesGreaterThanLogAndRecoverThem()
{
    DeleteDirectory("test2.data");

    var random = new Random(1234);
    var buffer = new byte[1024 * 512];
    random.NextBytes(buffer);

    var options = StorageEnvironmentOptions.ForPath("test2.data");
    // 10 pages per journal file: the value is guaranteed to span several journals.
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            tx.Environment.CreateTree(tx, "tree").Add("key1", new MemoryStream(buffer));
            tx.Commit();
        }
    }

    // Reopen with the same small journal size to force multi-journal recovery.
    options = StorageEnvironmentOptions.ForPath("test2.data");
    options.MaxLogFileSize = 10 * AbstractPager.PageSize;

    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            env.CreateTree(tx, "tree");
            tx.Commit();
        }

        using (var tx = env.NewTransaction(TransactionFlags.Read))
        {
            var read = tx.Environment.CreateTree(tx, "tree").Read("key1");
            Assert.NotNull(read);

            {
                Assert.Equal(buffer.Length, read.Reader.Length);
                int used;
                Assert.Equal(buffer, read.Reader.ReadBytes(read.Reader.Length, out used).Take(used).ToArray());
            }
        }
    }

    DeleteDirectory("test2.data");
}
/// <summary>
/// With the 32-bits pager forced, a scratch-buffer request above the platform
/// cap is clamped down to the 32-bit default of 32 MB.
/// </summary>
public void SettingMaxScratchBufferSizeMustNotExceed32BitsLimit()
{
    using (var options = StorageEnvironmentOptions.ForPath(NewDataPath()))
    {
        options.ForceUsing32BitsPager = true;

        // Request far more than the 32-bit limit allows.
        options.MaxScratchBufferSize = 512 * Constants.Size.Megabyte;

        // 32 MB is the default on 32 bits
        Assert.Equal(32 * Constants.Size.Megabyte, options.MaxScratchBufferSize);
    }
}
/// <summary>
/// Measures and prints how long it takes just to open (and dispose) a storage
/// environment that lives on a UNC network share.
/// </summary>
public static int Main()
{
    var timer = Stopwatch.StartNew();

    var options = StorageEnvironmentOptions.ForPath(@"\\10.0.0.10\Documents\main");
    using (new StorageEnvironment(options))
    {
    }

    Console.WriteLine(timer.Elapsed);
    return 0;
}
/// <summary>
/// Opens the storage at <paramref name="path"/> and ensures the album tree and
/// its three secondary-index trees exist before any query runs.
/// </summary>
public FreeDbQueries(string path)
{
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path));

    using (Transaction tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        foreach (var treeName in new[] { "albums", "ix_diskids", "ix_artists", "ix_titles" })
        {
            _storageEnvironment.CreateTree(tx, treeName);
        }

        tx.Commit();
    }
}
/// <summary>
/// Compacts a database containing three user trees plus metadata and verifies
/// that the progress callback reported every expected copy step.
/// </summary>
public void ShouldReportProgress()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir)))
    {
        using (var tx = env.WriteTransaction())
        {
            var tree = tx.CreateTree("fruits");
            tree.Add("apple", new byte[123]);
            tree.Add("orange", new byte[99]);

            var tree2 = tx.CreateTree("vegetables");
            tree2.Add("carrot", new byte[123]);
            tree2.Add("potato", new byte[99]);

            var tree3 = tx.CreateTree("multi");
            tree3.MultiAdd("fruits", "apple");
            tree3.MultiAdd("fruits", "orange");
            tree3.MultiAdd("vegetables", "carrot");
            tree3.MultiAdd("vegetables", "carrot");

            tx.Commit();
        }
    }

    var progressReport = new List<string>();

    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(DataDir),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(Path.Combine(DataDir, "Compacted")),
        x => progressReport.Add($"Copied {x.ObjectProgress} of {x.ObjectTotal} records in '{x.ObjectName}' tree. Copied {x.GlobalProgress} of {x.GlobalTotal} trees."));

    Assert.NotEmpty(progressReport);

    var lines = new[]
    {
        "Copied 0 of 2 records in '$Database-Metadata' tree. Copied 0 of 4 trees.",
        "Copied 2 of 2 records in '$Database-Metadata' tree. Copied 1 of 4 trees.",
        "Copied 0 of 2 records in 'fruits' tree. Copied 1 of 4 trees.",
        "Copied 2 of 2 records in 'fruits' tree. Copied 2 of 4 trees.",
        "Copied 0 of 2 records in 'multi' tree. Copied 2 of 4 trees.",
        "Copied 2 of 2 records in 'multi' tree. Copied 3 of 4 trees.",
        "Copied 0 of 2 records in 'vegetables' tree. Copied 3 of 4 trees.",
        "Copied 2 of 2 records in 'vegetables' tree. Copied 4 of 4 trees."
    };

    foreach (var line in lines)
    {
        // BUGFIX: previously `Assert.Contains(line, lines)` — checking the
        // expected array against itself, which could never fail. The assertion
        // must run against the collected progressReport (as the sibling
        // ShouldReportProgress variant correctly does).
        Assert.Contains(line, progressReport);
    }
}
/// <summary>
/// Suite-level setup: optionally wipes leftover storage, and opens a shared
/// environment unless each benchmark creates its own in Setup().
/// </summary>
public StorageBenchmark()
{
    if (DeleteBeforeSuite)
        DeleteStorage();

    // When DeleteBeforeEachBenchmark is set, Setup() owns the environment
    // lifetime instead of this constructor.
    if (DeleteBeforeEachBenchmark == false)
        Env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(Path));
}
// Compacts a database containing three user trees plus metadata and verifies
// that the progress callback emitted every expected "Copying"/"Copied" message.
public void ShouldReportProgress()
{
    using (var env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(DataDir)))
    {
        using (var tx = env.WriteTransaction())
        {
            var tree = tx.CreateTree("fruits");
            tree.Add("apple", new byte[123]);
            tree.Add("orange", new byte[99]);

            var tree2 = tx.CreateTree("vegetables");
            tree2.Add("carrot", new byte[123]);
            tree2.Add("potato", new byte[99]);

            // Multi-value tree; note "vegetables"/"carrot" is added twice, so the
            // tree still holds only 2 records.
            var tree3 = tx.CreateTree("multi");
            tree3.MultiAdd("fruits", "apple");
            tree3.MultiAdd("fruits", "orange");
            tree3.MultiAdd("vegetables", "carrot");
            tree3.MultiAdd("vegetables", "carrot");

            tx.Commit();
        }
    }

    var progressReport = new List <string>();

    // Compact into a "Compacted" subdirectory, capturing each progress message.
    StorageCompaction.Execute(StorageEnvironmentOptions.ForPath(DataDir),
        (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(Path.Combine(DataDir, "Compacted")),
        x => progressReport.Add($"{x.Message} ({x.TreeName} - {x.TreeProgress}/{x.TreeTotal}). Copied {x.GlobalProgress} of {x.GlobalTotal} trees."));

    Assert.NotEmpty(progressReport);

    var lines = new[]
    {
        "Copying variable size tree ($Database-Metadata - 0/2). Copied 0 of 4 trees.",
        "Copied variable size tree ($Database-Metadata - 2/2). Copied 1 of 4 trees.",
        "Copying variable size tree (fruits - 0/2). Copied 1 of 4 trees.",
        "Copied variable size tree (fruits - 2/2). Copied 2 of 4 trees.",
        "Copying variable size tree (multi - 0/2). Copied 2 of 4 trees.",
        "Copied variable size tree (multi - 2/2). Copied 3 of 4 trees.",
        "Copying variable size tree (vegetables - 0/2). Copied 3 of 4 trees.",
        "Copied variable size tree (vegetables - 2/2). Copied 4 of 4 trees."
    };

    foreach (var line in lines)
    {
        Assert.Contains(line, progressReport);
    }
}
// Loads a large sequential data set across 100 transactions (flushing the
// journal to the data file halfway through), validates the records, then
// reopens the environment to confirm recovery reproduces the same state.
public void ShouldProperlyRecover()
{
    var sequentialLargeIds = ReadData("non-leaf-page-seq-id-large-values-2.txt");

    var enumerator = sequentialLargeIds.GetEnumerator();

    if (Directory.Exists("tests"))
    {
        Directory.Delete("tests", true);
    }

    var options = StorageEnvironmentOptions.ForPath("tests");
    // Manual flushing keeps the journal under the test's control, not the
    // background flusher's.
    options.ManualFlushing = true;
    using (var env = new StorageEnvironment(options))
    {
        for (var transactions = 0; transactions < 100; transactions++)
        {
            using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
            {
                for (var i = 0; i < 100; i++)
                {
                    enumerator.MoveNext();

                    tx.Root.Add(enumerator.Current.Key.ToString("0000000000000000"), new MemoryStream(enumerator.Current.Value));
                }
                tx.Commit();
            }

            // Flush mid-run so recovery must combine data-file and journal state.
            if (transactions == 50)
            {
                env.FlushLogToDataFile();
            }
        }

        ValidateRecords(env, new List <string> { "Root" }, sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
    }

    options = StorageEnvironmentOptions.ForPath("tests");
    options.ManualFlushing = true;

    // Reopen and validate again: the recovered state must match the pre-close state.
    using (var env = new StorageEnvironment(options))
    {
        ValidateRecords(env, new List <string> { "Root" }, sequentialLargeIds.Select(x => x.Key.ToString("0000000000000000")).ToList());
    }
}
/// <summary>
/// Creates the database's "Configuration" storage environment (in-memory for
/// RunInMemory databases, on disk otherwise) and the alert/index-etag stores.
/// </summary>
public ConfigurationStorage(DocumentDatabase db, ServerStore serverStore)
{
    var configPath = Path.Combine(db.Configuration.Core.DataDirectory, "Configuration");

    var options = db.Configuration.Core.RunInMemory
        ? StorageEnvironmentOptions.CreateMemoryOnly(configPath)
        : StorageEnvironmentOptions.ForPath(configPath);

    options.SchemaVersion = 1;

    Environment = new StorageEnvironment(options);

    AlertsStorage = new AlertsStorage(db.Name, serverStore);

    IndexesEtagsStorage = new IndexesEtagsStorage(db.Name);
}
/// <summary>
/// Runs the parallel-read benchmark against a freshly opened environment.
/// The journal is flushed first so reads exercise the data file, not the log.
/// </summary>
private PerformanceRecord ReadParallel(string operation, IEnumerable <uint> ids, PerfTracker perfTracker, int numberOfThreads)
{
    var options = StorageEnvironmentOptions.ForPath(dataPath);
    options.ManualFlushing = true;

    using (var env = new StorageEnvironment(options))
    {
        env.FlushLogToDataFile();

        return ExecuteReadWithParallel(operation, ids, numberOfThreads, () => ReadInternal(ids, perfTracker, env));
    }
}
/// <summary>
/// Opens the "FreeDB" store, ensures the album tree and its three secondary
/// indexes exist, and prepares an empty write batch.
/// </summary>
public VoronDisksDestination()
{
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath("FreeDB"));

    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        foreach (var treeName in new[] { "albums", "ix_diskids", "ix_artists", "ix_titles" })
        {
            _storageEnvironment.CreateTree(tx, treeName);
        }

        tx.Commit();
    }

    _currentBatch = new WriteBatch();
}
/// <summary>
/// Runs the batched-write benchmark against a brand-new storage directory and
/// returns the per-transaction performance records.
/// </summary>
private List <PerformanceRecord> Write(string operation, IEnumerable <TestData> data, int itemsPerTransaction, int numberOfTransactions, PerfTracker perfTracker)
{
    NewStorage();

    var options = StorageEnvironmentOptions.ForPath(dataPath);

    using (var env = new StorageEnvironment(options))
    {
        var enumerator = data.GetEnumerator();

        return WriteInternalBatch(operation, enumerator, itemsPerTransaction, numberOfTransactions, perfTracker, env);
    }
}
/// <summary>
/// Opens the series store at <paramref name="path"/> and restores the last
/// issued sequence number from the root tree, defaulting to 1 on a fresh store.
/// </summary>
public DateTimeSeries(string path)
{
    _lastKey = "last-key";
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path));

    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        var read = tx.State.Root.Read(tx, _lastKey);
        if (read == null)
        {
            _last = 1;
        }
        else
        {
            _last = read.Reader.ReadInt64();
        }

        tx.Commit();
    }
}
// Creates the per-database "Configuration" storage environment, applying every
// relevant storage setting from the database configuration, then builds the
// notification/operation stores and the transaction context pool on top of it.
public ConfigurationStorage(DocumentDatabase db)
{
    var path = db.Configuration.Core.DataDirectory.Combine("Configuration");

    // Optional dedicated temp directory, mirrored under "Configuration".
    string tempPath = null;
    if (db.Configuration.Storage.TempPath != null)
    {
        tempPath = db.Configuration.Storage.TempPath.Combine("Configuration").ToFullPath();
    }

    var options = db.Configuration.Core.RunInMemory
        ? StorageEnvironmentOptions.CreateMemoryOnly(path.FullPath, tempPath, db.IoChanges, db.CatastrophicFailureNotification)
        : StorageEnvironmentOptions.ForPath(path.FullPath, tempPath, null, db.IoChanges, db.CatastrophicFailureNotification);

    // Route storage-level failures to the database's central handlers.
    options.OnNonDurableFileSystemError += db.HandleNonDurableFileSystemError;
    options.OnRecoverableFailure += db.HandleRecoverableFailure;
    options.OnRecoveryError += db.HandleOnConfigurationRecoveryError;
    options.OnIntegrityErrorOfAlreadySyncedData += db.HandleOnConfigurationIntegrityErrorOfAlreadySyncedData;
    options.CompressTxAboveSizeInBytes = db.Configuration.Storage.CompressTxAboveSize.GetValue(SizeUnit.Bytes);
    options.SchemaVersion = SchemaUpgrader.CurrentVersion.ConfigurationVersion;
    options.SchemaUpgrader = SchemaUpgrader.Upgrader(SchemaUpgrader.StorageType.Configuration, this, null, null);
    options.ForceUsing32BitsPager = db.Configuration.Storage.ForceUsing32BitsPager;
    options.EnablePrefetching = db.Configuration.Storage.EnablePrefetching;
    options.DiscardVirtualMemory = db.Configuration.Storage.DiscardVirtualMemory;
    options.TimeToSyncAfterFlushInSec = (int)db.Configuration.Storage.TimeToSyncAfterFlush.AsTimeSpan.TotalSeconds;
    options.NumOfConcurrentSyncsPerPhysDrive = db.Configuration.Storage.NumberOfConcurrentSyncsPerPhysicalDrive;
    // Copy the key so this environment holds its own encryption material.
    options.Encryption.MasterKey = db.MasterKey?.ToArray();
    options.DoNotConsiderMemoryLockFailureAsCatastrophicError = db.Configuration.Security.DoNotConsiderMemoryLockFailureAsCatastrophicError;
    if (db.Configuration.Storage.MaxScratchBufferSize.HasValue)
    {
        options.MaxScratchBufferSize = db.Configuration.Storage.MaxScratchBufferSize.Value.GetValue(SizeUnit.Bytes);
    }
    options.PrefetchSegmentSize = db.Configuration.Storage.PrefetchBatchSize.GetValue(SizeUnit.Bytes);
    options.PrefetchResetThreshold = db.Configuration.Storage.PrefetchResetThreshold.GetValue(SizeUnit.Bytes);
    options.SyncJournalsCountThreshold = db.Configuration.Storage.SyncJournalsCountThreshold;
    options.IgnoreInvalidJournalErrors = db.Configuration.Storage.IgnoreInvalidJournalErrors;
    options.SkipChecksumValidationOnDatabaseLoading = db.Configuration.Storage.SkipChecksumValidationOnDatabaseLoading;
    options.IgnoreDataIntegrityErrorsOfAlreadySyncedTransactions = db.Configuration.Storage.IgnoreDataIntegrityErrorsOfAlreadySyncedTransactions;
    options.MaxNumberOfRecyclableJournals = db.Configuration.Storage.MaxNumberOfRecyclableJournals;

    DirectoryExecUtils.SubscribeToOnDirectoryInitializeExec(options, db.Configuration.Storage, db.Name, DirectoryExecUtils.EnvironmentType.Configuration, Logger);

    NotificationsStorage = new NotificationsStorage(db.Name);

    OperationsStorage = new OperationsStorage();

    Environment = StorageLoader.OpenEnvironment(options, StorageEnvironmentWithType.StorageEnvironmentType.Configuration);

    ContextPool = new TransactionContextPool(Environment, db.Configuration.Memory.MaxContextSizeToKeep);
}
// Builds the cluster manager for the system database: resolves the local node's
// connection info, prepares Raft storage (on disk under "Raft", or in memory),
// wires the HTTP transport and state machine, and starts the Raft engine.
public static ClusterManager Create(DocumentDatabase systemDatabase, DatabasesLandlord databasesLandlord)
{
    if (systemDatabase == null)
    {
        throw new ArgumentNullException("systemDatabase");
    }

    if (databasesLandlord == null)
    {
        throw new ArgumentNullException("databasesLandlord");
    }

    DatabaseHelper.AssertSystemDatabase(systemDatabase);

    var configuration = systemDatabase.Configuration;
    var nodeConnectionInfo = CreateSelfConnection(systemDatabase);

    StorageEnvironmentOptions options;
    if (configuration.Core.RunInMemory == false)
    {
        var directoryPath = Path.Combine(configuration.Core.DataDirectory ?? AppDomain.CurrentDomain.BaseDirectory, "Raft");
        if (Directory.Exists(directoryPath) == false)
        {
            Directory.CreateDirectory(directoryPath);
        }

        options = StorageEnvironmentOptions.ForPath(directoryPath);
    }
    else
    {
        options = StorageEnvironmentOptions.CreateMemoryOnly(configuration.Storage.TempPath);
    }

    var transport = new HttpTransport(nodeConnectionInfo.Name, systemDatabase.WorkContext.CancellationToken);
    var stateMachine = new ClusterStateMachine(systemDatabase, databasesLandlord);
    // Raft timeouts and compaction thresholds come straight from cluster configuration.
    var raftEngineOptions = new RaftEngineOptions(nodeConnectionInfo, options, transport, stateMachine)
    {
        ElectionTimeout = (int)configuration.Cluster.ElectionTimeout.AsTimeSpan.TotalMilliseconds,
        HeartbeatTimeout = (int)configuration.Cluster.HeartbeatTimeout.AsTimeSpan.TotalMilliseconds,
        MaxLogLengthBeforeCompaction = configuration.Cluster.MaxLogLengthBeforeCompaction,
        MaxEntriesPerRequest = configuration.Cluster.MaxEntriesPerRequest,
        MaxStepDownDrainTime = configuration.Cluster.MaxStepDownDrainTime.AsTimeSpan
    };
    var raftEngine = new RaftEngine(raftEngineOptions);

    // Back-reference so the state machine can issue commands through the engine.
    stateMachine.RaftEngine = raftEngine;

    return(new ClusterManager(raftEngine));
}
// Persists a cluster topology (six voting peers) to disk, reopens the storage,
// and verifies a fresh PersistentState loads the same topology back.
public void AllPeers_and_AllVotingPeers_can_be_persistantly_saved_and_loaded()
{
    var cancellationTokenSource = new CancellationTokenSource();

    var path = "test" + Guid.NewGuid();
    try
    {
        var expectedAllVotingPeers = new List <string> { "Node123", "Node1", "Node2", "NodeG", "NodeB", "NodeABC" };

        // First session: topology store starts empty, then gets populated and saved.
        using (var options = StorageEnvironmentOptions.ForPath(path))
        {
            using (var persistentState = new PersistentState("self", options, cancellationTokenSource.Token)
            {
                CommandSerializer = new JsonCommandSerializer()
            })
            {
                var currentConfiguration = persistentState.GetCurrentTopology();
                Assert.Empty(currentConfiguration.AllVotingNodes);

                var currentTopology = new Topology(new Guid("355a589b-cadc-463d-a515-5add2ea47205"), expectedAllVotingPeers.Select(x => new NodeConnectionInfo { Name = x }), Enumerable.Empty <NodeConnectionInfo>(), Enumerable.Empty <NodeConnectionInfo>());
                persistentState.SetCurrentTopology(currentTopology, 1);
            }
        }

        // Second session: reopening the same path must yield the saved peers.
        using (var options = StorageEnvironmentOptions.ForPath(path))
        {
            using (var persistentState = new PersistentState("self", options, cancellationTokenSource.Token)
            {
                CommandSerializer = new JsonCommandSerializer()
            })
            {
                var currentConfiguration = persistentState.GetCurrentTopology();
                Assert.Equal(expectedAllVotingPeers.Count, currentConfiguration.AllVotingNodes.Count());
                foreach (var nodeConnectionInfo in currentConfiguration.AllVotingNodes)
                {
                    Assert.True(expectedAllVotingPeers.Contains(nodeConnectionInfo.Name));
                }
            }
        }
    }
    finally
    {
        // Remove the on-disk test directory even if assertions fail.
        new DirectoryInfo(path).Delete(true);
    }
}
// Writes a 512 KB value while the max journal file size is only 10 pages, then
// reopens the environment and verifies the value is recovered intact.
public void ShouldBeAbleToWriteValuesGreaterThanLogAndRecoverThem()
{
    var random = new Random(1234);
    var buffer = new byte[1024 * 512];
    random.NextBytes(buffer);

    var options = StorageEnvironmentOptions.ForPath(DataDir);
    // 10 pages per journal file: the value is guaranteed to span several journals.
    options.MaxLogFileSize = 10 * Constants.Storage.PageSize;
    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.WriteTransaction())
        {
            tx.CreateTree("tree");
            tx.Commit();
        }

        using (var tx = env.WriteTransaction())
        {
            tx.CreateTree("tree").Add("key1", new MemoryStream(buffer));
            tx.Commit();
        }
    }

    // Reopen with the same small journal size to force multi-journal recovery.
    options = StorageEnvironmentOptions.ForPath(DataDir);
    options.MaxLogFileSize = 10 * Constants.Storage.PageSize;

    using (var env = new StorageEnvironment(options))
    {
        using (var tx = env.WriteTransaction())
        {
            tx.CreateTree("tree");
            tx.Commit();
        }

        using (var tx = env.ReadTransaction())
        {
            var read = tx.CreateTree("tree").Read("key1");
            Assert.NotNull(read);

            {
                Assert.Equal(buffer.Length, read.Reader.Length);
                var bytes = read.Reader.ReadBytes(read.Reader.Length);
                Assert.Equal(buffer, bytes.Array.Skip(bytes.Offset).Take(bytes.Count).ToArray());
            }
        }
    }
}
/// <summary>
/// Switches the test options to a file-based pager backed by "test.data".
/// Must be called before the storage environment is created; a no-op when the
/// options are already directory-based.
/// </summary>
protected void RequireFileBasedPager()
{
    // Too late once the environment exists — the pager is already chosen.
    if (_storageEnvironment != null)
        throw new InvalidOperationException("Too late");

    if (_options is StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)
        return;

    DeleteDirectory("test.data");
    _options = StorageEnvironmentOptions.ForPath("test.data");
    Configure(_options);
}
// Captures recovery settings from the configuration and prepares storage
// options pointed at the directory containing the damaged data file.
public Recovery(VoronRecoveryConfiguration config)
{
    _datafile = config.PathToDataFile;
    _output = config.OutputFileName;
    // Sizes arrive in KB/MB units and are normalized to bytes here.
    _pageSize = config.PageSizeInKb * Constants.Size.Kilobyte;
    _initialContextSize = config.InitialContextSizeInMB * Constants.Size.Megabyte;
    _initialContextLongLivedSize = config.InitialContextLongLivedSizeInKB * Constants.Size.Kilobyte;

    // Journals are expected in a "Journal" subdirectory next to the data file.
    _option = StorageEnvironmentOptions.ForPath(config.DataFileDirectory, null, Path.Combine(config.DataFileDirectory, "Journal"), null, null);

    _copyOnWrite = !config.DisableCopyOnWriteMode;
    // by default CopyOnWriteMode will be true
    _option.CopyOnWriteMode = _copyOnWrite;
    _progressIntervalInSec = config.ProgressIntervalInSec;
    _previouslyWrittenDocs = new Dictionary <string, long>();
}
/// <summary>
/// Simulates a database restart: stops it, and for file-based storage recreates
/// the options from disk (the old ones are disposed) before starting again.
/// </summary>
protected void RestartDatabase()
{
    var usesFileBasedOptions = Options is StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions;

    // Only dispose the options when we're about to rebuild them from DataDir.
    StopDatabase(shouldDisposeOptions: usesFileBasedOptions);

    if (usesFileBasedOptions)
    {
        Options = StorageEnvironmentOptions.ForPath(DataDir);
        Configure(Options);
    }

    StartDatabase();
}
// Compacts a database after deleting roughly half its records and asserts the
// compacted copy occupies less disk space than the original.
public void ShouldOccupyLessSpace(int seed)
{
    var r = new Random(seed);
    var storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(DataDir);
    storageEnvironmentOptions.ManualFlushing = true;
    using (var env = new StorageEnvironment(storageEnvironmentOptions))
    {
        using (var tx = env.WriteTransaction())
        {
            var tree = tx.CreateTree("records");

            // 100 records of random size between 10 bytes and 2 MB.
            for (int i = 0; i < 100; i++)
            {
                var bytes = new byte[r.Next(10, 2 * 1024 * 1024)];
                r.NextBytes(bytes);

                tree.Add("record/" + i, bytes);
            }

            tx.Commit();
        }

        // Delete up to 50 records (random keys may repeat, so possibly fewer).
        using (var tx = env.WriteTransaction())
        {
            var tree = tx.CreateTree("records");

            for (int i = 0; i < 50; i++)
            {
                tree.Delete("record/" + r.Next(0, 100));
            }

            tx.Commit();
        }

        // Push everything to the data file so the on-disk size reflects the state.
        env.FlushLogToDataFile();
    }

    var oldSize = GetDirSize(new DirectoryInfo(DataDir));

    storageEnvironmentOptions = StorageEnvironmentOptions.ForPath(DataDir);
    storageEnvironmentOptions.ManualFlushing = true;
    var compactedData = Path.Combine(DataDir, "Compacted");
    StorageCompaction.Execute(storageEnvironmentOptions, (StorageEnvironmentOptions.DirectoryStorageEnvironmentOptions)StorageEnvironmentOptions.ForPath(compactedData));

    var newSize = GetDirSize(new DirectoryInfo(compactedData));

    Assert.True(newSize < oldSize, string.Format("Old size: {0:#,#;;0} MB, new size {1:#,#;;0} MB", oldSize / 1024 / 1024, newSize / 1024 / 1024));
}