// Creates a backup operation for a file system hosted on a Voron storage
// environment. All directory/document validation is delegated to the base
// constructor; only the environment is validated here.
public BackupOperation(RavenFileSystem filesystem, string backupSourceDirectory, string backupDestinationDirectory, StorageEnvironment env, bool incrementalBackup, FileSystemDocument fileSystemDocument)
    : base(filesystem, backupSourceDirectory, backupDestinationDirectory, incrementalBackup, fileSystemDocument)
{
    if (env == null)
        throw new ArgumentNullException("env");

    this.env = env;
}
// Creates a backup operation for a document database hosted on a Voron
// storage environment. Mirrors the file-system overload: base ctor handles
// directory/document validation, the environment is checked locally.
public BackupOperation(DocumentDatabase database, string backupSourceDirectory, string backupDestinationDirectory, StorageEnvironment env, bool incrementalBackup, DatabaseDocument databaseDocument)
    : base(database, backupSourceDirectory, backupDestinationDirectory, incrementalBackup, databaseDocument)
{
    if (env == null)
        throw new ArgumentNullException("env");

    this.env = env;
}
// Builds the table storage on top of a fresh StorageEnvironment created
// from the supplied options, then runs Initialize() to set up its schema.
public TableStorage(StorageEnvironmentOptions options, IBufferPool bufferPool)
{
    if (options == null)
        throw new ArgumentNullException("options");

    _options = options;
    this.bufferPool = bufferPool;

    env = new StorageEnvironment(_options);

    Initialize();
}
// Opens (or creates) the FreeDB storage at the given path and makes sure the
// album tree plus its three secondary-index trees exist before any query runs.
public FreeDbQueries(string path)
{
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path));

    using (Transaction tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        // CreateTree is a no-op when the tree already exists, so this is safe on reopen.
        foreach (var treeName in new[] { "albums", "ix_diskids", "ix_artists", "ix_titles" })
        {
            _storageEnvironment.CreateTree(tx, treeName);
        }

        tx.Commit();
    }
}
// Opens the "FreeDB" storage, ensures the album tree and its secondary-index
// trees exist, and prepares the first write batch used for imports.
public VoronDisksDestination()
{
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath("FreeDB"));

    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        foreach (var treeName in new[] { "albums", "ix_diskids", "ix_artists", "ix_titles" })
        {
            _storageEnvironment.CreateTree(tx, treeName);
        }

        tx.Commit();
    }

    _currentBatch = new WriteBatch();
}
// Opens the time-series storage at the given path and restores the last
// generated key counter from the root tree, defaulting to 1 for a new store.
public DateTimeSeries(string path)
{
    _lastKey = "last-key";
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path));

    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        var read = tx.State.Root.Read(tx, _lastKey);
        if (read == null)
        {
            // Fresh database: no persisted counter yet.
            _last = 1;
        }
        else
        {
            _last = read.Reader.ReadInt64();
        }

        tx.Commit();
    }
}
// Sets up the key/value state machine storage: forces incremental backups on
// (presumably needed by the snapshot/restore path — confirm), creates the
// "items" and "$metadata" trees, and restores the last applied Raft index.
public KeyValueStateMachine(StorageEnvironmentOptions options)
{
    options.IncrementalBackupEnabled = true;

    _storageEnvironment = new StorageEnvironment(options);

    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        _storageEnvironment.CreateTree(tx, "items");
        var metadata = _storageEnvironment.CreateTree(tx, "$metadata");

        var readResult = metadata.Read("last-index");
        if (readResult != null)
        {
            LastAppliedIndex = readResult.Reader.ReadLittleEndianInt64();
        }

        tx.Commit();
    }
}
// Creates a counter storage named after the server URL + storage name,
// backed by either an in-memory or an on-disk Voron environment depending
// on the configuration, and starts the replication task.
public CounterStorage(string serverUrl, string counterStorageStorageName, InMemoryRavenConfiguration configuration)
{
    CounterStorageUrl = String.Format("{0}counters/{1}", serverUrl, counterStorageStorageName);
    CounterStorageName = counterStorageStorageName;

    StorageEnvironmentOptions options;
    if (configuration.RunInMemory)
        options = StorageEnvironmentOptions.CreateMemoryOnly();
    else
        options = CreateStorageOptionsFromConfiguration(configuration.CountersDataDirectory, configuration.Settings);

    storageEnvironment = new StorageEnvironment(options);
    ReplicationTask = new RavenCounterReplication(this);

    // Falls back to 60 seconds when no timeout is configured.
    ReplicationTimeoutInMs = configuration.GetConfigurationValue<int>("Raven/Replication/ReplicationRequestTimeout") ?? 60 * 1000;

    Initialize();
}
// Opens the object-database backend at the given path and ensures the
// Index and Objects trees exist before the backend is used.
public VoronOdbBackend(string voronDataPath)
{
    if (voronDataPath == null)
        throw new ArgumentNullException("voronDataPath");

    _env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(voronDataPath));

    using (var tx = _env.NewTransaction(TransactionFlags.ReadWrite))
    {
        _env.CreateTree(tx, Index);
        _env.CreateTree(tx, Objects);

        tx.Commit();
    }
}
// Builds the full-text index over a new storage environment and loads its
// persisted state (metadata, last document id, field definitions) inside a
// single read-write transaction.
public FullTextIndex(StorageEnvironmentOptions options, IAnalyzer analyzer)
{
    Analyzer = analyzer;
    Conventions = new IndexingConventions();
    BufferPool = new BufferPool();

    StorageEnvironment = new StorageEnvironment(options);

    using (var tx = StorageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        ReadMetadata(tx);
        ReadLastDocumentId(tx);
        ReadFields(tx);

        tx.Commit();
    }
}
// Creates a counter storage (in-memory or on-disk per configuration), wires
// up replication, metrics, and the transport state used for notifications.
public CounterStorage(string serverUrl, string storageName, InMemoryRavenConfiguration configuration, TransportState recievedTransportState = null)
{
    CounterStorageUrl = String.Format("{0}counters/{1}", serverUrl, storageName);
    Name = storageName;

    StorageEnvironmentOptions options;
    if (configuration.RunInMemory)
        options = StorageEnvironmentOptions.CreateMemoryOnly();
    else
        options = CreateStorageOptionsFromConfiguration(configuration.CountersDataDirectory, configuration.Settings);

    storageEnvironment = new StorageEnvironment(options);
    ReplicationTask = new RavenCounterReplication(this);

    // TODO: add an option to configure ReplicationRequestTimeout per counter
    // storage; until then fall back to 60 seconds when nothing is configured.
    ReplicationTimeoutInMs = configuration.GetConfigurationValue<int>("Raven/Replication/ReplicationRequestTimeout") ?? 60 * 1000;

    metricsCounters = new CountersMetricsManager();
    transportState = recievedTransportState ?? new TransportState();

    Initialize();
}
// Opens the time-series storage and loads (or, for a new database, generates
// and persists) its unique identifier from the "$metadata" tree.
public TimeSeriesStorage(StorageEnvironmentOptions options)
{
    _storageEnvironment = new StorageEnvironment(options);

    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        var metadata = _storageEnvironment.CreateTree(tx, "$metadata");
        var result = metadata.Read(tx, "id");
        if (result == null) // new db
        {
            Id = Guid.NewGuid();
            metadata.Add(tx, "id", new MemoryStream(Id.ToByteArray()));
        }
        else
        {
            Id = new Guid(result.Reader.ReadBytes(16));
        }

        // BUG FIX: the transaction was never committed, so the freshly minted
        // id was rolled back on dispose and a new Guid would be generated on
        // every startup. Commit so the id (and tree creation) persist, matching
        // every other constructor in this codebase.
        tx.Commit();
    }
}
// Round-trip smoke test: serialize a Foo into an in-memory Voron store via a
// WriteBatch, read it back in a read transaction, and verify the value survives.
public void WriteSomethingToVoron()
{
    var serializer = new JsonSerializer();

    using (var storage = new StorageEnvironment(StorageEnvironmentOptions.GetInMemory()))
    {
        // Schema setup: the "foos" tree must exist before the batch write.
        using (var tx = storage.NewTransaction(TransactionFlags.ReadWrite))
        {
            storage.CreateTree(tx, "foos");
            tx.Commit();
        }

        {
            var payload = new MemoryStream();
            var writeBatch = new WriteBatch();
            var stored = new Foo { Id = "hello", Value = 99 };

            using (var writer = new StreamWriter(payload))
            {
                serializer.Serialize(new JsonTextWriter(writer), stored);
                writer.Flush();

                // Rewind so the batch reads the serialized bytes from the start.
                payload.Position = 0;

                writeBatch.Add(stored.Id, payload, "foos");
                storage.Writer.Write(writeBatch);
            }
        }

        using (var tx = storage.NewTransaction(TransactionFlags.Read))
        {
            var foos = tx.GetTree("foos");
            var readResult = foos.Read(tx, "hello");

            using (var stream = readResult.Reader.AsStream())
            {
                var roundTripped = serializer.Deserialize<Foo>(new JsonTextReader(new StreamReader(stream)));
                Assert.Equal(99, roundTripped.Value);
            }
        }
    }
}
// Creates a counter storage (in-memory or on-disk per configuration), wires
// up replication, metrics, transport state and extension state, and records
// the resource name used for addressing this counter storage.
public CounterStorage(string serverUrl, string storageName, InMemoryRavenConfiguration configuration, TransportState recievedTransportState = null)
{
    CounterStorageUrl = String.Format("{0}counters/{1}", serverUrl, storageName);
    Name = storageName;
    ResourceName = string.Concat(Constants.Counter.UrlPrefix, "/", storageName);

    StorageEnvironmentOptions options;
    if (configuration.RunInMemory)
        options = StorageEnvironmentOptions.CreateMemoryOnly();
    else
        options = CreateStorageOptionsFromConfiguration(configuration.CountersDataDirectory, configuration.Settings);

    storageEnvironment = new StorageEnvironment(options);
    ReplicationTask = new RavenCounterReplication(this);

    // TODO: add an option to create a ReplicationRequestTimeout when creating
    // a new counter storage.
    ReplicationTimeoutInMs = configuration.Replication.ReplicationRequestTimeoutInMilliseconds;

    metricsCounters = new CountersMetricsManager();
    transportState = recievedTransportState ?? new TransportState();
    Configuration = configuration;
    ExtensionsState = new AtomicDictionary<object>();

    Initialize();
}
// Captures the environment and the temporary page so the page can be
// returned to the environment's pool when this instance is disposed.
public ReturnTemporaryPageToPool(StorageEnvironment env, TemporaryPage tmp)
{
    _env = env;
    _tmp = tmp;
}
// Queues the environment for a mandatory flush+sync and wakes the flusher
// thread. The enqueue happens before Set so the woken writer is guaranteed
// to observe the request.
public void ForceFlushAndSyncEnvironment(StorageEnvironment env)
{
    _syncIsRequired.Enqueue(env);
    _flushWriterEvent.Set();
}
// Copies every record of <tableName> into a "Temp_" shadow tree, applying
// modifyRecord to each value, then swaps the shadow tree in place of the
// original. Work is chunked into write transactions of ~100 records each so
// a large table never produces one huge transaction. `output` receives
// human-readable progress messages.
private static void Migrate(StorageEnvironment env, string tableName, Action<string> output, Action<Slice, RavenJObject> modifyRecord)
{
    long entriesCount;
    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        entriesCount = tx.ReadTree(tableName).State.EntriesCount;
    }

    if (entriesCount == 0)
    {
        output(string.Format("No records to migrate in '{0}' table.", tableName));
        return;
    }

    output(string.Format("Starting to migrate '{0}' table to. Records to process: {1}", tableName, entriesCount));

    // Drop any leftover shadow tree from a previous (possibly failed) run,
    // then create a fresh one. Done in two transactions.
    using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        env.DeleteTree(txw, "Temp_" + tableName);
        txw.Commit();
    }

    using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        env.CreateTree(txw, "Temp_" + tableName);
        txw.Commit();
    }

    var migrated = 0L;
    var keyToSeek = Slice.BeforeAllKeys;

    do
    {
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var destTree = txw.ReadTree("Temp_" + tableName);
            var srcTree = txw.ReadTree(tableName);

            var iterator = srcTree.Iterate();
            if (iterator.Seek(keyToSeek) == false)
                break;

            var itemsInBatch = 0;

            do
            {
                // Record the resume point BEFORE the batch-boundary check:
                // when the batch fills up, the current key has NOT been
                // processed yet, so the next transaction seeks back to it.
                keyToSeek = iterator.CurrentKey;

                if (itemsInBatch != 0 && itemsInBatch % 100 == 0)
                    break;

                using (var stream = iterator.CreateReaderForCurrent().AsStream())
                {
                    var value = stream.ToJObject();
                    modifyRecord(iterator.CurrentKey, value);

                    using (var streamValue = new MemoryStream())
                    {
                        value.WriteTo(streamValue);
                        streamValue.Position = 0;

                        destTree.Add(iterator.CurrentKey, streamValue);
                    }

                    migrated++;
                    itemsInBatch++;
                }
            } while (iterator.MoveNext());

            txw.Commit();

            output(string.Format("{0} of {1} entries processed.", migrated, entriesCount));
        }
    } while (migrated < entriesCount);

    // Atomically (within one transaction) replace the source tree with the
    // migrated shadow tree.
    using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        env.DeleteTree(txw, tableName);
        env.RenameTree(txw, "Temp_" + tableName, tableName);
        txw.Commit();
    }
}
// Replaces the entire local state machine with a snapshot received over the
// wire. The stream's format is: int32 file count, then per file a
// length-prefixed name (BinaryReader.ReadString), an int64 byte length, and
// the raw file bytes. The first file is a full backup; the remainder are
// incremental backups applied on top of it. Finally the restored environment
// is reopened and "last-index" in $metadata is set to the snapshot's index.
// NOTE(review): lengths/names are read via the BinaryReader while payload
// bytes are read from the raw stream — this assumes the reader does not
// buffer ahead of its last read; confirm for the BinaryReader in use.
public void ApplySnapshot(long term, long index, Stream stream)
{
    var basePath = _storageEnvironment.Options.BasePath;

    // The environment must be closed (and its files deleted) before the
    // backup can be restored into the same directory.
    _storageEnvironment.Dispose();

    foreach (var file in Directory.EnumerateFiles(basePath))
    {
        File.Delete(file);
    }

    var files = new List<string>();

    var buffer = new byte[1024 * 16];
    var reader = new BinaryReader(stream);
    var filesCount = reader.ReadInt32();
    if (filesCount == 0)
        throw new InvalidOperationException("Snapshot cannot contain zero files");
    for (int i = 0; i < filesCount; i++)
    {
        var name = reader.ReadString();
        files.Add(name);
        var len = reader.ReadInt64();
        using (var file = File.Create(Path.Combine(basePath, name)))
        {
            // Pre-size the file, then copy exactly `len` bytes in 16KB chunks.
            file.SetLength(len);
            var totalFileRead = 0;
            while (totalFileRead < len)
            {
                var read = stream.Read(buffer, 0, (int)Math.Min(buffer.Length, len - totalFileRead));
                if (read == 0)
                    throw new EndOfStreamException();
                totalFileRead += read;
                file.Write(buffer, 0, read);
            }
        }
    }

    // First file = full backup; the rest are incremental backups on top.
    new FullBackup().Restore(Path.Combine(basePath, files[0]), basePath);

    var options = StorageEnvironmentOptions.ForPath(basePath);
    options.IncrementalBackupEnabled = true;

    new IncrementalBackup().Restore(options, files.Skip(1));

    _storageEnvironment = new StorageEnvironment(options);

    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        var metadata = tx.ReadTree("$metadata");
        metadata.Add("last-index", EndianBitConverter.Little.GetBytes(index));
        LastAppliedIndex = index;
        tx.Commit();
    }
}
// Opens a read-write transaction over the counter storage and resolves every
// tree the writer touches up front. Also builds a Reader over the SAME
// transaction so writes-in-progress are visible to its lookups. The store
// buffer holds one counter record: two little-endian longs (positive then
// negative part — TODO confirm endianness at the write site).
public Writer(CounterStorage parent, StorageEnvironment storageEnvironment)
{
    this.parent = parent;
    transaction = storageEnvironment.NewTransaction(TransactionFlags.ReadWrite);
    reader = new Reader(parent, transaction);
    serverNamesToIds = transaction.State.GetTree(transaction, "serverNames->Ids");
    serverIdsToNames = transaction.State.GetTree(transaction, "Ids->serverNames");
    serversLastEtag = transaction.State.GetTree(transaction, "servers->lastEtag");
    counters = transaction.State.GetTree(transaction, "counters");
    countersGroups = transaction.State.GetTree(transaction, "countersGroups");
    etagsCountersIx = transaction.State.GetTree(transaction, "etags->counters");
    countersEtagIx = transaction.State.GetTree(transaction, "counters->etags");
    metadata = transaction.State.GetTree(transaction, "$metadata");
    storeBuffer = new byte[sizeof(long) + // positive
                           sizeof(long)]; // negative
    storeBufferLength = storeBuffer.Length;
}
// Benchmark: writes `data` from multiple threads against a single on-disk
// storage environment, batching `itemsPerTransaction` items per write batch,
// and reports per-transaction performance records plus total elapsed time.
private List<PerformanceRecord> WriteParallel(string operation, IEnumerable<TestData> data, int itemsPerTransaction, int numberOfTransactions, PerfTracker perfTracker, int numberOfThreads, out long elapsedMilliseconds)
{
    NewStorage();

    using (var storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(dataPath)))
    {
        return ExecuteWriteWithParallel(
            data,
            numberOfTransactions,
            itemsPerTransaction,
            numberOfThreads,
            (dataEnumerator, itemsPerTx, txCount) => WriteInternalBatch(operation, dataEnumerator, itemsPerTx, txCount, perfTracker, storageEnvironment),
            out elapsedMilliseconds);
    }
}
// Convenience overload: opens a fresh read transaction on the given storage
// environment and delegates to the transaction-based constructor.
public Reader(CounterStorage parent, StorageEnvironment storageEnvironment)
    : this(parent, storageEnvironment.NewTransaction(TransactionFlags.Read))
{
}
// Registers the environment as a sync candidate. Unlike the Force* variants,
// this does not signal the flush-writer event — the request is picked up on
// the writer's next wake-up.
public void MaybeSyncEnvironment(StorageEnvironment env)
{
    _maybeNeedToSync.Enqueue(env);
}
// Benchmark: single-threaded batched write of `data` into a fresh on-disk
// storage environment; returns one performance record per batch.
private List<PerformanceRecord> Write(string operation, IEnumerable<TestData> data, int itemsPerTransaction, int numberOfTransactions, PerfTracker perfTracker)
{
    NewStorage();

    var options = StorageEnvironmentOptions.ForPath(dataPath);
    using (var env = new StorageEnvironment(options))
    {
        var dataEnumerator = data.GetEnumerator();
        return WriteInternalBatch(operation, dataEnumerator, itemsPerTransaction, numberOfTransactions, perfTracker, env);
    }
}
// Benchmark: reads the given ids from `numberOfThreads` threads. Manual
// flushing is enabled and the journal is flushed to the data file up front so
// background flushes don't interfere with the measurement.
private PerformanceRecord ReadParallel(string operation, IEnumerable<uint> ids, PerfTracker perfTracker, int numberOfThreads)
{
    var storageOptions = StorageEnvironmentOptions.ForPath(dataPath);
    storageOptions.ManualFlushing = true;

    using (var env = new StorageEnvironment(storageOptions))
    {
        env.FlushLogToDataFile();

        return ExecuteReadWithParallel(operation, ids, numberOfThreads, () => ReadInternal(ids, perfTracker, env));
    }
}
// Benchmark core: writes `numberOfTransactions` transactions of
// `itemsPerTransaction` items each directly into the root tree, timing each
// transaction. The value buffer is reused across iterations (GetValueToWrite
// decides whether to reallocate based on the requested size).
private List<PerformanceRecord> WriteInternal(
    string operation,
    int itemsPerTransaction,
    int numberOfTransactions,
    PerfTracker perfTracker,
    StorageEnvironment env,
    IEnumerator<TestData> enumerator)
{
    var records = new List<PerformanceRecord>();
    var sw = new Stopwatch();
    byte[] valueToWrite = null;

    for (var txIndex = 0; txIndex < numberOfTransactions; txIndex++)
    {
        sw.Restart();
        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            for (var item = 0; item < itemsPerTransaction; item++)
            {
                enumerator.MoveNext();

                valueToWrite = GetValueToWrite(valueToWrite, enumerator.Current.ValueSize);

                tx.State.Root.Add(tx, enumerator.Current.Id.ToString("0000000000000000"), new MemoryStream(valueToWrite));
            }

            tx.Commit();
            perfTracker.Record(sw.ElapsedMilliseconds);
        }
        sw.Stop();

        records.Add(new PerformanceRecord
        {
            Operation = operation,
            Time = DateTime.Now,
            Duration = sw.ElapsedMilliseconds,
            ProcessedItems = itemsPerTransaction
        });
    }

    return records;
}
// Benchmark core: writes `numberOfBatches` WriteBatches of `itemsPerBatch`
// items each into the "Root" tree, timing each batch and tracking the raw
// byte volume written. The value buffer is reused across iterations.
private List<PerformanceRecord> WriteInternalBatch(
    string operation,
    IEnumerator<TestData> enumerator,
    long itemsPerBatch,
    long numberOfBatches,
    PerfTracker perfTracker,
    StorageEnvironment env)
{
    var records = new List<PerformanceRecord>();
    var sw = new Stopwatch();
    byte[] valueToWrite = null;

    for (var batchIndex = 0; batchIndex < numberOfBatches; batchIndex++)
    {
        sw.Restart();

        long bytesWritten = 0;
        using (var batch = new WriteBatch())
        {
            for (var item = 0; item < itemsPerBatch; item++)
            {
                enumerator.MoveNext();

                valueToWrite = GetValueToWrite(valueToWrite, enumerator.Current.ValueSize);
                bytesWritten += valueToWrite.Length;

                batch.Add(enumerator.Current.Id.ToString("0000000000000000"), new MemoryStream(valueToWrite), "Root");
            }

            env.Writer.Write(batch);
        }

        sw.Stop();
        perfTracker.Record(sw.ElapsedMilliseconds);

        records.Add(new PerformanceRecord
        {
            Bytes = bytesWritten,
            Operation = operation,
            Time = DateTime.Now,
            Duration = sw.ElapsedMilliseconds,
            ProcessedItems = itemsPerBatch
        });
    }

    return records;
}
// Registers a non-mandatory sync request (forced: false); the flusher is not
// explicitly woken — contrast with ForceSyncEnvironment.
public void SuggestSyncEnvironment(StorageEnvironment env)
{
    AddEnvironmentSyncRequest(env, false);
}
// Benchmark: single-threaded read of the given ids with manual flushing
// enabled and the journal flushed to the data file up front. Returns one
// record covering the whole pass (bytes read, duration, item count).
private PerformanceRecord Read(string operation, IEnumerable<uint> ids, PerfTracker perfTracker)
{
    // BUG FIX: `ids` was enumerated twice — once inside ReadInternal and again
    // by ids.Count() — so a lazily-generated sequence was evaluated twice and
    // could even yield different items per pass. Materialize it exactly once.
    var idList = ids as ICollection<uint> ?? ids.ToList();

    var options = StorageEnvironmentOptions.ForPath(dataPath);
    options.ManualFlushing = true;

    using (var env = new StorageEnvironment(options))
    {
        env.FlushLogToDataFile();

        var sw = Stopwatch.StartNew();
        var v = ReadInternal(idList, perfTracker, env);
        sw.Stop();

        return new PerformanceRecord
        {
            Bytes = v,
            Operation = operation,
            Time = DateTime.Now,
            Duration = sw.ElapsedMilliseconds,
            ProcessedItems = idList.Count
        };
    }
}
// Registers a mandatory sync request (forced: true) and wakes the flusher
// thread immediately so the sync is not deferred to the next scheduled pass.
public void ForceSyncEnvironment(StorageEnvironment env)
{
    AddEnvironmentSyncRequest(env, true);
    _flushWriterEvent.Set();
}
// Benchmark core: within a single read transaction, looks each id up in the
// root tree and drains its value through a 4KB scratch buffer, returning the
// total number of bytes read. The whole pass is recorded as one timing sample.
// NOTE(review): assumes every id exists in the tree — a missing key would make
// the read result null and throw. Confirm the callers guarantee this.
private static long ReadInternal(IEnumerable<uint> ids, PerfTracker perfTracker, StorageEnvironment env)
{
    var scratch = new byte[4096];

    using (var tx = env.NewTransaction(TransactionFlags.Read))
    {
        var sw = Stopwatch.StartNew();
        long totalBytes = 0;

        foreach (var id in ids)
        {
            var key = id.ToString("0000000000000000");
            var readResult = tx.State.Root.Read(tx, key);

            int read;
            while ((read = readResult.Reader.Read(scratch, 0, scratch.Length)) > 0)
            {
                totalBytes += read;
            }
        }

        perfTracker.Record(sw.ElapsedMilliseconds);
        return totalBytes;
    }
}
// Creates the persistent Raft state store: a logger scoped to this instance's
// name, a storage environment built from the supplied options, and the
// database schema initialized via InitializeDatabase().
public PersistentState(string name, StorageEnvironmentOptions options, CancellationToken cancellationToken)
{
    _name = name;
    _cancellationToken = cancellationToken;
    _log = LogManager.GetLogger(GetType().Name + "." + name);

    _env = new StorageEnvironment(options);

    InitializeDatabase();
}
// Registers the environment as a flush candidate and wakes the flusher
// thread. Enqueue happens before Set so the woken writer sees the request.
public void MaybeFlushEnvironment(StorageEnvironment env)
{
    _maybeNeedToFlush.Enqueue(env);
    _flushWriterEvent.Set();
}