// Smoke test: round-trips one JSON-serialized Foo through an in-memory Voron tree
// and verifies the value survives write + read-back.
public void WriteSomethingToVoron()
{
    var jsonSerializer = new JsonSerializer();

    using (var storage = new StorageEnvironment(StorageEnvironmentOptions.GetInMemory()))
    {
        // Create the target tree in its own committed transaction so it is
        // visible to the write batch and the later read transaction.
        using (var schemaTx = storage.NewTransaction(TransactionFlags.ReadWrite))
        {
            storage.CreateTree(schemaTx, "foos");
            schemaTx.Commit();
        }

        // Serialize one entity into a memory stream and push it through a batch.
        {
            var payload = new MemoryStream();
            var batch = new WriteBatch();
            var foo = new Foo { Id = "hello", Value = 99 };

            using (var writer = new StreamWriter(payload))
            {
                jsonSerializer.Serialize(new JsonTextWriter(writer), foo);
                writer.Flush();
                payload.Position = 0;

                batch.Add(foo.Id, payload, "foos");
                storage.Writer.Write(batch);
            }
        }

        // Read the entity back and confirm the round trip preserved the value.
        using (var readTx = storage.NewTransaction(TransactionFlags.Read))
        {
            var foos = readTx.GetTree("foos");
            var readResult = foos.Read(readTx, "hello");

            using (var stream = readResult.Reader.AsStream())
            {
                var roundTripped = jsonSerializer.Deserialize<Foo>(new JsonTextReader(new StreamReader(stream)));
                Assert.Equal(99, roundTripped.Value);
            }
        }
    }
}
// Opens (or creates) the FreeDB storage environment at the given path and ensures
// the "albums" tree plus its three secondary-index trees exist before any queries run.
public FreeDbQueries(string path)
{
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path));

    // Tree creation requires a read/write transaction; committing makes the
    // trees visible to all subsequent transactions.
    using (Transaction schemaTx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        _storageEnvironment.CreateTree(schemaTx, "albums");
        _storageEnvironment.CreateTree(schemaTx, "ix_diskids");
        _storageEnvironment.CreateTree(schemaTx, "ix_artists");
        _storageEnvironment.CreateTree(schemaTx, "ix_titles");
        schemaTx.Commit();
    }
}
// Opens (or creates) the "FreeDB" storage environment, ensures the album tree and
// its index trees exist, and prepares the first write batch for incoming disks.
public VoronDisksDestination()
{
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath("FreeDB"));

    // All tree creation happens in one committed schema transaction.
    using (var schemaTx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        _storageEnvironment.CreateTree(schemaTx, "albums");
        _storageEnvironment.CreateTree(schemaTx, "ix_diskids");
        _storageEnvironment.CreateTree(schemaTx, "ix_artists");
        _storageEnvironment.CreateTree(schemaTx, "ix_titles");
        schemaTx.Commit();
    }

    _currentBatch = new WriteBatch();
}
// Opens the time-series storage at the given path and restores the last key
// counter persisted under "last-key" (defaulting to 1 for a fresh database).
public DateTimeSeries(string path)
{
    _lastKey = "last-key";
    _storageEnvironment = new StorageEnvironment(StorageEnvironmentOptions.ForPath(path));

    using (var initTx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        var lastKeyEntry = initTx.State.Root.Read(initTx, _lastKey);
        if (lastKeyEntry == null)
            _last = 1; // nothing persisted yet — start counting from 1
        else
            _last = lastKeyEntry.Reader.ReadInt64();
        initTx.Commit();
    }
}
// Opens the key/value state machine's storage (with incremental backups forced on,
// as required by the snapshot machinery) and restores the last applied index
// persisted in the "$metadata" tree, if any.
public KeyValueStateMachine(StorageEnvironmentOptions options)
{
    options.IncrementalBackupEnabled = true;
    _storageEnvironment = new StorageEnvironment(options);

    using (var setupTx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        _storageEnvironment.CreateTree(setupTx, "items");
        var metadataTree = _storageEnvironment.CreateTree(setupTx, "$metadata");

        // Resume from the persisted last applied index on restart.
        var lastIndexEntry = metadataTree.Read("last-index");
        if (lastIndexEntry != null)
            LastAppliedIndex = lastIndexEntry.Reader.ReadLittleEndianInt64();

        setupTx.Commit();
    }
}
// Opens (or creates) the Voron-backed object database at the given path and
// ensures the Index and Objects trees exist.
public VoronOdbBackend(string voronDataPath)
{
    if (voronDataPath == null)
        throw new ArgumentNullException("voronDataPath");

    _env = new StorageEnvironment(StorageEnvironmentOptions.ForPath(voronDataPath));

    using (var schemaTx = _env.NewTransaction(TransactionFlags.ReadWrite))
    {
        _env.CreateTree(schemaTx, Index);
        _env.CreateTree(schemaTx, Objects);
        schemaTx.Commit();
    }
}
// Builds a full-text index over the given storage, then loads its persisted
// state (metadata, last document id, field definitions) in one transaction.
public FullTextIndex(StorageEnvironmentOptions options, IAnalyzer analyzer)
{
    Analyzer = analyzer;
    Conventions = new IndexingConventions();
    BufferPool = new BufferPool();
    StorageEnvironment = new StorageEnvironment(options);

    using (var initTx = StorageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        ReadMetadata(initTx);
        ReadLastDocumentId(initTx);
        ReadFields(initTx);
        initTx.Commit();
    }
}
// Opens the time-series storage and loads the database identity from "$metadata",
// minting and persisting a new Guid for a brand-new database.
public TimeSeriesStorage(StorageEnvironmentOptions options)
{
    _storageEnvironment = new StorageEnvironment(options);

    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        var metadata = _storageEnvironment.CreateTree(tx, "$metadata");
        var result = metadata.Read(tx, "id");
        if (result == null) // new db
        {
            Id = Guid.NewGuid();
            metadata.Add(tx, "id", new MemoryStream(Id.ToByteArray()));
        }
        else
        {
            Id = new Guid(result.Reader.ReadBytes(16));
        }

        // BUG FIX: the transaction was never committed, so for a new database the
        // tree creation and the freshly minted id were rolled back — producing a
        // different Id on every startup. Every sibling constructor in this file
        // commits its setup transaction; this one must too.
        tx.Commit();
    }
}
// Reads the full value for every id in one read transaction, records the elapsed
// milliseconds in the perf tracker, and returns the total number of bytes read.
private static long ReadInternal(IEnumerable<uint> ids, PerfTracker perfTracker, StorageEnvironment env)
{
    var buffer = new byte[4096];

    using (var tx = env.NewTransaction(TransactionFlags.Read))
    {
        var stopwatch = Stopwatch.StartNew();
        long totalBytesRead = 0;

        foreach (var id in ids)
        {
            // Keys are the numeric ids zero-padded to 16 digits.
            var key = id.ToString("0000000000000000");
            var readResult = tx.State.Root.Read(tx, key);

            // NOTE(review): assumes every id exists — a missing key would throw
            // on the dereference below; confirm callers only pass known ids.
            int bytesRead;
            while ((bytesRead = readResult.Reader.Read(buffer, 0, buffer.Length)) > 0)
                totalBytesRead += bytesRead;
        }

        perfTracker.Record(stopwatch.ElapsedMilliseconds);
        return totalBytesRead;
    }
}
// Benchmark driver: performs numberOfTransactions write transactions of
// itemsPerTransaction entries each, timing every transaction. The commit time is
// fed to the perf tracker; one PerformanceRecord per transaction is returned.
private List<PerformanceRecord> WriteInternal(
    string operation,
    int itemsPerTransaction,
    int numberOfTransactions,
    PerfTracker perfTracker,
    StorageEnvironment env,
    IEnumerator<TestData> enumerator)
{
    var stopwatch = new Stopwatch();
    byte[] valueToWrite = null; // reused/resized across items by GetValueToWrite
    var records = new List<PerformanceRecord>();

    for (var txIndex = 0; txIndex < numberOfTransactions; txIndex++)
    {
        stopwatch.Restart();

        using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            for (var item = 0; item < itemsPerTransaction; item++)
            {
                enumerator.MoveNext();
                valueToWrite = GetValueToWrite(valueToWrite, enumerator.Current.ValueSize);
                // Keys are the test ids zero-padded to 16 digits.
                tx.State.Root.Add(tx, enumerator.Current.Id.ToString("0000000000000000"), new MemoryStream(valueToWrite));
            }

            tx.Commit();
            // Record the time up to (and including) the commit, before disposal.
            perfTracker.Record(stopwatch.ElapsedMilliseconds);
        }

        stopwatch.Stop();

        records.Add(new PerformanceRecord
        {
            Operation = operation,
            Time = DateTime.Now,
            Duration = stopwatch.ElapsedMilliseconds,
            ProcessedItems = itemsPerTransaction
        });
    }

    return records;
}
// Rewrites every record of 'tableName' through 'modifyRecord'.
// Strategy: copy all records into a fresh "Temp_<tableName>" tree in batches of
// 100 entries per transaction (bounding transaction size), then swap the temp
// tree into place with a delete + rename in a final transaction.
// NOTE(review): 'entriesCount' is read once up front; assumes no concurrent
// writers mutate the table during migration — confirm with callers.
private static void Migrate(StorageEnvironment env, string tableName, Action<string> output, Action<Slice, RavenJObject> modifyRecord)
{
    long entriesCount;
    using (var tx = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        entriesCount = tx.ReadTree(tableName).State.EntriesCount;
    }

    if (entriesCount == 0)
    {
        output(string.Format("No records to migrate in '{0}' table.", tableName));
        return;
    }

    output(string.Format("Starting to migrate '{0}' table to. Records to process: {1}", tableName, entriesCount));

    // Drop any leftover temp tree from a previous (possibly aborted) migration,
    // then recreate it empty. Done in separate committed transactions.
    using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        env.DeleteTree(txw, "Temp_" + tableName);
        txw.Commit();
    }

    using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        env.CreateTree(txw, "Temp_" + tableName);
        txw.Commit();
    }

    var migrated = 0L;
    var keyToSeek = Slice.BeforeAllKeys;

    // Outer loop: one write transaction per batch; resumes from 'keyToSeek'.
    do
    {
        using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
        {
            var destTree = txw.ReadTree("Temp_" + tableName);
            var srcTree = txw.ReadTree(tableName);
            var iterator = srcTree.Iterate();
            if (iterator.Seek(keyToSeek) == false)
                break;

            var itemsInBatch = 0;
            do
            {
                // Remember where to resume after this batch commits. Because the
                // batch breaks BEFORE processing the 100th-plus-one key, the saved
                // key is re-processed first in the next batch (an idempotent Add).
                keyToSeek = iterator.CurrentKey;
                if (itemsInBatch != 0 && itemsInBatch % 100 == 0)
                    break;

                using (var stream = iterator.CreateReaderForCurrent().AsStream())
                {
                    var value = stream.ToJObject();
                    modifyRecord(iterator.CurrentKey, value);
                    using (var streamValue = new MemoryStream())
                    {
                        value.WriteTo(streamValue);
                        streamValue.Position = 0;
                        destTree.Add(iterator.CurrentKey, streamValue);
                    }
                    migrated++;
                    itemsInBatch++;
                }
            } while (iterator.MoveNext());

            txw.Commit();
            output(string.Format("{0} of {1} entries processed.", migrated, entriesCount));
        }
    } while (migrated < entriesCount);

    // Final swap: remove the original tree and rename the temp tree into its place.
    using (var txw = env.NewTransaction(TransactionFlags.ReadWrite))
    {
        env.DeleteTree(txw, tableName);
        env.RenameTree(txw, "Temp_" + tableName, tableName);
        txw.Commit();
    }
}
// Creates a counter-storage writer backed by a single long-lived read/write
// transaction, caching handles to every tree it touches.
// NOTE(review): the transaction opened here is presumably committed/disposed by
// the Writer later (e.g. in a Commit/Dispose member outside this view) — confirm.
public Writer(CounterStorage parent, StorageEnvironment storageEnvironment)
{
    this.parent = parent;
    transaction = storageEnvironment.NewTransaction(TransactionFlags.ReadWrite);
    // Reader shares this transaction, so writes are visible to its reads.
    reader = new Reader(parent, transaction);

    // Cache the tree handles once; tree names encode their mapping direction.
    serverNamesToIds = transaction.State.GetTree(transaction, "serverNames->Ids");
    serverIdsToNames = transaction.State.GetTree(transaction, "Ids->serverNames");
    serversLastEtag = transaction.State.GetTree(transaction, "servers->lastEtag");
    counters = transaction.State.GetTree(transaction, "counters");
    countersGroups = transaction.State.GetTree(transaction, "countersGroups");
    etagsCountersIx = transaction.State.GetTree(transaction, "etags->counters");
    countersEtagIx = transaction.State.GetTree(transaction, "counters->etags");
    metadata = transaction.State.GetTree(transaction, "$metadata");

    // Scratch buffer reused for each stored counter value: two packed longs.
    storeBuffer = new byte[sizeof(long) + //positive
                           sizeof(long)]; // negative
    storeBufferLength = storeBuffer.Length;
}
// Convenience overload: opens a fresh read transaction on the environment and
// delegates to the transaction-based constructor.
// NOTE(review): presumably the Reader then owns and disposes that transaction —
// confirm against the primary constructor/Dispose (not visible here).
public Reader(CounterStorage parent, StorageEnvironment storageEnvironment)
    : this(parent, storageEnvironment.NewTransaction(TransactionFlags.Read)) { }
// Replaces the entire storage contents with a snapshot read from 'stream'.
// Wire format: [int32 fileCount] then, per file: [length-prefixed string name]
// [int64 length] [raw bytes]. The first file is a Voron full backup; any
// remaining files are incremental backups replayed in order.
// NOTE(review): 'term' is accepted but unused here — confirm that is intended.
public void ApplySnapshot(long term, long index, Stream stream)
{
    var basePath = _storageEnvironment.Options.BasePath;

    // Tear down the live environment and wipe its files so the restore starts clean.
    _storageEnvironment.Dispose();
    foreach (var file in Directory.EnumerateFiles(basePath))
    {
        File.Delete(file);
    }

    var files = new List<string>();
    var buffer = new byte[1024 * 16];
    // BinaryReader is used for the framing (count/name/length); the raw file
    // bytes are read straight off the underlying stream below.
    var reader = new BinaryReader(stream);
    var filesCount = reader.ReadInt32();
    if (filesCount == 0)
        throw new InvalidOperationException("Snapshot cannot contain zero files");

    // Extract each embedded file to disk, copying exactly 'len' bytes.
    for (int i = 0; i < filesCount; i++)
    {
        var name = reader.ReadString();
        files.Add(name);
        var len = reader.ReadInt64();
        using (var file = File.Create(Path.Combine(basePath, name)))
        {
            // Pre-size the file, then fill it in 16 KB chunks.
            file.SetLength(len);
            var totalFileRead = 0;
            while (totalFileRead < len)
            {
                var read = stream.Read(buffer, 0, (int)Math.Min(buffer.Length, len - totalFileRead));
                if (read == 0)
                    throw new EndOfStreamException();
                totalFileRead += read;
                file.Write(buffer, 0, read);
            }
        }
    }

    // Restore the full backup first, then replay the incremental backups.
    new FullBackup().Restore(Path.Combine(basePath, files[0]), basePath);
    var options = StorageEnvironmentOptions.ForPath(basePath);
    options.IncrementalBackupEnabled = true;
    new IncrementalBackup().Restore(options, files.Skip(1));

    // Reopen the environment and persist the snapshot's index so recovery
    // resumes from the right place.
    _storageEnvironment = new StorageEnvironment(options);
    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        var metadata = tx.ReadTree("$metadata");
        metadata.Add("last-index", EndianBitConverter.Little.GetBytes(index));
        LastAppliedIndex = index;
        tx.Commit();
    }
}