protected void StopDatabase()
{
    // Temporarily give up pager ownership so disposing the environment
    // leaves the pagers open, then restore the original setting.
    var ownsPagers = _options.OwnsPagers;
    _options.OwnsPagers = false;
    _storageEnvironment.Dispose();
    _options.OwnsPagers = ownsPagers;
}
public void Dispose()
{
    if (env != null)
    {
        env.Dispose();
    }
}
public virtual void Cleanup()
{
    if (DeleteBeforeEachBenchmark)
    {
        Env.Dispose();
    }
}
public void Dispose()
{
    if (_env != null && _isDisposed == false)
    {
        _env.Dispose();
        _isDisposed = true;
    }
}
public void Dispose()
{
    ReplicationTask.Dispose();
    if (storageEnvironment != null)
    {
        storageEnvironment.Dispose();
    }
    metricsCounters.Dispose();
}
public void Dispose()
{
    foreach (var state in _subscriptionStates.Values)
    {
        state.Dispose();
    }
    _unmanagedBuffersPool.Dispose();
    _environment.Dispose();
}
public void Dispose()
{
    _storageEnvironment.Dispose();
    _pager.Dispose();
    if (File.Exists("test.data"))
    {
        File.Delete("test.data");
    }
}
protected RachisConsensus<CountingStateMachine> SetupServer(bool bootstrap = false, int port = 0, int electionTimeout = 300, [CallerMemberName] string caller = null)
{
    var tcpListener = new TcpListener(IPAddress.Loopback, port);
    tcpListener.Start();

    // The bootstrap node is always named 'A'; every other node gets the next letter.
    char ch;
    if (bootstrap)
    {
        ch = (char)65;
    }
    else
    {
        ch = (char)(65 + Interlocked.Increment(ref _count));
    }

    var url = $"tcp://localhost:{((IPEndPoint)tcpListener.LocalEndpoint).Port}/?{caller}#{ch}";
    var server = StorageEnvironmentOptions.CreateMemoryOnly();
    int seed = PredictableSeeds ? _random.Next(int.MaxValue) : (int)Interlocked.Read(ref _count);

    var configuration = RavenConfiguration.CreateForServer(caller);
    configuration.Initialize();
    configuration.Core.RunInMemory = true;
    configuration.Core.PublicServerUrl = new UriSetting($"http://localhost:{((IPEndPoint)tcpListener.LocalEndpoint).Port}");
    configuration.Cluster.ElectionTimeout = new TimeSetting(electionTimeout, TimeUnit.Milliseconds);

    var serverStore = new RavenServer(configuration) { ThrowOnLicenseActivationFailure = true }.ServerStore;
    serverStore.Initialize();

    var rachis = new RachisConsensus<CountingStateMachine>(serverStore, seed);
    var storageEnvironment = new StorageEnvironment(server);
    rachis.Initialize(storageEnvironment, configuration, configuration.Core.ServerUrls[0], out _);
    rachis.OnDispose += (sender, args) =>
    {
        serverStore.Dispose();
        storageEnvironment.Dispose();
    };

    if (bootstrap)
    {
        rachis.Bootstrap(url, "A");
    }

    rachis.Url = url;
    _listeners.Add(tcpListener);
    RachisConsensuses.Add(rachis);
    var task = AcceptConnection(tcpListener, rachis); // fire-and-forget accept loop
    return rachis;
}
public virtual void Dispose()
{
    _storageEnvironment?.Dispose();
    _options.Dispose();
    _allocator.Dispose();
    DeleteDirectory(DataDir);
    _storageEnvironment = null;
    _options = null;
    _allocator = null;
    GC.Collect(GC.MaxGeneration);
    GC.WaitForPendingFinalizers();
}
public void Dispose()
{
    if (!_isDisposed)
    {
        if (_ownsStorageEnvironment)
        {
            _env.Dispose();
        }
        _isDisposed = true;
        _byteStringContext.Dispose();
        _edgesSchema.Dispose();
        _verticesSchema.Dispose();
    }
    GC.SuppressFinalize(this);
}
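The snippet above is the classic guarded-dispose pattern: a flag prevents double disposal, the environment is only disposed when owned, and GC.SuppressFinalize skips a now-redundant finalizer. A minimal self-contained sketch of the same idea; the type and field names here are hypothetical, not from the original codebase:

using System;

public sealed class StorageOwner : IDisposable
{
    private readonly IDisposable _env; // stands in for a StorageEnvironment
    private readonly bool _ownsEnv;
    private bool _isDisposed;

    public StorageOwner(IDisposable env, bool ownsEnv)
    {
        _env = env;
        _ownsEnv = ownsEnv;
    }

    public void Dispose()
    {
        if (_isDisposed)
            return; // Dispose must be safe to call more than once
        _isDisposed = true;
        if (_ownsEnv)
            _env.Dispose(); // only dispose what this instance owns
        GC.SuppressFinalize(this); // cleanup ran, so a finalizer has nothing left to do
    }
}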
protected RachisConsensus<CountingStateMachine> SetupServer(bool bootstrap = false, int port = 0, int electionTimeout = 300, [CallerMemberName] string caller = null)
{
    var tcpListener = new TcpListener(IPAddress.Loopback, port);
    tcpListener.Start();

    // Non-bootstrap nodes are named 'B', 'C', ...; the bootstrap node is always 'A'.
    var ch = (char)(66 + _count++);
    if (bootstrap)
    {
        ch = (char)65;
        _count--;
    }

    var url = "tcp://localhost:" + ((IPEndPoint)tcpListener.LocalEndpoint).Port + "/?" + caller + "#" + ch;
    var server = StorageEnvironmentOptions.CreateMemoryOnly();
    int seed = PredictableSeeds ? _random.Next(int.MaxValue) : _count;

    var configuration = new RavenConfiguration(caller, ResourceType.Server);
    configuration.Initialize();
    configuration.Core.RunInMemory = true;
    configuration.Core.PublicServerUrl = new UriSetting($"http://localhost:{((IPEndPoint)tcpListener.LocalEndpoint).Port}");
    configuration.Cluster.ElectionTimeout = new TimeSetting(electionTimeout, TimeUnit.Milliseconds);

    var serverStore = new RavenServer(configuration).ServerStore;
    serverStore.Initialize();

    var rachis = new RachisConsensus<CountingStateMachine>(serverStore, seed);
    var storageEnvironment = new StorageEnvironment(server);
    rachis.Initialize(storageEnvironment, configuration, configuration.Core.ServerUrls[0]);
    rachis.OnDispose += (sender, args) =>
    {
        serverStore.Dispose();
        storageEnvironment.Dispose();
    };

    if (bootstrap)
    {
        rachis.Bootstrap(url, "A");
    }

    rachis.Url = url;
    _listeners.Add(tcpListener);
    RachisConsensuses.Add(rachis);
    var task = AcceptConnection(tcpListener, rachis); // fire-and-forget accept loop
    return rachis;
}
public void Dispose()
{
    // give it 3 seconds to complete requests
    for (int i = 0; i < 30 && Interlocked.Read(ref metricsCounters.ConcurrentRequestsCount) > 0; i++)
    {
        Thread.Sleep(100);
    }

    ReplicationTask.Dispose();
    if (storageEnvironment != null)
    {
        storageEnvironment.Dispose();
    }
    metricsCounters.Dispose();
}
public void ApplySnapshot(long term, long index, Stream stream)
{
    var basePath = _storageEnvironment.Options.BasePath;
    _storageEnvironment.Dispose();

    foreach (var file in Directory.EnumerateFiles(basePath))
    {
        File.Delete(file);
    }

    var files = new List<string>();
    var buffer = new byte[1024 * 16];
    var reader = new BinaryReader(stream);
    var filesCount = reader.ReadInt32();
    if (filesCount == 0)
    {
        throw new InvalidOperationException("Snapshot cannot contain zero files");
    }

    for (int i = 0; i < filesCount; i++)
    {
        var name = reader.ReadString();
        files.Add(name);
        var len = reader.ReadInt64();
        using (var file = File.Create(Path.Combine(basePath, name)))
        {
            file.SetLength(len);
            var totalFileRead = 0;
            while (totalFileRead < len)
            {
                var read = stream.Read(buffer, 0, (int)Math.Min(buffer.Length, len - totalFileRead));
                if (read == 0)
                {
                    throw new EndOfStreamException();
                }
                totalFileRead += read;
                file.Write(buffer, 0, read);
            }
        }
    }

    // The first file is a full backup; the remaining files are incremental backups on top of it.
    new FullBackup().Restore(Path.Combine(basePath, files[0]), basePath);
    var options = StorageEnvironmentOptions.ForPath(basePath);
    options.IncrementalBackupEnabled = true;
    new IncrementalBackup().Restore(options, files.Skip(1));

    _storageEnvironment = new StorageEnvironment(options);
    using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.ReadWrite))
    {
        var metadata = tx.ReadTree("$metadata");
        metadata.Add("last-index", EndianBitConverter.Little.GetBytes(index));
        LastAppliedIndex = index;
        tx.Commit();
    }
}
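ApplySnapshot above records the applied index under the "last-index" key of the $metadata tree. A hedged caller-side sketch of reading it back, reusing the names from the snippet and assuming the same Voron version exposes ValueReader.ReadLittleEndianInt64 for decoding:

// Illustrative sketch, not from the source: read back the "last-index"
// value that ApplySnapshot stores. Tree and key names come from the
// snippet above; ReadLittleEndianInt64 is an assumed ValueReader API.
using (var tx = _storageEnvironment.NewTransaction(TransactionFlags.Read))
{
    var metadata = tx.ReadTree("$metadata");
    var result = metadata.Read("last-index");
    if (result != null)
        LastAppliedIndex = result.Reader.ReadLittleEndianInt64();
}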
public RecoveryStatus Execute(CancellationToken ct)
{
    var sw = new Stopwatch();
    StorageEnvironment se = null;
    sw.Start();

    if (_copyOnWrite)
    {
        Console.WriteLine("Recovering journal files, this may take a while...");
        try
        {
            se = new StorageEnvironment(_option);
            Console.WriteLine(
                $"Journal recovery has completed successfully within {sw.Elapsed.TotalSeconds:N1} seconds");
        }
        catch (Exception e)
        {
            Console.WriteLine($"Journal recovery failed, reason:{Environment.NewLine}{e}");
        }
        finally
        {
            se?.Dispose();
        }
    }

    _option = StorageEnvironmentOptions.ForPath(Path.GetDirectoryName(_datafile));

    var mem = Pager.AcquirePagePointer(null, 0);
    long startOffset = (long)mem;
    var fi = new FileInfo(_datafile);
    var fileSize = fi.Length;
    // making sure eof is page aligned
    var eof = mem + (fileSize / _pageSize) * _pageSize;
    DateTime lastProgressReport = DateTime.MinValue;

    using (var destinationStreamDocuments = File.OpenWrite(Path.Combine(Path.GetDirectoryName(_output),
               Path.GetFileNameWithoutExtension(_output) + "-2-Documents" + Path.GetExtension(_output))))
    using (var destinationStreamRevisions = File.OpenWrite(Path.Combine(Path.GetDirectoryName(_output),
               Path.GetFileNameWithoutExtension(_output) + "-3-Revisions" + Path.GetExtension(_output))))
    using (var destinationStreamConflicts = File.OpenWrite(Path.Combine(Path.GetDirectoryName(_output),
               Path.GetFileNameWithoutExtension(_output) + "-4-Conflicts" + Path.GetExtension(_output))))
    using (var logFile = File.CreateText(Path.Combine(Path.GetDirectoryName(_output), LogFileName)))
    using (var gZipStreamDocuments = new GZipStream(destinationStreamDocuments, CompressionMode.Compress, true))
    using (var gZipStreamRevisions = new GZipStream(destinationStreamRevisions, CompressionMode.Compress, true))
    using (var gZipStreamConflicts = new GZipStream(destinationStreamConflicts, CompressionMode.Compress, true))
    using (var context = new JsonOperationContext(_initialContextSize, _initialContextLongLivedSize, SharedMultipleUseFlag.None))
    using (var documentsWriter = new BlittableJsonTextWriter(context, gZipStreamDocuments))
    using (var revisionsWriter = new BlittableJsonTextWriter(context, gZipStreamRevisions))
    using (var conflictsWriter = new BlittableJsonTextWriter(context, gZipStreamConflicts))
    {
        WriteSmugglerHeader(documentsWriter, 40018, "Docs");
        WriteSmugglerHeader(revisionsWriter, 40018, "RevisionDocuments");
        WriteSmugglerHeader(conflictsWriter, 40018, "ConflictDocuments");

        while (mem < eof)
        {
            try
            {
                if (ct.IsCancellationRequested)
                {
                    logFile.WriteLine(
                        $"Cancellation requested while recovery was in position {GetFilePosition(startOffset, mem)}");
                    _cancellationRequested = true;
                    break;
                }

                var now = DateTime.UtcNow;
                if ((now - lastProgressReport).TotalSeconds >= _progressIntervalInSec)
                {
                    if (lastProgressReport != DateTime.MinValue)
                    {
                        Console.Clear();
                        Console.WriteLine("Press 'q' to quit the recovery process");
                    }
                    lastProgressReport = now;
                    var currPos = GetFilePosition(startOffset, mem);
                    var eofPos = GetFilePosition(startOffset, eof);
                    Console.WriteLine(
                        $"{now:hh:mm:ss}: Recovering page at position {currPos:#,#;;0}/{eofPos:#,#;;0} ({(double)currPos / eofPos:p}) - Last recovered doc is {_lastRecoveredDocumentKey}");
                }

                var pageHeader = (PageHeader*)mem;

                // this page is not a raw data section, move on
                if (pageHeader->Flags.HasFlag(PageFlags.RawData) == false)
                {
                    mem += _pageSize;
                    continue;
                }

                if (pageHeader->Flags.HasFlag(PageFlags.Single) &&
                    pageHeader->Flags.HasFlag(PageFlags.Overflow))
                {
                    var message =
                        $"page #{pageHeader->PageNumber} (offset={GetFilePosition(startOffset, mem)}) has both the Overflow and Single flags set";
                    mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                    continue;
                }

                // overflow page
                ulong checksum;
                if (pageHeader->Flags.HasFlag(PageFlags.Overflow))
                {
                    var endOfOverflow = (byte*)pageHeader +
                        VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize) * _pageSize;
                    // endOfOverflow can be equal to eof if the last page is an overflow page
                    if (endOfOverflow > eof)
                    {
                        var message =
                            $"Overflow page #{pageHeader->PageNumber} (offset={GetFilePosition(startOffset, mem)})" +
                            $" size exceeds the end of the file ([{(long)pageHeader}:{(long)endOfOverflow}])";
                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                        continue;
                    }

                    if (pageHeader->OverflowSize <= 0)
                    {
                        var message =
                            $"Overflow page #{pageHeader->PageNumber} (offset={GetFilePosition(startOffset, mem)})" +
                            $" OverflowSize is not a positive number ({pageHeader->OverflowSize})";
                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                        continue;
                    }

                    // this can only be computed once we know the overflow size is valid
                    checksum = StorageEnvironment.CalculatePageChecksum((byte*)pageHeader,
                        pageHeader->PageNumber, pageHeader->Flags, pageHeader->OverflowSize);
                    if (checksum != pageHeader->Checksum)
                    {
                        var message =
                            $"Invalid checksum for overflow page {pageHeader->PageNumber}, expected hash to be {pageHeader->Checksum} but was {checksum}";
                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                        continue;
                    }

                    if (Write((byte*)pageHeader + PageHeader.SizeOf, pageHeader->OverflowSize,
                        documentsWriter, revisionsWriter, conflictsWriter, logFile, context,
                        startOffset, ((RawDataOverflowPageHeader*)mem)->TableType))
                    {
                        var numberOfPages = VirtualPagerLegacyExtensions.GetNumberOfOverflowPages(pageHeader->OverflowSize);
                        mem += numberOfPages * _pageSize;
                    }
                    else // write document failed
                    {
                        mem += _pageSize;
                    }
                    continue;
                }

                checksum = StorageEnvironment.CalculatePageChecksum((byte*)pageHeader,
                    pageHeader->PageNumber, pageHeader->Flags, 0);
                if (checksum != pageHeader->Checksum)
                {
                    var message =
                        $"Invalid checksum for page {pageHeader->PageNumber}, expected hash to be {pageHeader->Checksum} but was {checksum}";
                    mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                    continue;
                }

                // small raw data section
                var rawHeader = (RawDataSmallPageHeader*)mem;

                // small raw data section header page, no entries to read
                if (rawHeader->RawDataFlags.HasFlag(RawDataPageFlags.Header))
                {
                    mem += _pageSize;
                    continue;
                }

                if (rawHeader->NextAllocation > _pageSize)
                {
                    var message =
                        $"RawDataSmallPage #{rawHeader->PageNumber} at {GetFilePosition(startOffset, mem)} next allocation is larger than {_pageSize} bytes";
                    mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                    continue;
                }

                for (var pos = PageHeader.SizeOf; pos < rawHeader->NextAllocation;)
                {
                    var currMem = mem + pos;
                    var entry = (RawDataSection.RawDataEntrySizes*)currMem;

                    // the current entry is invalid because it lies outside the page
                    if (pos > _pageSize)
                    {
                        var message =
                            $"RawDataSmallPage #{rawHeader->PageNumber} has an invalid entry at {GetFilePosition(startOffset, currMem)}";
                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                        // we can't retrieve entries past the invalid entry
                        break;
                    }

                    // the allocated size of the entry exceeds the page's next allocation bound
                    if (entry->AllocatedSize + pos + sizeof(RawDataSection.RawDataEntrySizes) > rawHeader->NextAllocation)
                    {
                        var message =
                            $"RawDataSmallPage #{rawHeader->PageNumber} has an invalid entry at {GetFilePosition(startOffset, currMem)}:" +
                            " the allocated entry exceeds the bound of the page's next allocation.";
                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                        // we can't retrieve entries past the invalid entry
                        break;
                    }

                    if (entry->UsedSize > entry->AllocatedSize)
                    {
                        var message =
                            $"RawDataSmallPage #{rawHeader->PageNumber} has an invalid entry at {GetFilePosition(startOffset, currMem)}:" +
                            " the used size of the entry exceeds its allocated size";
                        mem = PrintErrorAndAdvanceMem(message, mem, logFile);
                        // we can't retrieve entries past the invalid entry
                        break;
                    }

                    pos += entry->AllocatedSize + sizeof(RawDataSection.RawDataEntrySizes);

                    // skip unallocated or freed entries
                    if (entry->AllocatedSize == 0 || entry->UsedSize == -1)
                    {
                        continue;
                    }

                    if (Write(currMem + sizeof(RawDataSection.RawDataEntrySizes), entry->UsedSize,
                        documentsWriter, revisionsWriter, conflictsWriter, logFile, context,
                        startOffset, ((RawDataSmallPageHeader*)mem)->TableType) == false)
                    {
                        break;
                    }
                }

                mem += _pageSize;
            }
            catch (Exception e)
            {
                var message =
                    $"Unexpected exception at position {GetFilePosition(startOffset, mem)}:{Environment.NewLine} {e}";
                mem = PrintErrorAndAdvanceMem(message, mem, logFile);
            }
        }

        documentsWriter.WriteEndArray();
        conflictsWriter.WriteEndArray();
        revisionsWriter.WriteEndArray();
        documentsWriter.WriteEndObject();
        conflictsWriter.WriteEndObject();
        revisionsWriter.WriteEndObject();

        logFile.WriteLine(
            $"Discovered a total of {_numberOfDocumentsRetrieved:#,#;00} documents within {sw.Elapsed.TotalSeconds:#,#.#;;00} seconds.");
        logFile.WriteLine($"Discovered a total of {_numberOfFaultedPages:#,#;00} faulted pages.");
    }

    if (_cancellationRequested)
    {
        return RecoveryStatus.CancellationRequested;
    }

    // the original snippet is truncated here; returning Success is the assumed happy path
    return RecoveryStatus.Success;
}
public void Dispose()
{
    _storageEnvironment.Dispose();
}
public override void Done()
{
    _storageEnvironment.Writer.Write(_currentBatch);
    _storageEnvironment.Dispose();
}