private void DisposeIndexWriter()
{
    // Nothing to tear down if the writer was never created (or already disposed).
    if (_indexWriter == null)
        return;

    // Detach the field first so concurrent readers observe a disposed state
    // before we start the (potentially failing) shutdown sequence.
    var current = _indexWriter;
    _indexWriter = null;

    // Shutdown is best-effort: neither step may throw out of dispose,
    // so each failure is logged and swallowed independently.
    try
    {
        current.Analyzer.Close();
    }
    catch (Exception e)
    {
        if (_logger.IsInfoEnabled)
            _logger.Info("Error while closing the index (closing the analyzer failed)", e);
    }

    try
    {
        // waitForMerges: false - do not block dispose on background merges.
        current.Dispose(waitForMerges: false);
    }
    catch (Exception e)
    {
        if (_logger.IsInfoEnabled)
            _logger.Info("Error when closing the index", e);
    }
}
private void CreateIndexWriter(IState state)
{
    // Create the Lucene writer up front and publish it to the field immediately,
    // matching the lifetime expected by DisposeIndexWriter.
    _indexWriter = new TimeTrackingIndexWriter(_directory, _analyzer, _indexDeletionPolicy, _maxFieldLength, state);

    var writer = _indexWriter;
    writer.UseCompoundFile = false;

    // Merge policy limits are driven entirely by the index configuration.
    var configuration = _index.Configuration;
    writer.SetMergePolicy(new LogByteSizeMergePolicy(writer)
    {
        MaxMergeMB = configuration.MaximumSizePerSegment.GetValue(SizeUnit.Megabytes),
        MergeFactor = configuration.MergeFactor,
        LargeSegmentSizeMB = configuration.LargeSegmentSizeToMerge.GetValue(SizeUnit.Megabytes),
        NumberOfLargeSegmentsToMergeInSingleBatch = configuration.NumberOfLargeSegmentsToMergeInSingleBatch
    });

    // Optional: pre-warm merged segments before they become visible to readers.
    if (_indexReaderWarmer != null)
        writer.MergedSegmentWarmer = _indexReaderWarmer;

    var mergeScheduler = new TimeTrackingSerialMergeScheduler(_index);
    writer.InitializeMergeScheduler(mergeScheduler, state);

    // RavenDB already manages the memory for those, no need for Lucene to do this as well
    writer.SetMaxBufferedDocs(IndexWriter.DISABLE_AUTO_FLUSH);
    writer.SetRAMBufferSizeMB(1024);
}