// Converts one reduce result into a Lucene document and writes it to the index.
// NOTE(review): there is no try/catch around GetFields here — a projection error
// propagates to the caller; confirm that is intended for this revision.
private void WriteDocumentToIndex(object doc, RavenIndexWriter indexWriter, Analyzer analyzer)
{
    float boost;
    // GetFields also computes the document-level boost via the out parameter.
    var fields = GetFields(doc, out boost).ToList();

    string reduceKeyAsString = ExtractReduceKey(ViewGenerator, doc);

    // reduceKeyField / reduceValueField are reused field instances; only their
    // values change per document. The reduce value is the full JSON of the result.
    reduceKeyField.SetValue(reduceKeyAsString);
    reduceValueField.SetValue(ToJsonDocument(doc).ToString(Formatting.None));

    // luceneDoc is a reused instance — clear the previous entry's fields first.
    luceneDoc.GetFields().Clear();
    luceneDoc.Boost = boost;
    luceneDoc.Add(reduceKeyField);
    luceneDoc.Add(reduceValueField);
    foreach (var field in fields)
    {
        luceneDoc.Add(field);
    }

    // Fire OnIndexEntryCreated on every batcher; a trigger failure is logged and
    // recorded as an index error but does not abort the write.
    batchers.ApplyAndIgnoreAllErrors(
        exception =>
        {
            logIndexing.WarnException(
                string.Format("Error when executed OnIndexEntryCreated trigger for index '{0}', key: '{1}'",
                    name, reduceKeyAsString),
                exception);
            Context.AddError(name, reduceKeyAsString, exception.Message, "OnIndexEntryCreated Trigger");
        },
        trigger => trigger.OnIndexEntryCreated(reduceKeyAsString, luceneDoc));

    parent.LogIndexedDocument(reduceKeyAsString, luceneDoc);
    parent.AddDocumentToIndex(indexWriter, luceneDoc, analyzer);
}
// Deletes every indexed document whose reduce key is in ReduceKeys, notifying
// the delete batchers for each key before the documents are removed.
private void RemoveExistingReduceKeysFromIndex(RavenIndexWriter indexWriter)
{
    foreach (var key in ReduceKeys)
    {
        parent.InvokeOnIndexEntryDeletedOnAllBatchers(batchers, new Term(Constants.ReduceKeyFieldName, key));
        indexWriter.DeleteDocuments(new Term(Constants.ReduceKeyFieldName, key));
    }
}
/// <summary>
/// Converts one reduce result into a Lucene document and writes it to the index,
/// accumulating the conversion time and the write time into separate stopwatches.
/// A failure while projecting the fields is recorded as an index error and the
/// entry is skipped; trigger failures are logged but do not abort the write.
/// </summary>
/// <param name="doc">The reduce output to index.</param>
/// <param name="indexWriter">Writer the resulting Lucene document is added to.</param>
/// <param name="analyzer">Base analyzer used when adding the document.</param>
/// <param name="convertToLuceneDocumentDuration">Accumulates time spent building the Lucene document.</param>
/// <param name="addDocumentDutation">Accumulates time spent writing to the index
/// (parameter name typo kept for call-site compatibility).</param>
private void WriteDocumentToIndex(object doc, RavenIndexWriter indexWriter, Analyzer analyzer, Stopwatch convertToLuceneDocumentDuration, Stopwatch addDocumentDutation)
{
    string reduceKeyAsString;
    using (StopwatchScope.For(convertToLuceneDocumentDuration))
    {
        float boost;
        try
        {
            var fields = GetFields(doc, out boost);

            reduceKeyAsString = ExtractReduceKey(ViewGenerator, doc);
            reduceKeyField.SetValue(reduceKeyAsString);
            reduceValueField.SetValue(ToJsonDocument(doc).ToString(Formatting.None));

            // luceneDoc is a reused instance — clear the previous entry's fields.
            luceneDoc.GetFields().Clear();
            luceneDoc.Boost = boost;
            luceneDoc.Add(reduceKeyField);
            luceneDoc.Add(reduceValueField);
            foreach (var field in fields)
            {
                luceneDoc.Add(field);
            }
        }
        catch (Exception e)
        {
            Context.AddError(indexId,
                parent.PublicName,
                TryGetDocKey(doc),
                e,
                "Reduce"
                );
            // Fixed grammar: message previously read "Could not get fields to during reduce for ..."
            logIndexing.WarnException("Could not get fields during reduce for " + parent.PublicName, e);
            return;
        }
    }

    // Fire OnIndexEntryCreated on every batcher; a trigger failure is logged and
    // recorded as an index error but does not abort the write.
    batchers.ApplyAndIgnoreAllErrors(
        exception =>
        {
            logIndexing.WarnException(
                string.Format("Error when executed OnIndexEntryCreated trigger for index '{0}', key: '{1}'",
                    indexId, reduceKeyAsString),
                exception);
            Context.AddError(indexId, parent.PublicName, reduceKeyAsString, exception, "OnIndexEntryCreated Trigger");
        },
        trigger => trigger.OnIndexEntryCreated(reduceKeyAsString, luceneDoc));

    parent.LogIndexedDocument(reduceKeyAsString, luceneDoc);

    using (StopwatchScope.For(addDocumentDutation))
    {
        parent.AddDocumentToIndex(indexWriter, luceneDoc, analyzer);
    }
}
// Deletes every indexed document whose reduce key is in ReduceKeys. Batchers are
// notified first; only the actual index deletion is counted into the stopwatch.
private void RemoveExistingReduceKeysFromIndex(RavenIndexWriter indexWriter, Stopwatch deleteExistingDocumentsDuration)
{
    foreach (var key in ReduceKeys)
    {
        parent.InvokeOnIndexEntryDeletedOnAllBatchers(batchers, new Term(Constants.ReduceKeyFieldName, key));

        using (StopwatchScope.For(deleteExistingDocumentsDuration))
        {
            indexWriter.DeleteDocuments(new Term(Constants.ReduceKeyFieldName, key));
        }
    }
}
// Deletes every indexed document whose reduce key is in ReduceKeys and then fires
// the OnIndexEntryDeleted trigger for that key. Trigger failures are logged and
// recorded as index errors but never stop the remaining deletions.
private void RemoveExistingReduceKeysFromIndex(RavenIndexWriter indexWriter)
{
    foreach (var reduceKey in ReduceKeys)
    {
        // Per-iteration copy so the lambdas below capture a stable value.
        var key = reduceKey;

        indexWriter.DeleteDocuments(new Term(Constants.ReduceKeyFieldName, key));

        batchers.ApplyAndIgnoreAllErrors(
            exception =>
            {
                logIndexing.WarnException(
                    string.Format("Error when executed OnIndexEntryDeleted trigger for index '{0}', key: '{1}'",
                        name, key),
                    exception);
                Context.AddError(name, key, exception.Message, "OnIndexEntryDeleted Trigger");
            },
            trigger => trigger.OnIndexEntryDeleted(key));
    }
}
/// <summary>
/// Adds a Lucene document to the index, letting the registered analyzer
/// generators override the analyzer for this document, and disposing any
/// reader-valued fields once the document has been handed to the writer.
/// </summary>
/// <param name="currentIndexWriter">Writer the document is added to.</param>
/// <param name="luceneDoc">Document to index (a reused instance owned by the caller).</param>
/// <param name="analyzer">Base analyzer; never closed by this method.</param>
protected void AddDocumentToIndex(RavenIndexWriter currentIndexWriter, Document luceneDoc, Analyzer analyzer)
{
    // Chain the analyzer generators: each sees the analyzer produced by the
    // previous one. Intermediate analyzers (anything other than the caller's)
    // are closed as soon as they are superseded.
    Analyzer newAnalyzer = AnalyzerGenerators.Aggregate(analyzer, (currentAnalyzer, generator) =>
    {
        Analyzer generateAnalyzer = generator.Value.GenerateAnalyzerForIndexing(name, luceneDoc, currentAnalyzer);
        if (generateAnalyzer != currentAnalyzer && currentAnalyzer != analyzer)
            currentAnalyzer.Close();
        return generateAnalyzer;
    });

    try
    {
        // Keep a copy for the index extensions before the writer consumes the fields.
        if (indexExtensions.Count > 0)
            currentlyIndexDocuments.Add(CloneDocument(luceneDoc));

        currentIndexWriter.AddDocument(luceneDoc, newAnalyzer);
    }
    finally
    {
        // Dispose the field readers even when AddDocument throws; previously they
        // were only disposed on the success path and leaked on failure.
        foreach (var fieldable in luceneDoc.GetFields())
        {
            using (fieldable.ReaderValue) // dispose all the readers
            {
            }
        }

        if (newAnalyzer != analyzer)
            newAnalyzer.Close();
    }
}
// Builds a fresh RavenIndexWriter over the index directory. The snapshotter
// keeps the last commit alive so backups can copy a consistent snapshot, and
// any configured index-reader warmers are attached to pre-warm new readers.
private void CreateIndexWriter()
{
    snapshotter = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());

    IndexWriter.IndexReaderWarmer indexReaderWarmer = null;
    if (context.IndexReaderWarmers != null)
        indexReaderWarmer = new IndexReaderWarmersWrapper(name, context.IndexReaderWarmers);

    indexWriter = new RavenIndexWriter(
        directory,
        stopAnalyzer,
        snapshotter,
        IndexWriter.MaxFieldLength.UNLIMITED,
        context.Configuration.MaxIndexWritesBeforeRecreate,
        indexReaderWarmer);
}
// Shuts the index down under the write lock: flushes an in-memory index to disk
// if necessary, then closes the writer, its analyzer, and the directory.
// Each teardown step is individually guarded so one failure cannot prevent the rest.
public void Dispose()
{
    try
    {
        // this is here so we can give good logs in the case of a long shutdown process
        if (Monitor.TryEnter(writeLock, 100) == false)
        {
            var localReason = waitReason;
            if (localReason != null)
                logIndexing.Warn("Waiting for {0} to complete before disposing of index {1}, that might take a while if the server is very busy",
                    localReason, name);

            // Block until the in-flight operation releases the lock.
            Monitor.Enter(writeLock);
        }

        // Mark disposed while holding the write lock so no new writes begin.
        disposed = true;

        foreach (var indexExtension in indexExtensions)
        {
            indexExtension.Value.Dispose();
        }

        if (currentIndexSearcherHolder != null)
        {
            // Detach the searcher, then wait (bounded) for outstanding searches.
            var item = currentIndexSearcherHolder.SetIndexSearcher(null, wait: true);
            if (item.WaitOne(TimeSpan.FromSeconds(5)) == false)
            {
                logIndexing.Warn("After closing the index searching, we waited for 5 seconds for the searching to be done, but it wasn't. Continuing with normal shutdown anyway.");
            }
        }

        if (indexWriter != null)
        {
            try
            {
                ForceWriteToDisk();
                WriteInMemoryIndexToDiskIfNecessary(Etag.Empty);
            }
            catch (Exception e)
            {
                // Best-effort flush; continue the shutdown even if it fails.
                logIndexing.ErrorException("Error while writing in memory index to disk.", e);
            }
        }

        if (indexWriter != null) // just in case, WriteInMemoryIndexToDiskIfNecessary recreates writer
        {
            // Null the field before teardown so concurrent readers see the writer as gone.
            var writer = indexWriter;
            indexWriter = null;

            try
            {
                writer.Analyzer.Close();
            }
            catch (Exception e)
            {
                logIndexing.ErrorException("Error while closing the index (closing the analyzer failed)", e);
            }

            try
            {
                writer.Dispose();
            }
            catch (Exception e)
            {
                logIndexing.ErrorException("Error when closing the index", e);
            }
        }

        try
        {
            directory.Dispose();
        }
        catch (Exception e)
        {
            logIndexing.ErrorException("Error when closing the directory", e);
        }
    }
    finally
    {
        Monitor.Exit(writeLock);
    }
}
/// <summary>
/// Converts one reduce result into a Lucene document and writes it to the index.
/// A failure while projecting the fields is recorded as an index error and the
/// entry is skipped; trigger failures are logged but do not abort the write.
/// </summary>
/// <param name="doc">The reduce output to index.</param>
/// <param name="indexWriter">Writer the resulting Lucene document is added to.</param>
/// <param name="analyzer">Base analyzer used when adding the document.</param>
private void WriteDocumentToIndex(object doc, RavenIndexWriter indexWriter, Analyzer analyzer)
{
    float boost;
    List<AbstractField> fields;
    try
    {
        fields = GetFields(doc, out boost).ToList();
    }
    catch (Exception e)
    {
        Context.AddError(indexId,
            parent.PublicName,
            TryGetDocKey(doc),
            e.Message,
            "Reduce"
            );
        // Fixed grammar: message previously read "Could not get fields to during reduce for ..."
        logIndexing.WarnException("Could not get fields during reduce for " + parent.PublicName, e);
        return;
    }

    string reduceKeyAsString = ExtractReduceKey(ViewGenerator, doc);
    reduceKeyField.SetValue(reduceKeyAsString);
    reduceValueField.SetValue(ToJsonDocument(doc).ToString(Formatting.None));

    // luceneDoc is a reused instance — clear the previous entry's fields first.
    luceneDoc.GetFields().Clear();
    luceneDoc.Boost = boost;
    luceneDoc.Add(reduceKeyField);
    luceneDoc.Add(reduceValueField);
    foreach (var field in fields)
    {
        luceneDoc.Add(field);
    }

    // Fire OnIndexEntryCreated on every batcher; a trigger failure is logged and
    // recorded as an index error but does not abort the write.
    batchers.ApplyAndIgnoreAllErrors(
        exception =>
        {
            logIndexing.WarnException(
                string.Format("Error when executed OnIndexEntryCreated trigger for index '{0}', key: '{1}'",
                    indexId, reduceKeyAsString),
                exception);
            Context.AddError(indexId, parent.PublicName, reduceKeyAsString, exception.Message, "OnIndexEntryCreated Trigger");
        },
        trigger => trigger.OnIndexEntryCreated(reduceKeyAsString, luceneDoc));

    parent.LogIndexedDocument(reduceKeyAsString, luceneDoc);
    parent.AddDocumentToIndex(indexWriter, luceneDoc, analyzer);
}
/// <summary>
/// Adds a Lucene document to the index, letting the registered analyzer
/// generators override the analyzer for this document.
/// </summary>
protected void AddDocumentToIndex(RavenIndexWriter currentIndexWriter, Document luceneDoc, Analyzer analyzer)
{
    // Run the analyzer generators in order; each sees the analyzer produced by
    // the previous one. Intermediate analyzers (anything other than the one the
    // caller supplied) are closed as soon as they are superseded.
    Analyzer effectiveAnalyzer = analyzer;
    foreach (var generator in AnalyzerGenerators)
    {
        Analyzer produced = generator.Value.GenerateAnalyzerForIndexing(name, luceneDoc, effectiveAnalyzer);
        if (produced != effectiveAnalyzer && effectiveAnalyzer != analyzer)
            effectiveAnalyzer.Close();
        effectiveAnalyzer = produced;
    }

    try
    {
        // Keep a copy for the index extensions before the writer consumes the fields.
        if (indexExtensions.Count > 0)
            currentlyIndexDocuments.Add(CloneDocument(luceneDoc));

        currentIndexWriter.AddDocument(luceneDoc, effectiveAnalyzer);
    }
    finally
    {
        // Never close the caller's analyzer, only one a generator created.
        if (effectiveAnalyzer != analyzer)
            effectiveAnalyzer.Close();
    }
}
// Builds a fresh RavenIndexWriter over the index directory. The snapshotter
// keeps the last commit alive so backups can copy a consistent snapshot.
private void CreateIndexWriter()
{
    snapshotter = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());

    indexWriter = new RavenIndexWriter(
        directory,
        stopAnalyzer,
        snapshotter,
        IndexWriter.MaxFieldLength.UNLIMITED,
        context.Configuration.MaxIndexWritesBeforeRecreate);
}