public override void WriteEndVersion(Process process, AbstractConnection input, Entity entity, bool force = false) {
    if (entity.Updates + entity.Inserts <= 0 && !force)
        return;

    var versionType = entity.Version == null ? "string" : entity.Version.SimpleType;
    var end = entity.End ?? new DefaultFactory(Logger).Convert(entity.End, versionType);

    using (var dir = LuceneDirectoryFactory.Create(this, TflBatchEntity(entity.ProcessName))) {
        using (var writer = new IndexWriter(dir, new KeywordAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED)) {
            // one document per batch records what happened (counts) and the end version reached
            var doc = new Document();
            doc.fields.Add(new NumericField("id", Libs.Lucene.Net.Document.Field.Store.YES, true).SetIntValue(entity.TflBatchId));
            doc.fields.Add(new Libs.Lucene.Net.Document.Field("process", entity.ProcessName, Libs.Lucene.Net.Document.Field.Store.YES, Libs.Lucene.Net.Document.Field.Index.NOT_ANALYZED_NO_NORMS));
            doc.fields.Add(new Libs.Lucene.Net.Document.Field("connection", input.Name, Libs.Lucene.Net.Document.Field.Store.YES, Libs.Lucene.Net.Document.Field.Index.NOT_ANALYZED_NO_NORMS));
            doc.fields.Add(new Libs.Lucene.Net.Document.Field("entity", entity.Alias, Libs.Lucene.Net.Document.Field.Store.YES, Libs.Lucene.Net.Document.Field.Index.NOT_ANALYZED_NO_NORMS));
            doc.fields.Add(new NumericField("updates", Libs.Lucene.Net.Document.Field.Store.YES, true).SetLongValue(entity.Updates));
            doc.fields.Add(new NumericField("inserts", Libs.Lucene.Net.Document.Field.Store.YES, true).SetLongValue(entity.Inserts));
            doc.fields.Add(new NumericField("deletes", Libs.Lucene.Net.Document.Field.Store.YES, true).SetLongValue(entity.Deletes));
            doc.fields.Add(LuceneWriter.CreateField("version", versionType, new SearchType { Analyzer = "keyword" }, end));
            doc.fields.Add(new Libs.Lucene.Net.Document.Field("version_type", versionType, Libs.Lucene.Net.Document.Field.Store.YES, Libs.Lucene.Net.Document.Field.Index.NOT_ANALYZED_NO_NORMS));
            doc.fields.Add(new NumericField("tflupdate", Libs.Lucene.Net.Document.Field.Store.YES, true).SetLongValue(DateTime.UtcNow.Ticks));
            // persist and compact the tflbatch index so the latest end version is cheap to read back
            writer.AddDocument(doc);
            writer.Commit();
            writer.Optimize();
        }
    }
}
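// Hypothetical companion sketch (not part of the original source): shows how the most recent end version
// written by WriteEndVersion above could be read back from the same tflbatch index. It assumes the standard
// Lucene.Net 3.x search API (IndexSearcher, TermQuery, Sort) and that the "version" field is stored by
// LuceneWriter.CreateField; the field names come from the document written above, everything else is illustrative.
private static string ReadLatestEndVersion(Directory dir, string entityAlias) {
    using (var searcher = new IndexSearcher(dir, true)) {
        // "entity" is indexed NOT_ANALYZED, so an exact-term query matches the alias as written
        var query = new TermQuery(new Term("entity", entityAlias));
        // newest batch first, ordered by the tflupdate ticks recorded above
        var sort = new Sort(new SortField("tflupdate", SortField.LONG, true));
        var hits = searcher.Search(query, null, 1, sort);
        if (hits.TotalHits == 0)
            return null;
        return searcher.Doc(hits.ScoreDocs[0].Doc).Get("version");
    }
}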
private void InitBlock(IndexWriter enclosingInstance) { this.enclosingInstance = enclosingInstance; }
public ReaderPool(IndexWriter enclosingInstance) { InitBlock(enclosingInstance); }
protected MergePolicy(IndexWriter writer) { this.writer = writer; }
internal DocumentsWriter(Directory directory, IndexWriter writer, IndexingChain indexingChain) {
    InitBlock();
    this.directory = directory;
    this.writer = writer;
    this.similarity = writer.Similarity;
    flushedDocCount = writer.MaxDoc();

    consumer = indexingChain.GetChain(this);
    if (consumer is DocFieldProcessor) {
        docFieldProcessor = (DocFieldProcessor) consumer;
    }
}
internal ReadOnlyDirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor):base(writer, infos, termInfosIndexDivisor) { }
internal SegmentMerger(IndexWriter writer, System.String name, MergePolicy.OneMerge merge) {
    InitBlock();
    directory = writer.Directory;
    segment = name;
    if (merge != null) {
        checkAbort = new CheckAbort(merge, directory);
    } else {
        checkAbort = new AnonymousClassCheckAbort1(this, null, null);
    }
    termIndexInterval = writer.TermIndexInterval;
}
public MergeThread(ConcurrentMergeScheduler enclosingInstance, IndexWriter writer, MergePolicy.OneMerge startMerge) {
    InitBlock(enclosingInstance);
    this.writer = writer;
    this.startMerge = startMerge;
}
/// <summary>Create and return a new MergeThread </summary>
protected internal virtual MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) {
    lock (this) {
        var thread = new MergeThread(this, writer, merge);
        thread.SetThreadPriority(mergeThreadPriority);
        thread.IsBackground = true;
        thread.Name = "Lucene Merge Thread #" + mergeThreadCount++;
        return thread;
    }
}
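// A minimal sketch, assuming a consuming assembly subclasses ConcurrentMergeScheduler: GetMergeThread above
// is the extension point, and overriding it lets merge threads carry an application-specific name while the
// base call keeps the priority and background handling. "MyAppMergeScheduler" is illustrative, not library
// code. (From a different assembly, C# requires the protected internal override to narrow to protected.)
public class MyAppMergeScheduler : ConcurrentMergeScheduler {
    protected override MergeThread GetMergeThread(IndexWriter writer, MergePolicy.OneMerge merge) {
        var thread = base.GetMergeThread(writer, merge);
        thread.Name = "MyApp " + thread.Name; // e.g. "MyApp Lucene Merge Thread #3"
        return thread;
    }
}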
public override void Merge(IndexWriter writer) {
    // TODO: .NET doesn't support this
    // assert !Thread.holdsLock(writer);

    this.writer = writer;

    InitMergeThreadPriority();

    dir = writer.Directory;

    // First, quickly run through the newly proposed merges
    // and add any orthogonal merges (ie a merge not
    // involving segments already pending to be merged) to
    // the queue. If we are way behind on merging, many of
    // these newly proposed merges will likely already be
    // registered.

    if (Verbose()) {
        Message("now merge");
        Message(" index: " + writer.SegString());
    }

    // Iterate, pulling from the IndexWriter's queue of
    // pending merges, until it's empty:
    while (true) {
        // TODO: we could be careful about which merges to do in
        // the BG (eg maybe the "biggest" ones) vs FG, which
        // merges to do first (the easiest ones?), etc.
        MergePolicy.OneMerge merge = writer.GetNextMerge();
        if (merge == null) {
            if (Verbose())
                Message(" no more merges pending; now return");
            return;
        }

        // We do this w/ the primary thread to keep
        // deterministic assignment of segment names
        writer.MergeInit(merge);

        bool success = false;
        try {
            lock (this) {
                while (MergeThreadCount() >= _maxThreadCount) {
                    if (Verbose())
                        Message(" too many merge threads running; stalling...");
                    System.Threading.Monitor.Wait(this);
                }

                if (Verbose())
                    Message(" consider merge " + merge.SegString(dir));

                System.Diagnostics.Debug.Assert(MergeThreadCount() < _maxThreadCount);

                // OK to spawn a new merge thread to handle this
                // merge:
                MergeThread merger = GetMergeThread(writer, merge);
                mergeThreads.Add(merger);
                if (Verbose())
                    Message(" launch new thread [" + merger.Name + "]");

                merger.Start();
                success = true;
            }
        } finally {
            if (!success) {
                writer.MergeFinish(merge);
            }
        }
    }
}
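// Hedged usage sketch (not original source): the Merge(IndexWriter) loop above is invoked by IndexWriter
// itself whenever merges become pending, so calling code only installs and tunes the scheduler.
// SetMergeScheduler and SetMaxThreadCount exist in the Lucene.Net 3.x API; the directory, analyzer,
// helper name, and thread count below are illustrative.
private static IndexWriter OpenWriterWithConcurrentMerges(System.IO.DirectoryInfo indexPath) {
    var scheduler = new ConcurrentMergeScheduler();
    scheduler.SetMaxThreadCount(2); // beyond this, the loop above stalls instead of spawning more MergeThreads
    var writer = new IndexWriter(FSDirectory.Open(indexPath), new KeywordAnalyzer(), IndexWriter.MaxFieldLength.UNLIMITED);
    writer.SetMergeScheduler(scheduler); // later AddDocument/Commit calls feed pending merges to Merge(writer)
    return writer;
}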
protected LogMergePolicy(IndexWriter writer):base(writer) { }
/// <summary>Run the merges provided by <see cref="IndexWriter.GetNextMerge()" />. </summary>
public abstract void Merge(IndexWriter writer);
// Used by near real-time search
internal DirectoryReader(IndexWriter writer, SegmentInfos infos, int termInfosIndexDivisor) {
    this.internalDirectory = writer.Directory;
    this.readOnly = true;
    segmentInfos = infos;
    segmentInfosStart = (SegmentInfos) infos.Clone();
    this.termInfosIndexDivisor = termInfosIndexDivisor;
    if (!readOnly) {
        // We assume that this segments_N was previously
        // properly sync'd:
        synced.UnionWith(infos.Files(internalDirectory, true));
    }

    // IndexWriter synchronizes externally before calling
    // us, which ensures infos will not change; so there's
    // no need to process segments in reverse order
    int numSegments = infos.Count;

    var readers = new SegmentReader[numSegments];
    Directory dir = writer.Directory;
    int upto = 0;

    for (int i = 0; i < numSegments; i++) {
        bool success = false;
        try {
            SegmentInfo info = infos.Info(i);
            if (info.dir == dir) {
                readers[upto++] = writer.readerPool.GetReadOnlyClone(info, true, termInfosIndexDivisor);
            }
            success = true;
        } finally {
            if (!success) {
                // Close all readers we had opened:
                for (upto--; upto >= 0; upto--) {
                    try {
                        readers[upto].Close();
                    } catch (System.Exception) {
                        // keep going - we want to clean up as much as possible
                    }
                }
            }
        }
    }

    this.writer = writer;

    if (upto < readers.Length) {
        // This means some segments were in a foreign Directory
        var newReaders = new SegmentReader[upto];
        Array.Copy(readers, 0, newReaders, 0, upto);
        readers = newReaders;
    }

    Initialize(readers);
}
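// Hedged usage sketch (not original source) for the near-real-time path that reaches the constructor above:
// IndexWriter.GetReader() in Lucene.Net 3.x builds a read-only DirectoryReader from the writer's in-memory
// SegmentInfos, so uncommitted documents become searchable without a Commit(). The helper name and term are illustrative.
private static int CountVisibleUncommitted(IndexWriter writer, Term term) {
    using (IndexReader reader = writer.GetReader()) {           // constructs the reader via the ctor above
        using (var searcher = new IndexSearcher(reader)) {
            return searcher.Search(new TermQuery(term), 1).TotalHits;
        }
    }
}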