/// <summary>
/// Cleans up the index directory from old index files. This method uses the
/// last commit found by <see cref="GetLastCommit(Directory)"/>. If it matches the
/// expected <paramref name="segmentsFile"/>, then all files not referenced by this commit point
/// are deleted.
/// </summary>
/// <remarks>
/// <b>NOTE:</b> This method does a best effort attempt to clean the index
/// directory. It suppresses any exceptions that occur, as this can be retried
/// the next time.
/// </remarks>
public static void CleanupOldIndexFiles(Directory directory, string segmentsFile)
{
    try
    {
        IndexCommit commit = GetLastCommit(directory);
        // commit == null means weird IO errors occurred, ignore them
        // if there were any IO errors reading the expected commit point (i.e.
        // segments files mismatch), then ignore that commit either.
        // FIX: use explicit ordinal comparison for file names (CA1307) instead of
        // the culture-sensitive default.
        if (commit != null && commit.SegmentsFileName.Equals(segmentsFile, StringComparison.Ordinal))
        {
            // Files referenced by the commit point, plus segments.gen, must be kept.
            // (Set initializer replaces the former Union(new[] { ... }) allocation.)
            HashSet<string> commitFiles = new HashSet<string>(commit.FileNames, StringComparer.Ordinal)
            {
                IndexFileNames.SEGMENTS_GEN
            };

            Regex matcher = IndexFileNames.CODEC_FILE_PATTERN;
            foreach (string file in directory.ListAll()
                .Where(file => !commitFiles.Contains(file)
                    // FIX: StartsWith without StringComparison is culture-sensitive (CA1310).
                    && (matcher.IsMatch(file) || file.StartsWith(IndexFileNames.SEGMENTS, StringComparison.Ordinal))))
            {
                try
                {
                    directory.DeleteFile(file);
                }
                catch
                {
                    // suppress, it's just a best effort
                }
            }
        }
    }
    catch
    {
        // ignore any errors that happen during this state and only log it. this
        // cleanup will have a chance to succeed the next time we get a new
        // revision.
    }
}
/// <summary>
/// Lists all store files referenced by the current commit point of every index.
/// In read-only mode no snapshot is taken, because the index will not be modified.
/// </summary>
/// <remarks>
/// The original Java method declared <c>throws IOException</c>; C# has no checked
/// exceptions, so IO failures simply propagate.
/// </remarks>
private ResourceIterator<File> ListReadOnlyStoreFiles()
{
    ICollection<File> files = new List<File>();

    MakeSureAllIndexesAreInstantiated();
    foreach (IndexReference index in AllIndexes)
    {
        File indexDirectory = GetFileDirectory(_baseStorePath, index.Identifier);
        IndexSearcher searcher = index.Searcher;
        using (IndexReader indexReader = searcher.IndexReader)
        {
            DirectoryReader directoryReader = (DirectoryReader)indexReader;
            IndexCommit commit = directoryReader.IndexCommit;
            foreach (string fileName in commit.FileNames)
            {
                // BUG FIX: was 'Files.Add(...)' — a Java-to-C# converter artifact
                // referring to java.nio.file.Files; the local 'files' collection
                // is what must accumulate the results.
                files.Add(new File(indexDirectory, fileName));
            }
        }
    }
    // BUG FIX: was 'Files.GetEnumerator()' for the same reason.
    return Iterators.asResourceIterator(files.GetEnumerator());
}
/// <summary>
/// Builds the version string of a revision from the given <see cref="IndexCommit"/>s
/// of the search and taxonomy indexes.
/// </summary>
/// <returns>the two commit generations as upper-case hex, separated by a colon.</returns>
public static string RevisionVersion(IndexCommit indexCommit, IndexCommit taxonomyCommit)
    => $"{indexCommit.Generation:X}:{taxonomyCommit.Generation:X}";
/// <summary>
/// Returns a map of the revision files from the given <see cref="IndexCommit"/>s of the
/// search and taxonomy indexes, keyed by source name.
/// </summary>
/// <exception cref="IOException"></exception>
public static IDictionary<string, IList<RevisionFile>> RevisionFiles(IndexCommit indexCommit, IndexCommit taxonomyCommit)
{
    var revisionFiles = new Dictionary<string, IList<RevisionFile>>();

    // Each per-source revision map contains a single entry; take its file list.
    revisionFiles[INDEX_SOURCE] = IndexRevision.RevisionFiles(indexCommit).Values.First();
    revisionFiles[TAXONOMY_SOURCE] = IndexRevision.RevisionFiles(taxonomyCommit).Values.First();

    return revisionFiles;
}
/// <summary>
/// Packs every file referenced by the given commit point into a temporary zip archive
/// inside <paramref name="directory"/> and uploads the archive to the asset store.
/// The archive is deleted automatically when its stream is closed
/// (<see cref="FileOptions.DeleteOnClose"/>).
/// </summary>
public static async Task UploadDirectoryAsync(this IAssetStore assetStore, DirectoryInfo directory, IndexCommit commit)
{
    var archivePath = Path.Combine(directory.FullName, ArchiveFile);

    using (var fileStream = new FileStream(
        archivePath,
        FileMode.Create,
        FileAccess.ReadWrite,
        FileShare.None,
        4096,
        FileOptions.DeleteOnClose))
    {
        using (var zipArchive = new ZipArchive(fileStream, ZipArchiveMode.Create, true))
        {
            foreach (var fileName in commit.FileNames)
            {
                var file = new FileInfo(Path.Combine(directory.FullName, fileName));

                // Never pack the archive itself or the lock file.
                var isExcluded =
                    file.Name.Equals(ArchiveFile, StringComparison.OrdinalIgnoreCase) ||
                    file.Name.Equals(LockFile, StringComparison.OrdinalIgnoreCase);

                if (isExcluded)
                {
                    continue;
                }

                try
                {
                    zipArchive.CreateEntryFromFile(file.FullName, file.Name);
                }
                catch (IOException)
                {
                    // Best effort: skip files that vanished or are locked.
                }
            }
        }

        fileStream.Position = 0;

        await assetStore.UploadAsync(directory.Name, 0, string.Empty, fileStream, true);
    }
}
/// <summary>
/// Builder-style setter for <see cref="IndexWriterConfig.IndexCommit"/>.
/// </summary>
/// <param name="config">this <see cref="IndexWriterConfig"/> instance</param>
/// <param name="commit">the commit point the writer should open from</param>
/// <returns>this <see cref="IndexWriterConfig"/> instance, for chaining</returns>
public static IndexWriterConfig SetIndexCommit(this IndexWriterConfig config, IndexCommit commit)
{
    config.IndexCommit = commit;
    return config;
}
/// <summary>
/// Creates an <see cref="IndexWriter"/> for the benchmark run, wiring the optional
/// "writer.info.stream" config value to Console.Out, Console.Error, or a file.
/// </summary>
public static IndexWriter ConfigureWriter(Config config, PerfRunData runData, OpenMode mode, IndexCommit commit)
{
    IndexWriterConfig iwc = CreateWriterConfig(config, runData, mode, commit);

    string infoStreamVal = config.Get("writer.info.stream", null);
    if (infoStreamVal != null)
    {
        // C# string switch uses ordinal comparison, matching the original
        // Equals(..., StringComparison.Ordinal) checks.
        switch (infoStreamVal)
        {
            case "SystemOut":
                iwc.SetInfoStream(Console.Out);
                break;
            case "SystemErr":
                iwc.SetInfoStream(Console.Error);
                break;
            default:
                FileInfo f = new FileInfo(infoStreamVal);
                iwc.SetInfoStream(new StreamWriter(
                    new FileStream(f.FullName, FileMode.Create, FileAccess.Write),
                    Encoding.GetEncoding(0)));
                break;
        }
    }

    return new IndexWriter(runData.Directory, iwc);
}
/// <summary>
/// Builds an <see cref="IndexWriterConfig"/> from benchmark <paramref name="config"/>
/// properties: Lucene version, open mode, deletion policy, optional commit point,
/// merge scheduler, codec, merge policy, and flush thresholds.
/// </summary>
/// <param name="config">benchmark configuration to read "writer.*"/"merge.*" properties from</param>
/// <param name="runData">supplies the analyzer used by the writer config</param>
/// <param name="mode">open mode applied to the config</param>
/// <param name="commit">optional commit point to open from; ignored when null</param>
/// <returns>the fully configured <see cref="IndexWriterConfig"/></returns>
public static IndexWriterConfig CreateWriterConfig(Config config, PerfRunData runData, OpenMode mode, IndexCommit commit)
{
    // :Post-Release-Update-Version.LUCENE_XY:
    LuceneVersion version = (LuceneVersion)Enum.Parse(typeof(LuceneVersion), config.Get("writer.version", LuceneVersion.LUCENE_48.ToString()));
    IndexWriterConfig iwConf = new IndexWriterConfig(version, runData.Analyzer);
    iwConf.OpenMode = mode;
    IndexDeletionPolicy indexDeletionPolicy = GetIndexDeletionPolicy(config);
    iwConf.IndexDeletionPolicy = indexDeletionPolicy;
    if (commit != null)
    {
        iwConf.IndexCommit = commit;
    }

    // Merge scheduler: resolved by assembly-qualified type name from config.
    string mergeScheduler = config.Get("merge.scheduler", "Lucene.Net.Index.ConcurrentMergeScheduler, Lucene.Net");
    Type mergeSchedulerType = Type.GetType(mergeScheduler);
    if (mergeSchedulerType is null)
    {
        throw RuntimeException.Create("Unrecognized merge scheduler type '" + mergeScheduler + "'"); // LUCENENET: We don't get an exception in this case, so throwing one for compatibility
    }
    else if (mergeSchedulerType.Equals(typeof(NoMergeScheduler)))
    {
        // NoMergeScheduler is a singleton; never instantiated via reflection.
        iwConf.MergeScheduler = NoMergeScheduler.INSTANCE;
    }
    else
    {
        try
        {
            iwConf.MergeScheduler = (IMergeScheduler)Activator.CreateInstance(mergeSchedulerType);
        }
        catch (Exception e) when (e.IsException())
        {
            throw RuntimeException.Create("unable to instantiate class '" + mergeScheduler + "' as merge scheduler", e);
        }

        // Extra tuning knobs only apply to the concurrent scheduler.
        if (mergeScheduler.Equals("Lucene.Net.Index.ConcurrentMergeScheduler", StringComparison.Ordinal))
        {
            ConcurrentMergeScheduler cms = (ConcurrentMergeScheduler)iwConf.MergeScheduler;
            int maxThreadCount = config.Get("concurrent.merge.scheduler.max.thread.count", ConcurrentMergeScheduler.DEFAULT_MAX_THREAD_COUNT);
            int maxMergeCount = config.Get("concurrent.merge.scheduler.max.merge.count", ConcurrentMergeScheduler.DEFAULT_MAX_MERGE_COUNT);
            cms.SetMaxMergesAndThreads(maxMergeCount, maxThreadCount);
        }
    }

    // Optional codec override, also resolved by type name.
    string defaultCodec = config.Get("default.codec", null);
    if (defaultCodec != null)
    {
        try
        {
            Type clazz = Type.GetType(defaultCodec);
            iwConf.Codec = (Codec)Activator.CreateInstance(clazz);
        }
        catch (Exception e) when (e.IsException())
        {
            throw RuntimeException.Create("Couldn't instantiate Codec: " + defaultCodec, e);
        }
    }

    // Merge policy: resolved by type name; NoMergePolicy honors the compound setting
    // via its two singleton variants.
    string mergePolicy = config.Get("merge.policy", "Lucene.Net.Index.LogByteSizeMergePolicy, Lucene.Net");
    bool isCompound = config.Get("compound", true);
    Type mergePolicyType = Type.GetType(mergePolicy);
    if (mergePolicyType is null)
    {
        throw RuntimeException.Create("Unrecognized merge policy type '" + mergePolicy + "'"); // LUCENENET: We don't get an exception in this case, so throwing one for compatibility
    }
    else if (mergePolicyType.Equals(typeof(NoMergePolicy)))
    {
        iwConf.MergePolicy = isCompound ? NoMergePolicy.COMPOUND_FILES : NoMergePolicy.NO_COMPOUND_FILES;
    }
    else
    {
        try
        {
            iwConf.MergePolicy = (MergePolicy)Activator.CreateInstance(mergePolicyType);
        }
        catch (Exception e) when (e.IsException())
        {
            throw RuntimeException.Create("unable to instantiate class '" + mergePolicy + "' as merge policy", e);
        }
        iwConf.MergePolicy.NoCFSRatio = isCompound ? 1.0 : 0.0;
        if (iwConf.MergePolicy is LogMergePolicy logMergePolicy)
        {
            logMergePolicy.MergeFactor = config.Get("merge.factor", OpenIndexTask.DEFAULT_MERGE_PFACTOR);
        }
    }

    // Flush thresholds. The two assignments are deliberately ordered differently in
    // each branch — presumably so the config never transiently has both triggers
    // disabled at once (IndexWriterConfig rejects that state). NOTE(review): confirm
    // against IndexWriterConfig's setter validation before reordering.
    double ramBuffer = config.Get("ram.flush.mb", OpenIndexTask.DEFAULT_RAM_FLUSH_MB);
    int maxBuffered = config.Get("max.buffered", OpenIndexTask.DEFAULT_MAX_BUFFERED);
    if (maxBuffered == IndexWriterConfig.DISABLE_AUTO_FLUSH)
    {
        iwConf.RAMBufferSizeMB = ramBuffer;
        iwConf.MaxBufferedDocs = maxBuffered;
    }
    else
    {
        iwConf.MaxBufferedDocs = maxBuffered;
        iwConf.RAMBufferSizeMB = ramBuffer;
    }
    return iwConf;
}
/// <summary>
/// Returns a string representation of a revision's version from the given
/// <see cref="IndexCommit"/>: the commit generation formatted as upper-case hex.
/// </summary>
public static string RevisionVersion(IndexCommit commit)
    => commit.Generation.ToString("X");
/// <summary>
/// Creates an iterator over the file names referenced by the given commit point,
/// rooted at <paramref name="indexDirectory"/>.
/// </summary>
/// <remarks>
/// The original Java constructor declared <c>throws IOException</c>; C# has no
/// checked exceptions, so any IO failure simply propagates.
/// </remarks>
internal ReadOnlyIndexSnapshotFileIterator(File indexDirectory, IndexCommit indexCommit)
{
    _indexDirectory = indexDirectory;
    _indexCommit = indexCommit;
    _fileNames = indexCommit.FileNames.GetEnumerator();
}