public override void Flush() { try { base.Flush(); } finally { @delegate.Flush(); } }
public override void Flush(IDictionary<TermsHashConsumerPerThread, ICollection<TermsHashConsumerPerField>> threadsAndFields, SegmentWriteState state)
{
    lock (this)
    {
        // NOTE: it's possible that all documents seen in this segment
        // hit non-aborting exceptions, in which case we will
        // not have yet init'd the TermVectorsWriter. This is
        // actually OK (unlike in the stored fields case)
        // because, although FieldInfos.hasVectors() will return
        // true, the TermVectorsReader gracefully handles
        // non-existence of the term vectors files.
        if (tvx != null)
        {
            if (state.numDocsInStore > 0)
            {
                // In case there are some final documents that we
                // didn't see (because they hit a non-aborting exception):
                Fill(state.numDocsInStore - docWriter.DocStoreOffset);
            }

            tvx.Flush();
            tvd.Flush();
            tvf.Flush();
        }

        foreach (var entry in threadsAndFields)
        {
            foreach (var field in entry.Value)
            {
                TermVectorsTermsWriterPerField perField = (TermVectorsTermsWriterPerField)field;
                perField.termsHashPerField.Reset();
                perField.ShrinkHash();
            }

            TermVectorsTermsWriterPerThread perThread = (TermVectorsTermsWriterPerThread)entry.Key;
            perThread.termsHashPerThread.Reset(true);
        }
    }
}
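// For context: a sketch of the padding that Fill(docID) performs above --
// writing placeholder term-vector entries for documents that were skipped
// (e.g. because they hit a non-aborting exception), so every docID has a
// valid pointer in tvx. Member names here (lastDocID, FilePointer,
// WriteLong, WriteVInt) are assumptions based on the corresponding
// Lucene.NET sources and may differ by version.
private void FillSketch(int docID)
{
    int end = docID + docWriter.DocStoreOffset;
    long tvfPosition = tvf.FilePointer;
    while (lastDocID < end)
    {
        tvx.WriteLong(tvd.FilePointer); // pointer into tvd for this doc
        tvd.WriteVInt(0);               // document has zero vectored fields
        tvx.WriteLong(tvfPosition);     // pointer into tvf (nothing written)
        lastDocID++;
    }
}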
protected override void Dispose(bool disposing)
{
    _fileMutex.WaitOne();
    try
    {
        // make sure it's all written out
        _indexOutput.Flush();

        long originalLength = _indexOutput.Length;
        _indexOutput.Dispose();

        using (var blobStream = new StreamInput(CacheDirectory.OpenInput(_name, IOContext.DEFAULT)))
        {
            // push the blobStream up to the cloud
            _blob.UploadFromStream(blobStream);

            // set the metadata with the original index file properties
            _blob.SetMetadata();

            Debug.WriteLine(string.Format("PUT {1} bytes to {0} in cloud", _name, blobStream.Length));
        }

#if FULLDEBUG
        Debug.WriteLine(string.Format("CLOSED WRITESTREAM {0}", _name));
#endif

        // clean up
        _indexOutput = null;
        _blobContainer = null;
        _blob = null;
        GC.SuppressFinalize(this);
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}
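// A minimal sketch of the same write-through pattern against a generic
// remote store: finish the local (cache) copy first, then upload the
// completed file, and always release the mutex. The _remoteStore field and
// its Upload method are hypothetical placeholders, not the library's API.
protected void DisposeSketch()
{
    _fileMutex.WaitOne();
    try
    {
        _indexOutput.Flush();    // ensure all buffered bytes hit the cache dir
        _indexOutput.Dispose();  // the local file is now complete

        using (var input = CacheDirectory.OpenInput(_name, IOContext.DEFAULT))
        using (var stream = new StreamInput(input))
        {
            _remoteStore.Upload(_name, stream);  // hypothetical remote-store call
        }
        _indexOutput = null;
    }
    finally
    {
        _fileMutex.ReleaseMutex();  // release even if the upload throws
    }
}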
public override void Flush() { io.Flush(); }
public override void Flush() { _cacheDirIndexOutput.Flush(); }
public override void Flush() { Sleep(flushDelayMillis); @delegate.Flush(); }
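// The Flush above injects artificial latency before delegating, which is
// useful for simulating slow I/O in tests. A self-contained version of the
// same technique over System.IO.Stream (an illustration by assumption; the
// snippet above wraps a Lucene IndexOutput, not a Stream):
using System;
using System.IO;
using System.Threading;

public sealed class SleepingStream : Stream
{
    private readonly Stream inner;
    private readonly int flushDelayMillis;

    public SleepingStream(Stream inner, int flushDelayMillis)
    {
        this.inner = inner ?? throw new ArgumentNullException(nameof(inner));
        this.flushDelayMillis = flushDelayMillis;
    }

    public override void Flush()
    {
        Thread.Sleep(flushDelayMillis);  // injected latency
        inner.Flush();
    }

    // Everything else forwards straight to the wrapped stream.
    public override bool CanRead => inner.CanRead;
    public override bool CanSeek => inner.CanSeek;
    public override bool CanWrite => inner.CanWrite;
    public override long Length => inner.Length;
    public override long Position { get => inner.Position; set => inner.Position = value; }
    public override int Read(byte[] buffer, int offset, int count) => inner.Read(buffer, offset, count);
    public override long Seek(long offset, SeekOrigin origin) => inner.Seek(offset, origin);
    public override void SetLength(long value) => inner.SetLength(value);
    public override void Write(byte[] buffer, int offset, int count) => inner.Write(buffer, offset, count);
}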
public override void Flush() { _indexOutput?.Flush(); }
public override void Flush() { Output.Flush(); }
/// <summary>
/// Forces any buffered output to be written.
/// </summary>
/// <exception cref="System.IO.FileNotFoundException">Unable to open SharePoint File: File does not exist.</exception>
public override void Flush()
{
    m_indexOutput.Flush();
}
/// <summary>
/// Closes the sql index output.
/// </summary>
public override void Close()
{
    string fileName = _fileName;

    // make sure it's all written out
    _indexOutput.Flush();

    long originalLength = _indexOutput.Length();
    _indexOutput.Close();

    Stream fileStream = new StreamInput(CacheDirectory.OpenInput(fileName));
    try
    {
        // push the file stream up to the db.
        ICommandBuilder builder = _sqlStorageProviderUtility.GetCommandBuilder2();
        DbConnection connection = builder.GetConnection(_connString);
        DbTransaction transaction = _sqlStorageProviderUtility.BeginTransaction(connection);

        QueryBuilder queryBuilder = new QueryBuilder(builder);

        //bool fileExists = FileExists(transaction, _wiki, _fileName);

        // To achieve decent performance, an UPDATE query is issued if the file exists,
        // otherwise an INSERT query is issued
        string query;
        List<Parameter> parameters;

        byte[] fileData = null;
        int size = Tools.ReadStream(fileStream, ref fileData, MaxFileSize);
        if (size < 0)
        {
            _sqlStorageProviderUtility.RollbackTransaction(transaction);
            throw new ArgumentException("Source Stream contains too much data", "sourceStream");
        }

        //if(fileExists) {
        //    query = queryBuilder.Update("SearchIndex", new string[] { "Size", "LastModified", "Data" }, new string[] { "Size", "LastModified", "Data" });
        //    query = queryBuilder.Where(query, "Wiki", WhereOperator.Equals, "Wiki");
        //    query = queryBuilder.AndWhere(query, "Name", WhereOperator.Equals, "Name");
        //    parameters = new List<Parameter>(5);
        //    parameters.Add(new Parameter(ParameterType.String, "Wiki", _wiki));
        //    parameters.Add(new Parameter(ParameterType.Int64, "Size", (long)originalLength));
        //    parameters.Add(new Parameter(ParameterType.DateTime, "LastModified", DateTime.Now.ToUniversalTime()));
        //    parameters.Add(new Parameter(ParameterType.ByteArray, "Data", fileData));
        //    parameters.Add(new Parameter(ParameterType.String, "Name", _fileName));
        //}
        //else {
        query = queryBuilder.InsertInto("SearchIndex",
            new string[] { "Wiki", "Name", "Size", "LastModified", "Data" },
            new string[] { "Wiki", "Name", "Size", "LastModified", "Data" });
        parameters = new List<Parameter>(5);
        parameters.Add(new Parameter(ParameterType.String, "Wiki", _wiki));
        parameters.Add(new Parameter(ParameterType.String, "Name", _fileName));
        parameters.Add(new Parameter(ParameterType.Int64, "Size", (long)originalLength));
        parameters.Add(new Parameter(ParameterType.DateTime, "LastModified", DateTime.Now.ToUniversalTime()));
        parameters.Add(new Parameter(ParameterType.ByteArray, "Data", fileData));
        //}

        DbCommand command = builder.GetCommand(transaction, query, parameters);

        int rows = _sqlStorageProviderUtility.ExecuteNonQuery(command, false);
        if (rows == 1)
        {
            _sqlStorageProviderUtility.CommitTransaction(transaction);
        }
        else
        {
            _sqlStorageProviderUtility.RollbackTransaction(transaction);
        }
    }
    finally
    {
        fileStream.Dispose();
    }

    // clean up
    _indexOutput = null;
    GC.SuppressFinalize(this);
}
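// For context: a sketch of the size-capped read that Tools.ReadStream is
// used for above -- buffer the whole stream into a byte array, but return -1
// if it exceeds maxSize so the caller can roll back the transaction. This is
// behavior inferred from how the result is checked, not the provider's
// actual implementation.
using System.IO;

static int ReadStreamSketch(Stream source, ref byte[] data, int maxSize)
{
    using (var buffer = new MemoryStream())
    {
        var chunk = new byte[8192];
        int read;
        while ((read = source.Read(chunk, 0, chunk.Length)) > 0)
        {
            if (buffer.Length + read > maxSize) return -1;  // too much data
            buffer.Write(chunk, 0, read);
        }
        data = buffer.ToArray();
        return data.Length;  // number of bytes read
    }
}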
internal void Flush() { indexStream.Flush(); fieldsStream.Flush(); }
public override void Flush() { cacheOutput.Flush(); isFlushed = true; }