// Appends one row to the write queue of the writer thread selected from the
// row's hash key, and wakes that writer once a full batch has accumulated.
// threadhashkey: hash used to pick a thread; negative values are clamped to 0.
// row: one table row; element i must match colTypes_[i] (scalar column, or
//      vector for array-typed columns).
void insertThreadWrite(int threadhashkey, List<IEntity> row)
{
    // Clamp negative keys so the modulo below always yields a valid index.
    if (threadhashkey < 0)
    {
        threadhashkey = 0;
    }
    // Use the List.Count property rather than the LINQ Count() extension:
    // no enumerator allocation for a plain list.
    int threadIndex = threadhashkey % threads_.Count;
    WriterThread writerThread = threads_[threadIndex];
    lock (writerThread.writeQueue_)
    {
        var queue = writerThread.writeQueue_;
        // If the current tail batch has outgrown vectorSize, start a new one.
        if (queue[queue.Count - 1][0].rows() > WriterThread.vectorSize)
        {
            queue.Add(createListVector());
        }
        // Hoist the tail-batch lookup out of the loop; the queue is not
        // modified again inside this lock.
        var batch = queue[queue.Count - 1];
        int size = row.Count;
        for (int i = 0; i < size; ++i)
        {
            // Array-typed columns are appended as vectors, all others as scalars.
            if ((int)colTypes_[i] < AbstractVector.ARRAY_VECTOR_BASE)
            {
                batch[i].append((IScalar)row[i]);
            }
            else
            {
                batch[i].append((IVector)row[i]);
            }
        }
        // Wake the writer thread once a full batch is ready to flush.
        if (batch[0].rows() >= batchSize_)
        {
            Monitor.Pulse(writerThread.writeQueue_);
        }
    }
}
public virtual void _testStressLocks(LockFactory lockFactory, System.IO.FileInfo indexDir)
{
    FSDirectory stressDir = FSDirectory.Open(new System.IO.DirectoryInfo(indexDir.FullName), lockFactory);

    // Seed the directory with a one-document index before stressing it.
    IndexWriter seedWriter = new IndexWriter(stressDir, new WhitespaceAnalyzer(), true, IndexWriter.MaxFieldLength.LIMITED);
    AddDoc(seedWriter);
    seedWriter.Close();

    // One writer and one searcher hammer the same directory concurrently.
    WriterThread stressWriter = new WriterThread(this, 100, stressDir);
    SearcherThread stressSearcher = new SearcherThread(this, 100, stressDir);
    stressWriter.Start();
    stressSearcher.Start();

    // Poll roughly once per second (10,000,000 ticks) until both finish.
    while (stressWriter.IsAlive || stressSearcher.IsAlive)
    {
        System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 1000));
    }

    Assert.IsTrue(!stressWriter.hitException, "IndexWriter hit unexpected exceptions");
    Assert.IsTrue(!stressSearcher.hitException, "IndexSearcher hit unexpected exceptions");

    // Cleanup
    _TestUtil.RmDir(indexDir);
}
public virtual void _testStressLocks(LockFactory lockFactory, DirectoryInfo indexDir)
{
    Directory stressDir = NewFSDirectory(indexDir, lockFactory);

    // Seed the directory with a one-document index before stressing it.
    IndexWriter seedWriter = new IndexWriter(stressDir, (new IndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()))).SetOpenMode(IndexWriterConfig.OpenMode_e.CREATE));
    AddDoc(seedWriter);
    seedWriter.Dispose();

    // One writer and one searcher hammer the same directory concurrently.
    WriterThread stressWriter = new WriterThread(this, 100, stressDir);
    SearcherThread stressSearcher = new SearcherThread(this, 100, stressDir);
    stressWriter.Start();
    stressSearcher.Start();

    // Poll once per second until both workers have finished.
    while (stressWriter.IsAlive || stressSearcher.IsAlive)
    {
        Thread.Sleep(1000);
    }

    Assert.IsTrue(!stressWriter.HitException, "IndexWriter hit unexpected exceptions");
    Assert.IsTrue(!stressSearcher.HitException, "IndexSearcher hit unexpected exceptions");

    stressDir.Dispose();

    // Cleanup
    System.IO.Directory.Delete(indexDir.FullName, true);
}
public static void TestOne()
{
    // Spin up five readers and two writers, each tagged with its index.
    var readers = new ReaderThread[5];
    var writers = new WriterThread[2];

    for (int idx = 0; idx < readers.Length; idx++)
    {
        var reader = new ReaderThread("#" + idx);
        readers[idx] = reader;
        reader.Start();
    }

    for (int idx = 0; idx < writers.Length; idx++)
    {
        var writer = new WriterThread("#" + idx);
        writers[idx] = writer;
        writer.Start();
    }
}
// Blocks until the writer thread (if any) has finished. If the writer recorded
// an exception, it is cleared; when _throw is true it is rethrown wrapped in
// an AggregateException.
protected void Wait(bool _throw)
{
    // Nothing to wait for when no writer thread was started.
    if (WriterThread == null)
    {
        return;
    }

    WriterThread.Join();

    Exception pending = WriterException;
    if (pending == null)
    {
        return;
    }

    // Clear the stored failure before (optionally) surfacing it.
    WriterException = null;
    if (_throw)
    {
        throw new AggregateException(pending);
    }
}
public virtual void TestIsCurrentWithThreads()
{
    Directory directory = NewDirectory();
    IndexWriterConfig config = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random));
    IndexWriter indexWriter = new IndexWriter(directory, config);
    ReaderHolder readerHolder = new ReaderHolder();
    ReaderThread[] readerThreads = new ReaderThread[AtLeast(3)];
    CountdownEvent startLatch = new CountdownEvent(1);
    WriterThread writerWorker = new WriterThread(readerHolder, indexWriter, AtLeast(500), Random, startLatch);

    // Launch all readers first; they coordinate with the writer via the latch.
    for (int i = 0; i < readerThreads.Length; i++)
    {
        readerThreads[i] = new ReaderThread(readerHolder, startLatch);
        readerThreads[i].Start();
    }

    writerWorker.Start();
    writerWorker.Join();

    // Dump any failure from the writer before collecting the readers.
    bool anyFailed = writerWorker.failed != null;
    if (anyFailed)
    {
        Console.WriteLine(writerWorker.failed.ToString());
        Console.Write(writerWorker.failed.StackTrace);
    }

    foreach (ReaderThread reader in readerThreads)
    {
        reader.Join();
        if (reader.failed != null)
        {
            Console.WriteLine(reader.failed.ToString());
            Console.Write(reader.failed.StackTrace);
            anyFailed = true;
        }
    }

    Assert.IsFalse(anyFailed);
    indexWriter.Dispose();
    directory.Dispose();
}
public virtual void TestIsCurrentWithThreads()
{
    Directory indexDir = NewDirectory();
    IndexWriterConfig iwConfig = NewIndexWriterConfig(TEST_VERSION_CURRENT, new MockAnalyzer(Random()));
    IndexWriter iw = new IndexWriter(indexDir, iwConfig);
    ReaderHolder sharedHolder = new ReaderHolder();
    ReaderThread[] readers = new ReaderThread[AtLeast(3)];
    CountdownEvent latch = new CountdownEvent(1);
    WriterThread updater = new WriterThread(sharedHolder, iw, AtLeast(500), Random(), latch);

    // Launch all readers first; they coordinate with the writer via the latch.
    int r = 0;
    while (r < readers.Length)
    {
        ReaderThread reader = new ReaderThread(sharedHolder, latch);
        readers[r] = reader;
        reader.Start();
        r++;
    }

    updater.Start();
    updater.Join();

    // Dump any failure from the writer before collecting the readers.
    bool sawFailure = updater.Failed != null;
    if (sawFailure)
    {
        Console.WriteLine(updater.Failed.ToString());
        Console.Write(updater.Failed.StackTrace);
    }

    for (int i = 0; i < readers.Length; i++)
    {
        readers[i].Join();
        if (readers[i].Failed != null)
        {
            Console.WriteLine(readers[i].Failed.ToString());
            Console.Write(readers[i].Failed.StackTrace);
            sawFailure = true;
        }
    }

    Assert.IsFalse(sawFailure);
    iw.Dispose();
    indexDir.Dispose();
}
// Enqueues one log entry on the writer for its target file, starting that
// writer's background thread if it is not already running.
// JsonObj: the log entry to append.
// Throws: Exception wrapping any failure, with the original as InnerException.
private static void QueueAndWrite(LogData JsonObj)
{
    try
    {
        // Resolve the target file name: either via the custom creator, or by
        // expanding the date placeholder in the template.
        string strFileName;
        if (FileNameCreator == null)
        {
            strFileName = FileNameTemplate;
            strFileName = strFileName.Replace("yyyyMMdd", DateTime.Now.ToString("yyyyMMdd"));
        }
        else
        {
            strFileName = FileNameCreator.Invoke();
        }

        WriterThread objWriter = GetWriter(strFileName);
        objWriter.LogQueue.Enqueue(JsonObj);

        //
        // If the WriterThread is not started yet, start it; otherwise continue.
        //
        if (!objWriter.Running)
        {
            //
            // Running must be set to TRUE here, otherwise there is a risk that an
            // additional log-write request arrives before the thread starts,
            // causing an error.
            //
            objWriter.Running = true;
            objWriter.FileTarget = strFileName;
            new Thread(new ThreadStart(objWriter.Write)).Start();
        }
    }
    catch (Exception ex)
    {
        // FIX: pass the original exception as InnerException so its stack
        // trace is preserved instead of being flattened into the message.
        throw new Exception("QueueAndWrite -> " + ex.Message, ex);
    }
}
/// <summary>
/// This method will copy the file or directory represented by this
/// <tt>SmbFile</tt> and its sub-contents to the location specified by the
/// <tt>dest</tt> parameter.
/// </summary>
/// <remarks>
/// This method will copy the file or directory represented by this
/// <tt>SmbFile</tt> and its sub-contents to the location specified by the
/// <tt>dest</tt> parameter. This file and the destination file do not
/// need to be on the same host. This operation does not copy extended
/// file attributes such as ACLs but it does copy regular attributes as
/// well as create and last write times. This method is almost twice as
/// efficient as manually copying as it employs an additional write
/// thread to read and write data concurrently.
/// <p/>
/// It is not possible (nor meaningful) to copy entire workgroups or
/// servers.
/// </remarks>
/// <param name="dest">the destination file or directory</param>
/// <exception cref="SmbException">SmbException</exception>
/// <exception cref="SharpCifs.Smb.SmbException"></exception>
public virtual void CopyTo(SmbFile dest)
{
    SmbComReadAndX req;
    SmbComReadAndXResponse resp;
    WriterThread w;
    int bsize;
    byte[][] b;
    // Copying only makes sense between shares, not workgroups/servers.
    if (_share == null || dest._share == null)
    {
        throw new SmbException("Invalid operation for workgroups or servers");
    }
    req = new SmbComReadAndX();
    resp = new SmbComReadAndXResponse();
    Connect0();
    dest.Connect0();
    ResolveDfs(null);
    try
    {
        // Refuse to copy a path onto (a prefix of) itself on the same host.
        if (GetAddress().Equals(dest.GetAddress()) && _canon.RegionMatches(true, 0, dest._canon
            , 0, Math.Min(_canon.Length, dest._canon.Length)))
        {
            throw new SmbException("Source and destination paths overlap.");
        }
    }
    catch (UnknownHostException)
    {
        // Address resolution failed; skip the overlap check and proceed.
    }
    // Background writer: reads are issued on this thread while writes to the
    // destination happen concurrently on w.
    w = new WriterThread(this);
    w.SetDaemon(true);
    w.Start();
    // Negotiate a common send-buffer size between the two transports, then
    // size the double buffers to fit under it (70 bytes of protocol overhead).
    SmbTransport t1 = Tree.Session.transport;
    SmbTransport t2 = dest.Tree.Session.transport;
    if (t1.SndBufSize < t2.SndBufSize)
    {
        t2.SndBufSize = t1.SndBufSize;
    }
    else
    {
        t1.SndBufSize = t2.SndBufSize;
    }
    bsize = Math.Min(t1.RcvBufSize - 70, t1.SndBufSize - 70);
    b = new[] { new byte[bsize], new byte[bsize] };
    try
    {
        CopyTo0(dest, b, bsize, w, req, resp);
    }
    finally
    {
        // Sentinel write (-1 length) tells the writer thread to shut down.
        w.Write(null, -1, null, 0);
    }
}
/// <summary>
/// Recursive worker for <see cref="CopyTo"/>: copies this file or directory
/// (and, for directories, all children) to <paramref name="dest"/> using the
/// shared double buffer <paramref name="b"/> and the background writer
/// <paramref name="w"/>.
/// </summary>
/// <exception cref="SharpCifs.Smb.SmbException"></exception>
internal virtual void CopyTo0(SmbFile dest, byte[][] b, int bsize, WriterThread w,
    SmbComReadAndX req, SmbComReadAndXResponse resp)
{
    int i;
    // Refresh cached attributes/timestamps if the cache entry has expired.
    if (_attrExpiration < Runtime.CurrentTimeMillis())
    {
        _attributes = AttrReadonly | AttrDirectory;
        _createTime = 0L;
        _lastModified = 0L;
        _isExists = false;
        IInfo info = QueryPath(GetUncPath0(), Trans2QueryPathInformationResponse.SMB_QUERY_FILE_BASIC_INFO
            );
        _attributes = info.GetAttributes();
        _createTime = info.GetCreateTime();
        _lastModified = info.GetLastWriteTime();
        _isExists = true;
        _attrExpiration = Runtime.CurrentTimeMillis() + AttrExpirationPeriod;
    }
    if (IsDirectory())
    {
        // Directory: create the destination directory, then recurse into children.
        SmbFile[] files;
        SmbFile ndest;
        string path = dest.GetUncPath0();
        if (path.Length > 1)
        {
            try
            {
                dest.Mkdir();
                dest.SetPathInformation(_attributes, _createTime, _lastModified);
            }
            catch (SmbException se)
            {
                // "Already exists" and "access denied" are tolerated here;
                // anything else aborts the copy.
                if (se.GetNtStatus() != NtStatus.NtStatusAccessDenied && se.GetNtStatus() != NtStatus
                    .NtStatusObjectNameCollision)
                {
                    throw;
                }
            }
        }
        files = ListFiles("*", AttrDirectory | AttrHidden | AttrSystem, null, null);
        try
        {
            for (i = 0; i < files.Length; i++)
            {
                ndest = new SmbFile(dest, files[i].GetName(), files[i].Type, files[i]._attributes,
                    files[i]._createTime, files[i]._lastModified, files[i]._size);
                files[i].CopyTo0(ndest, b, bsize, w, req, resp);
            }
        }
        catch (UnknownHostException uhe)
        {
            throw new SmbException(Url.ToString(), uhe);
        }
        catch (UriFormatException mue)
        {
            throw new SmbException(Url.ToString(), mue);
        }
    }
    else
    {
        // Regular file: stream its contents through the double buffer.
        long off;
        try
        {
            Open(ORdonly, 0, AttrNormal, 0);
            try
            {
                dest.Open(OCreat | OWronly | OTrunc, SmbConstants.FileWriteData | SmbConstants.FileWriteAttributes,
                    _attributes, 0);
            }
            catch (SmbAuthException sae)
            {
                // If the destination is read-only, clear the flag and retry once.
                if ((dest._attributes & AttrReadonly) != 0)
                {
                    dest.SetPathInformation(dest._attributes & ~AttrReadonly, 0L, 0L);
                    dest.Open(OCreat | OWronly | OTrunc, SmbConstants.FileWriteData | SmbConstants.FileWriteAttributes,
                        _attributes, 0);
                }
                else
                {
                    throw;
                }
            }
            i = 0;
            off = 0L;
            // Ping-pong loop: read into buffer i while the writer drains the
            // other buffer; i alternates between 0 and 1 each iteration.
            for (; ; )
            {
                req.SetParam(Fid, off, bsize);
                resp.SetParam(b[i], 0);
                Send(req, resp);
                lock (w)
                {
                    // Surface any error the writer hit on the previous chunk.
                    if (w.E != null)
                    {
                        throw w.E;
                    }
                    // Wait until the writer is ready to accept the next chunk.
                    while (!w.Ready)
                    {
                        try
                        {
                            Runtime.Wait(w);
                        }
                        catch (Exception ie)
                        {
                            throw new SmbException(dest.Url.ToString(), ie);
                        }
                    }
                    // Re-check: the writer may have failed while we waited.
                    if (w.E != null)
                    {
                        throw w.E;
                    }
                    // Zero-length read means end of file.
                    if (resp.DataLength <= 0)
                    {
                        break;
                    }
                    w.Write(b[i], resp.DataLength, dest, off);
                }
                i = i == 1 ? 0 : 1;
                off += resp.DataLength;
            }
            // Mirror the source attributes and timestamps onto the destination.
            dest.Send(new Trans2SetFileInformation(dest.Fid, _attributes, _createTime, _lastModified
                ), new Trans2SetFileInformationResponse());
            dest.Close(0L);
        }
        catch (SmbException se)
        {
            if (IgnoreCopyToException == false)
            {
                throw new SmbException("Failed to copy file from [" + ToString() + "] to [" + dest
                     + "]", se);
            }
            // Best-effort mode: log and continue with the remaining files.
            if (Log.Level > 1)
            {
                Runtime.PrintStackTrace(se, Log);
            }
        }
        finally
        {
            Close();
        }
    }
}
// Compresses `source` into `destination` (forcing a .gz extension) using a
// pipeline of one reader thread, one compressor thread per CPU core, and one
// writer thread, then waits for all of them to finish.
protected override void ExecuteConcrete()
{
    #region Extension of the compressed file
    try
    {
        // Ensure the destination carries the .gz extension.
        var extension = Path.GetExtension(destination);
        if (!extension.Equals(".gz", StringComparison.CurrentCultureIgnoreCase))
        {
            destination += ".gz";
        }
    }
    catch
    {
        // FIX: corrected typo "contrains" -> "contains" in the message.
        Console.WriteLine("Resulting file name contains invalid characters.");
        return;
    }
    #endregion

    #region Checking and re-writing existing file
    try
    {
        if (File.Exists(destination))
        {
            Console.WriteLine($"The file {destination} already exists. Rewrite it? y/n");
            var answer = Console.ReadLine();
            // FIX: Console.ReadLine() returns null on end-of-stream (e.g.
            // redirected input); treat that as "no" instead of throwing
            // NullReferenceException.
            if (answer == null || !answer.Equals("y", StringComparison.CurrentCultureIgnoreCase))
            {
                return;
            }
            try
            {
                File.Delete(destination);
            }
            catch (UnauthorizedAccessException)
            {
                Console.WriteLine("You have no rights to overwrite the file, or this file is in use by another app.");
                return;
            }
            catch (IOException)
            {
                Console.WriteLine("The file is in use by another app, so it is unable to overwrite it.");
                return;
            }
            catch
            {
                Console.WriteLine("Unable to overwrite this file. Probably you've set an invalid file name.");
                return;
            }
        }
    }
    catch
    {
        // NOTE(review): the original intentionally falls through and still
        // attempts compression after this message — preserved; confirm intended.
        Console.WriteLine("Unknown error. Please try set another resulting file name.");
    }
    #endregion

    var numberOfCompressorThreads = Environment.ProcessorCount;
    var sharedStateAfterReadFile = ReaderCompressorSharedState.Create(numberOfCompressorThreads, c_readFileChunkSize);
    var sharedStateAfterCompression = CompressorWriterSharedState.Create(numberOfCompressorThreads, numberOfCompressorThreads * 2);

    // Pipeline: one reader -> N compressors -> one writer. No explicit Start()
    // calls appear here — presumably the thread wrappers start themselves on
    // construction; TODO confirm against their implementations.
    var reader = new ReaderThread(source, c_readFileChunkSize, (IForReaderThread)sharedStateAfterReadFile);
    var compressors = Enumerable
        .Range(0, numberOfCompressorThreads)
        .Select(x => new CompressorThread(c_readFileChunkSize, (IForCompressorThreadInput)sharedStateAfterReadFile, (IForCompressorThreadOutput)sharedStateAfterCompression))
        .ToList();
    var writer = new WriterThread(destination, c_readFileChunkSize, (IForWriterThread)sharedStateAfterCompression);

    reader.Join();
    compressors.ForEach(x => x.Join());
    writer.Join();

    Console.WriteLine("Finished. Press enter to exit.");
    Console.ReadLine();
}
/**
 * Builds a multithreaded writer for a DolphinDB table: validates parameters,
 * fetches the table schema to learn column names/types and partitioning, and
 * starts threadCount writer threads (the first reuses the schema connection,
 * the rest open their own).
 * If fail to connect to the specified DolphinDB server, this function throw an exception.
 */
public MultithreadedTableWriter(string hostName, int port, string userId, string password,
    string dbName, string tableName, bool useSSL,
    bool enableHighAvailability = false, string[] pHighAvailabilitySites = null,
    int batchSize = 1, float throttle = 0.01f, int threadCount = 5, string partitionCol = "",
    int[] pCompressMethods = null)
{
    hostName_ = hostName;
    port_ = port;
    userId_ = userId;
    password_ = password;
    useSSL_ = useSSL;
    dbName_ = dbName;
    tableName_ = tableName;
    batchSize_ = batchSize;
    // FIX: the original wrote "(int)throttle * 1000", which truncates throttle
    // to an int BEFORE scaling — so any throttle < 1.0 (including the 0.01f
    // default) became 0 ms. Scale first, then truncate.
    throttleMilsecond_ = (int)(throttle * 1000);
    isExiting_ = false;

    // Parameter validation.
    if (threadCount < 1)
    {
        throw new Exception("The parameter threadCount must be greater than or equal to 1.");
    }
    if (batchSize < 1)
    {
        throw new Exception("The parameter batchSize must be greater than or equal to 1.");
    }
    if (throttle < 0)
    {
        throw new Exception("The parameter throttle must be positive.");
    }
    if (threadCount > 1 && partitionCol == String.Empty)
    {
        throw new Exception("The parameter partitionCol must be specified when threadCount is greater than 1.");
    }

    DBConnection pConn = new DBConnection(false, useSSL_, pCompressMethods != null);
    bool ret = pConn.connect(hostName_, port_, userId_, password_, "", enableHighAvailability, pHighAvailabilitySites);
    if (!ret)
    {
        throw new Exception(string.Format("Failed to connect to server {0}:{1}. ", hostName, port));
    }

    // Fetch the schema of the target table (or of the database when no table
    // name is given) to learn column names/types and partition information.
    BasicDictionary schema;
    if (tableName == "")
    {
        schema = (BasicDictionary)pConn.run("schema(" + dbName + ")");
    }
    else
    {
        schema = (BasicDictionary)pConn.run("schema(loadTable(\"" + dbName + "\",\"" + tableName + "\"))");
    }

    IEntity partColNames = null;
    if (schema.ContainsKey("partitionColumnName"))
    {
        partColNames = schema.get(new BasicString("partitionColumnName"));
        isPartionedTable_ = true;
    }
    else
    {
        isPartionedTable_ = false;
        if (tableName != "")
        {
            if (threadCount > 1)
            {
                // Only relevant when multiple threads are requested.
                throw new Exception("The parameter threadCount must be 1 for a dimension table.");
            }
        }
    }

    // Column metadata: names, numeric type codes, and type strings.
    BasicTable colDefs = (BasicTable)schema.get("colDefs");
    BasicIntVector colDefsTypeInt = (BasicIntVector)colDefs.getColumn("typeInt");
    BasicStringVector colDefsName = (BasicStringVector)colDefs.getColumn("name");
    BasicStringVector colDefsTypeString = (BasicStringVector)colDefs.getColumn("typeString");
    colTypes_ = new List<DATA_TYPE>();
    colNames_ = new List<string>();
    colTypeString_ = new List<string>();
    int columnSize = colDefsName.rows();
    if (pCompressMethods != null)
    {
        if (columnSize != pCompressMethods.Length)
        {
            throw new Exception(string.Format("The number of elements in parameter compressMethods does not match the column size {0}. ", columnSize));
        }
        this.compressTypes_ = new int[columnSize];
        Array.Copy(pCompressMethods, this.compressTypes_, columnSize);
    }
    for (int i = 0; i < columnSize; i++)
    {
        colNames_.Add(colDefsName.getString(i));
        colTypes_.Add((DATA_TYPE)colDefsTypeInt.getInt(i));
        colTypeString_.Add(colDefsTypeString.getString(i));
        if (compressTypes_ != null)
        {
            AbstractVector.checkCompressedMethod(colTypes_[i], compressTypes_[i]);
        }
    }

    if (threadCount > 1)
    {
        if (isPartionedTable_)
        {
            // Resolve which partition column drives thread assignment and build
            // the matching partition domain.
            IEntity partitionSchema;
            int partitionType;
            if (partColNames.isScalar())
            {
                if (partColNames.getString() != partitionCol)
                {
                    throw new Exception(string.Format("The parameter partionCol must be the partitioning column \"{0}\" in the table. ", partitionCol));
                }
                partitionColumnIdx_ = ((BasicInt)schema.get("partitionColumnIndex")).getInt();
                partitionSchema = schema.get("partitionSchema");
                partitionType = ((BasicInt)schema.get("partitionType")).getInt();
            }
            else
            {
                // Composite partitioning: locate partitionCol among the columns.
                int dims = ((BasicStringVector)partColNames).rows();
                if (dims > 1 && partitionCol == "")
                {
                    throw new Exception("The parameter partitionCol must be specified when threadCount is greater than 1.");
                }
                int index = -1;
                for (int i = 0; i < dims; ++i)
                {
                    if (((BasicStringVector)partColNames).getString(i) == partitionCol)
                    {
                        index = i;
                        break;
                    }
                }
                if (index < 0)
                {
                    throw new Exception(string.Format("The parameter partionCol must be the partitioning column \"{0}\" in the table. ", partitionCol));
                }
                partitionColumnIdx_ = ((BasicIntVector)schema.get("partitionColumnIndex")).getInt(index);
                partitionSchema = ((BasicAnyVector)schema.get("partitionSchema")).get(index);
                partitionType = ((BasicIntVector)schema.get("partitionType")).getInt(index);
            }
            DATA_TYPE partitionColType = colTypes_[partitionColumnIdx_];
            partitionDomain_ = DomainFactory.createDomain((PARTITION_TYPE)partitionType, partitionColType, partitionSchema);
        }
        else
        {
            // Non-partitioned table: distribute rows to threads by the values
            // of a user-chosen column instead.
            if (partitionCol != "")
            {
                int threadcolindex = -1;
                for (int i = 0; i < colNames_.Count; i++)
                {
                    if (colNames_[i] == partitionCol)
                    {
                        threadcolindex = i;
                        break;
                    }
                }
                if (threadcolindex < 0)
                {
                    throw new Exception(string.Format("No match found for {0}. ", partitionCol));
                }
                threadByColIndexForNonPartion_ = threadcolindex;
            }
        }
    }

    // Init done, start the writer threads now. Thread 0 reuses the schema
    // connection; the others open dedicated connections.
    isExiting_ = false;
    threads_ = new List<WriterThread>(threadCount);
    for (int i = 0; i < threadCount; i++)
    {
        WriterThread writerThread = new WriterThread(this, pConn);
        if (i == 0)
        {
            writerThread.conn_ = pConn;
        }
        else
        {
            // NOTE(review): argument order here (useSSL_, false) differs from the
            // first connection (false, useSSL_, ...) and compression is not
            // enabled — verify against DBConnection's constructor signature.
            writerThread.conn_ = new DBConnection(useSSL_, false);
            if (writerThread.conn_.connect(hostName_, port_, userId_, password_, "", enableHighAvailability, pHighAvailabilitySites) == false)
            {
                throw new Exception(string.Format("Failed to connect to server {0}:{1}. ", hostName, port));
            }
        }
        threads_.Add(writerThread);
    }
}
public virtual void _TestStressLocks(LockFactory lockFactory, System.String indexDirName)
{
    FSDirectory stressDir = FSDirectory.GetDirectory(indexDirName, lockFactory);

    // Seed the directory with a one-document index before stressing it.
    IndexWriter seedWriter = new IndexWriter(stressDir, new WhitespaceAnalyzer(), true);
    AddDoc(seedWriter);
    seedWriter.Close();

    // One writer and one searcher hammer the same directory concurrently.
    WriterThread stressWriter = new WriterThread(this, 100, stressDir);
    SearcherThread stressSearcher = new SearcherThread(this, 100, stressDir);
    stressWriter.Start();
    stressSearcher.Start();

    // Poll roughly once per second (10,000,000 ticks) until both are done;
    // interrupts during the sleep are swallowed and the wait continues.
    while (stressWriter.IsAlive || stressSearcher.IsAlive)
    {
        try
        {
            System.Threading.Thread.Sleep(new System.TimeSpan((System.Int64) 10000 * 1000));
        }
        catch (System.Threading.ThreadInterruptedException)
        {
        }
    }

    Assert.IsTrue(!stressWriter.hitException, "IndexWriter hit unexpected exceptions");
    Assert.IsTrue(!stressSearcher.hitException, "IndexSearcher hit unexpected exceptions");

    // Cleanup
    RmDir(indexDirName);
}