protected override void threadmain()
{
    using (updateDB db = new updateDB(Program.connstr, Program.dbName))
    {
        // Read anything we queued in a previous run.
        foreach (var alreadyClaimed in db.getClaimedUpdate())
        {
            claimedUpdates.Enqueue(alreadyClaimed);
        }

        // Make sure we have at least highWaterMark updates queued (if they are available).
        while (claimedUpdates.Count < highWaterMark)
        {
            wsusUpdate nextUpdate = db.startNextUpdate();
            if (nextUpdate == null)
            {
                _allUpdatesExhausted = true;
                exitTime = true;
                break;
            }
            claimedUpdates.Enqueue(nextUpdate);
        }

        while (true)
        {
            // If we drop below the low water mark, claim updates until we get back to the high water mark.
            if (claimedUpdates.Count < lowWaterMark)
            {
                while (claimedUpdates.Count < highWaterMark)
                {
                    wsusUpdate nextUpdate = db.startNextUpdate();
                    if (nextUpdate == null)
                    {
                        _allUpdatesExhausted = true;
                        exitTime = true;
                        break;
                    }
                    claimedUpdates.Enqueue(nextUpdate);
                }
            }

            if (exitTime)
            {
                // TODO: should we unmark what we have in progress before exiting?
                break;
            }

            pollTimer.WaitOne(100);
        }
    }
}
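// Each worker in this listing overrides threadmain() and shares members such as exitTime, pollTimer,
// threadname, OnLogString and start()/stop(), but the base class itself is never shown. The sketch
// below is an assumption about its minimal shape, included only so the overrides read in isolation:
// the class name "workerThreadBase", the member types, and the thread-handling details are
// hypothetical (for instance, the real constructors also take a logger, which is omitted here).
using System;
using System.Threading;

public abstract class workerThreadBase : IDisposable
{
    public string threadname;
    public Action<string> OnLogString;                        // assumed logging callback, set by callers
    protected volatile bool exitTime = false;                 // set by stop() to request a clean shutdown
    protected readonly AutoResetEvent pollTimer = new AutoResetEvent(false);
    private Thread _thread;

    // Spin up the worker and return it so callers can hand it straight to a threadLifetimeCollection.
    public workerThreadBase start()
    {
        _thread = new Thread(threadmain) { Name = threadname, IsBackground = true };
        _thread.Start();
        return this;
    }

    // Ask the worker to finish whatever it has queued and leave its loop.
    public void stop()
    {
        exitTime = true;
        pollTimer.Set();    // wake the loop immediately instead of waiting for the next poll timeout
    }

    public void Dispose()
    {
        stop();
        _thread?.Join();
    }

    protected abstract void threadmain();
}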
protected override void threadmain()
{
    using (updateDB db = new updateDB(Program.connstr, Program.dbName))
    {
        while (true)
        {
            if (queue.IsEmpty)
            {
                if (exitTime)
                {
                    if (queue.IsEmpty)
                    {
                        break;
                    }
                }
                pollTimer.WaitOne(100);
                continue;
            }

            if (!queue.TryDequeue(out downloadThread.downloadedUpdate updateToProcess) || updateToProcess == null)
            {
                continue;
            }

            currentlyProcessing = updateToProcess.update.parent.downloadURI;
            UpdateProcessingStarted.Set();
            updateToProcess.update.parent.startTime = DateTime.Now;
            updateToProcess.update.OnLogString = this.OnLogString;

            try
            {
                updateToProcess.update.writeToDB(asyncSQLParams, updateToProcess.absolutepath);
            }
            catch (Exception e)
            {
                db.logError(updateToProcess.update.parent, e);
            }

            archiver.enqueue(updateToProcess.absolutepath);
            currentlyProcessing = null;
        }
    }
}
public static void importWSUSFiles()
{
    using (updateDB db = new updateDB(connstr, dbName))
    {
        int updateIndexInBatch = 0;
        wsusUpdate[] updates = new wsusUpdate[400];

        foreach (IUpdate update in getUpdates())
        {
            foreach (IInstallableItem item in update.GetInstallableItems())
            {
                foreach (UpdateFile f in item.Files)
                {
                    if (f.Type == FileType.Express || f.OriginUri.ToString().EndsWith(".txt"))
                    {
                        continue;
                    }

                    wsusUpdate upd = new wsusUpdate(f);
                    updates[updateIndexInBatch++] = upd;

                    // Flush a full batch to the database and start filling the buffer again.
                    if (updateIndexInBatch == updates.Length)
                    {
                        db.insert_noconcurrency(updates);
                        updateIndexInBatch = 0;
                    }
                }
            }
        }

        // Flush whatever is left in the final, partially-filled batch. (Checking for a full batch here,
        // as the original did, can never succeed because the index is reset to zero after every flush.)
        if (updateIndexInBatch > 0)
        {
            wsusUpdate[] updatesFinalBatch = new wsusUpdate[updateIndexInBatch];
            Array.Copy(updates, updatesFinalBatch, updateIndexInBatch);
            db.insert_noconcurrency(updatesFinalBatch);
        }

        db.removeDuplicateWsusFiles();
    }
}
protected override void threadmain()
{
    using (updateDB db = new updateDB(Program.connstr, Program.dbName))
    {
        while (true)
        {
            if (toMarkComplete.IsEmpty && batches.IsEmpty && batches_wim.IsEmpty)
            {
                // We'll only exit the thread once all the current data has been flushed.
                if (exitTime)
                {
                    if (toMarkComplete.IsEmpty && batches.IsEmpty && batches_wim.IsEmpty)
                    {
                        break;
                    }
                }
                pollTimer.WaitOne(TimeSpan.FromSeconds(1));
            }

            if (toMarkComplete.TryDequeue(out var thisMarkComplete))
            {
                db.completeWsusUpdate(thisMarkComplete.parentID, thisMarkComplete.starttime,
                    thisMarkComplete.endtime, thisMarkComplete.sqltime, thisMarkComplete.succeeded);
            }

            if (batches.TryDequeue(out fileBatch batch))
            {
                if (batch.Parent.dbID.HasValue == false)
                {
                    batch.Parent.dbID = db.getWSUSFileByFileHash(batch.Parent.fileHashFromWSUS).dbID;
                }

                Debug.WriteLine($"Async inserting {batch.Files.Length} files");
                db.bulkInsertFiles(batch.Parent, batch.Files);

                if (batch.IsFinal)
                {
                    db.completeWsusUpdate(batch.Parent.dbID.Value, batch.Parent.startTime,
                        batch.Parent.endTime, batch.Parent.sqltime, true);
                }
            }

            if (batches_wim.TryDequeue(out fileBatch_wim batchWim))
            {
                if (batchWim.Parent.dbID.HasValue == false)
                {
                    batchWim.Parent.dbID = db.getWSUSFileByFileHash(batchWim.Parent.fileHashFromWSUS).dbID;
                }

                //db.insert_noconcurrency(new[] {batchWim.Files[0].parent}, "fileSource_wim");
                db.bulkInsertFiles(batchWim.Parent, batchWim.Files);

                if (batchWim.IsFinal)
                {
                    db.completeWsusUpdate(batchWim.Parent.dbID.Value, batchWim.Parent.startTime,
                        batchWim.Parent.endTime, batchWim.Parent.sqltime, true);
                }
            }
        }
    }
}
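// fileBatch and fileBatch_wim are never shown in this listing. From the dequeue handlers above they
// appear to be small carrier objects the processing threads hand to this SQL thread, and the sketch
// below is only an assumption about that contract. "extractedFile" is a hypothetical element type
// standing in for whatever per-file row bulkInsertFiles actually consumes; fileBatch_wim looks analogous.
public class extractedFile
{
    // Per-file metadata pulled out of the update payload (path, hash, size, ...).
}

public class fileBatch
{
    public wsusUpdate Parent;       // the WSUS payload these rows came from; dbID may still be unset here
    public extractedFile[] Files;   // the rows to bulk-insert in a single round trip
    public bool IsFinal;            // true on the last batch for an update, which also marks it complete
}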
public static void Main(string[] args)
{
    using (logDB = new updateDB(connstr, dbName))
    {
        ConcurrentQueue<downloadThread.downloadedUpdate> toProcessQueue = new ConcurrentQueue<downloadThread.downloadedUpdate>();
        ConcurrentQueue<string> toArchiveQueue = new ConcurrentQueue<string>();
        AutoResetEvent downloadRequestEvent = new AutoResetEvent(false);
        AutoResetEvent downloadCompleteEvent = new AutoResetEvent(false);

        using (threadLifetimeCollection threads = new threadLifetimeCollection())
        {
            asyncsqlthread sqlparams = new asyncsqlthread(logger);
            threads.add(sqlparams.start());

            updateReserverThread updateGetterThread = new updateReserverThread(logger);
            threads.add(updateGetterThread.start());

            archiveThread archiver = new archiveThread(logger, toArchiveQueue);
            threads.add(archiver.start());

            downloadThread[] downloadThreads = new downloadThread[downloadThreadCount];
            for (int i = 0; i < downloadThreads.Length; i++)
            {
                downloadThreads[i] = new downloadThread(logger, downloadRequestEvent, toProcessQueue, updateGetterThread);
                downloadThreads[i].threadname += $" ({i})";
                downloadThreads[i].outqueue = toProcessQueue;
                downloadThreads[i].maxFinishedDownloads = maxPendingDownloads;
                downloadThreads[i].downloadComplete = downloadCompleteEvent;
                threads.add(downloadThreads[i].start());
            }

            processingThread[] processingThreads = new processingThread[processingThreadCount];
            for (int i = 0; i < processingThreads.Length; i++)
            {
                processingThreads[i] = new processingThread(logger, downloadCompleteEvent, toProcessQueue, downloadRequestEvent, archiver, sqlparams);
                processingThreads[i].threadname += $" ({i})";
                threads.add(processingThreads[i].start());
            }

            while (true)
            {
                // Have we finished it all?
                if (updateGetterThread.allUpdatesExhausted())
                {
                    Console.WriteLine("all done");

                    // Once the update-getter signals completion, it has already passed its pending updates to the
                    // download threads, so it's safe to tear down the threads without fear of missing updates.
                    foreach (downloadThread dt in downloadThreads)
                    {
                        dt.stop();
                    }
                    foreach (processingThread pt in processingThreads)
                    {
                        pt.stop();
                    }
                    archiver.stop();
                    sqlparams.stop();
                    break;
                }

                // OK, show some stats.
                Thread.Sleep(TimeSpan.FromSeconds(1));
                //Console.Clear();
                Console.WriteLine(
                    $"Processing queue: {toProcessQueue.Count}, reserved updates {updateGetterThread.claimedUpdates.Count}, " +
                    $"async sql queue {sqlparams.batches.Count} / {sqlparams.batches_wim.Count} / {sqlparams.toMarkComplete.Count}");

                for (int i = 0; i < downloadThreads.Length; i++)
                {
                    var current = downloadThreads[i].currentlydownloading;
                    if (current != null)
                    {
                        Console.WriteLine($"Download thread {i}: downloading {current.downloadURI} ({current.sizeMB} MB)");
                    }
                    else
                    {
                        Console.WriteLine($"Download thread {i}: idle");
                    }
                }

                for (int i = 0; i < processingThreads.Length; i++)
                {
                    string cur = processingThreads[i].currentlyProcessing;
                    if (cur != null)
                    {
                        Console.WriteLine($"Processing thread {i}: processing {cur}");
                    }
                    else
                    {
                        Console.WriteLine($"Processing thread {i}: idle");
                    }
                }
            }
        }
    }
}
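// threadLifetimeCollection is also not shown in this listing. From its use in Main it appears to be a
// disposable container that owns every worker and cleans them up when the using block exits; the
// implementation below is a guess at that contract built on the hypothetical workerThreadBase sketch
// above (only the add() name comes from the source, everything else is an assumption).
using System;
using System.Collections.Generic;

public class threadLifetimeCollection : IDisposable
{
    private readonly List<workerThreadBase> _workers = new List<workerThreadBase>();

    public void add(workerThreadBase worker)
    {
        _workers.Add(worker);
    }

    public void Dispose()
    {
        // By the time Main reaches this point it has already stop()ped every worker explicitly,
        // so disposing here mostly just joins the remaining threads.
        foreach (workerThreadBase worker in _workers)
        {
            worker.Dispose();
        }
    }
}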