public void PerformWork(Work work, ISessionImplementor session)
{
    if (session.TransactionInProgress)
    {
        ITransaction transaction = ((ISession)session).Transaction;
        PostTransactionWorkQueueSynchronization txSync =
            (PostTransactionWorkQueueSynchronization)synchronizationPerTransaction[transaction];
        if (txSync == null || txSync.IsConsumed)
        {
            txSync = new PostTransactionWorkQueueSynchronization(queueingProcessor, synchronizationPerTransaction);
            transaction.RegisterSynchronization(txSync);
            lock (synchronizationPerTransaction.SyncRoot)
                synchronizationPerTransaction[transaction] = txSync;
        }
        txSync.Add(work);
    }
    else
    {
        WorkQueue queue = new WorkQueue(2); // one work can be split
        queueingProcessor.Add(work, queue);
        queueingProcessor.PrepareWorks(queue);
        queueingProcessor.PerformWorks(queue);
    }
}
private static void OnWorkQueueUserWork(object sender, WorkQueue<IResolve>.EnqueueEventArgs e)
{
    //lock (m_Object)
    //{
    e.Item.Execute();
    //}
}
public void ExceptionTest()
{
    var queue = new WorkQueue();
    AssertThrow<ArgumentNullException>(
        () => queue.Enqueue(null),
        () => queue.Enqueue((Func<string>)null));
}
//TODO: implement parallel batchWorkers (one per Directory)
public void PerformWorks(WorkQueue workQueue)
{
    WaitCallback processor = backendQueueProcessorFactory.GetProcessor(workQueue.GetSealedQueue());
    if (sync)
        processor(null);
    else
        ThreadPool.QueueUserWorkItem(processor);
}
public void TestSingleActionT()
{
    using (ManualResetEvent finished = new ManualResetEvent(false))
    using (WorkQueue worker = new WorkQueue(2))
    {
        worker.Enqueue(delegate() { finished.Set(); });
        Assert.IsTrue(finished.WaitOne(100, false));
    }
}
static void Insert(BPlusTree<Guid, TestInfo> tree, IDictionary<Guid, TestInfo> testdata, int threads, int count, TimeSpan wait)
{
    using (var work = new WorkQueue<IEnumerable<KeyValuePair<Guid, TestInfo>>>(tree.AddRange, threads))
    {
        foreach (var set in TestInfo.CreateSets(threads, count, testdata))
            work.Enqueue(set);

        work.Complete(false, wait == TimeSpan.MaxValue
            ? Timeout.Infinite
            : (int)Math.Min(int.MaxValue, wait.TotalMilliseconds));
    }
}
public bool CrawlSite()
{
    if (!NoDefaultPages && UpdateSearchTemplate && _config.Searching != null && !String.IsNullOrEmpty(_config.Searching.TemplateUri))
        AddUri(new Uri(_baseUri, _config.Searching.TemplateUri));

    _excluded.ReadRobotsFile(_baseUri, "HttpClone");
    _excluded.AddRange(_config.ExcludedPaths.SafeEnumeration());

    if (!NoDefaultPages)
        AddUrls(_baseUri, _config.IncludedPaths.SafeEnumeration());

    using (WorkQueue queue = new WorkQueue(System.Diagnostics.Debugger.IsAttached ? 1 : 10))
    {
        queue.OnError += (o, e) => Console.Error.WriteLine(e.GetException().Message);
        TaskCounter httpCalls = new TaskCounter(queue.Enqueue);
        TaskCounter parsing = new TaskCounter(queue.Enqueue);

        while (true)
        {
            if (httpCalls.Count >= 5)
            {
                httpCalls.WaitOne();
            }
            else
            {
                bool complete = httpCalls.Count == 0 && parsing.Count == 0;
                string path;
                if (_queue.TryDequeue(out path))
                {
                    string[] etag = new string[1];
                    if (ShouldFetch(path, etag))
                        httpCalls.Run(new FetchUrl(this, path, etag[0], parsing.Run).DoWork);
                }
                else
                {
                    if (complete)
                        break;
                    parsing.WaitOne();
                }
            }
        }
        queue.Complete(true, 1000);
    }

    // Post-crawling step(s)
    if (UpdateSearchTemplate && _config.Searching != null && !String.IsNullOrEmpty(_config.Searching.TemplateUri))
    {
        new SearchTemplateBuilder(_data, _baseUri)
            .UpdateTemplate();
    }
    return Modified;
}
[Test]
public void RunningPriority()
{
    runningPriority = (ThreadPriority)(-1);
    WorkQueue worklist = new WorkQueue();
    worklist.RunningWorkItem += new WorkItemEventHandler(worklist_RunningWorkItem);
    worklist.Add(new WorkQueueTest.SimpleWork(1, ThreadPriority.AboveNormal));
    worklist.WaitAll();
    AssertEquals("running priority", ThreadPriority.AboveNormal, runningPriority);
}
public void ItDoesWork()
{
    var complete = false;
    var queue = new WorkQueue();
    queue.Add(new WorkQueueItem(() => complete = true, 1));
    queue.DoUnitOfWork();
    Assert.That(complete, Is.True);
}
private void workQueue_UserWork(object sender, WorkQueue<WarningInfo>.EnqueueEventArgs e)
{
    try
    {
        DoWork(e);
    }
    catch (Exception ex)
    {
        Log.getInstance().Write(ex, MsgType.Error);
    }
}
public void TestMultipleActionsComplete()
{
    int[] count = new int[1];
    using (WorkQueue worker = new WorkQueue(Math.Max(2, Environment.ProcessorCount)))
    {
        for (int i = 0; i < 1000; i++)
            worker.Enqueue(delegate()
            {
                Interlocked.Increment(ref count[0]);
                Thread.Sleep(1);
            });
        worker.Complete(true, -1);
    }
    Assert.AreEqual(1000, count[0]);
}
/// <inheritdoc/>
internal override ProgramPointBase GetWork()
{
    while (hQueue._queue.Count == 0 && hQueue._parent != null)
    {
        hQueue = hQueue._parent;
    }
    var result = hQueue._queue.Dequeue();
    _containedPoints.Remove(result);
    return result;
}
public static void UserWorkpaly(object o, WorkQueue<string[]>.EnqueueEventArgs queue)
{
    try
    {
        // Console.WriteLine("Executing: " + queue.Item[0]);
        SendMessByIdentification(queue.Item[0], queue.Item[1]);
    }
    catch (Exception ex)
    {
        Console.WriteLine("Send queue exception: " + ex.Message);
    }
}
public override async Task<bool> Download(Archive a, AbsolutePath destination)
{
    try
    {
        using var queue = new WorkQueue();
        await using var folder = await TempFolder.Create();
        folder.Dir.Combine("tracks").CreateDirectory();

        var client = new YoutubeClient(Common.Http.ClientFactory.Client);
        var meta = await client.Videos.GetAsync(Key);
        var video = await client.Videos.Streams.GetManifestAsync(Key);
        var stream = video.Streams.OfType<AudioOnlyStreamInfo>()
            .Where(f => f.AudioCodec.StartsWith("mp4a"))
            .OrderByDescending(a => a.Bitrate)
            .ToArray()
            .First();

        var initialDownload = folder.Dir.Combine("initial_download");
        var trackFolder = folder.Dir.Combine("tracks");

        await using (var fs = await initialDownload.Create())
        {
            await client.Videos.Streams.CopyToAsync(stream, fs, new Progress($"Downloading {a.Name}"), CancellationToken.None);
        }

        await Tracks.PMap(queue, async track =>
        {
            Utils.Status($"Extracting track {track.Name}");
            await ExtractTrack(initialDownload, trackFolder, track);
        });

        await using var dest = await destination.Create();
        using var ar = new ZipArchive(dest, ZipArchiveMode.Create);
        foreach (var track in trackFolder.EnumerateFiles().OrderBy(e => e))
        {
            Utils.Status($"Adding {track.FileName} to archive");
            var entry = ar.CreateEntry(Path.Combine("Data", "Music", (string)track.RelativeTo(trackFolder)), CompressionLevel.NoCompression);
            entry.LastWriteTime = meta.UploadDate;
            await using var es = entry.Open();
            await using var ins = await track.OpenRead();
            await ins.CopyToAsync(es);
        }
        return true;
    }
    catch (VideoUnavailableException)
    {
        return false;
    }
}
public EndToEndTests(ITestOutputHelper helper) : base(helper)
{
    Queue = new WorkQueue();
    Consts.TestMode = true;
    utils = new TestUtils();
    utils.Game = Game.SkyrimSpecialEdition;
    _unsub = Utils.LogMessages.Subscribe(f => XunitContext.WriteLine($"{DateTime.Now} - {f}"));
    _downloadFolder.CreateDirectory();
}
/// <summary>
/// Insert an EditStudy request.
/// </summary>
/// <param name="context"></param>
/// <param name="studyStorageKey"></param>
/// <param name="serverPartitionKey"></param>
/// <param name="type"></param>
/// <param name="updateItems"></param>
/// <param name="reason"></param>
/// <param name="user"></param>
/// <param name="editType"></param>
/// <returns></returns>
private static WorkQueue InsertEditStudyRequest(IUpdateContext context, ServerEntityKey studyStorageKey, ServerEntityKey serverPartitionKey,
    WorkQueueTypeEnum type, List<UpdateItem> updateItems, string reason, string user, EditType editType)
{
    var broker = context.GetBroker<IInsertWorkQueue>();
    InsertWorkQueueParameters criteria = new EditStudyWorkQueueParameters(studyStorageKey, serverPartitionKey, type, updateItems, reason, user, editType);
    WorkQueue editEntry = broker.FindOne(criteria);
    if (editEntry == null)
    {
        throw new ApplicationException(string.Format("Unable to insert an Edit request of type {0} for study for user {1}", type.Description, user));
    }
    return editEntry;
}
public void ShuttingDownReturnsTrueAfterShutdownIsCalled()
{
    using IWorkQueue<string> queue = new WorkQueue<string>();

    var shuttingDownBefore = queue.ShuttingDown();
    queue.ShutDown();
    var shuttingDownAfter = queue.ShuttingDown();

    Assert.False(shuttingDownBefore);
    Assert.True(shuttingDownAfter);
}
/// <summary>
/// Indicates whether or not this WQI will result in patient/study information change.
/// This usually indicates if the operation can be safely deleted from the system without any major consequences.
/// </summary>
public static bool WillResultInDataChanged(this WorkQueue item)
{
    var harmlessWQITypes = new[]
    {
        WorkQueueTypeEnum.AutoRoute,
        WorkQueueTypeEnum.CompressStudy, // not changing patient/study info
        WorkQueueTypeEnum.PurgeStudy,    // nearline or online
        WorkQueueTypeEnum.MigrateStudy,
        WorkQueueTypeEnum.WebMoveStudy
    };
    return !harmlessWQITypes.Contains(item.WorkQueueTypeEnum);
}
public void Add(Work work, WorkQueue workQueue)
{
    //don't check for builder, it's done in prepareWork
    //FIXME WorkType.COLLECTION does not play well with batchSize
    workQueue.Add(work);
    if (batchSize > 0 && workQueue.Count >= batchSize)
    {
        WorkQueue subQueue = workQueue.SplitQueue();
        PrepareWorks(subQueue);
        PerformWorks(subQueue);
    }
}
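// A hedged sketch (not the original source) of what the SplitQueue call above could look
// like, assuming WorkQueue stores its pending works in a List<Work> field named "queue":
// the batched works move into a fresh queue that is prepared and performed immediately,
// while the original queue starts collecting the next batch.
public WorkQueue SplitQueue()
{
    WorkQueue subQueue = new WorkQueue(queue.Count);
    subQueue.queue.AddRange(queue); // hand the accumulated works to the new queue
    queue.Clear();                  // the original queue begins a fresh batch
    return subQueue;
}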
public void MainTest()
{
    AsyncPump.Run(
        async () =>
        {
            var counter = 0;
            var queue = new WorkQueue();
            queue.Enqueue(() => ++counter).Ignore();
            await queue.Enqueue(() => ++counter);
            Assert.AreEqual(2, counter);
        });
}
public void Enqueue_ProcessesNotifications_AndRestarts()
{
    // Arrange
    var initialWorkItem = new TestBatchableWorkItem();
    var workItemToCauseRestart = new TestBatchableWorkItem();
    TestAccessor.BlockBackgroundWorkStart = new ManualResetEventSlim(initialState: false);
    TestAccessor.NotifyBackgroundWorkStarting = new ManualResetEventSlim(initialState: false);
    TestAccessor.NotifyBackgroundCapturedWorkload = new ManualResetEventSlim(initialState: false);
    TestAccessor.BlockBackgroundWorkCompleting = new ManualResetEventSlim(initialState: false);
    TestAccessor.NotifyBackgroundWorkCompleted = new ManualResetEventSlim(initialState: false);

    // Act & Assert
    WorkQueue.Enqueue("key", initialWorkItem);
    Assert.True(TestAccessor.IsScheduledOrRunning, "Queue should be scheduled during Enqueue");
    Assert.NotEmpty(TestAccessor.Work);

    // Allow the background work to start.
    TestAccessor.BlockBackgroundWorkStart.Set();
    TestAccessor.NotifyBackgroundWorkStarting.Wait(TimeSpan.FromSeconds(3));
    Assert.True(TestAccessor.IsScheduledOrRunning, "Worker should be processing now");

    TestAccessor.NotifyBackgroundCapturedWorkload.Wait(TimeSpan.FromSeconds(3));
    Assert.Empty(TestAccessor.Work);

    WorkQueue.Enqueue("key", workItemToCauseRestart);
    Assert.NotEmpty(TestAccessor.Work);

    // Now we should see the worker restart when it finishes.
    // Allow work to complete, which should restart the timer.
    TestAccessor.BlockBackgroundWorkCompleting.Set();
    TestAccessor.NotifyBackgroundWorkCompleted.Wait(TimeSpan.FromSeconds(3));
    TestAccessor.NotifyBackgroundWorkCompleted.Reset();

    // It should start running again right away.
    Assert.True(TestAccessor.IsScheduledOrRunning, "Queue should be scheduled during Enqueue");
    Assert.NotEmpty(TestAccessor.Work);

    // Allow the background work to proceed.
    TestAccessor.BlockBackgroundWorkStart.Set();
    TestAccessor.BlockBackgroundWorkCompleting.Set();

    TestAccessor.NotifyBackgroundWorkCompleted.Wait(TimeSpan.FromSeconds(3));
    Assert.False(TestAccessor.IsScheduledOrRunning, "Queue should not have restarted");
    Assert.Empty(TestAccessor.Work);
    Assert.True(initialWorkItem.Processed);
    Assert.True(workItemToCauseRestart.Processed);
    Assert.Empty(ErrorReporter.ReportedExceptions);
}
/// <summary>
/// Constructs a render coordinator. A render manager and synchronous draw methods are automatically provided for you.
/// </summary>
/// <param name="deviceService"></param>
public RenderCoordinator(IGraphicsDeviceService deviceService, Thread mainThread, ThreadGroup threadGroup)
{
    ThreadGroup = threadGroup;
    Manager = new RenderManager(deviceService.GraphicsDevice, mainThread, ThreadGroup);

    _SyncBeginDraw = DefaultBeginDraw;
    _SyncEndDraw = DefaultEndDraw;

    DrawQueue = ThreadGroup.GetQueueForType<DrawTask>();

    CoreInitialize();
}
public override async Task<JobResult> Execute(DBContext db, SqlService sql, AppSettings settings)
{
    using (var queue = new WorkQueue())
    {
        var files = Directory.EnumerateFiles(settings.ArchiveDir)
            .Where(f => !f.EndsWith(Consts.HashFileExtension))
            .ToList();

        var total_count = files.Count;
        int completed = 0;

        await files.PMap(queue, async file =>
        {
            try
            {
                Interlocked.Increment(ref completed);

                if (await sql.HaveIndexdFile(await file.FileHashCachedAsync()))
                {
                    Utils.Log($"({completed}/{total_count}) Skipping {Path.GetFileName(file)}, it's already indexed");
                    return;
                }

                var sub_folder = Guid.NewGuid().ToString();
                string folder = Path.Combine(settings.DownloadDir, sub_folder);

                Utils.Log($"({completed}/{total_count}) Copying {file}");
                Directory.CreateDirectory(folder);

                Utils.Log($"({completed}/{total_count}) Copying {file}");
                File.Copy(file, Path.Combine(folder, Path.GetFileName(file)));

                Utils.Log($"({completed}/{total_count}) Analyzing {file}");
                var vfs = new Context(queue, true);
                await vfs.AddRoot(folder);

                var root = vfs.Index.ByRootPath.First().Value;

                Utils.Log($"({completed}/{total_count}) Ingesting {root.ThisAndAllChildren.Count()} files");
                await sql.MergeVirtualFile(root);

                Utils.Log($"({completed}/{total_count}) Cleaning up {file}");
                Utils.DeleteDirectory(folder);
            }
            catch (Exception ex)
            {
                Utils.Log(ex.ToString());
            }
        });
    }
    return JobResult.Success();
}
public void TestMultipleActionTComplete()
{
    int[] counters = new int[10];
    using (WorkQueue worker = new WorkQueue(Math.Max(2, Environment.ProcessorCount)))
    {
        for (int i = 0; i < 1000; i++)
            worker.Enqueue(delegate(int offset) { Interlocked.Increment(ref counters[offset]); }, i % 10);
        worker.Complete(true, -1);
    }
    foreach (int counter in counters)
        Assert.AreEqual(100, counter);
}
public MapViewerForm()
{
    InitializeComponent();

    workQueue = new WorkQueue(this);
    workQueue.Working += workQueue_Working;

    InitMap();
    InitCommands();

    AttributeQueryHandler.End += AttributeQueryHandler_End;
    AttributeQueryHandler.Begin += AttributeQueryHandler_Begin;

    layersView1.LayersContextMenu = layerContextMenu;
    layersView1.ContextMenuStrip = layersContextMenu;
    stylesControl1.Styles = _loadedStyles;
}
public void TestThreadAborts()
{
    int[] counter = new int[1];
    using (WorkQueue<int[]> worker = new WorkQueue<int[]>(ProcessOne, Math.Max(2, Environment.ProcessorCount)))
    {
        for (int i = 0; i < 1000; i++)
        {
            worker.Enqueue(counter);
        }
        worker.Complete(false, 10);
    }
    Assert.AreNotEqual(0, counter[0]);
}
public void TestExceptionHandled()
{
    Exception error = null;
    using (WorkQueue worker = new WorkQueue(1))
    {
        worker.OnError += delegate(object o, ErrorEventArgs e) { error = e.GetException(); };
        worker.Enqueue(delegate() { throw new ArgumentException("Handled?"); });
        worker.Complete(true, -1);
    }
    Assert.IsTrue(error is ArgumentException);
    Assert.AreEqual("Handled?", error.Message);
}
/// <summary>
/// Gets the recommended maximum number of threads that should be used for the current machine.
/// This will either run a heavy processing job to do the measurement in the specified folder, or refer to caches.
///
/// If the folder does not exist, it will be created, and not cleaned up afterwards.
/// </summary>
/// <param name="folder"></param>
/// <returns>Recommended maximum number of threads to use</returns>
public static async Task<int> RecommendQueueSize(AbsolutePath folder)
{
    using var queue = new WorkQueue();

    Utils.Log($"Benchmarking {folder}");
    var raw_speed = await Utils.TestDiskSpeed(queue, folder);
    Utils.Log($"{raw_speed.ToFileSizeString()}/sec for {folder}");
    int speed = (int)(raw_speed / 1024 / 1024);

    // Less than 100MB/sec, stick with two threads.
    return speed < 100 ? 2 : Math.Min(Environment.ProcessorCount, speed / 100 * 2);
}
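// A hedged usage sketch (not from the original source) showing how the recommendation
// above could size a WorkQueue for parallel file processing. RunWithRecommendedQueueSize
// and DoWorkAsync are hypothetical names introduced only for illustration.
public static async Task RunWithRecommendedQueueSize(AbsolutePath workFolder, IEnumerable<AbsolutePath> files)
{
    // Measure (or read the cached measurement of) disk throughput for this folder.
    int size = await RecommendQueueSize(workFolder);

    // Size the queue accordingly and process the files in parallel on its worker threads.
    using var queue = new WorkQueue(size);
    await files.PMap(queue, async file => await DoWorkAsync(file));
}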
public void ItGivesUpOnWork()
{
    var queue = new WorkQueue();
    queue.Add(new WorkQueueItem(() => { throw new Exception(); }, 2));
    queue.DoUnitOfWork();
    Assert.That(queue.HasWork, Is.False);
}
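// A minimal sketch, and not the original implementation, of a WorkQueueItem/WorkQueue pair
// consistent with the two tests above ("ItDoesWork" and "ItGivesUpOnWork"): an item carries
// a maximum attempt count, and DoUnitOfWork retries a failing item until that budget is
// exhausted, then drops it so HasWork becomes false.
using System;
using System.Collections.Generic;

public class WorkQueueItem
{
    public Action Action { get; }
    public int MaxAttempts { get; }

    public WorkQueueItem(Action action, int maxAttempts)
    {
        Action = action;
        MaxAttempts = maxAttempts;
    }
}

public class WorkQueue
{
    private readonly Queue<WorkQueueItem> _items = new Queue<WorkQueueItem>();

    public bool HasWork => _items.Count > 0;

    public void Add(WorkQueueItem item) => _items.Enqueue(item);

    // Runs the next item, retrying up to its attempt budget; the item is removed
    // whether it eventually succeeds or gives up.
    public void DoUnitOfWork()
    {
        if (_items.Count == 0)
            return;

        var item = _items.Dequeue();
        for (int attempt = 1; attempt <= item.MaxAttempts; attempt++)
        {
            try { item.Action(); return; }
            catch { /* swallow and retry until the budget runs out */ }
        }
    }
}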
public Page(PageID pageID, PagedWorldSection parent)
    : base()
{
    this.mID = pageID;
    this.mParent = parent;
    WorkQueue wq = Root.Instance.WorkQueue;
    this.workQueueChannel = wq.GetChannel("Axiom/Page");
    wq.AddRequestHandler(this.workQueueChannel, this);
    wq.AddResponseHandler(this.workQueueChannel, this);
    Touch();
}
/// <summary>
/// Returns a value indicating whether the specified <see cref="WorkQueue"/> can be manually rescheduled.
/// </summary>
/// <param name="item"></param>
/// <returns></returns>
static public bool CanReschedule(WorkQueue item)
{
    if (item == null)
    {
        return false;
    }

    return
        // it's pending
        item.WorkQueueStatusEnum == WorkQueueStatusEnum.Pending
        // it's idle
        || item.WorkQueueStatusEnum == WorkQueueStatusEnum.Idle;
}
public void DynamicNumThreads_Increased()
{
    var subj = new BehaviorSubject<int>(Small);
    using (var queue = new WorkQueue(subj))
    {
        Assert.Equal(Small, queue.DesiredNumWorkers);
        Assert.Equal(Small, queue._tasks.Count);

        subj.OnNext(Large);

        Assert.Equal(Large, queue.DesiredNumWorkers);
        Assert.Equal(Large, queue._tasks.Count);
    }
}
public PointCloudPlayerSource(IMotionFrameSerializer serializer)
{
    _serializer = serializer;

    _filesToLoad = new List<string>();
    _bufferedFrames = new List<MotionFrame>();

    _loadingQueue = new WorkQueue<string>();
    _loadingQueue.Callback = LoadFrameWorker;

    Unload();
    CreatePlaybackThread();

    _soundPlayer = new SoundPlayer();
}
public SqliteLogger(string categoryName, IDML dml, IConfiguration configuration = null)
    : base(categoryName, configuration)
{
    this.categoryName = categoryName;
    this.repository = dml;
    workQueue = new WorkQueue<LogInfo>(1000, (s, e) =>
    {
        lock (dml) // Lock the logService that is actually being called here.
        {
            dml.Add(e.Item.ToArray());
            dml.SaveChanges();
        }
    });
}
public async Task Typical_Func()
{
    using var queue = new WorkQueue(TypicalThreadCount);
    var input = Enumerable.Range(0, TypicalThreadCount * 2).ToArray();
    var workTask = Utils.PMap(Enumerable.Range(0, TypicalThreadCount * 2), queue, (item) =>
    {
        Assert.True(WorkQueue.WorkerThread);
        Thread.Sleep(TypicalDelayMS);
        return item.ToString();
    });
    var results = await workTask.TimeoutButContinue(TimeSpan.FromSeconds(TimeoutSeconds), () => throw new TimeoutException());
    Assert.True(input.Select(i => i.ToString()).SequenceEqual(results));
}
public void ProcessAllWorkItems(ref ProcessCounters counter)
{
    InternalCounter = counter;
    ThreadsUsed = new List<int>();
    while (WorkQueue.Any())
    {
        lock (WorkQueue)
        {
            var workItem = WorkQueue.FirstOrDefault();
            ProcessWorkItem(workItem);
        }
    }
}
[Test]
public void Concurrency()
{
    running = 0;
    completed = 0;
    worklist = new WorkQueue();
    worklist.RunningWorkItem += new WorkItemEventHandler(concurrent_StartedWorkItem);
    worklist.CompletedWorkItem += new WorkItemEventHandler(concurrent_CompletedWorkItem);
    for (int i = 1; i <= 50; ++i)
    {
        worklist.Add(new AutomatedWork(i));
    }
    worklist.WaitAll();
    AssertEquals("completed", 50, completed);
}
private static async Task<ExtractedFiles> ExtractAllWithBSA(WorkQueue queue, AbsolutePath source)
{
    try
    {
        await using var arch = BSADispatch.OpenRead(source);
        var files = arch.Files.ToDictionary(f => f.Path, f => (IExtractedFile)new ExtractedBSAFile(f));
        return new ExtractedFiles(files, arch);
    }
    catch (Exception ex)
    {
        Utils.ErrorThrow(ex, $"While Extracting {source}");
        throw new Exception();
    }
}
public async Task Setup()
{
    _rdm = new Random((int)DateTime.Now.ToFileTimeUtc());
    _itms = Enumerable.Range(0, Threads * 10).ToArray();
    _queue = new WorkQueue(Threads);
    _file = new TempFile();
    await using var f = await _file.Path.Create();
    var data = new byte[1024 * 1024 * 10]; // 10MB
    _rdm.NextBytes(data);
    await f.WriteAsync(data);
}
public void PrepareWorks(WorkQueue workQueue)
{
    List<Work> queue = workQueue.GetQueue();
    int initialSize = queue.Count;
    List<LuceneWork> luceneQueue = new List<LuceneWork>(initialSize); //TODO load factor for containedIn

    /*
     * Collection work types are processed second, so if the owner entity has already been processed
     * for whatever reason the work will be ignored.
     * However, if the owner entity has not been processed, an "UPDATE" work is executed.
     *
     * Processing collection works last is mandatory to avoid reindexing an object that is to be deleted.
     */
    ProcessWorkByLayer(queue, initialSize, luceneQueue, Layer.FIRST);
    ProcessWorkByLayer(queue, initialSize, luceneQueue, Layer.SECOND);
    workQueue.SetSealedQueue(luceneQueue);
}
/* PUBLIC METHODS: */
public Session(Configuration config_, AnswerCallback answerCallback_)
{
    m_config = config_;
    m_serializer = new Serializer(m_config.schemadir, m_config.schemaext);
    m_connection = new Connection(m_config);
    m_lasterror = null;
    m_state = State.Init;
    m_stateLock = new object();
    m_requestqueue = new WorkQueue<Request>('R');
    m_pendingqueue = new WorkQueue<PendingRequest>('A');
    m_answerCallback = answerCallback_;
    m_request_thread = null;
    m_answer_thread = null;
}
public ArtofKinectRecorderWindow()
{
    InitializeComponent();

    frameQueue = new WorkQueue<MotionFrame>();
    frameQueue.Callback = ProcessFrame;
    frameQueue.MaxQueueLength = 5;

    InitSensor();
    InitSerializer();
    InitSoundCapture();
    CreateViews();

    lastFPSCheck = DateTime.Now;

    Application.Current.Exit += (s, e) =>
    {
        this.CurrentFrameViewer = null;
        pointCloudFrameViewer.pointCloudImage.Dispose();
        //pointCloudFrameViewer2.Deactivate();
        //pointCloudFrameViewer2.pointCloudImage.Dispose();

        if (playerSource != null)
        {
            playerSource.Dispose();
            playerSource = null;
        }
        if (soundRecording != null)
        {
            soundRecording.Stop();
            soundRecording.Dispose();
            soundRecording = null;
        }
        if (sensorDevice != null)
        {
            sensorDevice.Dispose();
            sensorDevice = null;
        }
        if (frameQueue != null)
        {
            frameQueue.Dispose();
            frameQueue = null;
        }
    };
}
public override int run(string[] args)
{
    if (args.Length > 0)
    {
        System.Console.Error.WriteLine(appName() + ": too many arguments");
        return 1;
    }

    callbackOnInterrupt();

    Ice.ObjectAdapter adapter = communicator().createObjectAdapter("Hello");
    _workQueue = new WorkQueue();
    adapter.add(new HelloI(_workQueue), communicator().stringToIdentity("hello"));

    _workQueue.Start();
    adapter.activate();

    communicator().waitForShutdown();
    _workQueue.Join();
    return 0;
}
public void TestEnqueueAfterDispose()
{
    int counter = 0;
    WorkQueue worker = new WorkQueue(1);
    worker.Complete(false, 100); // shuts the queue down
    worker.Enqueue(delegate() { counter++; }); // expected to throw: the queue no longer accepts work
    Assert.Fail("Enqueue after Dispose()", counter); // only reached if Enqueue did not throw
}
/// <summary>
/// Handle a close command
/// </summary>
private void DoClose()
{
    Tracer.Trace(TraceChannel.MSS, "Media closed");
    if (m_heuristics != null)
    {
        m_heuristics.Shutdown();
    }

    if (m_manifestInfo != null)
    {
        m_manifestInfo.Shutdown();
    }

    m_workQueueThread = null;
    m_isWorkQueueThreadStarted = false;
    m_workQueue = null;
    m_playbackInfo = null;
}
public ShellViewModel()
{
    #region Queued Uploads

    Action<BackgroundWorker, DoWorkEventArgs> doWork = (worker, args) =>
    {
        // get work item from argument
        var item = (WorkItem<string, UploadedImage>)args.Argument;
        Debug.WriteLine(item.Worker);
        item.Status = WorkStatus.Processing;

        // init params needed
        string filename = item.Args;
        string contentType = "image/" + Path.GetExtension(filename).Substring(1).ToLower();
        byte[] image = File.ReadAllBytes(filename);

        // init HTTPWebRequest stuff
        var req = (HttpWebRequest)WebRequest.Create("http://api.imgur.com/2/upload");
        var bound = "-------------" + DateTime.Now.Ticks.ToString();
        var tmplField = "--" + bound + "\r\nContent-Disposition: form-data; name='{0}'\r\n\r\n{1}\r\n";
        var tmplFile = "--" + bound + "\r\nContent-Disposition: form-data; name='{0}'; filename='{1}'\r\nContent-Type={2}\r\n\r\n";
        req.Method = "POST";
        req.ContentType = "multipart/form-data; boundary=" + bound;
        req.AllowWriteStreamBuffering = false;

        #region write upload data to memory stream

        // variables
        UTF8Encoding encoder = new UTF8Encoding();
        MemoryStream memStream = new MemoryStream();
        BinaryWriter memBW = new BinaryWriter(memStream, encoder);

        // write fields
        memBW.Write(encoder.GetBytes(string.Format(tmplField, "key", "c06f4d0cdf6f2cc652635a08be34973d")));
        memBW.Write(encoder.GetBytes(string.Format(tmplField, "type", "file")));
        memBW.Write(encoder.GetBytes(string.Format(tmplFile, "image", filename, contentType)));
        memBW.Flush();

        // write image
        memBW.Write(image);
        memBW.Flush();

        // write closing
        memBW.Write(encoder.GetBytes("\r\n--" + bound + "--"));
        memBW.Flush();

        memStream.Position = 0;
        req.ContentLength = memStream.Length;

        #endregion write upload data to memory stream

        try
        {
            using (var reqStream = req.GetRequestStream())
            {
                BinaryWriter reqWriter = new BinaryWriter(reqStream);
                byte[] buffer = new byte[640]; // 640-byte buffer
                int read = 0, bytesRead = 0;
                while ((read = memStream.Read(buffer, 0, buffer.Length)) > 0)
                {
                    if (worker.CancellationPending)
                    {
                        item.Status = WorkStatus.Cancelled;
                        args.Cancel = true;
                        return;
                    }
                    reqWriter.Write(buffer, 0, read);
                    bytesRead += read;
                    item.Progress = (double)bytesRead / memStream.Length * 100;
                }
                reqWriter.Flush();

                // close stream writers
                memBW.Close();
            }

            var res = req.GetResponse();
            using (var resStream = res.GetResponseStream())
            {
                XDocument doc = XDocument.Load(resStream);
                var uploadedImage = (from imgurLink in doc.Descendants("imgur_page")
                                     from directLink in doc.Descendants("original")
                                     select new UploadedImage
                                     {
                                         Link = imgurLink.Value,
                                         DirectLink = directLink.Value
                                     }).FirstOrDefault();
                item.Result = uploadedImage;
                item.Status = WorkStatus.Finished;
            }
        }
        catch (WebException e)
        {
            if (e.Status != WebExceptionStatus.RequestCanceled)
            {
                item.Status = WorkStatus.Error;
                item.Result = new UploadedImage { Error = e };
            }
        }
        catch (Exception e)
        {
            item.Status = WorkStatus.Error;
            item.Result = new UploadedImage { Error = e };
        }

        item.Status = (item.Status == WorkStatus.Processing) ? WorkStatus.Finished : item.Status;
    };

    UploadQueue = new WorkQueue<string, UploadedImage>(1, doWork);

    #endregion
}
private void SetupFrameQueue()
{
    _frameQueue = new WorkQueue<MotionFrame>();
    _frameQueue.Callback = ProcessFrame;
    _frameQueue.MaxQueueLength = 1;
}
static WorkQueueManager()
{
    m_WorkQueue = new WorkQueue<IResolve>();
    m_WorkQueue.DoWorkEvent += OnWorkQueueUserWork;
}
private void ShutdownFrameQueue()
{
    if (_frameQueue != null)
    {
        _frameQueue.Dispose();
        _frameQueue = null;
    }
}
private void Reinitialize(ControlFlowGraph cfg)
{
    this.cfg = cfg;
    this.pendingStates = new IDataFlowState[cfg.BlockCount];
    this.doneStates = new IDataFlowState[cfg.BlockCount];

    // initialize work queue and disabled queue
    joinWorkItems = new WorkQueue(cfg.PreOrderCompare);
}
/// <summary>
/// Initializes a new instance of the AdaptiveStreamingSource class
/// </summary>
/// <param name="mediaElement">The media element that we are sending samples to. We use it internally to keep track of playback statistics</param>
/// <param name="url">The url of the manifest for the stream we are serving</param>
public AdaptiveStreamingSource(MediaElement mediaElement, Uri url)
{
    // Make sure our Url is not null
    if (null == url)
    {
        throw new ArgumentNullException("url", Errors.NullUrlOnMSSError);
    }

    // Also check the mediaElement parameter
    if (null == mediaElement)
    {
        throw new ArgumentNullException("mediaElement", Errors.NullMediaElementOnMSSError);
    }

    // Remember the Url to the manifest we are streaming
    m_manifestSourceUrl = url;

    // Create our default manifest parser
    ManifestParser = new ManifestParserImpl();

    // Create our default chunk parser factory
    ChunkParserFactory = new ChunkParserFactoryImpl();

    // Create our default url generator
    UrlGenerator = new UrlGeneratorImpl();

    // Create a new queue for processing commands. All work is done on a background thread,
    // which will shuttle events back to the UI thread in case something needs to be displayed.
    m_workQueue = new WorkQueue();

    // Create the thread that we are going to run background commands on
    m_workQueueThread = new Thread(WorkerThread);

    // Make sure we remember the Dispatcher class for the UI thread
    UIDispatcher.Load();

    // Playback info is a wrapper around the media element
    m_playbackInfo = new PlaybackInfo(mediaElement);

    // Hook our heuristics events
    m_heuristics.ChunkReplacementSuggested += HeuristicsChunkReplacementSuggested;
}
/// <summary>
/// Demonstrates a simple producer-consumer scenario using a shared queue
/// which internally uses Monitor.Pulse and Monitor.Wait to ensure that consumers
/// blocked for input are woken up when input arrives from the producer.
/// </summary>
private static void PulseAndWait()
{
    WorkQueue<int> queue = new WorkQueue<int>();

    Thread producer = new Thread(() =>
    {
        while (true)
        {
            queue.Enqueue(42);
            Thread.Sleep(10);
        }
    });

    Thread consumer = new Thread(() =>
    {
        while (true)
        {
            queue.Dequeue();
            Console.Write(".");
        }
    });

    producer.Start();
    consumer.Start();

    Console.ReadLine();

    producer.Abort();
    consumer.Abort(); // Don't do this in a real application!
}
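// A minimal sketch, assuming the WorkQueue<T> used by the demo above is a simple
// Monitor-based blocking queue (the original implementation is not shown here).
// Dequeue blocks via Monitor.Wait until a producer calls Enqueue, which pulses
// one waiting consumer awake.
using System.Collections.Generic;
using System.Threading;

public class WorkQueue<T>
{
    private readonly Queue<T> _items = new Queue<T>();
    private readonly object _lock = new object();

    public void Enqueue(T item)
    {
        lock (_lock)
        {
            _items.Enqueue(item);
            Monitor.Pulse(_lock); // wake one consumer blocked in Dequeue
        }
    }

    public T Dequeue()
    {
        lock (_lock)
        {
            while (_items.Count == 0)
            {
                Monitor.Wait(_lock); // releases the lock and blocks until pulsed
            }
            return _items.Dequeue();
        }
    }
}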
TrafficMatrixEntry[,] TrafficMatrix; // Traffic matrix ordered by (loc,rem) addr pairs.

#endregion Fields

#region Constructors

/// <summary>
/// The primary class of the Rate Controller implementation. Provides the
/// runtime environment for a Policy Module implementing a control algorithm.
/// </summary>
/// <param name="client">Reference to client policy module.</param>
/// <param name="tenantId">Unique TenantId.</param>
/// <param name="agentPort">Port on which network agents are listening.</param>
public OktofsRateController(IPolicyModule client, uint tenantId, int agentPort)
{
    //
    // Initialize state for Oktofs rate controller.
    //
    iPolicyModule = client;
    AgentPort = agentPort;
    TenantId = tenantId;
    AgentNameToConn = new Dictionary<string, Connection>(StringComparer.OrdinalIgnoreCase);
    IoFlowNameToConn = new Dictionary<string, Connection>(StringComparer.OrdinalIgnoreCase);
    LockPendingReplies = new object[(int)MessageTypes.EndOfList];
    for (int i = 0; i < LockPendingReplies.Length; i++)
        LockPendingReplies[i] = new object();
    CountPendingReplies = new int[(int)MessageTypes.EndOfList];
    DictPendingReplies = new Dictionary<uint, MessageTypes>();
    DictVmNameToSid = new Dictionary<string, string>(StringComparer.OrdinalIgnoreCase);
    DictDevice = new Dictionary<string, Device>();
    const int TIMER_QUEUE_LENGTH = 16;
    softTimer = new SoftTimer(TIMER_QUEUE_LENGTH, this);
    qpc = new Qpc();

    //
    // Initialize a Oktopus network rate controller as an embedded object.
    //
    netRateController = new RateController(this, tenantId, Parameters.NETAGENT_TCP_PORT_NUMBER);

    //
    // Callbacks into Policy Module are performed sequentially on a single work queue thread.
    // Work items on this queue are generated by Timeouts and by Alerts from network agents.
    //
    const int WORK_QUEUE_LENGTH = 128;
    const int WORK_QUEUE_MAX_READERS = 1;
    RcWorkQueue = new WorkQueue<RcWorkItem>(this.RunRcWorkQueueItem, WORK_QUEUE_MAX_READERS, WORK_QUEUE_LENGTH);
}
public HelloI(WorkQueue workQueue) { _workQueue = workQueue; }