/// <summary>
/// Main loop of the stream thread: repeatedly polls the consumer, dispatches
/// records to tasks, processes them and commits, until cancellation is
/// requested via <c>token</c>. On exit, waits for disposal to finish and
/// disposes the consumer.
/// </summary>
public void Run()
{
    Exception exception = null;
    if (IsRunning)
    {
        while (!token.IsCancellationRequested)
        {
            #region Treat Exception
            /*
             * The exception variable is set in the try { ... } catch { ... } below.
             * TreatException(..) closes the consumer and taskManager, flushes all state
             * stores, and returns an enumeration (FAIL if the thread must stop, CONTINUE
             * if the developer wants to continue stream processing).
             * IF response == ExceptionHandlerResponse.FAIL, break the infinite thread loop.
             * ELSE IF response == ExceptionHandlerResponse.CONTINUE, reset the exception
             * variable, flush all states, revoke partitions, and re-subscribe the source
             * topics to resume processing.
             * Exception handling is implemented at the beginning of the infinite loop to be
             * more readable; the catch blocks just set the exception variable and log it.
             */
            if (exception != null)
            {
                ExceptionHandlerResponse response = TreatException(exception);
                if (response == ExceptionHandlerResponse.FAIL)
                {
                    break;
                }
                else if (response == ExceptionHandlerResponse.CONTINUE)
                {
                    exception = null;
                    HandleInnerException();
                }
            }
            #endregion
            try
            {
                // Skip polling/processing entirely while a rebalance is in flight.
                if (!manager.RebalanceInProgress)
                {
                    long now = DateTime.Now.GetMilliseconds();
                    var records = PollRequest(GetTimeout());
                    DateTime n = DateTime.Now;
                    var count = AddToTasks(records);
                    if (count > 0)
                    {
                        log.Debug($"Add {count} records in tasks in {DateTime.Now - n}");
                    }
                    n = DateTime.Now;
                    int processed = 0;
                    long timeSinceLastPoll = 0;
                    // Adaptive processing loop: numIterations grows while processing keeps
                    // yielding records, and is halved after a commit or when half of
                    // MaxPollIntervalMs has elapsed since the last poll (to avoid being
                    // kicked from the consumer group).
                    do
                    {
                        processed = 0;
                        for (int i = 0; i < numIterations; ++i)
                        {
                            processed = manager.Process(now);
                            if (processed == 0)
                            {
                                break;
                            }
                            // NOT AVAILABLE NOW, NEED PROCESSOR API
                            //if (processed > 0)
                            //    manager.MaybeCommitPerUserRequested();
                            //else
                            //    break;
                        }
                        timeSinceLastPoll = Math.Max(DateTime.Now.GetMilliseconds() - lastPollMs, 0);
                        if (MaybeCommit())
                        {
                            numIterations = numIterations > 1 ? numIterations / 2 : numIterations;
                        }
                        else if (timeSinceLastPoll > streamConfig.MaxPollIntervalMs.Value / 2)
                        {
                            numIterations = numIterations > 1 ? numIterations / 2 : numIterations;
                            break;
                        }
                        else if (processed > 0)
                        {
                            numIterations++;
                        }
                    } while (processed > 0);
                    if (State == ThreadState.RUNNING)
                    {
                        MaybeCommit();
                    }
                    if (State == ThreadState.PARTITIONS_ASSIGNED)
                    {
                        SetState(ThreadState.RUNNING);
                    }
                    // NOTE(review): records is re-enumerated here after AddToTasks already
                    // consumed it — assumes PollRequest returns a materialized collection;
                    // confirm, otherwise this is a double-enumeration of a lazy sequence.
                    if (records.Any())
                    {
                        log.Debug($"Processing {count} records in {DateTime.Now - n}");
                    }
                }
            }
            catch (TaskMigratedException e)
            {
                HandleTaskMigrated(e);
            }
            catch (KafkaException e)
            {
                // Defer handling to TreatException at the top of the next iteration.
                exception = e;
                log.Error($"{logPrefix}Encountered the following unexpected Kafka exception during processing, " +
                          $"this usually indicate Streams internal errors:", exception);
            }
            catch (Exception e)
            {
                exception = e;
                log.Error($"{logPrefix}Encountered the following error during processing:", exception);
            }
        }
        // Wait for the disposing sequence (driven elsewhere) to flip IsRunning off.
        while (IsRunning)
        {
            // Use for waiting end of disposing
            Thread.Sleep(100);
        }
        // Dispose consumer
        try
        {
            consumer.Dispose();
        }
        catch (Exception e)
        {
            log.Error($"{logPrefix}Failed to close consumer due to the following error:", e);
        }
    }
}
/// <summary>
/// Main loop of the stream thread: restores state stores if needed, polls the
/// consumer, dispatches records to tasks, processes and commits them, and
/// records throughput/latency sensors, until cancellation is requested via
/// <c>token</c>. Shutdown is always completed in the finally block.
/// </summary>
public void Run()
{
    Exception exception = null;
    // Accumulated per-run-once latencies, used for the ratio sensors below.
    long totalProcessLatency = 0, totalCommitLatency = 0;
    try
    {
        if (IsRunning)
        {
            while (!token.IsCancellationRequested)
            {
                #region Treat Exception
                /*
                 * The exception variable is set in the try { ... } catch { ... } below.
                 * TreatException(..) closes the consumer and taskManager, flushes all state
                 * stores, and returns an enumeration (FAIL if the thread must stop, CONTINUE
                 * if the developer wants to continue stream processing).
                 * IF response == ExceptionHandlerResponse.FAIL, break the infinite thread loop.
                 * ELSE IF response == ExceptionHandlerResponse.CONTINUE, reset the exception
                 * variable, flush all states, revoke partitions, and re-subscribe the source
                 * topics to resume processing.
                 * Exception handling is implemented at the beginning of the infinite loop to be
                 * more readable; the catch blocks just set the exception variable and log it.
                 */
                if (exception != null)
                {
                    ExceptionHandlerResponse response = TreatException(exception);
                    if (response == ExceptionHandlerResponse.FAIL)
                    {
                        break;
                    }
                    else if (response == ExceptionHandlerResponse.CONTINUE)
                    {
                        exception = null;
                        HandleInnerException();
                    }
                }
                #endregion
                try
                {
                    if (!manager.RebalanceInProgress)
                    {
                        RestorePhase();
                        long now = DateTime.Now.GetMilliseconds();
                        long startMs = now;
                        IEnumerable<ConsumeResult<byte[], byte[]>> records = new List<ConsumeResult<byte[], byte[]>>();
                        long pollLatency = ActionHelper.MeasureLatency(() => { records = PollRequest(GetTimeout()); });
                        pollSensor.Record(pollLatency, now);
                        DateTime n = DateTime.Now;
                        var count = AddToTasks(records);
                        if (count > 0)
                        {
                            log.LogDebug($"Add {count} records in tasks in {DateTime.Now - n}");
                            pollRecordsSensor.Record(count, now);
                        }
                        n = DateTime.Now;
                        int processed = 0, totalProcessed = 0;
                        long timeSinceLastPoll = 0;
                        // Adaptive processing loop: numIterations grows while processing keeps
                        // yielding records, and is halved after a successful commit or when half
                        // of MaxPollIntervalMs has elapsed since the last poll (to avoid being
                        // kicked from the consumer group).
                        do
                        {
                            processed = 0;
                            now = DateTime.Now.GetMilliseconds();
                            for (int i = 0; i < numIterations; ++i)
                            {
                                long processLatency = 0;
                                // A rebalance may start mid-loop; stop processing if so.
                                if (!manager.RebalanceInProgress)
                                {
                                    processLatency = ActionHelper.MeasureLatency(() => { processed = manager.Process(now); });
                                }
                                else
                                {
                                    processed = 0;
                                }
                                totalProcessed += processed;
                                totalProcessLatency += processLatency;
                                if (processed == 0)
                                {
                                    break;
                                }
                                processLatencySensor.Record((double)processLatency / processed, now);
                                processRateSensor.Record(processed, now);
                                // NOT AVAILABLE NOW, NEED PROCESSOR API
                                //if (processed > 0)
                                //    manager.MaybeCommitPerUserRequested();
                                //else
                                //    break;
                            }
                            timeSinceLastPoll = Math.Max(DateTime.Now.GetMilliseconds() - lastPollMs, 0);
                            int commited = 0;
                            long commitLatency = ActionHelper.MeasureLatency(() => commited = Commit());
                            totalCommitLatency += commitLatency;
                            if (commited > 0)
                            {
                                // Average commit latency per committed task.
                                commitSensor.Record(commitLatency / (double)commited, now);
                                numIterations = numIterations > 1 ? numIterations / 2 : numIterations;
                            }
                            else if (timeSinceLastPoll > streamConfig.MaxPollIntervalMs.Value / 2)
                            {
                                numIterations = numIterations > 1 ? numIterations / 2 : numIterations;
                                break;
                            }
                            else if (processed > 0)
                            {
                                numIterations++;
                            }
                        } while (processed > 0);
                        if (State == ThreadState.RUNNING)
                        {
                            totalCommitLatency += ActionHelper.MeasureLatency(() => Commit());
                        }
                        if (State == ThreadState.PARTITIONS_ASSIGNED)
                        {
                            SetState(ThreadState.RUNNING);
                        }
                        now = DateTime.Now.GetMilliseconds();
                        double runOnceLatency = (double)now - startMs;
                        if (totalProcessed > 0)
                        {
                            log.LogDebug($"Processing {totalProcessed} records in {DateTime.Now - n}");
                        }
                        // Ratio sensors: fraction of this run-once iteration spent in each phase.
                        processRecordsSensor.Record(totalProcessed, now);
                        processRatioSensor.Record(totalProcessLatency / runOnceLatency, now);
                        pollRatioSensor.Record(pollLatency / runOnceLatency, now);
                        commitRatioSensor.Record(totalCommitLatency / runOnceLatency, now);
                        totalProcessLatency = 0;
                        totalCommitLatency = 0;
                        // Periodically export metrics once MetricsIntervalMs has elapsed.
                        if (lastMetrics.Add(TimeSpan.FromMilliseconds(streamConfig.MetricsIntervalMs)) < DateTime.Now)
                        {
                            ExportMetrics(DateTime.Now.GetMilliseconds());
                            lastMetrics = DateTime.Now;
                        }
                    }
                    else
                    {
                        // Rebalance in progress: back off instead of busy-spinning.
                        Thread.Sleep((int)consumeTimeout.TotalMilliseconds);
                    }
                }
                catch (TaskMigratedException e)
                {
                    HandleTaskMigrated(e);
                }
                catch (KafkaException e)
                {
                    // Defer handling to TreatException at the top of the next iteration.
                    exception = e;
                    log.LogError(e, "{LogPrefix}Encountered the following unexpected Kafka exception during processing, this usually indicate Streams internal errors:", logPrefix);
                }
                catch (Exception e)
                {
                    exception = e;
                    log.LogError(exception, "{LogPrefix}Encountered the following error during processing:", logPrefix);
                }
            }
        }
    }
    finally
    {
        CompleteShutdown();
    }
}
/// <summary>
/// Main loop of the stream thread: repeatedly polls the consumer, dispatches
/// records to tasks, processes them and commits, until cancellation is
/// requested via <c>token</c>. On exit, waits for disposal to finish and
/// disposes the consumer.
/// </summary>
public void Run()
{
    Exception exception = null;
    if (IsRunning)
    {
        while (!token.IsCancellationRequested)
        {
            // A pending exception from the previous iteration is handled here:
            // FAIL stops the thread, CONTINUE resets the exception and resumes.
            if (exception != null)
            {
                ExceptionHandlerResponse response = TreatException(exception);
                if (response == ExceptionHandlerResponse.FAIL)
                {
                    break;
                }
                else if (response == ExceptionHandlerResponse.CONTINUE)
                {
                    exception = null;
                }
            }
            try
            {
                // Skip polling/processing entirely while a rebalance is in flight.
                if (!manager.RebalanceInProgress)
                {
                    long now = DateTime.Now.GetMilliseconds();
                    var records = PollRequest(GetTimeout());
                    DateTime n = DateTime.Now;
                    var count = AddToTasks(records);
                    if (count > 0)
                    {
                        log.Debug($"Add {count} records in tasks in {DateTime.Now - n}");
                    }
                    n = DateTime.Now;
                    int processed = 0;
                    long timeSinceLastPoll = 0;
                    // Adaptive processing loop: numIterations grows while processing keeps
                    // yielding records, and is halved after a commit or when half of
                    // MaxPollIntervalMs has elapsed since the last poll (to avoid being
                    // kicked from the consumer group).
                    do
                    {
                        processed = 0;
                        for (int i = 0; i < numIterations; ++i)
                        {
                            processed = manager.Process(now);
                            if (processed == 0)
                            {
                                break;
                            }
                            // NOT AVAILABLE NOW, NEED PROCESSOR API
                            //if (processed > 0)
                            //    manager.MaybeCommitPerUserRequested();
                            //else
                            //    break;
                        }
                        timeSinceLastPoll = Math.Max(DateTime.Now.GetMilliseconds() - lastPollMs, 0);
                        if (MaybeCommit())
                        {
                            numIterations = numIterations > 1 ? numIterations / 2 : numIterations;
                        }
                        else if (timeSinceLastPoll > streamConfig.MaxPollIntervalMs.Value / 2)
                        {
                            numIterations = numIterations > 1 ? numIterations / 2 : numIterations;
                            break;
                        }
                        else if (processed > 0)
                        {
                            numIterations++;
                        }
                    } while (processed > 0);
                    if (State == ThreadState.RUNNING)
                    {
                        MaybeCommit();
                    }
                    if (State == ThreadState.PARTITIONS_ASSIGNED)
                    {
                        SetState(ThreadState.RUNNING);
                    }
                    if (records.Any())
                    {
                        log.Debug($"Processing {count} records in {DateTime.Now - n}");
                    }
                }
            }
            catch (TaskMigratedException e)
            {
                HandleTaskMigrated(e);
            }
            catch (KafkaException e)
            {
                // FIX: assign before logging — the original logged the stale `exception`
                // variable (null, or the previous iteration's error) instead of `e`,
                // so the actual Kafka exception never reached the log.
                exception = e;
                log.Error($"{logPrefix}Encountered the following unexpected Kafka exception during processing, " +
                          $"this usually indicate Streams internal errors:", exception);
            }
            catch (Exception e)
            {
                // FIX: same log-before-assign defect as above — log the caught exception.
                exception = e;
                log.Error($"{logPrefix}Encountered the following error during processing:", exception);
            }
        }
        // Wait for the disposing sequence (driven elsewhere) to flip IsRunning off.
        while (IsRunning)
        {
            // Use for waiting end of disposing
            Thread.Sleep(100);
        }
        // Dispose consumer
        try
        {
            consumer.Dispose();
        }
        catch (Exception e)
        {
            log.Error($"{logPrefix}Failed to close consumer due to the following error:", e);
        }
    }
}