/// <summary>
/// Queues a Resource Manager threshold record for every SRM start event logged by the given worker.
/// </summary>
/// <param name="workerId">Identifier of the worker whose SRM start events are read.</param>
/// <param name="collection">Mongo collection containing the SRM events.</param>
private void PersistThresholds(int workerId, IMongoCollection<BsonDocument> collection)
{
    IList<BsonDocument> startEvents = MongoQueryHelper.GetSrmStartEventsForWorker(workerId, collection);
    foreach (BsonDocument startEvent in startEvents)
    {
        // Resolve the threshold for this start event, stamp it with the current
        // logset hash and hand it to the persister queue.
        ResourceManagerThreshold threshold = MongoQueryHelper.GetThreshold(startEvent, collection);
        threshold.LogsetHash = logsetHash;
        resourceManagerPersister.Enqueue(threshold);
    }
}
/// <summary>
/// Processes netstat entries for every worker present in the logset and persists the
/// resulting active-connection records.
/// </summary>
/// <param name="pluginRequest">Request context supplying the logset hash.</param>
/// <returns>The plugin response; GeneratedNoData is set when nothing was persisted.</returns>
public override IPluginResponse Execute(IPluginRequest pluginRequest)
{
    IPluginResponse response = CreatePluginResponse();
    logsetHash = pluginRequest.LogsetHash;

    InitializeDatabaseTables();

    // FIX: wrap the persister in a using block so it is disposed even when an exception
    // is thrown before Shutdown (IPersister<T> is disposable — it is used with `using`
    // elsewhere in this codebase); previously it leaked on any failure mid-loop.
    using (IPersister<NetstatActiveConnection> activeConnectionsPersister = GetConcurrentBatchPersister<NetstatActiveConnection>(pluginRequest))
    {
        // Process netstat entries for all available workers.
        var netstatCollection = MongoDatabase.GetCollection<BsonDocument>(ParserConstants.NetstatCollectionName);
        foreach (string workerId in MongoQueryHelper.GetDistinctWorkers(netstatCollection))
        {
            Log.InfoFormat("Retrieving netstat information for worker '{0}'..", workerId);
            IEnumerable<NetstatActiveConnection> activeConnectionsForWorker = GetActiveConnectionEntriesForWorker(workerId, netstatCollection);
            activeConnectionsPersister.Enqueue(activeConnectionsForWorker);
        }

        // Shutdown persister and wait for data to flush.
        activeConnectionsPersister.Shutdown();
    }

    Log.Info("Finished processing netstat data!");

    // Check if we persisted any data.
    if (!PersistedData())
    {
        Log.Info("Failed to persist any netstat data!");
        response.GeneratedNoData = true;
    }

    return response;
}
/// <summary>
/// Streams documents matching <paramref name="query"/> out of Mongo, transforms each one
/// into a <typeparamref name="TModel"/> and persists it.
/// </summary>
/// <param name="documents">Source collection to read from.</param>
/// <param name="query">Query definition selecting the documents to process.</param>
/// <param name="transform">Projection from a source document to the persisted model.</param>
/// <param name="cancellationToken">Token used to cancel cursor iteration.</param>
public void Process(IMongoCollection<TDocument> documents, QueryDefinition<TDocument> query, Func<TDocument, TModel> transform, CancellationToken cancellationToken = default(CancellationToken))
{
    Log.InfoFormat("Processing {0} events..", typeof(TModel).Name);

    // Make sure the destination table exists before any rows are queued.
    using (var connection = outputConnectionFactory.OpenDbConnection())
    {
        connection.CreateOrMigrateTable<TModel>();
    }

    using (IPersister<TModel> persister = persisterFactory.BuildPersister())
    using (new PersisterStatusWriter<TModel>(persister, Log))
    {
        IAsyncCursor<TDocument> cursor = query.BuildQuery(documents).ToCursor();
        while (cursor.MoveNext(cancellationToken))
        {
            // Transform and queue every document in the current batch.
            foreach (TDocument document in cursor.Current)
            {
                persister.Enqueue(transform(document));
            }
        }

        persister.Shutdown();
    }

    Log.InfoFormat("Finished processing {0} events!", typeof(TModel).Name);
}
/// <summary>
/// Persists the given netstat entries through a dedicated concurrent batch persister.
/// </summary>
/// <param name="netstatEntries">Entries to enqueue; the persister flushes them on shutdown.</param>
private void PersistNetstatEntries(IEnumerable<NetstatEntry> netstatEntries)
{
    // FIX: dispose the persister deterministically (it is used with `using` elsewhere in
    // this codebase); previously it was never disposed, leaking on the Enqueue failure path.
    using (IPersister<NetstatEntry> persister = GetConcurrentBatchPersister<NetstatEntry>())
    {
        persister.Enqueue(netstatEntries);
        persister.Shutdown();
    }
}
/// <summary>
/// Queues a backgrounder job plus any associated errors and job-type-specific detail records.
/// </summary>
/// <param name="job">Job to persist; its detail payload is routed by concrete type.</param>
public void Enqueue(BackgrounderJob job)
{
    _jobPersister.Enqueue(job);

    // Persist any errors captured for this job.
    if (job.Errors != null && job.Errors.Count > 0)
    {
        _errorPersister.Enqueue(job.Errors);
    }

    // Route the detail payload to the persister matching its concrete type; a null or
    // unrecognized detail is simply skipped (same as the original as-cast chain).
    if (job.BackgrounderJobDetail is BackgrounderExtractJobDetail extractDetail)
    {
        _extractJobDetailsPersister.Enqueue(extractDetail);
    }
    else if (job.BackgrounderJobDetail is BackgrounderSubscriptionJobDetail subscriptionDetail)
    {
        _subscriptionJobDetailsPersister.Enqueue(subscriptionDetail);
    }
}
/// <summary>
/// Streams hyper events of type <typeparamref name="T"/> from Mongo, stamps each with the
/// request's logset hash and persists it.
/// </summary>
/// <param name="pluginRequest">Request supplying the logset hash and persister configuration.</param>
/// <param name="query">Builds the cursor over the hyper events collection.</param>
private void ProcessEvents<T>(IPluginRequest pluginRequest, Func<IMongoCollection<T>, IAsyncCursor<T>> query) where T : BaseHyperEvent, new()
{
    Log.InfoFormat("Processing {0} events..", typeof(T).Name);

    var collection = MongoDatabase.GetCollection<T>(ParserConstants.HyperCollectionName);
    GetOutputDatabaseConnection().CreateOrMigrateTable<T>();

    IPersister<T> persister = GetConcurrentBatchPersister<T>(pluginRequest);
    using (GetPersisterStatusWriter(persister))
    {
        IAsyncCursor<T> cursor = query(collection);
        while (cursor.MoveNext())
        {
            // Stamp each event with the logset hash before queueing it for persistence.
            foreach (T document in cursor.Current)
            {
                document.LogsetHash = pluginRequest.LogsetHash;
                persister.Enqueue(document);
            }
        }

        persister.Shutdown();
    }

    Log.InfoFormat("Finished processing {0} events!", typeof(T).Name);
}
/// <summary>
/// Persists all query-execute and statement-execute events found in the given session's log lines.
/// </summary>
/// <param name="sessionId">Dataengine session being processed.</param>
/// <param name="sessionLines">Raw log documents belonging to the session.</param>
private void PersistSessionInformation(int sessionId, IList<BsonDocument> sessionLines)
{
    IList<DataengineEvent> queryExecuteEvents = GetAllQueryExecuteEvents(sessionId, sessionLines);
    IDictionary<int, StatementPrepareEvent> statementPrepareEvents = GetAllStatementPrepareEvents(sessionLines);
    IList<DataengineEvent> statementExecuteEvents = GetAllStatementExecuteEvents(sessionId, sessionLines, statementPrepareEvents);

    // Queue both groups: query-execute events first, then statement-execute events.
    foreach (DataengineEvent executeEvent in queryExecuteEvents)
    {
        dataenginePersister.Enqueue(executeEvent);
    }

    foreach (DataengineEvent executeEvent in statementExecuteEvents)
    {
        dataenginePersister.Enqueue(executeEvent);
    }
}
// Pairs up start/finish job events for a single backgrounder process and persists one
// BackgrounderJob per pair — or a timed-out job when the expected finish event is missing.
private void ProcessJobsForBackgrounderId(string workerId, int backgrounderId, string jobType)
{
    // Events are consumed in the order returned by the query helper.
    Queue<BsonDocument> recordQueue = new Queue<BsonDocument>(MongoQueryHelper.GetJobEventsForProcessByType(workerId, backgrounderId, jobType, backgrounderJavaCollection));

    // Need a candidate start event plus at least one more event to peek at; a single
    // trailing event can never form a pair and is dropped when the loop exits.
    while (recordQueue.Count >= 2)
    {
        // This logic is a bit messy but unfortunately our backgrounder logs are messy.
        // We pop the next element off the record queue, make sure its a valid start event and then peek at the next element to make sure its the corresponding
        // Completion message, if it is then we go forward with processing, if it isn't then we drop whatever we previously popped and move on.
        // This prevents one failed completion message from throwing off the ordering of the whole queue.
        BsonDocument startEvent = recordQueue.Dequeue();
        if (IsValidJobStartEvent(startEvent, jobType))
        {
            if (IsValidJobFinishEvent(recordQueue.Peek(), jobType))
            {
                // Happy path: consume the finish event and persist the completed job.
                BsonDocument endEvent = recordQueue.Dequeue();
                try
                {
                    var job = new BackgrounderJob(startEvent, endEvent, logsetHash);
                    AppendDetailsToJob(job, endEvent);
                    backgrounderPersister.Enqueue(job);
                }
                catch (Exception ex)
                {
                    // A single malformed pair should not abort the rest of the queue.
                    Log.ErrorFormat("Failed to extract job info from events '{0}' & '{1}': {2}", startEvent, endEvent, ex.Message);
                }
            }
            // If the next event in the list isnt a finish event then we can assume the previous job timed out.
            else
            {
                try
                {
                    // NOTE(review): the peeked event is only used to enrich the timed-out
                    // job's details — it is NOT dequeued, so it remains in the queue as the
                    // next candidate start event.
                    var job = new BackgrounderJob(startEvent, true, logsetHash);
                    AppendDetailsToJob(job, recordQueue.Peek());
                    backgrounderPersister.Enqueue(job);
                }
                catch (Exception ex)
                {
                    Log.ErrorFormat("Failed to extract job info from timed-out event '{0}': {1}", startEvent, ex.Message);
                }
            }
        }
    }
}
/// <summary>
/// Builds and persists one server session for every unique session id found in the collection.
/// </summary>
/// <param name="collection">Mongo collection containing the raw session documents.</param>
/// <param name="persister">Destination persister for the processed sessions.</param>
/// <param name="workerHostnameMap">Maps worker ids to hostnames for session enrichment.</param>
protected void ProcessCollection(IMongoCollection<BsonDocument> collection, IPersister<VizqlServerSession> persister, IDictionary<int, string> workerHostnameMap)
{
    foreach (var sessionId in Queries.GetAllUniqueServerSessionIds(collection))
    {
        // Each session is assembled independently and queued as soon as it is ready.
        persister.Enqueue(ProcessSession(sessionId, collection, workerHostnameMap));
    }
}
/// <summary>
/// Converts a Mongo document into a ZookeeperFsyncLatency record and queues it for persistence.
/// Failures are logged and swallowed so one bad document does not halt processing.
/// </summary>
protected void ProcessZookeeperFsyncLatency(BsonDocument document)
{
    try
    {
        var fsyncLatency = new ZookeeperFsyncLatency(document, logsetHash);
        zookeeperFsyncPersister.Enqueue(fsyncLatency);
    }
    catch (Exception ex)
    {
        // Best-effort: a malformed document is logged and skipped.
        Log.Error(ex);
    }
}
/// <summary>
/// Populates the VizPortalRequest object and queues it for insertion.
/// Failures are appended to the plugin response and logged rather than thrown.
/// </summary>
protected void ProcessVizportalRequest(BsonDocument mongoDocument)
{
    try
    {
        vizportalPersister.Enqueue(new VizportalEvent(mongoDocument, logsetHash));
    }
    catch (Exception ex)
    {
        string errorMessage = $"Encountered an exception on {mongoDocument.GetValue("req")}: {ex}";
        pluginResponse.AppendError(errorMessage);
        Log.Error(errorMessage);
    }
}
/// <summary>
/// Populates the HttpdRequest object and queues it for insertion.
/// Failures are appended to the plugin response and logged rather than thrown.
/// </summary>
protected void ProcessApacheRequest(BsonDocument document)
{
    try
    {
        apachePersister.Enqueue(new HttpdRequest(document, logsetHash));
    }
    catch (Exception ex)
    {
        string errorMessage = $"Encountered an exception on {document.GetValue("request_id")}: {ex}";
        pluginResponse.AppendError(errorMessage);
        Log.Error(errorMessage);
    }
}
/// <summary>
/// Populates the FilestoreEvent object and queues it for insertion.
/// Failures are appended to the plugin response and logged rather than thrown.
/// </summary>
protected void ProcessFilestoreRequest(BsonDocument mongoDocument)
{
    try
    {
        filestorePersister.Enqueue(new FilestoreEvent(mongoDocument, logsetHash));
    }
    catch (Exception ex)
    {
        string errorMessage = $"Encountered an exception on {mongoDocument.GetValue("_id")}: {ex}";
        pluginResponse.AppendError(errorMessage);
        Log.Error(errorMessage);
    }
}
/// <summary>
/// Populates a PostgresEvent from a log line document and queues it for insertion.
/// Failures are appended to the plugin response and logged rather than thrown.
/// </summary>
protected void ProcessPostgresLine(BsonDocument document)
{
    try
    {
        postgresPersister.Enqueue(new PostgresEvent(document, logsetHash));
    }
    catch (Exception ex)
    {
        string errorMessage = $"Encountered an exception processing line: {ex}";
        pluginResponse.AppendError(errorMessage);
        Log.Error(errorMessage);
    }
}
/// <summary>
/// Populates a ClusterControllerDiskIoSample and queues it for insertion.
/// Failures are appended to the plugin response and logged rather than thrown.
/// </summary>
protected void ProcessClusterControllerDiskIoSample(BsonDocument document)
{
    try
    {
        clusterControllerDiskIoSamplePersister.Enqueue(new ClusterControllerDiskIoSample(document, logsetHash));
    }
    catch (Exception ex)
    {
        string errorMessage = $"Encountered an exception on {document.GetValue("_id")}: {ex}";
        pluginResponse.AppendError(errorMessage);
        Log.Error(errorMessage);
    }
}
/// <summary>
/// Populates a SearchserverEvent and queues it for insertion.
/// Failures are appended to the plugin response and logged rather than thrown.
/// </summary>
protected void ProcessSearchserverEvent(BsonDocument document)
{
    try
    {
        searchserverPersister.Enqueue(new SearchserverEvent(document, logsetHash));
    }
    catch (Exception ex)
    {
        string errorMessage = $"Encountered an exception on {document.GetValue("_id")}: {ex}";
        pluginResponse.AppendError(errorMessage);
        Log.Error(errorMessage);
    }
}
/// <summary>
/// Appends all events belonging to the session and queues it for persistence as a desktop session.
/// Failures are logged and recorded on the plugin response rather than thrown.
/// </summary>
private void ProcessSession(VizqlSession session, IMongoCollection<BsonDocument> collection)
{
    try
    {
        session = MongoQueryHelper.AppendAllSessionEvents(session, collection);
        // NOTE(review): the as-cast yields null for a non-desktop session — presumably
        // callers only ever pass desktop sessions here; confirm against call sites.
        persistenceHelper.Enqueue(session as VizqlDesktopSession);
    }
    catch (Exception ex)
    {
        string errorMessage = $"Failed to process session {session.VizqlSessionId} in {collection.CollectionNamespace.CollectionName}: {ex.Message}";
        Log.Error(errorMessage);
        pluginResponse.AppendError(errorMessage);
    }
}
/// <summary>
/// Queues a server session and its error events for persistence.
/// Failures are logged and swallowed so one bad session does not halt processing.
/// </summary>
public void Enqueue(VizqlServerSession session)
{
    try
    {
        _sessionPersister.Enqueue(session);
        _errorPersister.Enqueue(session.ErrorEvents);
        ItemsPersisted++;
        // FIX: pass the session id as a format argument instead of an interpolated string.
        // Handing an interpolated string to a *Format method re-parses it as a format
        // template, so an id containing '{' or '}' would throw FormatException. This also
        // matches the sibling persister's logging style.
        Log.DebugFormat("Persisted session {0}", session.VizqlSessionId);
    }
    catch (Exception ex)
    {
        Log.ErrorFormat("Failed to persist session '{0}': {1}", session.VizqlSessionId, ex.Message);
    }
}
/// <summary>
/// Appends session events to every desktop session in the collection and persists each one.
/// A failure on one session is logged and does not stop the remaining sessions.
/// </summary>
private void ProcessSessions(IMongoCollection<BsonDocument> collection, IPersister<VizqlDesktopSession> persister)
{
    foreach (var session in Queries.GetAllDesktopSessions(collection))
    {
        try
        {
            persister.Enqueue(Queries.AppendAllSessionEvents(session, collection) as VizqlDesktopSession);
        }
        catch (Exception ex)
        {
            Log.ErrorFormat("Failed to process session {0} in {1}: {2}", session.VizqlSessionId, collection.CollectionNamespace.CollectionName, ex.Message);
        }
    }
}
/// <summary>
/// Queues a single model for persistence, reporting progress through a status writer.
/// </summary>
/// <param name="model">Model instance to persist.</param>
/// <returns>The model wrapped in Some on success; None when persistence failed.</returns>
public Option<TModel> Persist(TModel model)
{
    try
    {
        using (new PersisterStatusWriter<TModel>(persister, Log))
        {
            persister.Enqueue(model);
        }

        return model.Some();
    }
    catch (Exception ex)
    {
        // Persistence is best-effort: report the failure through the Option result.
        Log.ErrorFormat("Failed to persist {0}: {1}", typeof(TModel).Name, ex.Message);
        return Option.None<TModel>();
    }
}
/// <summary>
/// Queues a server session together with all of its event collections for persistence.
/// Failures are logged and swallowed so one bad session does not halt processing.
/// </summary>
public void Enqueue(VizqlServerSession session)
{
    try
    {
        sessionPersister.Enqueue(session);
        errorPersister.Enqueue(session.ErrorEvents);
        performanceEventPersister.Enqueue(session.PerformanceEvents);

        // Trim query text to the configured maximum before persisting end-query events.
        var truncatedQueries = session.EndQueryEvents.Select(query => query.WithTruncatedQueryText(maxQueryLength));
        endQueryPersister.Enqueue(truncatedQueries);

        qpQueryEndPersister.Enqueue(session.QpQueryEndEvents);
        ItemsPersisted++;
        Log.DebugFormat("Persisted session {0}", session.VizqlSessionId);
    }
    catch (Exception ex)
    {
        Log.ErrorFormat("Failed to persist session '{0}': {1}", session.VizqlSessionId, ex.Message);
    }
}
/// <summary>
/// Streams matching documents out of Mongo, transforms each into a model and persists it,
/// reporting progress via a status writer (optionally sized by an estimation query).
/// </summary>
/// <param name="documents">Source collection to read from.</param>
/// <param name="query">Query definition selecting the documents to process.</param>
/// <param name="transform">Projection from a source document to the persisted model.</param>
/// <param name="estimationQuery">Optional filter used to estimate total work for status output.</param>
/// <param name="cancellationToken">Token used to cancel cursor iteration.</param>
public void Process(IMongoCollection<TDocument> documents, QueryDefinition<TDocument> query, Func<TDocument, TModel> transform, FilterDefinition<TDocument> estimationQuery = null, CancellationToken cancellationToken = default(CancellationToken))
{
    Log.InfoFormat("Processing {0} events..", typeof(TModel).Name);

    using (BuildPersisterStatusWriter(documents, estimationQuery))
    {
        IAsyncCursor<TDocument> cursor = query.BuildQuery(documents).ToCursor();
        while (cursor.MoveNext(cancellationToken))
        {
            // Transform and queue every document in the current batch.
            foreach (TDocument document in cursor.Current)
            {
                persister.Enqueue(transform(document));
            }
        }
    }

    Log.InfoFormat("Finished processing {0} events!", typeof(TModel).Name);
}
/// <summary>
/// Ensures the destination table exists, then persists the model through a freshly built
/// persister and flushes it.
/// </summary>
/// <param name="model">Model instance to persist.</param>
/// <returns>The model wrapped in Some on success; None when anything failed.</returns>
public Option<TModel> Persist(TModel model)
{
    try
    {
        // Create or migrate the destination table before writing.
        using (var connection = outputConnectionFactory.OpenDbConnection())
        {
            connection.CreateOrMigrateTable<TModel>();
        }

        using (IPersister<TModel> persister = persisterFactory.BuildPersister())
        using (new PersisterStatusWriter<TModel>(persister, Log))
        {
            persister.Enqueue(model);
            persister.Shutdown();
        }

        return model.Some();
    }
    catch (Exception ex)
    {
        // Persistence is best-effort: report the failure through the Option result.
        Log.ErrorFormat("Failed to persist {0}: {1}", typeof(TModel).Name, ex.Message);
        return Option.None<TModel>();
    }
}