/// <summary>
/// Starts a task that is responsible for deserializing all path events as fed to it through the specified queue
/// </summary>
/// <param name="pathEventsToDeserialize">The queue where path events (as file positions) to deserialize will be fed</param>
/// <returns>A task that will finish when the queue that it consumes from is both marked completed and empty</returns>
private Task CreatePathEventConsumerTask(BlockingCollection<long> pathEventsToDeserialize)
{
    return Task.Run(() =>
    {
        // Each consumer opens its own read-only view of the log so seeks don't interfere
        using (FileStream stream = File.Open(m_logFilename, FileMode.Open, FileAccess.Read, FileShare.Read))
        {
            var reader = new ParallelEventReader(this, stream);

            // GetConsumingEnumerable blocks until an item is available and ends the
            // enumeration once the queue is marked complete-for-adding and drained,
            // so the task exits exactly when the producer is done with this queue.
            foreach (long eventPosition in pathEventsToDeserialize.GetConsumingEnumerable())
            {
                // Seek to the start of the AddPath event and re-read its header
                stream.Seek(eventPosition, SeekOrigin.Begin);
                EventHeader header = EventHeader.ReadFrom(reader);

                // Only AddPath events are ever routed to this queue
                Contract.Assert((BinaryLogger.LogSupportEventId)header.EventId == BinaryLogger.LogSupportEventId.AddPath);

                // Where the stream must land once the payload has been fully consumed
                long expectedEndPosition = stream.Position + header.EventPayloadSize;

                // Handle the event
                ParallelReadPathEvent(reader);

                // Verify that exactly the payload was read out of the file
                Contract.Assert(stream.Position == expectedEndPosition);
            }
        }
    });
}
/// <summary>
/// Reads an event
/// </summary>
/// <returns>the result of reading the next event</returns>
public EventReadResult ReadEvent()
{
    try
    {
        // Loop so that internal bookkeeping events (handled below via 'continue')
        // are consumed transparently; only a non-internal event returns Success.
        while (true)
        {
            // Re-position the stream at the start of the next event if a previous
            // read (or handler) left it elsewhere
            if (m_nextReadPosition != null && LogStream.Position != m_nextReadPosition.Value)
            {
                LogStream.Seek(m_nextReadPosition.Value, SeekOrigin.Begin);
            }

            var position = LogStream.Position;
            if (position == LogLength)
            {
                return (EventReadResult.EndOfStream);
            }

            // Read the header
            EventHeader header = EventHeader.ReadFrom(m_logStreamReader);
            m_currentEventPayloadSize = header.EventPayloadSize;
            // 'position' now marks the start of the payload (just past the header)
            position = LogStream.Position;

            // There are less bytes than specified by the payload
            // The file is corrupted or truncated
            if (position + header.EventPayloadSize > LogLength)
            {
                return (EventReadResult.UnexpectedEndOfStream);
            }

            m_nextReadPosition = position + header.EventPayloadSize;

            // Handle the internal events (ids below LogSupportEventId.Max are
            // reserved for the log's own bookkeeping)
            if (header.EventId < (uint)BinaryLogger.LogSupportEventId.Max)
            {
                switch ((BinaryLogger.LogSupportEventId)header.EventId)
                {
                    case BinaryLogger.LogSupportEventId.StartTime:
                        ReadStartTimeEvent(m_logStreamReader);
                        break;
                    case BinaryLogger.LogSupportEventId.AddPath:
                        ReadPathEvent(m_logStreamReader);
                        break;
                    case BinaryLogger.LogSupportEventId.AddStringId:
                        ReadStringIdEvent(m_logStreamReader);
                        break;
                }

                // Internal readers must consume the payload exactly
                Contract.Assert(LogStream.Position == (position + header.EventPayloadSize));
                continue;
            }
            else
            {
                // Shift the raw id into handler-table space (external event ids are
                // written offset by LogSupportEventId.Max)
                header.EventId -= (uint)BinaryLogger.LogSupportEventId.Max;
            }

            EventHandler handler;
            if ((m_handlers.Length > header.EventId) && ((handler = m_handlers[header.EventId]) != null))
            {
                handler(header.EventId, header.WorkerId, header.Timestamp, m_logStreamReader);
                Contract.Assert(LogStream.Position <= (position + header.EventPayloadSize), "Event handler read beyond the event payload");
            }

            // Seek to the start of the next event as we may not have read the entire payload (i.e. EventStatsAnalyzer)
            if (LogStream.Position != m_nextReadPosition.Value)
            {
                LogStream.Seek(m_nextReadPosition.Value, SeekOrigin.Begin);
            }

            return (EventReadResult.Success);
        }
    }
    catch (EndOfStreamException)
    {
        return (EventReadResult.UnexpectedEndOfStream);
    }
}
/// <summary>
/// Reads all events in the log file and processes them in parallel
/// </summary>
/// <returns>If all events are read successfully, <see cref="BinaryLogReader.EventReadResult.EndOfStream"/> will be returned</returns>
public EventReadResult ReadAllEvents()
{
    // Number of consumer tasks dedicated to AddPath events vs. all other events
    int numPathEventConsumers = 2;
    int numNotPathEventConsumers = 5;

    // shouldProcessEvent[rawId] tells whether an event needs to be dispatched at all:
    // internal events (below LogSupportEventId.Max) are always processed, external
    // events only when a handler is registered for them.
    bool[] shouldProcessEvent = new bool[((int)BinaryLogger.LogSupportEventId.Max) + m_handlers.Length];
    for (int i = 0; i < shouldProcessEvent.Length; ++i)
    {
        if (i < (int)BinaryLogger.LogSupportEventId.Max)
        {
            // Always process internal events
            shouldProcessEvent[i] = true;
        }
        else
        {
            // Only process external events that have a handler defined
            shouldProcessEvent[i] = m_handlers[i - (int)BinaryLogger.LogSupportEventId.Max] != null;
        }
    }

    BlockingCollection<long>[] addPathEventsToDeserialize = new BlockingCollection<long>[numPathEventConsumers];
    BlockingCollection<long>[] notAddPathEventsToDeserialize = new BlockingCollection<long>[numNotPathEventConsumers];
    try
    {
        // Initialize the queues
        for (int i = 0; i < addPathEventsToDeserialize.Length; ++i)
        {
            addPathEventsToDeserialize[i] = new BlockingCollection<long>();
        }

        for (int i = 0; i < notAddPathEventsToDeserialize.Length; ++i)
        {
            notAddPathEventsToDeserialize[i] = new BlockingCollection<long>();
        }

        // Start the event consumers; each task owns exactly one queue
        Task[] pathEventConsumers = new Task[numPathEventConsumers];
        for (int i = 0; i < pathEventConsumers.Length; ++i)
        {
            pathEventConsumers[i] = CreatePathEventConsumerTask(addPathEventsToDeserialize[i]);
        }

        Task[] notPathEventConsumers = new Task[numNotPathEventConsumers];
        for (int i = 0; i < notPathEventConsumers.Length; ++i)
        {
            notPathEventConsumers[i] = CreateNonPathEventConsumerTask(notAddPathEventsToDeserialize[i]);
        }

        // Event positions are added to the queues in a round robin manner
        // These variables indicate which queue to put the next event position in
        int pathEventQueueToAddTo = 0;
        int notPathEventQueueToAddTo = 0;

        EventReadResult result;
        try
        {
            while (true)
            {
                if (m_nextReadPosition != null)
                {
                    LogStream.Seek(m_nextReadPosition.Value, SeekOrigin.Begin);
                }

                var position = LogStream.Position;
                if (position == LogLength)
                {
                    result = EventReadResult.EndOfStream;
                    break;
                }

                // Read the header. Note: consumers are handed the header-start
                // position ('position' before this read) and re-read the header
                // themselves from their own stream.
                EventHeader header = EventHeader.ReadFrom(m_logStreamReader);

                if (shouldProcessEvent[header.EventId])
                {
                    // Add event to the appropriate queue, round-robin per category
                    if ((BinaryLogger.LogSupportEventId)header.EventId == BinaryLogger.LogSupportEventId.AddPath)
                    {
                        addPathEventsToDeserialize[pathEventQueueToAddTo].Add(position);
                        pathEventQueueToAddTo = (pathEventQueueToAddTo + 1) % numPathEventConsumers;
                    }
                    else
                    {
                        notAddPathEventsToDeserialize[notPathEventQueueToAddTo].Add(position);
                        notPathEventQueueToAddTo = (notPathEventQueueToAddTo + 1) % numNotPathEventConsumers;
                    }
                }

                m_currentEventPayloadSize = header.EventPayloadSize;
                position = LogStream.Position;

                // There are less bytes than specified by the payload
                // The file is corrupted or truncated
                // BUGFIX: this previously compared against header.EventId instead of
                // header.EventPayloadSize, failing to match the truncation check in ReadEvent.
                if (position + header.EventPayloadSize > LogLength)
                {
                    result = EventReadResult.UnexpectedEndOfStream;
                    break;
                }

                m_nextReadPosition = position + header.EventPayloadSize;
            }
        }
        catch (EndOfStreamException)
        {
            result = EventReadResult.UnexpectedEndOfStream;
        }

        // We are done adding events to the queues so mark all the queues as
        // complete for adding; the consumers drain and exit.
        foreach (var q in addPathEventsToDeserialize)
        {
            q.CompleteAdding();
        }

        foreach (var q in notAddPathEventsToDeserialize)
        {
            q.CompleteAdding();
        }

        // Wait for all events to be processed
        Task.WaitAll(pathEventConsumers);
        Task.WaitAll(notPathEventConsumers);

        return result;
    }
    finally
    {
        // Dispose the queues; entries may still be null if queue initialization
        // itself threw, hence the null-conditional calls.
        foreach (var q in addPathEventsToDeserialize)
        {
            q?.Dispose();
        }

        foreach (var q in notAddPathEventsToDeserialize)
        {
            q?.Dispose();
        }
    }
}
/// <summary>
/// Starts a task that is responsible for deserializing all non-path events as fed to it through the specified queue
/// </summary>
/// <param name="nonPathEventsToDeserialize">The queue where non-path events (as file positions) to deserialize will be fed</param>
/// <returns>A task that will finish when the queue that it consumes from is both marked completed and empty</returns>
private Task CreateNonPathEventConsumerTask(BlockingCollection<long> nonPathEventsToDeserialize)
{
    return Task.Run(() =>
    {
        // Each consumer opens its own read-only view of the log so seeks don't interfere
        using (FileStream stream = File.Open(m_logFilename, FileMode.Open, FileAccess.Read, FileShare.Read))
        {
            var reader = new ParallelEventReader(this, stream);

            // GetConsumingEnumerable blocks until an item is available and ends the
            // enumeration once the queue is marked complete-for-adding and drained,
            // so the task exits exactly when the producer is done with this queue.
            foreach (long eventPosition in nonPathEventsToDeserialize.GetConsumingEnumerable())
            {
                // Seek to the start of the event and re-read its header
                stream.Seek(eventPosition, SeekOrigin.Begin);
                EventHeader header = EventHeader.ReadFrom(reader);

                // AddPath events belong to the dedicated path-event queues, never here
                Contract.Assert((BinaryLogger.LogSupportEventId)header.EventId != BinaryLogger.LogSupportEventId.AddPath);

                // Where the stream must land once the payload has been fully consumed
                long expectedEndPosition = stream.Position + header.EventPayloadSize;

                if (header.EventId < (uint)BinaryLogger.LogSupportEventId.Max)
                {
                    // Internal bookkeeping events
                    switch ((BinaryLogger.LogSupportEventId)header.EventId)
                    {
                        case BinaryLogger.LogSupportEventId.StartTime:
                            ReadStartTimeEvent(reader);
                            break;
                        case BinaryLogger.LogSupportEventId.AddStringId:
                            ReadStringIdEvent(reader);
                            break;
                    }

                    // Internal readers must consume the payload exactly
                    Contract.Assert(stream.Position == expectedEndPosition);
                    continue;
                }

                // External event: shift the raw id into handler-table space and dispatch
                header.EventId -= (uint)BinaryLogger.LogSupportEventId.Max;

                EventHandler handler;
                if (header.EventId < m_handlers.Length && (handler = m_handlers[header.EventId]) != null)
                {
                    handler(header.EventId, header.WorkerId, header.Timestamp, reader);
                    Contract.Assert(stream.Position <= expectedEndPosition, "Event handler read beyond the event payload");
                }
            }
        }
    });
}