/// <inheritdoc />
public override void Dispose()
{
    m_perfCollector?.Cancel();
    m_perfCollector?.Join();
    m_perfCollector?.Dispose();

    m_timeoutTaskCancelationSource.Cancel();

    ulong reportCount = (ulong)Counters.GetCounterValue(SandboxedProcessCounters.AccessReportCount);
    if (reportCount > 0)
    {
        Counters.AddToCounter(SandboxedProcessCounters.SumOfAccessReportAvgQueueTimeUs, (long)(m_sumOfReportQueueTimesUs / reportCount));
        Counters.AddToCounter(SandboxedProcessCounters.SumOfAccessReportAvgCreationTimeUs, (long)(m_sumOfReportCreationTimesUs / reportCount));
    }

    if (!Killed)
    {
        // Try to kill all child processes once the parent gets disposed, so that all
        // system resources they use are cleaned up appropriately.
        KillAllChildProcesses();
    }

    if (m_pipKextStats != null)
    {
        var statsJson = Newtonsoft.Json.JsonConvert.SerializeObject(m_pipKextStats.Value);
        LogProcessState($"Process Kext Stats: {statsJson}");
    }

    base.Dispose();
}
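// A hedged usage sketch, not from the original sources: Dispose is what triggers the
// cleanup above, so a caller typically owns the process for the duration of the pip and
// disposes it once the result has been collected. The factory/result calls below follow
// the public BuildXL.Processes API as I understand it (SandboxedProcessFactory.StartAsync,
// ISandboxedProcess.GetResultAsync); treat the exact signatures as assumptions.
private static async System.Threading.Tasks.Task<SandboxedProcessResult> RunSandboxedAsync(SandboxedProcessInfo info)
{
    ISandboxedProcess process = await SandboxedProcessFactory.StartAsync(info, forceSandboxing: false);
    try
    {
        return await process.GetResultAsync();
    }
    finally
    {
        // Disposing cancels the perf collector, reports the averaged access-report
        // counters, and kills any surviving child processes, as in Dispose above.
        process.Dispose();
    }
}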
private string GetFileIODuration(Stream? resultStream)
{
    if (resultStream is TrackingFileStream tfs)
    {
        Counters.AddToCounter(GrpcContentServerCounters.StreamContentReadFromDiskDuration, tfs.ReadDuration);
        return $"{tfs.ReadDuration.TotalMilliseconds}ms";
    }

    return string.Empty;
}
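// A minimal sketch of the idea behind TrackingFileStream, assuming only that it is a
// Stream decorator which accumulates the wall-clock time spent in Read so that callers
// such as GetFileIODuration above can report on-disk read time. The real type lives in
// the cache codebase and may differ; this class name and shape are illustrative.
using System;
using System.Diagnostics;
using System.IO;
using System.Threading;

public sealed class TrackingFileStreamSketch : Stream
{
    private readonly Stream m_inner;
    private long m_readDurationTicks;

    public TrackingFileStreamSketch(Stream inner) => m_inner = inner ?? throw new ArgumentNullException(nameof(inner));

    // Total time spent inside Read calls so far.
    public TimeSpan ReadDuration => TimeSpan.FromTicks(Interlocked.Read(ref m_readDurationTicks));

    public override int Read(byte[] buffer, int offset, int count)
    {
        Stopwatch sw = Stopwatch.StartNew();
        try
        {
            return m_inner.Read(buffer, offset, count);
        }
        finally
        {
            Interlocked.Add(ref m_readDurationTicks, sw.Elapsed.Ticks);
        }
    }

    // A read-only decorator: everything else delegates to the inner stream or is unsupported.
    public override bool CanRead => m_inner.CanRead;
    public override bool CanSeek => m_inner.CanSeek;
    public override bool CanWrite => false;
    public override long Length => m_inner.Length;
    public override long Position { get => m_inner.Position; set => m_inner.Position = value; }
    public override void Flush() => m_inner.Flush();
    public override long Seek(long offset, SeekOrigin origin) => m_inner.Seek(offset, origin);
    public override void SetLength(long value) => throw new NotSupportedException();
    public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();

    protected override void Dispose(bool disposing)
    {
        if (disposing)
        {
            m_inner.Dispose();
        }

        base.Dispose(disposing);
    }
}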
/// <summary>
/// The batch log payload example:
/// {"CacheMissAnalysisResults":
///     {
///         Pip123: {
///             Description:
///             FromCacheLookUp:
///             Detail: {
///                 ActualMissType: ...
///                 ReasonFromAnalysis: ...
///                 Info: ...
///             }
///         },
///         Pip345: {
///             Description:
///             FromCacheLookUp:
///             Detail: {
///                 ActualMissType: ...
///                 ReasonFromAnalysis: ...
///                 Info: ...
///             }
///         },
///     }
/// }
/// </summary>
internal Task<Unit> BatchLogging(JProperty[] results)
{
    // Use JsonTextWriter for two reasons:
    // 1. It makes it easy to control when a log event starts and when it ends.
    // 2. According to some research, manual serialization with JsonTextWriter can improve performance.
    using (Counters.StartStopwatch(FingerprintStoreCounters.CacheMissBatchLoggingTime))
    {
        ProcessResults(results, m_configuration, m_loggingContext);
        Counters.AddToCounter(FingerprintStoreCounters.CacheMissBatchingDequeueCount, results.Length);
        return Unit.VoidTask;
    }
}
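// A hedged sketch of what a ProcessResults-style helper could do with JsonTextWriter; the
// real method lives elsewhere in the FingerprintStore, and SerializeBatch is a hypothetical
// name. It illustrates the two points in the comment above: one writer controls exactly
// where the single batched log event starts and ends, and each JProperty is streamed out
// directly rather than round-tripped through a full object model.
using System.IO;
using System.Text;
using Newtonsoft.Json;
using Newtonsoft.Json.Linq;

internal static string SerializeBatch(JProperty[] results)
{
    var sb = new StringBuilder();
    using (var stringWriter = new StringWriter(sb))
    using (var writer = new JsonTextWriter(stringWriter))
    {
        // One log event wraps the whole batch under "CacheMissAnalysisResults".
        writer.WriteStartObject();
        writer.WritePropertyName("CacheMissAnalysisResults");
        writer.WriteStartObject();

        foreach (JProperty result in results)
        {
            // Each JProperty maps a pip (e.g. Pip123) to its cache miss analysis detail.
            result.WriteTo(writer);
        }

        writer.WriteEndObject();
        writer.WriteEndObject();
    }

    return sb.ToString();
}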
/// <inheritdoc />
public override void Dispose()
{
    m_timeoutTaskCancelationSource.Cancel();

    var reportCount = Counters.GetCounterValue(SandboxedProcessCounters.AccessReportCount);
    if (reportCount > 0)
    {
        Counters.AddToCounter(SandboxedProcessCounters.SumOfAccessReportAvgQueueTimeUs, m_sumOfReportQueueTimesUs / reportCount);
        Counters.AddToCounter(SandboxedProcessCounters.SumOfAccessReportAvgCreationTimeUs, m_sumOfReportCreationTimesUs / reportCount);
    }

    if (!Killed)
    {
        // Try to kill all child processes once the parent gets disposed, so that all
        // system resources they use are cleaned up appropriately.
        KillAllChildProcesses();
    }

    base.Dispose();
}
private void SaveInternal(string fileContentTablePath)
{
    Contract.Requires(!string.IsNullOrWhiteSpace(fileContentTablePath));
    Contract.EnsuresOnThrow<BuildXLException>(true);

    ExceptionUtilities.HandleRecoverableIOException(
        () =>
        {
            int numEvicted = 0;
            Directory.CreateDirectory(Path.GetDirectoryName(fileContentTablePath));

            // Note that we are using a non-async file stream here. That's because we're doing lots of
            // tiny writes for simplicity, but tiny writes on an async stream end up blocking anyway
            // while adding silly overhead.
            using (FileStream stream = FileUtilities.CreateFileStream(
                fileContentTablePath,
                FileMode.Create,
                FileAccess.Write,
                FileShare.Delete,
                // Do not write the file with SequentialScan since it will be reread in the subsequent build.
                FileOptions.None))
            {
                // We don't have anything in particular to correlate this file to,
                // so we simply create a unique correlation id that is used as part
                // of the header consistency check.
                FileEnvelopeId correlationId = FileEnvelopeId.Create();
                s_fileEnvelope.WriteHeader(stream, correlationId);

                using (var writer = new BuildXLWriter(debug: false, stream: stream, leaveOpen: true, logStats: false))
                {
                    // Reserve a placeholder for the number of entries; it is back-patched
                    // below once the real count is known.
                    long numberOfEntriesPosition = writer.BaseStream.Position;
                    writer.Write(0U);

                    uint entriesWritten = 0;
                    var hashBuffer = new byte[ContentHashingUtilities.HashInfo.ByteLength];

                    foreach (var fileAndEntryPair in m_entries)
                    {
                        // Skip saving anything with a TTL of zero. These entries were loaded
                        // with a TTL of one (immediately decremented) and were not used since load.
                        // See class remarks.
                        if (fileAndEntryPair.Value.TimeToLive == 0)
                        {
                            numEvicted++;
                            continue;
                        }

                        // Key: volume and file ID.
                        fileAndEntryPair.Key.Serialize(writer);

                        // Entry: USN, hash, length, and time to live.
                        writer.Write(fileAndEntryPair.Value.Usn.Value);
                        fileAndEntryPair.Value.Hash.SerializeHashBytes(hashBuffer, 0);
                        writer.Write(hashBuffer);
                        writer.Write(fileAndEntryPair.Value.Length);
                        writer.Write(fileAndEntryPair.Value.TimeToLive);

                        entriesWritten++;
                    }

                    // Seek back and fill in the number of entries actually written.
                    var endPosition = writer.BaseStream.Position;
                    writer.BaseStream.Position = numberOfEntriesPosition;
                    writer.Write(entriesWritten);
                    writer.BaseStream.Position = endPosition;
                }

                s_fileEnvelope.FixUpHeader(stream, correlationId);
            }

            Counters.AddToCounter(FileContentTableCounters.NumEvicted, numEvicted);
            return Unit.Void;
        },
        ex => { throw new BuildXLException("Failure writing file content table", ex); });
}
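// A self-contained sketch of the back-patching technique SaveInternal uses: reserve a
// 4-byte slot for the entry count before the entries are known, then seek back and
// overwrite it once the real count is known. The names and the int payload here are
// illustrative, not from the BuildXL sources; only the position bookkeeping mirrors the
// code above. The stream must be seekable for this to work.
using System.IO;
using System.Text;

public static void WriteWithBackPatchedCount(Stream stream, int[] values)
{
    using (var writer = new BinaryWriter(stream, Encoding.UTF8, leaveOpen: true))
    {
        long countPosition = writer.BaseStream.Position;
        writer.Write(0U); // placeholder count, patched below

        uint written = 0;
        foreach (int value in values)
        {
            if (value == 0)
            {
                // Mirrors SaveInternal skipping entries whose TTL reached zero.
                continue;
            }

            writer.Write(value);
            written++;
        }

        // Seek back, patch in the real count, and restore the write position.
        long endPosition = writer.BaseStream.Position;
        writer.BaseStream.Position = countPosition;
        writer.Write(written);
        writer.BaseStream.Position = endPosition;
    }
}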