/// <summary>
/// Adds a pip static fingerprint.
/// </summary>
/// <param name="pip">Pip.</param>
/// <param name="fingerprint">Static fingerprint.</param>
public void AddFingerprint(Pip pip, ContentFingerprint fingerprint)
{
    Contract.Requires(pip != null);

    m_staticFingerprintsToPips[fingerprint] = pip.PipId;
    m_pipsToStaticFingerprints.Add(pip.PipId, fingerprint);
}
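Note the asymmetry: the forward map uses the indexer (a later pip with the same fingerprint overwrites the entry), while the reverse map uses Add (a pip may be fingerprinted only once). A minimal sketch of that bidirectional mapping, using plain dictionaries and Guid/int stand-ins for the real ContentFingerprint/PipId types (FingerprintIndex and its members are hypothetical, for illustration only):

using System;
using System.Collections.Generic;

public sealed class FingerprintIndex
{
    // Forward map: fingerprint -> pip id. Indexer assignment means last writer wins.
    private readonly Dictionary<Guid, int> m_staticFingerprintsToPips = new Dictionary<Guid, int>();

    // Reverse map: pip id -> fingerprint. Add throws if a pip is fingerprinted twice.
    private readonly Dictionary<int, Guid> m_pipsToStaticFingerprints = new Dictionary<int, Guid>();

    public void AddFingerprint(int pipId, Guid fingerprint)
    {
        m_staticFingerprintsToPips[fingerprint] = pipId;
        m_pipsToStaticFingerprints.Add(pipId, fingerprint);
    }

    public bool TryGetPipByFingerprint(Guid fingerprint, out int pipId)
        => m_staticFingerprintsToPips.TryGetValue(fingerprint, out pipId);
}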
/// <summary>
/// Creates a new binary logger to write to the given stream
/// </summary>
/// <param name="logStream">the stream to write events to</param>
/// <param name="context">the context containing the path table</param>
/// <param name="logId">the log id to place in the header of the execution log; used on load to verify against other data structures</param>
/// <param name="lastStaticAbsolutePathIndex">the last absolute path guaranteed to be in the serialized form of the corresponding path table</param>
/// <param name="closeStreamOnDispose">specifies whether the stream is closed on disposal of the logger</param>
/// <param name="onEventWritten">optional callback after each event is written to the underlying stream</param>
public BinaryLogger(Stream logStream, PipExecutionContext context, Guid logId, int lastStaticAbsolutePathIndex = int.MinValue, bool closeStreamOnDispose = true, Action onEventWritten = null)
{
    m_context = context;
    LogId = logId;
    m_lastStaticAbsolutePathIndex = lastStaticAbsolutePathIndex;
    m_logStreamWriter = new BuildXLWriter(debug: false, stream: logStream, leaveOpen: !closeStreamOnDispose, logStats: false);
    m_watch = Stopwatch.StartNew();
    m_capturedPaths = new ConcurrentBigMap<AbsolutePath, bool>();
    m_capturedStrings = new ConcurrentBigMap<StringId, bool>();
    m_capturedStrings.Add(StringId.Invalid, true);
    m_writerPool = new ObjectPool<EventWriter>(
        () => new EventWriter(this),
        writer => { writer.Seek(0, SeekOrigin.Begin); return writer; });

    var logIdBytes = logId.ToByteArray();
    Contract.Assert(logIdBytes.Length == LogIdByteLength);
    logStream.Write(logIdBytes, 0, logIdBytes.Length);

    LogStartTime(DateTime.UtcNow);

    // Drain queued events on a dedicated thread until the blocking collection is marked complete.
    m_pendingEventsDrainingThread = new Thread(
        () =>
        {
            foreach (PooledObjectWrapper<EventWriter> wrapper in m_pendingEvents.GetConsumingEnumerable())
            {
                var eventWriter = wrapper.Instance;
                WriteEventData(eventWriter);
                m_writerPool.PutInstance(wrapper);
                onEventWritten?.Invoke();
            }
        });
    m_pendingEventsDrainingThread.Start();
}
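The draining thread above is the standard single-consumer pattern over BlockingCollection: GetConsumingEnumerable blocks until items arrive and ends only once CompleteAdding has been called and the queue is drained. A self-contained sketch of the same shape, with strings standing in for the pooled EventWriter instances the real logger queues (PendingWorkDemo is hypothetical):

using System;
using System.Collections.Concurrent;
using System.Threading;

public static class PendingWorkDemo
{
    public static void Main()
    {
        var pending = new BlockingCollection<string>();

        // Consumer: blocks on the enumerable; the loop exits when the collection
        // is marked complete and empty.
        var consumer = new Thread(() =>
        {
            foreach (string item in pending.GetConsumingEnumerable())
            {
                Console.WriteLine($"wrote event: {item}");
            }
        });
        consumer.Start();

        // Producers add events; dispose-time code calls CompleteAdding to end the loop.
        pending.Add("event-1");
        pending.Add("event-2");
        pending.CompleteAdding();
        consumer.Join();
    }
}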
/// <summary>
/// Override event to capture its data and store it in the protobuf
/// </summary>
public override void PipExecutionDirectoryOutputs(PipExecutionDirectoryOutputs data)
{
    foreach (var (directoryArtifact, fileArtifactArray) in data.DirectoryOutputs)
    {
        foreach (var file in fileArtifactArray)
        {
            m_dynamicFileProducerMap.Add(file, data.PipId.Value);
        }

        var value = new PipExecutionDirectoryOutputsEvent
        {
            WorkerID = WorkerID.Value,
            PipID = data.PipId.Value,
            DirectoryArtifact = directoryArtifact.ToDirectoryArtifact(PathTable, m_nameExpander),
        };

        value.FileArtifactArray.AddRange(fileArtifactArray.Select(
            file => file.ToFileArtifact(PathTable, m_nameExpander)));

        var key = new EventKey
        {
            EventTypeID = Xldb.Proto.ExecutionEventId.PipExecutionDirectoryOutputs,
            PipId = data.PipId.Value,
            PipExecutionDirectoryOutputKey = AbsolutePathToXldbString(directoryArtifact.Path)
        };

        var keyArr = key.ToByteArray();
        var valueArr = value.ToByteArray();
        WriteToDb(keyArr, valueArr, XldbDataStore.EventColumnFamilyName);
        AddToDbStorageDictionary(DBStoredTypes.PipExecutionDirectoryOutputs, keyArr.Length + valueArr.Length);
    }
}
/// <summary>
/// Creates a writer
/// </summary>
public InliningWriter(Stream stream, PathTable pathTable, bool debug = false, bool leaveOpen = true, bool logStats = false)
    : base(debug, stream, leaveOpen, logStats)
{
    m_pathTable = pathTable;

    // Reserve invalid as 0-th index
    m_pathToParentIndexMap.Add(AbsolutePath.Invalid, 0);
    m_stringSet.Add(new StringId(int.MaxValue));
}
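Reserving slot 0 for the invalid value is what lets the serialized format use index 0 to mean "no path / no parent" without ambiguity. A minimal sketch of that convention under simplified assumptions (PathIndexTable and its members are hypothetical, with strings standing in for interned paths):

using System.Collections.Generic;

public sealed class PathIndexTable
{
    private readonly Dictionary<string, int> m_pathToIndex = new Dictionary<string, int>();

    public PathIndexTable()
    {
        // Reserve 0 for the invalid/empty path, mirroring InliningWriter's constructor.
        m_pathToIndex.Add(string.Empty, 0);
    }

    // Returns the existing index, or assigns the next one; because slot 0 is
    // pre-reserved, real paths always get indices >= 1.
    public int GetOrAddIndex(string path)
    {
        if (!m_pathToIndex.TryGetValue(path, out int index))
        {
            index = m_pathToIndex.Count;
            m_pathToIndex.Add(path, index);
        }

        return index;
    }
}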
/// <summary>
/// Creates a new binary logger to write to the given stream
/// </summary>
/// <param name="logStream">the stream to write events to</param>
/// <param name="context">the context containing the path table</param>
/// <param name="logId">the log id to place in the header of the execution log; used on load to verify against other data structures</param>
/// <param name="lastStaticAbsolutePathIndex">the last absolute path guaranteed to be in the serialized form of the corresponding path table</param>
/// <param name="closeStreamOnDispose">specifies whether the stream is closed on disposal of the logger</param>
/// <param name="onEventWritten">optional callback after each event is written to the underlying stream</param>
public BinaryLogger(Stream logStream, PipExecutionContext context, Guid logId, int lastStaticAbsolutePathIndex = int.MinValue, bool closeStreamOnDispose = true, Action onEventWritten = null)
{
    m_context = context;
    LogId = logId;
    m_lastStaticAbsolutePathIndex = lastStaticAbsolutePathIndex;
    m_logStreamWriter = new BuildXLWriter(debug: false, stream: logStream, leaveOpen: !closeStreamOnDispose, logStats: false);
    m_watch = Stopwatch.StartNew();
    m_capturedPaths = new ConcurrentBigMap<AbsolutePath, bool>();
    m_capturedStrings = new ConcurrentBigMap<StringId, bool>();
    m_capturedStrings.Add(StringId.Invalid, true);
    m_writerPool = new ObjectPool<EventWriter>(
        () => new EventWriter(this),
        writer => { writer.Seek(0, SeekOrigin.Begin); return writer; });
    m_onEventWritten = onEventWritten;

    var logIdBytes = logId.ToByteArray();
    Contract.Assert(logIdBytes.Length == LogIdByteLength);
    logStream.Write(logIdBytes, 0, logIdBytes.Length);

    LogStartTime(DateTime.UtcNow);

    m_pendingEventsDrainingThread = new Thread(
        () =>
        {
            // Keep trying to drain the event queue as long as new events can be added.
            // Adding events and completing the queue are already properly synchronized (a write lock
            // is taken before completing), but there is a slim chance that we finish draining the queue
            // and, before we check m_completeAdding here again, an Add + CompleteAdding happen, which
            // could leave unprocessed events in the queue. So we also check whether the queue is non-empty.
            while (!m_completeAdding || !m_pendingEvents.IsEmpty)
            {
                // Wait until at least one event is available for consumption
                m_eventsAvailable.WaitOne();

                // If new events can still be added, give writers the chance to add more events.
                // This allows for more efficient draining, since the thread synchronization tax is then
                // paid only once for many events. A 100ms lag is acceptable for events sitting in the queue,
                // and the profiler shows a 10x aggregated time gain from doing this. Some considerations
                // behind picking this time:
                // - A greater value didn't seem to make a difference (e.g. 500ms was tried with equivalent perf results).
                // - There are temporal constraints that need some events (in particular, the build manifest one) to be
                //   serialized and sent to workers before the pip completion event (which does not go through the binary
                //   logger) happens, so we don't want xlg events to sit in the queue for too long.
                if (!m_completeAdding)
                {
                    Thread.Sleep(100);
                }

                // Drain all events available in the queue
                while (m_pendingEvents.TryDequeue(out var action))
                {
                    action.Run();
                }
            }
        });
    m_pendingEventsDrainingThread.Start();
}
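The batching trick in those comments (wake on a signal, sleep briefly so producers can pile up more work, then drain everything at once) can be shown in isolation. A minimal sketch, assuming a ConcurrentQueue and an AutoResetEvent stand in for m_pendingEvents and m_eventsAvailable (BatchedDrainer and its members are hypothetical):

using System;
using System.Collections.Concurrent;
using System.Threading;

public sealed class BatchedDrainer
{
    private readonly ConcurrentQueue<Action> m_pending = new ConcurrentQueue<Action>();
    private readonly AutoResetEvent m_available = new AutoResetEvent(false);
    private volatile bool m_completeAdding;

    public void Enqueue(Action work)
    {
        m_pending.Enqueue(work);
        m_available.Set(); // wake the drainer
    }

    public void Complete()
    {
        m_completeAdding = true;
        m_available.Set(); // let the drainer observe completion
    }

    public void DrainLoop()
    {
        // Same shape as the logger's loop: re-check emptiness so a racing
        // Enqueue + Complete cannot strand items in the queue.
        while (!m_completeAdding || !m_pending.IsEmpty)
        {
            m_available.WaitOne();

            // Batch: give producers a moment, so the synchronization cost is
            // paid once per batch rather than once per item.
            if (!m_completeAdding)
            {
                Thread.Sleep(100);
            }

            // Drain everything currently queued.
            while (m_pending.TryDequeue(out var action))
            {
                action();
            }
        }
    }
}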
/// <summary>
/// Creates a new binary logger to write to the given stream
/// </summary>
/// <param name="logStream">the stream to write events to</param>
/// <param name="context">the context containing the path table</param>
/// <param name="logId">the log id to place in the header of the execution log; used on load to verify against other data structures</param>
/// <param name="lastStaticAbsolutePathIndex">the last absolute path guaranteed to be in the serialized form of the corresponding path table</param>
/// <param name="closeStreamOnDispose">specifies whether the stream is closed on disposal of the logger</param>
/// <param name="onEventWritten">optional callback after each event is written to the underlying stream</param>
public BinaryLogger(Stream logStream, PipExecutionContext context, Guid logId, int lastStaticAbsolutePathIndex = int.MinValue, bool closeStreamOnDispose = true, Action onEventWritten = null)
{
    m_context = context;
    LogId = logId;
    m_lastStaticAbsolutePathIndex = lastStaticAbsolutePathIndex;
    m_logStreamWriter = new BuildXLWriter(debug: false, stream: logStream, leaveOpen: !closeStreamOnDispose, logStats: false);
    m_watch = Stopwatch.StartNew();
    m_capturedPaths = new ConcurrentBigMap<AbsolutePath, bool>();
    m_capturedStrings = new ConcurrentBigMap<StringId, bool>();
    m_capturedStrings.Add(StringId.Invalid, true);
    m_writerPool = new ObjectPool<EventWriter>(
        () => new EventWriter(this),
        writer => { writer.Seek(0, SeekOrigin.Begin); return writer; });

    var logIdBytes = logId.ToByteArray();
    Contract.Assert(logIdBytes.Length == LogIdByteLength);
    logStream.Write(logIdBytes, 0, logIdBytes.Length);

    LogStartTime(DateTime.UtcNow);

    m_pendingEventsDrainingThread = new Thread(
        () =>
        {
            try
            {
                foreach (PooledObjectWrapper<EventWriter> wrapper in m_pendingEvents.GetConsumingEnumerable())
                {
                    var eventWriter = wrapper.Instance;
                    WriteEventData(eventWriter);
                    m_writerPool.PutInstance(wrapper);
                    onEventWritten?.Invoke();
                }
            }
            catch (InvalidOperationException)
            {
                // InvalidOperationException is thrown when calling Take() on a blocking collection that has been
                // marked as completed. GetConsumingEnumerable throwing an InvalidOperationException here, however,
                // is unusual. On further investigation, we discovered that it might throw one if the underlying
                // collection was passed into the BlockingCollection constructor and is modified externally, outside
                // of the BlockingCollection. Even though we do not do that, we rarely get an InvalidOperationException
                // here, which is a NetCore bug. We reported the bug; for now, we swallow the exception and treat it
                // as a signal for completion.
                return;
            }
        });
    m_pendingEventsDrainingThread.Start();
}
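For reference, the documented behavior this catch leans on: BlockingCollection.Take throws InvalidOperationException once the collection is empty and marked complete, and the handler above extends that interpretation to the rare throw from GetConsumingEnumerable. A minimal repro-style sketch of the Take behavior (ConsumerDemo is hypothetical):

using System;
using System.Collections.Concurrent;

public static class ConsumerDemo
{
    public static void Main()
    {
        var queue = new BlockingCollection<int>();
        queue.Add(42);
        queue.CompleteAdding();

        Console.WriteLine(queue.Take()); // 42: items added before completion are still consumable

        try
        {
            queue.Take(); // throws: the collection is empty and marked as complete
        }
        catch (InvalidOperationException)
        {
            Console.WriteLine("completed"); // treated as the completion signal
        }
    }
}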
public int DuplicateFile(File file, string newRoot)
{
    string newLocation = DuplicatePath(file.Location, newRoot);
    int existing;
    if (FilesByPath.TryGetValue(newLocation, out existing))
    {
        // A file already exists at the duplicated path; reuse its id.
        return existing;
    }
    else
    {
        File newFile = new File(newLocation)
        {
            Hash = Guid.NewGuid().ToString(),
            IsOutputFile = file.IsOutputFile
        };
        int newFileId = ++m_maxFile;
        Files[newFileId] = newFile;
        FilesByPath.Add(newLocation, newFileId);
        newFile.SetUnscaledLength(file.GetScaledLengthInBytes(1.0));
        return newFileId;
    }
}
public void TestConcurrentBigMapOperations()
{
    var map = new ConcurrentBigMap<int, string>();
    XAssert.IsTrue(map.TryAdd(0, "value"));
    XAssert.IsFalse(map.TryAdd(0, "not added value"));
    XAssert.AreEqual("value", map[0]);
    map[0] = "newValue";
    map[1] = "value1";
    var value0 = "newValue";
    XAssert.AreEqual(value0, map[0]);
    XAssert.IsTrue(map.ContainsKey(0));
    XAssert.IsTrue(map.ContainsKey(1));
    XAssert.IsFalse(map.ContainsKey(12));
    XAssert.AreEqual(2, map.Count);

    // Test TryGetValue
    string value1;
    XAssert.IsTrue(map.TryGetValue(1, out value1));
    XAssert.AreEqual("value1", value1);
    string value31;
    XAssert.IsFalse(map.TryGetValue(31, out value31));

    // Test update
    XAssert.IsFalse(map.TryUpdate(1, "notUpdatedValue1", "notActualValue1"));
    XAssert.AreEqual("value1", map[1]);
    XAssert.IsTrue(map.TryUpdate(1, "updatedValue1", "value1"));
    value1 = map[1];
    XAssert.AreEqual("updatedValue1", value1);

    // Test remove
    int beforeFailedRemoveCount = map.Count;
    string value23;
    XAssert.IsFalse(map.TryRemove(23, out value23));
    XAssert.AreEqual(beforeFailedRemoveCount, map.Count);

    map.Add(23, "value23");
    XAssert.AreEqual(beforeFailedRemoveCount + 1, map.Count);
    XAssert.IsTrue(map.TryRemove(23, out value23));
    XAssert.AreEqual("value23", value23);
    XAssert.AreEqual(beforeFailedRemoveCount, map.Count);

    Assert.Equal(new int[] { 0, 1 }, map.Keys.ToArray());
    Assert.Equal(new string[] { value0, value1 }, map.Values.ToArray());
    XAssert.AreEqual(2, map.Count);

    string addedData = "added data";
    string notAddedData = "not added data";
    var result = map.GetOrAdd(2, addedData, (key, data0) => data0);
    XAssert.IsFalse(result.IsFound);
    XAssert.AreEqual(addedData, result.Item.Value);
    XAssert.AreEqual(addedData, map[2]);

    // Ensure entry is not updated for get or add
    result = map.GetOrAdd(2, notAddedData, (key, data0) => data0);
    XAssert.IsTrue(result.IsFound);
    XAssert.AreEqual(addedData, result.Item.Value);
    XAssert.AreEqual(addedData, map[2]);

    Func<int, string, string, string> updateFunction = (key, data0, currentValue) => "updated " + currentValue;
    var updatedData = updateFunction(2, notAddedData, addedData);

    result = map.AddOrUpdate(2, notAddedData, (key, data0) => data0, updateFunction);
    XAssert.IsTrue(result.IsFound);
    XAssert.AreEqual(addedData, result.OldItem.Value);
    XAssert.AreEqual(updatedData, result.Item.Value);
    XAssert.AreEqual(updatedData, map[2]);

    result = map.AddOrUpdate(3, addedData, (key, data0) => data0, updateFunction);
    XAssert.IsFalse(result.IsFound);
    XAssert.AreEqual(addedData, result.Item.Value);
    XAssert.AreEqual(addedData, map[3]);

    TestOperationsHelper(parallel: false);
}