/// <summary>
/// Executes on the master to maintain the global set of output contents affected by source changes.
/// </summary>
public void ReportSourceChangeAffectedFiles(
    Pip pip,
    ReadOnlyArray<AbsolutePath> dynamicallyObservedFiles,
    ReadOnlyArray<FileArtifact> outputContents)
{
    // Only pips whose outputs are affected by the source change contribute their outputs to the set.
    if (!IsOutputAffectedBySourceChange(pip, dynamicallyObservedFiles))
    {
        return;
    }

    foreach (FileArtifact outputContent in outputContents)
    {
        m_sourceChangeAffectedFiles.Add(outputContent);
    }
}
/// <summary>
/// Queues an input file's content to be written, unless writing is disabled or the file was already queued.
/// </summary>
private void QueueWritingInputFileIfNecessary(int fileId)
{
    // Skip entirely when input writing is turned off.
    if (!m_writeInputFiles)
    {
        return;
    }

    // Add returns false when this file id was already seen; only the first caller queues it.
    if (!m_sourceFilesWritten.Add(fileId))
    {
        return;
    }

    File file = m_buildGraph.Files[fileId];
    string originalLocation = file.Location;
    string remappedPath = RemapPath(originalLocation);

    // Avoid overwriting spec files when they are registered as inputs
    if (m_specFileToPipsLookup.Contains(originalLocation))
    {
        return;
    }

    try
    {
        if (!file.WasLengthSet)
        {
            // Count inputs that fall back to a default size so the discrepancy is visible in stats.
            Interlocked.Increment(ref m_inputsWithDefaultSize);
        }

        m_inputsToWrite.Add(
            new Tuple<string, string, int>(
                remappedPath,
                file.Hash,
                file.GetScaledLengthInBytes(m_inputScaleFactor)));
    }
#pragma warning disable ERP022 // Unobserved exception in generic exception handler
    catch
    {
        // Some of the registered inputs are directories instead of actual files. As luck has it the
        // directories are generally created before the file is attempted to be created. So we can
        // just skip it and move along.
        Console.WriteLine("Warning: Could not write input file: " + remappedPath);
    }
#pragma warning restore ERP022 // Unobserved exception in generic exception handler
}
/// <summary>
/// Registers a mount that was defined by a module.
/// </summary>
/// <remarks>
/// Behaves like <see cref="AddResolvedMount(IMount, LocationData?)"/>, with the additional effect that
/// <see cref="IsModuleDefinedMount(IMount)"/> will recognize the mount as module-defined.
/// </remarks>
public void AddResolvedModuleDefinedMount(IMount mount, LocationData? mountLocation = null)
{
    Contract.RequiresNotNull(mount);
    Contract.Assert(!m_finalized);

    // Track the mount as module-defined before routing it through the common registration path.
    m_moduleDefinedMounts.Add(mount);
    AddResolvedMount(mount, mountLocation);
}
/// <summary>
/// Creates a writer.
/// </summary>
public InliningWriter(Stream stream, PathTable pathTable, bool debug = false, bool leaveOpen = true, bool logStats = false)
    : base(debug, stream, leaveOpen, logStats)
{
    m_pathTable = pathTable;

    // Reserve invalid as 0-th index
    m_pathToParentIndexMap.Add(AbsolutePath.Invalid, 0);

    // NOTE(review): appears to seed the string set with a sentinel id — confirm intent against StringId usage.
    m_stringSet.Add(new StringId(int.MaxValue));
}
/// <inheritdoc/>
public override void HandleFileAccess(long pipId, string pipDescription, ReportedFileOperation operation, RequestedAccess requestedAccess, FileAccessStatus status, bool explicitlyReported, uint processId, uint error, DesiredAccess desiredAccess, ShareMode shareMode, CreationDisposition creationDisposition, FlagsAndAttributes flagsAndAttributes, string path, string processArgs)
{
    // Every reported path is recorded verbatim; paths that parse are additionally tracked as AbsolutePaths.
    m_allFileAccessPaths.Add(path);

    if (AbsolutePath.TryCreate(m_pathTable, path, out AbsolutePath parsedPath))
    {
        m_fileAccessPaths.Add(parsedPath);
    }
}
/// <inheritdoc/>
public override void HandleFileAccess(FileAccessData fileAccessData)
{
    // Record the raw reported path unconditionally; index it as an AbsolutePath only when it parses.
    m_allFileAccessPaths.Add(fileAccessData.Path);

    if (AbsolutePath.TryCreate(m_pathTable, fileAccessData.Path, out AbsolutePath parsedPath))
    {
        m_fileAccessPaths.Add(parsedPath);
    }
}
/// <summary>
/// Updates retry counters and telemetry lists for a pip that went through user-requested retries.
/// </summary>
internal void UpdatePipRetryInfo(ProcessRunnablePip processRunnable, ExecutionResult executionResult, CounterCollection<PipExecutorCounter> pipExecutionCounters)
{
    // Pips without user retries contribute nothing here.
    if (!executionResult.HasUserRetries)
    {
        return;
    }

    switch (executionResult.Result)
    {
        case PipResultStatus.Succeeded:
            pipExecutionCounters.IncrementCounter(PipExecutorCounter.ProcessUserRetriesSucceededPipsCount);

            // Cap the recorded hashes to keep the telemetry payload bounded.
            if (m_pipsSucceedingAfterUserRetry.Count < MaxListOfPipIdsForTelemetry)
            {
                m_pipsSucceedingAfterUserRetry.Add(processRunnable.Process.FormattedSemiStableHash);
            }

            break;

        case PipResultStatus.Failed:
            pipExecutionCounters.IncrementCounter(PipExecutorCounter.ProcessUserRetriesFailedPipsCount);

            if (m_pipsFailingAfterLastUserRetry.Count < MaxListOfPipIdsForTelemetry)
            {
                m_pipsFailingAfterLastUserRetry.Add(processRunnable.Process.FormattedSemiStableHash);
            }

            break;
    }
}
/// <summary>
/// Requests proactive copies of <paramref name="hash"/> to a machine inside the build ring (when
/// <c>ProactiveCopyMode.InsideRing</c> is set and a build id hash is available) and/or to a machine outside
/// the build ring (when <c>ProactiveCopyMode.OutsideRing</c> is set), returning the combined result.
/// Skips the copy entirely when the hash is already being proactively put, or when enough replica
/// locations (above <c>Settings.ProactiveCopyLocationsThreshold</c>) are already known locally or globally.
/// The outside-ring target comes from the prediction store when one is configured and a path is given;
/// otherwise a random machine (excluding build-ring machines) is chosen from the content location store.
/// The pending-put guard is released in the finally block regardless of outcome.
/// NOTE(review): documentation-only change; the code below is untouched. The interpolated trace string
/// appears split across two lines by text extraction — confirm against the repository before compiling.
/// </summary>
private Task <ProactiveCopyResult> RequestProactiveCopyIfNeededAsync(OperationContext context, ContentHash hash, string path = null) { if (!_pendingProactivePuts.Add(hash)) { return(Task.FromResult(ProactiveCopyResult.CopyNotRequiredResult)); } return(context.PerformOperationAsync( Tracer, traceErrorsOnly: true, operation: async() => { try { var hashArray = _buildIdHash != null ? new[] { hash, _buildIdHash.Value } : new[] { hash }; // First check in local location store, then global if failed. var getLocationsResult = await ContentLocationStore.GetBulkAsync(context, hashArray, context.Token, UrgencyHint.Nominal, GetBulkOrigin.Local); if (getLocationsResult.Succeeded && getLocationsResult.ContentHashesInfo[0].Locations.Count > Settings.ProactiveCopyLocationsThreshold) { _counters[Counters.GetLocationsSatisfiedFromLocal].Increment(); return ProactiveCopyResult.CopyNotRequiredResult; } else { getLocationsResult += await ContentLocationStore.GetBulkAsync(context, hashArray, context.Token, UrgencyHint.Nominal, GetBulkOrigin.Global).ThrowIfFailure(); _counters[Counters.GetLocationsSatisfiedFromRemote].Increment(); } if (getLocationsResult.ContentHashesInfo[0].Locations.Count > Settings.ProactiveCopyLocationsThreshold) { return ProactiveCopyResult.CopyNotRequiredResult; } IReadOnlyList <MachineLocation> buildRingMachines = null; // Get random machine inside build ring Task <BoolResult> insideRingCopyTask; if ((Settings.ProactiveCopyMode & ProactiveCopyMode.InsideRing) != 0) { if (_buildIdHash != null) { buildRingMachines = getLocationsResult.ContentHashesInfo[getLocationsResult.ContentHashesInfo.Count - 1].Locations; var candidates = buildRingMachines.Where(m => !m.Equals(LocalCacheRootMachineLocation)).ToArray(); if (candidates.Length > 0) { var candidate = candidates[ThreadSafeRandom.Generator.Next(0, candidates.Length)]; Tracer.Info(context, $"{nameof(RequestProactiveCopyIfNeededAsync)}: Copying {hash.ToShortString()} to machine '{candidate}' in build ring (of 
{candidates.Length} machines)."); insideRingCopyTask = DistributedCopier.RequestCopyFileAsync(context, hash, candidate); } else { insideRingCopyTask = Task.FromResult(new BoolResult("Could not find any machines belonging to the build ring.")); } } else { insideRingCopyTask = Task.FromResult(new BoolResult("BuildId was not specified, so machines in the build ring cannot be found.")); } } else { insideRingCopyTask = BoolResult.SuccessTask; } buildRingMachines ??= new[] { LocalCacheRootMachineLocation }; Task <BoolResult> outsideRingCopyTask; if ((Settings.ProactiveCopyMode & ProactiveCopyMode.OutsideRing) != 0) { var fromPredictionStore = true; Result <MachineLocation> getLocationResult = null; if (_predictionStore != null && path != null) { var machines = _predictionStore.GetTargetMachines(context, path); if (machines?.Count > 0) { var index = ThreadSafeRandom.Generator.Next(0, machines.Count); getLocationResult = new Result <MachineLocation>(new MachineLocation(machines[index])); } } if (getLocationResult == null) { getLocationResult = ContentLocationStore.GetRandomMachineLocation(except: buildRingMachines); fromPredictionStore = false; } if (getLocationResult.Succeeded) { var candidate = getLocationResult.Value; Tracer.Info(context, $"{nameof(RequestProactiveCopyIfNeededAsync)}: Copying {hash.ToShortString()} to machine '{candidate}' outside build ring. Candidate gotten from {(fromPredictionStore ? nameof(RocksDbContentPlacementPredictionStore) : nameof(ContentLocationStore))}"); outsideRingCopyTask = DistributedCopier.RequestCopyFileAsync(context, hash, candidate); } else { outsideRingCopyTask = Task.FromResult(new BoolResult(getLocationResult)); } } else { outsideRingCopyTask = BoolResult.SuccessTask; } return new ProactiveCopyResult(await insideRingCopyTask, await outsideRingCopyTask); } finally { _pendingProactivePuts.Remove(hash); } }));
/// <summary>
/// Regression test for bug 695424: a path table populated close to its capacity boundary must
/// round-trip through serialization, and the deserialized table must resolve both the overflow
/// path strings and the already-interned paths (case-insensitively) to the same ids.
/// </summary>
public async Task Serialization_Bug695424()
{
    var stringTable = new StringTable();
    var pathTable = new PathTable(stringTable);
    var internedPaths = new ConcurrentBigSet<AbsolutePath>();
    var overflowPathStrings = new List<string>();

    // 32769 = 2^15 + 1; presumably the boundary that triggered the original bug — confirm against the bug report.
    const int max = 32769;

    var builder = new StringBuilder();
    builder.Append(A("c", "i"));
    int rootLength = builder.Length;

    for (int i = 0; i < 100; i++)
    {
        builder.Length = rootLength;
        builder.Append(i);
        builder.Append('\\');
        int iLength = builder.Length;

        for (int j = 0; j < 10; j++)
        {
            builder.Length = iLength;
            builder.Append('j');
            builder.Append(j);
            builder.Append('\\');
            int jLength = builder.Length;

            for (int k = 0; k < 66; k++)
            {
                builder.Length = jLength;
                builder.Append('k');
                builder.Append(k);
                builder.Append('\\');

                // Paths up to the boundary are interned into the table; the rest are kept as raw strings.
                if (pathTable.Count < max)
                {
                    internedPaths.Add(AbsolutePath.Create(pathTable, builder.ToString()));
                }
                else
                {
                    overflowPathStrings.Add(builder.ToString());
                }
            }
        }
    }

    PathTable roundTrippedTable;
    using (var ms = new MemoryStream())
    {
        using (var writer = new BuildXLWriter(true, ms, true, logStats: true))
        {
            pathTable.Serialize(writer);
        }

        ms.Position = 0;
        using (var reader = new BuildXLReader(true, ms, true))
        {
            roundTrippedTable = await PathTable.DeserializeAsync(reader, Task.FromResult(stringTable));
        }
    }

    // Overflow strings must still be addable to the deserialized table without issues.
    foreach (string overflowPath in overflowPathStrings)
    {
        AbsolutePath.Create(roundTrippedTable, overflowPath);
    }

    // Every interned path must map to an equal id in the deserialized table, even when upper-cased.
    foreach (AbsolutePath original in internedPaths.UnsafeGetList())
    {
        string upperCased = original.ToString(pathTable).ToUpperInvariant();
        AbsolutePath recreated = AbsolutePath.Create(roundTrippedTable, upperCased);
        XAssert.AreEqual(original, recreated);
    }
}
/// <summary>
/// Requests proactive copies of <paramref name="hash"/>: one to a random machine in the build ring
/// (when a build id hash is available) and one to a random machine outside the ring, combining both
/// results. Returns success without copying when the hash is already being proactively put, or when
/// more than <c>Settings.ProactiveCopyLocationsThreshold</c> replica locations are already known
/// (checked in the local location store first, then the global one). The pending-put guard is
/// released in the finally block regardless of outcome.
/// NOTE(review): documentation-only change; the code below is untouched.
/// </summary>
private Task <BoolResult> RequestProactiveCopyIfNeededAsync(OperationContext context, ContentHash hash) { if (!_pendingProactivePuts.Add(hash)) { return(BoolResult.SuccessTask); } return(context.PerformOperationAsync( Tracer, traceErrorsOnly: true, operation: async() => { try { var hashArray = _buildIdHash != null ? new[] { hash, _buildIdHash.Value } : new[] { hash }; // First check in local location store, then global if failed. var getLocationsResult = await ContentLocationStore.GetBulkAsync(context, hashArray, context.Token, UrgencyHint.Nominal, GetBulkOrigin.Local); if (getLocationsResult.Succeeded && getLocationsResult.ContentHashesInfo[0].Locations.Count > Settings.ProactiveCopyLocationsThreshold) { _counters[Counters.GetLocationsSatisfiedFromLocal].Increment(); return BoolResult.Success; } else { getLocationsResult += await ContentLocationStore.GetBulkAsync(context, hashArray, context.Token, UrgencyHint.Nominal, GetBulkOrigin.Global).ThrowIfFailure(); _counters[Counters.GetLocationsSatisfiedFromRemote].Increment(); } if (getLocationsResult.ContentHashesInfo[0].Locations.Count > Settings.ProactiveCopyLocationsThreshold) { return BoolResult.Success; } IReadOnlyList <MachineLocation> buildRingMachines; Task <BoolResult> copyToBuildRingMachineTask = BoolResult.SuccessTask; // Get random machine inside build ring if (_buildIdHash != null) { buildRingMachines = getLocationsResult.ContentHashesInfo[getLocationsResult.ContentHashesInfo.Count - 1].Locations; var candidates = buildRingMachines.Where(m => !m.Equals(LocalCacheRootMachineLocation)).ToArray(); if (candidates.Length > 0) { var candidate = candidates[ThreadSafeRandom.Generator.Next(0, candidates.Length)]; Tracer.Info(context, $"Copying {hash.ToShortString()} to machine '{candidate}' in build ring (of {candidates.Length} machines)."); copyToBuildRingMachineTask = DistributedCopier.RequestCopyFileAsync(context, hash, candidate); } } else { buildRingMachines = new[] { LocalCacheRootMachineLocation }; } 
BoolResult result = BoolResult.Success; var getLocationResult = ContentLocationStore.GetRandomMachineLocation(except: buildRingMachines); if (getLocationResult.Succeeded) { var candidate = getLocationResult.Value; Tracer.Info(context, $"Copying {hash.ToShortString()} to machine '{candidate}' outside build ring."); result &= await DistributedCopier.RequestCopyFileAsync(context, hash, candidate); } return result & await copyToBuildRingMachineTask; } finally { _pendingProactivePuts.Remove(hash); } })); }
private bool RemoveExtraneousFilesAndDirectories( Func <string, bool> isPathInBuild, List <string> pathsToScrub, HashSet <string> blockedPaths, HashSet <string> nonDeletableRootDirectories, MountPathExpander mountPathExpander, bool logRemovedFiles, string statisticIdentifier) { int directoriesEncountered = 0; int filesEncountered = 0; int filesRemoved = 0; int directoriesRemovedRecursively = 0; using (var pm = PerformanceMeasurement.Start( m_loggingContext, statisticIdentifier, // The start of the scrubbing is logged before calling this function, since there are two sources of scrubbing (regular scrubbing and shared opaque scrubbing) // with particular messages (_ => {}), loggingContext => { Tracing.Logger.Log.ScrubbingFinished(loggingContext, directoriesEncountered, filesEncountered, filesRemoved, directoriesRemovedRecursively); Logger.Log.BulkStatistic( loggingContext, new Dictionary <string, long> { [I($"{Category}.DirectoriesEncountered")] = directoriesEncountered, [I($"{Category}.FilesEncountered")] = filesEncountered, [I($"{Category}.FilesRemoved")] = filesRemoved, [I($"{Category}.DirectoriesRemovedRecursively")] = directoriesRemovedRecursively, }); })) using (var timer = new Timer( o => { // We don't have a good proxy for how much scrubbing is left. 
Instead we use the file counters to at least show progress Tracing.Logger.Log.ScrubbingStatus(m_loggingContext, filesEncountered); }, null, dueTime: m_loggingConfiguration.GetTimerUpdatePeriodInMs(), period: m_loggingConfiguration.GetTimerUpdatePeriodInMs())) { var deletableDirectoryCandidates = new ConcurrentDictionary <string, bool>(StringComparer.OrdinalIgnoreCase); var nondeletableDirectories = new ConcurrentDictionary <string, bool>(StringComparer.OrdinalIgnoreCase); var directoriesToEnumerate = new BlockingCollection <string>(); var allEnumeratedDirectories = new ConcurrentBigSet <string>(); foreach (var path in pathsToScrub) { SemanticPathInfo foundSemanticPathInfo; if (blockedPaths.Contains(path)) { continue; } if (ValidateDirectory(mountPathExpander, path, out foundSemanticPathInfo)) { if (!isPathInBuild(path)) { directoriesToEnumerate.Add(path); allEnumeratedDirectories.Add(path); } else { nondeletableDirectories.TryAdd(path, true); } } else { string mountName = "Invalid"; string mountPath = "Invalid"; if (mountPathExpander != null && foundSemanticPathInfo.IsValid) { mountName = foundSemanticPathInfo.RootName.ToString(mountPathExpander.PathTable.StringTable); mountPath = foundSemanticPathInfo.Root.ToString(mountPathExpander.PathTable); } Tracing.Logger.Log.ScrubbingFailedBecauseDirectoryIsNotScrubbable(pm.LoggingContext, path, mountName, mountPath); } } var cleaningThreads = new Thread[m_maxDegreeParallelism]; int pending = directoriesToEnumerate.Count; if (directoriesToEnumerate.Count == 0) { directoriesToEnumerate.CompleteAdding(); } for (int i = 0; i < m_maxDegreeParallelism; i++) { var t = new Thread(() => { while (!directoriesToEnumerate.IsCompleted && !m_cancellationToken.IsCancellationRequested) { string currentDirectory; if (directoriesToEnumerate.TryTake(out currentDirectory, Timeout.Infinite)) { Interlocked.Increment(ref directoriesEncountered); bool shouldDeleteCurrentDirectory = true; var result = FileUtilities.EnumerateDirectoryEntries( 
currentDirectory, false, (dir, fileName, attributes) => { string fullPath = Path.Combine(dir, fileName); // Skip specifically blocked paths. if (blockedPaths.Contains(fullPath)) { shouldDeleteCurrentDirectory = false; return; } string realPath = fullPath; // If this is a symlinked directory, get the final real target directory that it points to, so we can track duplicate work properly var isDirectorySymlink = FileUtilities.IsDirectorySymlinkOrJunction(fullPath); if (isDirectorySymlink && FileUtilities.TryGetLastReparsePointTargetInChain(handle: null, sourcePath: fullPath) is var maybeRealPath && maybeRealPath.Succeeded) { realPath = maybeRealPath.Result; } // If the current path is a directory, only follow it if we haven't followed it before (making sure we use the real path in case of symlinks) var shouldEnumerateDirectory = (attributes & FileAttributes.Directory) == FileAttributes.Directory && !allEnumeratedDirectories.GetOrAdd(realPath).IsFound; if (shouldEnumerateDirectory) { if (nondeletableDirectories.ContainsKey(fullPath)) { shouldDeleteCurrentDirectory = false; } if (!isPathInBuild(fullPath)) { // Current directory is not in the build, then recurse to its members. Interlocked.Increment(ref pending); directoriesToEnumerate.Add(fullPath); if (!nonDeletableRootDirectories.Contains(fullPath)) { // Current directory can be deleted, then it is a candidate to be deleted. deletableDirectoryCandidates.TryAdd(fullPath, true); } else { // Current directory can't be deleted (e.g., the root of a mount), then don't delete it. // However, note that we recurse to its members to find all extraneous directories and files. shouldDeleteCurrentDirectory = false; } } else { // Current directory is in the build, i.e., directory is an output directory. // Stop recursive directory traversal because none of its members should be deleted. 
shouldDeleteCurrentDirectory = false; } } // On Mac directory symlinks are treated like any files, and so we must delete them if // when they happen to be marked as shared opaque directory output. // // When 'fullPath' is a directory symlink the 'if' right above this 'if' will add it to // 'deletableDirectoryCandidates'; there is code that deletes all directories added to this // list but that code expects a real directory and so might fail to delete a directory symlink. if (!shouldEnumerateDirectory || (isDirectorySymlink && OperatingSystemHelper.IsMacOS)) { Interlocked.Increment(ref filesEncountered); if (!isPathInBuild(fullPath)) { // File is not in the build, delete it. if (TryDeleteFile(pm.LoggingContext, fullPath, logRemovedFiles)) { Interlocked.Increment(ref filesRemoved); } } else { // File is in the build, then don't delete it, but mark the current directory that // it should not be deleted. shouldDeleteCurrentDirectory = false; } } }); if (!result.Succeeded) { // Different trace levels based on result. if (result.Status != EnumerateDirectoryStatus.SearchDirectoryNotFound) { Tracing.Logger.Log.ScrubbingFailedToEnumerateDirectory( pm.LoggingContext, currentDirectory, result.Status.ToString()); } } if (!shouldDeleteCurrentDirectory) { // If directory should not be deleted, then all of its parents should not be deleted. int index; string preservedDirectory = currentDirectory; bool added; do { added = nondeletableDirectories.TryAdd(preservedDirectory, true); }while (added && (index = preservedDirectory.LastIndexOf(Path.DirectorySeparatorChar)) != -1 && !string.IsNullOrEmpty(preservedDirectory = preservedDirectory.Substring(0, index))); } Interlocked.Decrement(ref pending); } if (Volatile.Read(ref pending) == 0) { directoriesToEnumerate.CompleteAdding(); } } });