public void CorruptFile()
{
    // Seed the backing path with junk bytes so it cannot parse as a valid combined file.
    File.WriteAllText(m_path, "1Cleanasldkjf09234,kns90j23lk4n2309u4");

    FakeFile fakeFile = CreateFakeFile(R("foo", "bar1.txt"), "bar1.txt");

    // The corrupt file yields no content, but adding a file must still succeed.
    using (FileCombiner firstCombiner = CreateFileCombiner(m_loggingContext, m_path))
    {
        XAssert.IsNull(firstCombiner.RequestFile(fakeFile.Path, fakeFile.Hash));
        AddFile(firstCombiner, fakeFile);
    }

    // A fresh combiner over the recreated file can serve the content added above.
    using (FileCombiner secondCombiner = CreateFileCombiner(m_loggingContext, m_path))
    {
        using (MemoryStream content = secondCombiner.RequestFile(fakeFile.Path, fakeFile.Hash))
        {
            XAssert.IsNotNull(content);
        }
    }

    AssertWarningEventLogged(EventId.FileCombinerVersionIncremented);
}
/// <nodoc/>
public FrontEndPublicFacadeAndAstProvider(
    FrontEndEngineAbstraction engine,
    Logger logger,
    LoggingContext loggingContext,
    string frontEndEngineDirectory,
    bool logFrontEndStatistics,
    PathTable pathTable,
    IFrontEndStatistics statistics,
    CancellationToken cancellationToken)
{
    Contract.Requires(engine != null);
    // Fix: 'logger' is stored and used like the other required dependencies but was the
    // only reference parameter without a null-check; validate it for consistency.
    Contract.Requires(logger != null);
    Contract.Requires(loggingContext != null);
    Contract.Requires(!string.IsNullOrEmpty(frontEndEngineDirectory));
    Contract.Requires(pathTable != null);
    Contract.Requires(statistics != null);

    m_engine = engine;
    m_pathTable = pathTable;
    m_statistics = statistics;

    // Backing store for incremental DScript spec caching, rooted under the engine directory.
    m_fileCombiner = new FileCombiner(
        loggingContext,
        SpecCacheFullPath(frontEndEngineDirectory),
        FileCombiner.FileCombinerUsage.IncrementalScriptFrontEnd,
        logFrontEndStatistics);
    m_logger = logger;
    m_loggingContext = loggingContext;

    // Saves are serialized (MaxDegreeOfParallelism = 1) so file writes never overlap;
    // the queue observes the caller-provided cancellation token.
    var queueOptions = new ExecutionDataflowBlockOptions { MaxDegreeOfParallelism = 1, CancellationToken = cancellationToken };
    Action<FileContentWithHash> action = SaveFile;
    m_filesToSaveQueue = new ActionBlock<FileContentWithHash>(action, queueOptions);
}
public void FileGetsUpdated()
{
    FakeFile original = CreateFakeFile(R("foo", "bar1.txt"), "BeginningContent");

    // First run: store the original version of the file.
    using (FileCombiner writer = CreateFileCombiner(m_loggingContext, m_path, 1))
    {
        AddFile(writer, original);
    }

    // Second run: same path, new content/hash replaces the old entry.
    FakeFile replacement = CreateFakeFile(R("foo", "bar1.txt"), "UpdatedContent");
    using (FileCombiner writer = CreateFileCombiner(m_loggingContext, m_path, 1))
    {
        AddFile(writer, replacement);
    }

    // Third run: only the updated version should be retrievable.
    using (FileCombiner reader = CreateFileCombiner(m_loggingContext, m_path, 1))
    {
        using (MemoryStream stale = reader.RequestFile(original.Path, original.Hash))
        {
            XAssert.IsNull(stale);
        }

        using (MemoryStream fresh = reader.RequestFile(replacement.Path, replacement.Hash))
        {
            XAssert.IsNotNull(fresh);
            AssertContentMatches(fresh, replacement);
        }
    }
}
public void CompactFile()
{
    // Populate the combiner with ten small files.
    using (FileCombiner seeder = CreateFileCombiner(m_loggingContext, m_path, 1))
    {
        for (int i = 0; i < 10; i++)
        {
            AddFile(seeder, CreateFakeFile(R("foo", "bar") + i, i.ToString()));
        }
    }

    long initialLength = new FileInfo(m_path).Length;

    // File shouldn't shrink: the usage ratio threshold (.9) is not crossed.
    using (FileCombiner combiner = CreateFileCombiner(m_loggingContext, m_path, .9))
    {
        for (int i = 0; i < 2; i++)
        {
            FakeFile f = CreateFakeFile(R("foo", "bar") + i, i.ToString());
            combiner.RequestFile(f.Path, f.Hash);
        }
    }

    XAssert.AreEqual(initialLength, new FileInfo(m_path).Length);

    // File shouldn't shrink since no new content was added. Delay the shrink for a future run.
    using (FileCombiner combiner = CreateFileCombiner(m_loggingContext, m_path, .2))
    {
        combiner.RequestFile(R("foo", "bar1"), null);
        combiner.RequestFile(R("foo", "bar8"), null);
    }

    XAssert.AreEqual(initialLength, new FileInfo(m_path).Length);

    // File should shrink: low usage ratio plus newly-added content triggers compaction.
    using (FileCombiner combiner = CreateFileCombiner(m_loggingContext, m_path, .2))
    {
        combiner.RequestFile(R("foo", "bar1"), null);
        combiner.RequestFile(R("foo", "bar8"), null);
        AddFile(combiner, CreateFakeFile(R("foo", "bar10"), "10"));
    }

    XAssert.IsTrue(initialLength > new FileInfo(m_path).Length);

    // Request files from before, inbetween, and after the ones that got removed.
    using (FileCombiner combiner = CreateFileCombiner(m_loggingContext, m_path, .2))
    {
        AssertContentMatches(combiner.RequestFile(R("foo", "bar1"), null), "1");
        AssertContentMatches(combiner.RequestFile(R("foo", "bar8"), null), "8");
        AssertContentMatches(combiner.RequestFile(R("foo", "bar10"), null), "10");
    }
}
public void CreateAndReloadCombinedFile()
{
    int maxBackingBufferBytes = 3; // let's make sure we are going to span multiple chunks...

    FakeFile f1 = CreateFakeFile(R("foo", "bar1.txt"), "bar1.txt");
    FakeFile f2 = CreateFakeFile(R("foo", "bar2.txt"), "bar2.txt");
    FakeFile f3 = CreateFakeFile(R("foo", "bar3.txt"), "bar3.txt");

    // Every file must span several backing buffers for the chunking paths to be exercised.
    XAssert.IsTrue(f1.Content.Length > maxBackingBufferBytes * 3);
    XAssert.IsTrue(f2.Content.Length > maxBackingBufferBytes * 3);
    XAssert.IsTrue(f3.Content.Length > maxBackingBufferBytes * 3);

    Logger.FileCombinerStats stats = new Logger.FileCombinerStats();

    // Create a combiner and add the files (in reverse order, which should not matter).
    using (FileCombiner writer = CreateFileCombiner(m_loggingContext, m_path, 1))
    {
        writer.GetStatsRefForTest(ref stats);
        AddFile(writer, f3);
        AddFile(writer, f2);
        AddFile(writer, f1);
    }

    XAssert.AreEqual(3, stats.EndCount);
    XAssert.AreEqual(0, stats.CompactingTimeMs, "FileCombiner should not have been compacted");

    // Make sure the file is longer than the max backing buffer so we can test data being split across buffers.
    XAssert.IsTrue(new FileInfo(m_path).Length > maxBackingBufferBytes);

    // Reload the combiner with a tiny backing buffer.
    using (FileCombiner reader = CreateFileCombiner(m_loggingContext, m_path, 1, maxBackingBufferBytes))
    {
        // A file that exists comes back with the correct data.
        using (MemoryStream hit = reader.RequestFile(f1.Path, f1.Hash))
        {
            XAssert.IsNotNull(hit);
            AssertContentMatches(hit, f1);
        }

        // A known path with the wrong hash yields no content.
        using (MemoryStream wrongHash = reader.RequestFile(f1.Path, f2.Hash))
        {
            XAssert.IsNull(wrongHash);
        }

        // An unknown path yields no content.
        using (MemoryStream missing = reader.RequestFile(R("foo", "bar4"), f2.Hash))
        {
            XAssert.IsNull(missing);
        }
    }
}
[Trait("Category", "WindowsOSOnly")] // need to investigate if equivalent behavior on Unix
public void FileInUse()
{
    // Hold the backing file open with no sharing so the FileCombiner cannot open it.
    using (var file = File.Open(m_path, FileMode.CreateNew, FileAccess.ReadWrite, FileShare.None))
    {
        FakeFile fakeFile = CreateFakeFile(R("foo", "bar1.txt"), "bar1.txt");

        // The combiner degrades gracefully: requests return null and adds are no-ops.
        using (FileCombiner combiner = CreateFileCombiner(m_loggingContext, m_path))
        {
            XAssert.IsNull(combiner.RequestFile(fakeFile.Path, fakeFile.Hash));
            AddFile(combiner, fakeFile);
        }

        AssertWarningEventLogged(EventId.FileCombinerFailedToInitialize);
        AssertWarningEventLogged(EventId.FileCombinerFailedToCreate);
    }
}
public void CreateAndReloadCombinedFileAlignmentBug()
{
    FakeFile file = default(FakeFile);
    file.Path = R("foo", "bar1.txt");

    // Hand-craft content via BuildXLWriter so the payload has the exact byte layout
    // that used to expose the alignment bug.
    using (var contentStream = new MemoryStream())
    {
        using (var binaryWriter = new BuildXLWriter(debug: false, stream: contentStream, leaveOpen: true, logStats: false))
        {
            binaryWriter.WriteCompact(-1);
        }

        file.Content = contentStream.ToArray();
        file.Hash = ContentHashingUtilities.HashBytes(file.Content);
    }

    int maxBackingBufferBytes = 4 + 6 + 8 + file.Content.Length; // magic size that used to trigger a bug

    Logger.FileCombinerStats stats = new Logger.FileCombinerStats();

    // Create a combiner and add the crafted file.
    using (FileCombiner writer = CreateFileCombiner(m_loggingContext, m_path, 1))
    {
        writer.GetStatsRefForTest(ref stats);
        AddFile(writer, file);
    }

    XAssert.AreEqual(1, stats.EndCount);
    XAssert.AreEqual(0, stats.CompactingTimeMs, "FileCombiner should not have been compacted");

    // Reload the combiner with the magic buffer size and verify the content round-trips.
    using (FileCombiner reader = CreateFileCombiner(m_loggingContext, m_path, 1, maxBackingBufferBytes))
    {
        using (MemoryStream ms = reader.RequestFile(file.Path, file.Hash))
        {
            XAssert.IsNotNull(ms);
            AssertContentMatches(ms, file);
        }
    }
}
// Convenience wrapper: adds a FakeFile's content under its path/hash to the combiner.
private void AddFile(FileCombiner combiner, FakeFile fakeFile) =>
    combiner.AddFile(fakeFile.Content, fakeFile.Hash, fakeFile.Path);
/// <summary>
/// Creates an instance of <see cref="FrontEndEngineImplementation"/>.
/// </summary>
public FrontEndEngineImplementation(
    LoggingContext loggingContext,
    PathTable pathTable,
    IConfiguration configuration,
    IStartupConfiguration startupConfiguration,
    MountsTable mountsTable,
    InputTracker inputTracker,
    SnapshotCollector snapshotCollector,
    DirectoryTranslator directoryTranslator,
    Func<FileContentTable> getFileContentTable,
    int timerUpdatePeriod,
    bool isPartialReuse,
    IEnumerable<IFrontEnd> registeredFrontends)
{
    Contract.Requires(loggingContext != null);
    Contract.Requires(pathTable != null);
    Contract.Requires(configuration != null);
    Contract.Requires(startupConfiguration != null);
    Contract.Requires(mountsTable != null);
    Contract.Requires(inputTracker != null);
    Contract.Requires(getFileContentTable != null);
    Contract.Requires(registeredFrontends != null);
    // NOTE(review): snapshotCollector and directoryTranslator are deliberately not
    // null-checked — presumably optional; confirm against callers.

    m_loggingContext = loggingContext;
    PathTable = pathTable;
    m_mountsTable = mountsTable;
    m_inputTracker = inputTracker;
    m_getFileContentTable = getFileContentTable;
    m_isPartialReuse = isPartialReuse;
    m_frontendsEnvironmentRestriction = registeredFrontends.ToDictionary(frontEnd => frontEnd.Name, frontEnd => frontEnd.ShouldRestrictBuildParameters);
    m_snapshotCollector = snapshotCollector;
    GetTimerUpdatePeriod = timerUpdatePeriod;
    Layout = configuration.Layout;

    // The spec cache is only created when the configuration opts in.
    if (ShouldUseSpecCache(configuration))
    {
        m_specCache = new FileCombiner(
            loggingContext,
            Path.Combine(configuration.Layout.EngineCacheDirectory.ToString(PathTable), SpecCacheFileName),
            FileCombiner.FileCombinerUsage.SpecFileCache,
            configuration.FrontEnd.LogStatistics);
    }

    // Seed the tracked build parameters from the environment plus startup overrides;
    // values start out unmarked (false) until actually consumed.
    m_allBuildParameters = new ConcurrentDictionary<string, TrackedValue>(StringComparer.OrdinalIgnoreCase);
    foreach (var kvp in PopulateFromEnvironmentAndApplyOverrides(loggingContext, startupConfiguration.Properties).ToDictionary())
    {
        m_allBuildParameters.TryAdd(kvp.Key, new TrackedValue(kvp.Value, false));
    }

    m_localDiskContentStore = new LocalDiskContentStore(
        loggingContext,
        PathTable,
        m_getFileContentTable(),
        m_inputTracker.FileChangeTracker,
        directoryTranslator,
        vfsCasRoot: configuration.Cache.VfsCasRoot);

    // Bounded worker pool that materializes files to disk; each request completes its
    // own TaskCompletionSource so callers can await the result.
    m_localDiskContentStoreConcurrencyLimiter = new ActionBlockSlim<MaterializeFileRequest>(
        Environment.ProcessorCount,
        request =>
        {
            var requestCompletionSource = request.CompletionSource;
            try
            {
                var materializeResult = m_localDiskContentStore.TryMaterializeAsync(
                    request.Cache,
                    request.FileRealizationModes,
                    request.Path,
                    request.ContentHash,
                    trackPath: request.TrackPath,
                    recordPathInFileContentTable: request.RecordPathInFileContentTable).GetAwaiter().GetResult();

                requestCompletionSource.SetResult(materializeResult);
            }
            catch (OperationCanceledException)
            {
                // Fix: previously only TaskCanceledException was caught, so a plain
                // OperationCanceledException (the type cancellation tokens throw) was
                // surfaced as a fault instead of a cancellation. OperationCanceledException
                // is the base type and covers TaskCanceledException as well.
                requestCompletionSource.SetCanceled();
            }
            catch (Exception e)
            {
                requestCompletionSource.SetException(e);
            }
        });
}