public void SuccessForBatchObjectDownloadJob()
{
    using (ITracer tracer = CreateTracer())
    {
        // Arrange: the mock HTTP endpoint serves exactly one blob.
        MockGSDEnlistment mockEnlistment = new MockGSDEnlistment();
        MockHttpGitObjects remoteObjects = new MockHttpGitObjects(tracer, mockEnlistment);
        remoteObjects.AddBlobContent(FakeSha, FakeShaContents);
        MockPhysicalGitObjects localObjects = new MockPhysicalGitObjects(tracer, null, mockEnlistment, remoteObjects);

        BlockingCollection<string> downloadQueue = new BlockingCollection<string>();
        downloadQueue.Add(FakeSha);
        downloadQueue.CompleteAdding();

        // Act: run the stage to completion with one worker and a chunk size of 1.
        BatchObjectDownloadStage stage = new BatchObjectDownloadStage(1, 1, downloadQueue, new BlockingCollection<string>(), tracer, mockEnlistment, remoteObjects, localObjects);
        stage.Start();
        stage.WaitForCompletion();

        // Assert: the input queue was drained, and the blob surfaced as a
        // loose object rather than as a packfile.
        string leftover;
        downloadQueue.TryTake(out leftover).ShouldEqual(false);
        stage.AvailablePacks.Count.ShouldEqual(0);
        stage.AvailableObjects.Count.ShouldEqual(1);
        string downloadedSha = stage.AvailableObjects.Take();
        downloadedSha.ShouldEqual(FakeSha);
    }
}
public void OnlyRequestsObjectsNotDownloaded()
{
    string firstSha = new string('1', 40);
    string secondSha = new string('2', 40);

    BlockingCollection<string> toDownload = new BlockingCollection<string>();
    toDownload.Add(firstSha);
    toDownload.Add(secondSha);
    toDownload.CompleteAdding();

    int firstRequestCount = 0;
    int secondRequestCount = 0;

    // The first object resolves on every request. The second object returns
    // null (download failure) on its first request and resolves on the retry,
    // so the stage must re-request only the object that failed.
    Func<string, string> resolver = oid =>
    {
        if (oid.Equals(firstSha))
        {
            firstRequestCount++;
            return "Object1Contents";
        }

        if (oid.Equals(secondSha) && secondRequestCount++ == 1)
        {
            return "Object2Contents";
        }

        return null;
    };

    BlockingCollection<string> downloaded = new BlockingCollection<string>();
    MockTracer tracer = new MockTracer();
    MockGVFSEnlistment enlistment = new MockGVFSEnlistment();
    MockBatchHttpGitObjects httpObjects = new MockBatchHttpGitObjects(tracer, enlistment, resolver);
    BatchObjectDownloadStage stage = new BatchObjectDownloadStage(
        MaxParallel,
        ChunkSize,
        toDownload,
        downloaded,
        tracer,
        enlistment,
        httpObjects,
        new MockPhysicalGitObjects(tracer, null, enlistment, httpObjects));

    stage.Start();
    stage.WaitForCompletion();

    // Both objects made it through, and only the failed one was requested twice.
    toDownload.Count.ShouldEqual(0);
    downloaded.Count.ShouldEqual(2);
    downloaded.Take().ShouldEqual(firstSha);
    downloaded.Take().ShouldEqual(secondSha);
    firstRequestCount.ShouldEqual(1);
    secondRequestCount.ShouldEqual(2);
}
public void DoesNotExitEarlyIfInputTakesLongerThanChunkSizeToGetFirstBlob()
{
    string targetSha = new string('1', 40);
    int resolveCount = 0;
    Func<string, string> resolver = oid =>
    {
        if (oid.Equals(targetSha))
        {
            resolveCount++;
            return "Object1Contents";
        }

        return null;
    };

    BlockingCollection<string> pending = new BlockingCollection<string>();
    BlockingCollection<string> completed = new BlockingCollection<string>();
    MockTracer tracer = new MockTracer();
    MockGVFSEnlistment enlistment = new MockGVFSEnlistment();
    MockBatchHttpGitObjects httpObjects = new MockBatchHttpGitObjects(tracer, enlistment, resolver);
    BatchObjectDownloadStage stage = new BatchObjectDownloadStage(
        MaxParallel,
        1,
        pending,
        completed,
        tracer,
        enlistment,
        httpObjects,
        new MockPhysicalGitObjects(tracer, null, enlistment, httpObjects));

    stage.Start();

    // Feed the stage only after a delay longer than the chunk-fill window; a
    // correct stage keeps waiting for input rather than shutting down on an
    // empty first batch.
    Thread.Sleep(TimeSpan.FromMilliseconds(110));
    pending.Add(targetSha);
    pending.CompleteAdding();
    stage.WaitForCompletion();

    resolveCount.ShouldEqual(1);
}
public void ErrorsForBatchObjectDownloadJob()
{
    using (ITracer tracer = CreateTracer())
    {
        // Arrange: the mock HTTP endpoint has no objects, so the requested
        // SHA cannot be downloaded.
        MockGSDEnlistment mockEnlistment = new MockGSDEnlistment();
        MockHttpGitObjects remoteObjects = new MockHttpGitObjects(tracer, mockEnlistment);
        MockPhysicalGitObjects localObjects = new MockPhysicalGitObjects(tracer, null, mockEnlistment, remoteObjects);

        BlockingCollection<string> downloadQueue = new BlockingCollection<string>();
        downloadQueue.Add(FakeSha);
        downloadQueue.CompleteAdding();

        BatchObjectDownloadStage stage = new BatchObjectDownloadStage(1, 1, downloadQueue, new BlockingCollection<string>(), tracer, mockEnlistment, remoteObjects, localObjects);

        // Act
        stage.Start();
        stage.WaitForCompletion();

        // Assert: the input was consumed, but the failed download produced no packs.
        string leftover;
        downloadQueue.TryTake(out leftover).ShouldEqual(false);
        IndexPackRequest request;
        stage.AvailablePacks.TryTake(out request).ShouldEqual(false);
    }
}
/// <summary>
/// Prefetches the blobs reachable from <paramref name="branchOrCommit"/> that match this
/// fetcher's file/folder filters, optionally hydrating the matched files on disk, and
/// reports counts of matched, downloaded, and hydrated entries. Failures are accumulated
/// in <c>this.HasFailures</c> rather than thrown (except for bad arguments / unreachable refs).
/// </summary>
/// <param name="branchOrCommit">Branch name (when <paramref name="isBranch"/> is true) or commit id to fetch; must be non-empty.</param>
/// <param name="isBranch">True to resolve <paramref name="branchOrCommit"/> via info/refs; false to treat it as a commit id.</param>
/// <param name="hydrateFilesAfterDownload">True to also write matched files into the working directory.</param>
/// <param name="matchedBlobCount">Out: blobs that matched the filters (already-local + downloaded).</param>
/// <param name="downloadedBlobCount">Out: matched blobs that were missing locally and had to be downloaded.</param>
/// <param name="hydratedFileCount">Out: files written to disk (0 when hydration is disabled).</param>
/// <exception cref="FetchException">Thrown for an empty ref argument or when info/refs cannot be queried / does not contain the branch.</exception>
public void PrefetchWithStats(
    string branchOrCommit,
    bool isBranch,
    bool hydrateFilesAfterDownload,
    out int matchedBlobCount,
    out int downloadedBlobCount,
    out int hydratedFileCount)
{
    // Out params must be assigned on every path, including early returns below.
    matchedBlobCount = 0;
    downloadedBlobCount = 0;
    hydratedFileCount = 0;

    if (string.IsNullOrWhiteSpace(branchOrCommit))
    {
        throw new FetchException("Must specify branch or commit to fetch");
    }

    // Resolve a branch name to its tip commit via info/refs; a commit id is used as-is.
    GitRefs refs = null;
    string commitToFetch;
    if (isBranch)
    {
        refs = this.ObjectRequestor.QueryInfoRefs(branchOrCommit);
        if (refs == null)
        {
            throw new FetchException("Could not query info/refs from: {0}", this.Enlistment.RepoUrl);
        }
        else if (refs.Count == 0)
        {
            throw new FetchException("Could not find branch {0} in info/refs from: {1}", branchOrCommit, this.Enlistment.RepoUrl);
        }

        commitToFetch = refs.GetTipCommitId(branchOrCommit);
    }
    else
    {
        commitToFetch = branchOrCommit;
    }

    this.DownloadMissingCommit(commitToFetch, this.GitObjects);

    // For FastFetch only, examine the shallow file to determine the previous commit that had been fetched
    string shallowFile = Path.Combine(this.Enlistment.WorkingDirectoryBackingRoot, GVFSConstants.DotGit.Shallow);

    string previousCommit = null;

    // Use the shallow file to find a recent commit to diff against to try and reduce the number of SHAs to check.
    if (File.Exists(shallowFile))
    {
        previousCommit = File.ReadAllLines(shallowFile).Where(line => !string.IsNullOrWhiteSpace(line)).LastOrDefault();
        if (string.IsNullOrWhiteSpace(previousCommit))
        {
            // A shallow file with no usable SHA is treated as a failure, not silently ignored.
            this.Tracer.RelatedError("Shallow file exists, but contains no valid SHAs.");
            this.HasFailures = true;
            return;
        }
    }

    // Shared channel: every stage that makes a blob locally available adds its id here.
    BlockingCollection<string> availableBlobs = new BlockingCollection<string>();

    ////
    // First create the pipeline
    //
    //  diff ---> blobFinder ---> downloader ---> packIndexer
    //    |           |              |                 |
    //     ------------------------------------------------------> fileHydrator
    ////

    // diff
    //  Inputs:
    //      * files/folders
    //      * commit id
    //  Outputs:
    //      * RequiredBlobs (property): Blob ids required to satisfy desired paths
    //      * FileAddOperations (property): Repo-relative paths corresponding to those blob ids
    DiffHelper diff = new DiffHelper(this.Tracer, this.Enlistment, this.FileList, this.FolderList, includeSymLinks: false);

    // blobFinder
    //  Inputs:
    //      * requiredBlobs (in param): Blob ids from output of `diff`
    //  Outputs:
    //      * availableBlobs (out param): Locally available blob ids (shared between `blobFinder`, `downloader`, and `packIndexer`, all add blob ids to the list as they are locally available)
    //      * MissingBlobs (property): Blob ids that are missing and need to be downloaded
    //      * AvailableBlobs (property): Same as availableBlobs
    FindBlobsStage blobFinder = new FindBlobsStage(this.SearchThreadCount, diff.RequiredBlobs, availableBlobs, this.Tracer, this.Enlistment);

    // downloader
    //  Inputs:
    //      * missingBlobs (in param): Blob ids from output of `blobFinder`
    //  Outputs:
    //      * availableBlobs (out param): Loose objects that have completed downloading (shared between `blobFinder`, `downloader`, and `packIndexer`, all add blob ids to the list as they are locally available)
    //      * AvailableObjects (property): Same as availableBlobs
    //      * AvailablePacks (property): Packfiles that have completed downloading
    BatchObjectDownloadStage downloader = new BatchObjectDownloadStage(this.DownloadThreadCount, this.ChunkSize, blobFinder.MissingBlobs, availableBlobs, this.Tracer, this.Enlistment, this.ObjectRequestor, this.GitObjects);

    // packIndexer
    //  Inputs:
    //      * availablePacks (in param): Packfiles that have completed downloading from output of `downloader`
    //  Outputs:
    //      * availableBlobs (out param): Blobs that have completed downloading and indexing (shared between `blobFinder`, `downloader`, and `packIndexer`, all add blob ids to the list as they are locally available)
    IndexPackStage packIndexer = new IndexPackStage(this.IndexThreadCount, downloader.AvailablePacks, availableBlobs, this.Tracer, this.GitObjects);

    // fileHydrator
    //  Inputs:
    //      * workingDirectoryRoot (in param): the root of the working directory where hydration takes place
    //      * blobIdsToPaths (in param): paths of all blob ids that need to be hydrated from output of `diff`
    //      * availableBlobs (in param): blobs id that are available locally, from whatever source
    //  Outputs:
    //      * Hydrated files on disk.
    HydrateFilesStage fileHydrator = new HydrateFilesStage(Environment.ProcessorCount * 2, this.Enlistment.WorkingDirectoryRoot, diff.FileAddOperations, availableBlobs, this.Tracer);

    // All the stages of the pipeline are created and wired up, now kick them off in the proper sequence

    ThreadStart performDiff = () =>
    {
        diff.PerformDiff(previousCommit, commitToFetch);
        this.HasFailures |= diff.HasFailures;
    };

    if (hydrateFilesAfterDownload)
    {
        // Call synchronously to ensure that diff.FileAddOperations
        // is completely populated when fileHydrator starts
        performDiff();
    }
    else
    {
        new Thread(performDiff).Start();
    }

    blobFinder.Start();
    downloader.Start();

    if (hydrateFilesAfterDownload)
    {
        fileHydrator.Start();
    }

    // If indexing happens during searching, searching progressively gets slower, so wait on searching before indexing.
    blobFinder.WaitForCompletion();
    this.HasFailures |= blobFinder.HasFailures;

    packIndexer.Start();

    downloader.WaitForCompletion();
    this.HasFailures |= downloader.HasFailures;

    packIndexer.WaitForCompletion();
    this.HasFailures |= packIndexer.HasFailures;

    // Completing availableBlobs signals fileHydrator that no further blob ids are coming.
    availableBlobs.CompleteAdding();

    if (hydrateFilesAfterDownload)
    {
        fileHydrator.WaitForCompletion();
        this.HasFailures |= fileHydrator.HasFailures;
    }

    matchedBlobCount = blobFinder.AvailableBlobCount + blobFinder.MissingBlobCount;
    downloadedBlobCount = blobFinder.MissingBlobCount;
    hydratedFileCount = fileHydrator.ReadFileCount;

    if (!this.SkipConfigUpdate && !this.HasFailures)
    {
        this.UpdateRefs(branchOrCommit, isBranch, refs);

        if (isBranch)
        {
            this.HasFailures |= !this.UpdateRefSpec(this.Tracer, this.Enlistment, branchOrCommit, refs);
        }
    }

    if (!this.HasFailures)
    {
        this.SavePrefetchArgs(commitToFetch, hydrateFilesAfterDownload);
    }
}
/// <summary>
/// Fetches and checks out <paramref name="branchOrCommit"/>: downloads the commit if missing,
/// runs the FindBlobs/BatchDownload/IndexPack/Checkout pipeline under the index lock, then
/// (unless <c>SkipConfigUpdate</c> is set or a failure occurred) regenerates the git index and
/// updates refs/refspec/upstream. Pipeline failures accumulate in <c>this.HasFailures</c>.
/// </summary>
/// <param name="branchOrCommit">A specific branch to filter for, or null for all branches returned from info/refs</param>
/// <param name="isBranch">True to resolve <paramref name="branchOrCommit"/> via info/refs; false to treat it as a commit id.</param>
/// <exception cref="FetchException">Thrown for an empty ref argument or when info/refs cannot be queried / does not contain the branch.</exception>
public override void Prefetch(string branchOrCommit, bool isBranch)
{
    if (string.IsNullOrWhiteSpace(branchOrCommit))
    {
        throw new FetchException("Must specify branch or commit to fetch");
    }

    // Resolve a branch name to its tip commit via info/refs; a commit id is used as-is.
    GitRefs refs = null;
    string commitToFetch;
    if (isBranch)
    {
        refs = this.ObjectRequestor.QueryInfoRefs(branchOrCommit);
        if (refs == null)
        {
            throw new FetchException("Could not query info/refs from: {0}", this.Enlistment.RepoUrl);
        }
        else if (refs.Count == 0)
        {
            throw new FetchException("Could not find branch {0} in info/refs from: {1}", branchOrCommit, this.Enlistment.RepoUrl);
        }

        commitToFetch = refs.GetTipCommitId(branchOrCommit);
    }
    else
    {
        commitToFetch = branchOrCommit;
    }

    // Hold the index lock for the whole checkout + index-regeneration sequence.
    using (new IndexLock(this.Enlistment.EnlistmentRoot, this.Tracer))
    {
        this.DownloadMissingCommit(commitToFetch, this.GitObjects);

        // Configure pipeline
        // Checkout uses DiffHelper when running checkout.Start(), which we use instead of LsTreeHelper
        // Checkout diff output => FindBlobs => BatchDownload => IndexPack => Checkout available blobs
        CheckoutStage checkout = new CheckoutStage(this.checkoutThreadCount, this.FolderList, commitToFetch, this.Tracer, this.Enlistment, this.forceCheckout);
        FindBlobsStage blobFinder = new FindBlobsStage(this.SearchThreadCount, checkout.RequiredBlobs, checkout.AvailableBlobShas, this.Tracer, this.Enlistment);
        BatchObjectDownloadStage downloader = new BatchObjectDownloadStage(this.DownloadThreadCount, this.ChunkSize, blobFinder.MissingBlobs, checkout.AvailableBlobShas, this.Tracer, this.Enlistment, this.ObjectRequestor, this.GitObjects);
        IndexPackStage packIndexer = new IndexPackStage(this.IndexThreadCount, downloader.AvailablePacks, checkout.AvailableBlobShas, this.Tracer, this.GitObjects);

        // Start pipeline
        downloader.Start();
        blobFinder.Start();
        checkout.Start();

        blobFinder.WaitForCompletion();
        this.HasFailures |= blobFinder.HasFailures;

        // Delay indexing.  It interferes with FindMissingBlobs, and doesn't help Bootstrapping.
        packIndexer.Start();

        downloader.WaitForCompletion();
        this.HasFailures |= downloader.HasFailures;

        packIndexer.WaitForCompletion();
        this.HasFailures |= packIndexer.HasFailures;

        // Since pack indexer is the last to finish before checkout finishes, it should propagate completion.
        // This prevents availableObjects from completing before packIndexer can push its objects through this link.
        checkout.AvailableBlobShas.CompleteAdding();

        checkout.WaitForCompletion();
        this.HasFailures |= checkout.HasFailures;

        if (!this.SkipConfigUpdate && !this.HasFailures)
        {
            bool shouldSignIndex = !this.GetIsIndexSigningOff();

            // Update the index - note that this will take some time
            EventMetadata updateIndexMetadata = new EventMetadata();
            updateIndexMetadata.Add("IndexSigningIsOff", shouldSignIndex);
            using (ITracer activity = this.Tracer.StartActivity("UpdateIndex", EventLevel.Informational, Keywords.Telemetry, updateIndexMetadata))
            {
                Index sourceIndex = this.GetSourceIndex();

                // Build a fresh index for the fetched commit in a temporary location first,
                // so the real .git\index is only replaced once generation succeeds.
                GitIndexGenerator indexGen = new GitIndexGenerator(this.Tracer, this.Enlistment, shouldSignIndex);
                indexGen.CreateFromRef(commitToFetch, indexVersion: 2, isFinal: false);
                this.HasFailures |= indexGen.HasFailures;

                if (!indexGen.HasFailures)
                {
                    Index newIndex = new Index(
                        this.Enlistment.EnlistmentRoot,
                        this.Tracer,
                        indexGen.TemporaryIndexFilePath,
                        readOnly: false);

                    // Update from disk only if the caller says it is ok via command line
                    // or if we updated the whole tree and know that all files are up to date
                    bool allowIndexMetadataUpdateFromWorkingTree = this.allowIndexMetadataUpdateFromWorkingTree || checkout.UpdatedWholeTree;
                    newIndex.UpdateFileSizesAndTimes(checkout.AddedOrEditedLocalFiles, allowIndexMetadataUpdateFromWorkingTree, shouldSignIndex, sourceIndex);

                    // All the slow stuff is over, so we will now move the final index into .git\index, shortly followed by
                    // updating the ref files and releasing index.lock.
                    string indexPath = Path.Combine(this.Enlistment.DotGitRoot, GVFSConstants.DotGit.IndexName);
                    this.Tracer.RelatedEvent(EventLevel.Informational, "MoveUpdatedIndexToFinalLocation", new EventMetadata() { { "UpdatedIndex", indexGen.TemporaryIndexFilePath }, { "Index", indexPath } });
                    File.Delete(indexPath);
                    File.Move(indexGen.TemporaryIndexFilePath, indexPath);
                    newIndex.WriteFastFetchIndexVersionMarker();
                }
            }

            if (!this.HasFailures)
            {
                this.UpdateRefs(branchOrCommit, isBranch, refs);

                if (isBranch)
                {
                    // Update the refspec before setting the upstream or git will complain the remote branch doesn't exist
                    this.HasFailures |= !this.UpdateRefSpec(this.Tracer, this.Enlistment, branchOrCommit, refs);

                    using (ITracer activity = this.Tracer.StartActivity("SetUpstream", EventLevel.Informational))
                    {
                        string remoteBranch = refs.GetBranchRefPairs().Single().Key;
                        GitProcess git = new GitProcess(this.Enlistment);
                        GitProcess.Result result = git.SetUpstream(branchOrCommit, remoteBranch);
                        if (result.ExitCodeIsFailure)
                        {
                            activity.RelatedError("Could not set upstream for {0} to {1}: {2}", branchOrCommit, remoteBranch, result.Errors);
                            this.HasFailures = true;
                        }
                    }
                }
            }
        }
    }
}