/// <summary>
/// Captures how a head diverges from a base: the revsets on each side plus the
/// newest changeset of each side (either of which may be null for empty sides).
/// </summary>
public HgDivergenceInfo(HgRevset ahead, HgChangeset latestHeadChangeset, HgRevset behind, HgChangeset latestBaseChangeset)
{
    Ahead = ahead;
    Behind = behind;
    LatestHeadChangeset = latestHeadChangeset;
    LatestBaseChangeset = latestBaseChangeset;
}
/// <summary>
/// Builds the bundle group for the manifest revlog, restricted to manifest
/// entries whose link revision belongs to <paramref name="hgRevset"/>.
/// </summary>
private IEnumerable<HgChunk> BuildManifestBundleGroup(HgRepository hgRepository, HgRevset hgRevset)
{
    var linkedEntries = hgRepository.Manifest.Revlog.Entries.Where(entry => hgRevset.Contains(entry.LinkRevision));
    return BuildBundleGroup(hgRepository, hgRepository.Manifest.Revlog, new HgRevset(linkedEntries));
}
/// <summary>
/// Returns all tags in the repository, maintaining an on-disk tags cache.
/// Fast path: if the cached head/revision still matches the changelog tip, the
/// in-memory <c>tagCache</c> is reused. Otherwise only heads not seen before are
/// scanned for ".hgtags" content and the cache is rewritten.
/// </summary>
private IList<HgTag> GetTagsInternal() {
    // Empty repository: nothing can be tagged.
    if(repository.Changelog.Tip == null) return new List<HgTag>();
    ReadTagsCache();
    var tip = repository.Changelog[repository.Changelog.Revlog.Entries.Last().Revision];
    // Cache hit: the first cached head/revision pair still identifies the current tip,
    // so the previously computed tags are valid. Tags pointing at nodes no longer
    // present in the changelog are filtered out.
    if(headNodeCache.Count > 0 && headNodeCache[0] == tip.Metadata.NodeID && revisionCache.Count > 0 && revisionCache[0] == tip.Metadata.Revision) {
        var tags = tagCache.
            Where(kvp => repository.Changelog.Revlog.GetEntry(kvp.Value.Item1) != null).
            Select(kvp => new HgTag(kvp.Key, kvp.Value.Item1)).
            ToList();
        return tags;
    } // if
    var heads = repository.GetHeads();
    if(heads.Count == 0) return new List<HgTag>();
    var hgFilelog = repository.GetFilelog(new HgPath(".hgtags"));
    // No ".hgtags" filelog at all: remember the current heads so the next call can
    // short-circuit, and report no tags.
    if(hgFilelog == null || hgFilelog.Revlog.Entries.Count == 0) {
        headNodeCache.Clear();
        headNodeCache.AddRange(heads.Select(h => h.NodeID));
        return new List<HgTag>();
    } // if
    // Only heads that appeared since the cache was written need to be re-scanned.
    var newHeads = new HgRevset(heads.Where(h => !headNodeCache.Contains(h.NodeID)).ToList());
    RefreshTagsInternal(newHeads);
    // Multiple heads can share the same ".hgtags" filelog node; read each node once.
    var visitedFileNodes = new HashSet<HgNodeID>();
    foreach(var head in newHeads.OldestToNewest) {
        var fileNode = filelogNodeCache.ContainsKey(head.NodeID) ? filelogNodeCache[head.NodeID] : HgNodeID.Null;
        if(fileNode != HgNodeID.Null && !visitedFileNodes.Contains(fileNode)) {
            visitedFileNodes.Add(fileNode);
            var hgtags = repository.GetFile(new HgManifestFileEntry(new HgPath(".hgtags"), fileNode));
            if(hgtags == null) continue;
            // Parse this head's ".hgtags" content and merge it into tagCache.
            // Tuple layout: Item1 = current node for the tag, Item2 = node history.
            using(var streamReader = new StreamReader(new MemoryStream(hgtags.Data), repository.Encoder.Utf8)) {
                IDictionary<string, Tuple<HgNodeID, IList<HgNodeID>>> fileTags = new Dictionary<string, Tuple<HgNodeID, IList<HgNodeID>>>();
                ReadTags(streamReader.ReadLine, fileTags);
                UpdateTags(fileTags);
            } // using
        } // if
    } // foreach
    // Persist the refreshed cache before materializing the result.
    WriteTagsCache();
    return tagCache.Select(t => new HgTag(t.Key, t.Value.Item1)).ToList();
}
/// <summary>
/// Bundles every tracked path in deterministic (sorted) order, skipping paths
/// for which no bundle file could be produced (e.g. missing filelog).
/// </summary>
private IEnumerable<HgBundleFile> BuildBundleFiles(HgRepository hgRepository, HgRevset hgRevset, HashSet<string> paths)
{
    foreach(var path in paths.OrderBy(p => p))
    {
        var file = BuildBundleFile(hgRepository, hgRevset, path);
        if(file == null) continue;
        yield return file;
    } // foreach
}
/// <summary>
/// Builds the bundle file for a single path, or returns null when the path has
/// no filelog in this repository.
/// </summary>
// TODO: Do not bundle files without chunks
private HgBundleFile BuildBundleFile(HgRepository hgRepository, HgRevset hgRevset, string path)
{
    log.Debug("bundling {0}", path);

    var hgPath = new HgPath(path);
    var hgFilelog = hgRepository.GetFilelog(hgPath);
    if(hgFilelog == null)
        return null;

    // Restrict to the filelog revisions reachable from the requested changesets.
    var linkedEntries = hgFilelog.Revlog.Entries.Where(entry => hgRevset.Contains(entry.LinkRevision));
    var group = BuildBundleGroup(hgRepository, hgFilelog.Revlog, new HgRevset(linkedEntries));
    return new HgBundleFile(hgPath, group);
}
/// <summary>
/// Builds a complete bundle (changelog + manifests + filelogs) for the given
/// revset. File paths are collected as a side effect of bundling the changelog.
/// </summary>
public HgBundle BuildBundle(HgRepository hgRepository, HgRevset hgRevset)
{
    log.Debug("bundling changelog");
    var paths = new HashSet<string>();
    var changelog = BuildChangesetBundleGroup(hgRepository, hgRevset, hc => paths.AddRange(hc.Files));

    log.Debug("bundling manifests");
    var manifest = BuildManifestBundleGroup(hgRepository, hgRevset);

    //
    // "paths" now names every file that was ever tracked by the bundled changesets
    log.Debug("bundling filelogs");
    var files = BuildBundleFiles(hgRepository, hgRevset, paths);

    return new HgBundle(changelog, manifest, files);
}
/// <summary>
/// Builds the bundle that carries everything reachable from <paramref name="heads"/>
/// but not already present in <paramref name="common"/>.
/// </summary>
public HgBundle GetBundle(HgRevset common, HgRevset heads)
{
    var revset = new HgRevsetManager().GetRevset(this, common, heads);
    var builder = new HgBundleBuilder(new HgFileSystem(), Encoder);
    return builder.BuildBundle(this, revset);
}
/// <summary>
/// Yields, for each changeset in <paramref name="hgRevset"/> (oldest first), the
/// per-file details (Added/Modified/Removed) computed by diffing its manifest
/// against its first parent's manifest, filtered to the files the changeset itself
/// lists as touched.
/// </summary>
public IEnumerable<HgChangesetDetails> GetChangesetsDetails(HgRevset hgRevset) {
    //
    // We need to preload all changesets (including both parents of each) and manifests
    var changesets = GetChangesets(
        hgRevset.
        Select(e => Changelog.Revlog[e.Revision]).
        SelectMany(e => new[] { e.Revision, e.FirstParentRevision, e.SecondParentRevision }).
        Where(e => e != uint.MaxValue).        // uint.MaxValue marks "no parent"
        Distinct()).
        ToDictionary(c => c.Metadata.NodeID);
    var manifests = GetManifestEntries(
        new HgRevset(
            changesets.Values.
            Select(c => c.ManifestNodeID).
            Select(e => Manifest.Revlog.GetEntry(e)))).
        ToDictionary(m => m.Metadata.NodeID);
    foreach(var entry in hgRevset.OldestToNewest) {
        var c = changesets[entry.NodeID];
        //
        // A null manifest is somehow possible for imported repos; skip those changesets
        if(c.ManifestNodeID == HgNodeID.Null) continue;
        var m = manifests[c.ManifestNodeID];
        IList<HgChangesetFileDetails> files;
        //
        // For a very first changeset (no parent) we treat all files as Added
        if(c.Metadata.FirstParentRevisionNodeID == HgNodeID.Null) {
            files = m.Files.
                Select(mfe => new HgChangesetFileDetails(mfe.Path, HgChangesetFileOperation.Added, null)).
                ToList();
        } // if
        else {
            // All path sets are compared with the leading '/' stripped.
            Func<HgManifestFileEntry, string> fileName = (mfe) => mfe.Path.FullPath.TrimStart('/');
            var cf = new HashSet<string>(c.Files.Select(f => f.TrimStart('/')).ToList()); // files the changeset claims to touch
            var f_ = new HashSet<string>(m.Files.Select(fileName));                       // files in this manifest
            var p1 = changesets[c.Metadata.FirstParentRevisionNodeID];
            var mp1 = manifests[p1.ManifestNodeID];
            var fp1 = new HashSet<string>(mp1.Files.Select(fileName));                    // files in the parent manifest
            // NOTE(review): the original also built an unused "parentManifestFiles" local
            // guarded by "mp1 == null" — dead code, since mp1 comes from a dictionary
            // indexer (throws when missing) and was already dereferenced above. Removed.
            var addedFiles = f_.Except(fp1).Intersect(cf).ToList();
            var removedFiles = fp1.Except(f_).Intersect(cf).ToList();
            // Present in both manifests but pointing at a different filelog node => modified.
            var modifiedFiles = f_.Intersect(fp1).Intersect(cf).Where(f => m[f].FilelogNodeID != mp1[f].FilelogNodeID).ToList();
            var removed = removedFiles.Select(Removed);
            var added = addedFiles.Select(Added);
            var modified = modifiedFiles.Select(Modified);
            //
            // Prepare enough room to avoid reallocations later on
            files = new List<HgChangesetFileDetails>(f_.Count * 2);
            files.AddRange(added);
            files.AddRange(modified);
            files.AddRange(removed);
        } // else
        var changesetDetails = new HgChangesetDetails(c, files);
        yield return changesetDetails;
    } // foreach
}
/// <summary>
/// Computes how <paramref name="headNodeID"/> diverges from <paramref name="baseNodeID"/>:
/// changesets only reachable from the head ("ahead") and only from the base ("behind").
/// <see cref="HgNodeID.Null"/> on either side is treated as an empty ancestry.
/// </summary>
public HgDivergenceInfo GetDivergence(HgNodeID baseNodeID, HgNodeID headNodeID)
{
    var revsetManager = new HgRevsetManager();

    var @base = baseNodeID == HgNodeID.Null ? null : Changelog[baseNodeID];
    var head = headNodeID == HgNodeID.Null ? null : Changelog[headNodeID];

    var baseAncestors = @base != null ? revsetManager.GetAncestors(this, new HgRevset(@base.Metadata)) : new HgRevset();
    var headAncestors = head != null ? revsetManager.GetAncestors(this, new HgRevset(head.Metadata)) : new HgRevset();

    var ahead = headAncestors - baseAncestors;
    var behind = baseAncestors - headAncestors;

    //
    // A lone branch-closing commit that touches no files is not real divergence — drop it.
    if(ahead.Count == 1)
    {
        var only = Changelog[ahead.Single().NodeID];
        if(only.Branch.Closed && only.Files.Count == 0)
            ahead = new HgRevset();
    } // if

    return new HgDivergenceInfo(ahead, head, behind, @base);
}
/// <summary>
/// Materializes the changesets identified by the revset's revision numbers.
/// </summary>
public IList<HgChangeset> GetChangesets(HgRevset hgRevset)
{
    var revisions = hgRevset.Select(entry => entry.Revision);
    return GetChangesets(revisions).ToList();
}
/// <summary>
/// Reads and parses the manifest entries for the given revset. The result is
/// materialized eagerly so the underlying revlog reader is not re-entered lazily.
/// </summary>
public IEnumerable<HgManifestEntry> GetManifestEntries(HgRevset hgRevset)
{
    var reader = new HgRevlogReader(Manifest.Revlog, fileSystem);
    var entryData = reader.ReadRevlogEntries(hgRevset);
    return new HgManifestReader(Encoder).ReadManifestEntries(entryData).ToList();
}
/// <summary>
/// Returns the topological heads of the changelog: every node without children
/// in the revlog graph.
/// </summary>
public HgRevset GetTopologicalHeads()
{
    var graph = new HgRevlogGraph();
    graph.Add(Changelog.Revlog.Entries);

    //
    // FIXME: Since we know that HgRevlogReader reuses cached revision data in forward-only
    //        manner, we need to order heads by revision ascending to ensure that this
    //        cache will actually get hit.
    var headEntries = graph.Nodes.
        Where(node => node.Children.Count == 0).
        OrderBy(node => node.Revision).
        Select(node => new HgRevsetEntry(node.Revision, node.NodeID)).
        ToList();

    return new HgRevset(headEntries);
}
/// <summary>
/// Returns all branch heads across the branchmap, newest revision first.
/// </summary>
public HgRevset GetHeads()
{
    var allHeads = GetBranchmap().SelectMany(branch => branch.Heads);
    return new HgRevset(allHeads.OrderByDescending(head => head.Revision));
}
/// <summary>
/// Streams the standin reader's entries, resolving each to its actual revlog
/// entry data lazily, one at a time.
/// </summary>
public IEnumerable<HgRevlogEntryData> ReadRevlogEntries(HgRevset hgRevset)
{
    foreach(var standinEntry in standinRevlogReader.ReadRevlogEntries(hgRevset))
    {
        yield return GetRevlogEntryData(standinEntry);
    } // foreach
}
/// <summary>
/// Builds the changelog bundle group, invoking <paramref name="callback"/> with the
/// parsed changeset for every revlog entry that gets bundled.
/// </summary>
private IEnumerable<HgChunk> BuildChangesetBundleGroup(HgRepository hgRepository, HgRevset hgRevset, Action<HgChangeset> callback)
{
    var changelogReader = new HgChangelogReader(hgEncoder);
    return BuildBundleGroup(
        hgRepository,
        hgRepository.Changelog.Revlog,
        hgRevset,
        entryData => callback(changelogReader.ReadChangeset(entryData)));
}
/// <summary>
/// Records, for each head in <paramref name="headNodes"/>, the filelog node of its
/// ".hgtags" manifest entry into <c>filelogNodeCache</c>. Heads whose manifest does
/// not track ".hgtags" are skipped.
/// </summary>
private void RefreshTagsInternal(HgRevset headNodes) {
    foreach(var hgHead in headNodes) {
        var changeset = repository.Changelog[hgHead.Revision];
        var hgManifest = repository.Manifest[changeset.ManifestNodeID];
        var hgManifestFileEntry = hgManifest.GetFile(new HgPath(".hgtags"));
        // Heads without a ".hgtags" file carry no tag information.
        if(hgManifestFileEntry == null) continue;
        filelogNodeCache[hgHead.NodeID] = hgManifestFileEntry.FilelogNodeID;
    } // foreach
}
/// <summary>
/// Streams bundle chunks for the requested revisions of a revlog. Revisions are
/// grouped into consecutive runs; within a run the raw stored delta can be reused
/// directly, otherwise a fresh BDiff against the previous full text is emitted.
/// The running full text ("prev") is maintained by applying each delta via MPatch.
/// </summary>
/// <param name="callback">Optional hook invoked with each entry's reconstructed full text.</param>
private IEnumerable<HgChunk> BuildBundleGroup(HgRepository hgRepository, HgRevlog hgRevlog, HgRevset hgRevset, Action<HgRevlogEntryData> callback = null) {
    var hgRevlogReader = new HgRevlogReader(hgRevlog, fileSystem);
    //
    // Group sorted revisions into runs of consecutive numbers: items whose
    // (value - index) is equal belong to the same run.
    // See http://stackoverflow.com/a/10359273/60188. Pure magic
    var revisionChunks = hgRevset.
        Select(hre => hre.Revision).
        OrderBy(r => r).
        Select((r, i) => new { r, i }).
        GroupBy(x => x.r - x.i).
        Select(x => x.Select(xx => xx.r)).
        Select(c => c.ToArray()).
        ToArray();
    if(revisionChunks.Length == 0) yield break;
    byte[] prev = null;            // full text of the previously processed revision
    uint prevRev = uint.MaxValue;  // revision number of that previous entry (MaxValue = none yet)
    var prediff = false;           // true when the first chunk must be diffed against the first parent's text
    var hgRevlogEntry = hgRevlog[revisionChunks[0][0]];
    // If the very first bundled revision has a parent, seed "prev" with the parent's
    // full text so the first chunk becomes a delta against it.
    if(hgRevlogEntry.FirstParentRevisionNodeID != HgNodeID.Null) {
        prev = hgRevlogReader.ReadRevlogEntry(hgRevlogEntry.FirstParentRevision).Data;
        prediff = true;
    }
    foreach(var revisionChunk in revisionChunks) {
        foreach(var revision in revisionChunk) {
            hgRevlogEntry = hgRevlog[revision];
            var hgChangeset = hgRepository.Changelog.Revlog[hgRevlogEntry.LinkRevision];
            byte[] data = null;
            // Recompute a delta from full texts when: there is no previous text, the
            // entry starts a new delta base, an initial parent diff is pending, or
            // this revision does not directly follow the previous one.
            if(prev == null || hgRevlogEntry.BaseRevision == hgRevlogEntry.Revision || prediff || (prevRev != UInt32.MaxValue && prevRev + 1 != revision)) {
                var hgRevlogEntryData = hgRevlogReader.ReadRevlogEntry(revision);
                if(prev == null) {
                    //
                    // Trivial case: encode the full text as a single replacement hunk
                    // (start=0, end=0, length, data) — 12 bytes of header.
                    var buffer = new byte[hgRevlogEntryData.Data.Length + 12];
                    using(var stream = new MemoryStream(buffer))
                    using(var binaryWriter = new BigEndianBinaryWriter(stream)) {
                        binaryWriter.Write((uint)0);
                        binaryWriter.Write((uint)0);
                        binaryWriter.Write((uint)hgRevlogEntryData.Data.Length);
                        binaryWriter.Write(hgRevlogEntryData.Data);
                    } // using
                    data = buffer;
                } // if
                else {
                    data = BDiff.Diff(prev, hgRevlogEntryData.Data);
                    if(prediff) prediff = false;
                } // else
                prev = hgRevlogEntryData.Data;
            } // if
            else {
                // Consecutive revision: the stored delta is already relative to "prev",
                // so it can be shipped raw; patch it in to keep "prev" current.
                data = hgRevlogReader.ReadRevlogEntryDataRaw(revision);
                prev = MPatch.Patch(prev, new List<byte[]> { data });
            } // else
            if(callback != null) callback(new HgRevlogEntryData(hgRevlogEntry, prev));
            // Verify the reconstructed full text hashes to the entry's recorded node id.
            if(performIntegrityChecks) {
                var expectedNodeID = GetRevlogEntryDataNodeID(hgRevlogEntry.FirstParentRevisionNodeID, hgRevlogEntry.SecondParentRevisionNodeID, prev);
                if(expectedNodeID != hgRevlogEntry.NodeID) {
                    // TODO: Exception class
                    throw new ApplicationException("integrity violation for " + hgRevlogEntry.NodeID.Short);
                } // if
            } // if
            var hgChunk = new HgChunk(hgRevlogEntry.NodeID, hgRevlogEntry.FirstParentRevisionNodeID, hgRevlogEntry.SecondParentRevisionNodeID, hgChangeset.NodeID, data);
            yield return hgChunk;
            prevRev = revision;
        } // foreach
    } // foreach
}
/// <summary>
/// Associates a named branch with the revset of its current heads.
/// </summary>
public HgBranchHeads(string branch, HgRevset heads)
{
    Heads = heads;
    Branch = branch;
}