private void DoUpdates()
{
    int cnt = pendingUpdates.GetAndSet(0);
    if (0 < cnt)
    {
        pm.Update(cnt);
    }
}
/// <exception cref="NGit.Errors.MissingObjectException"></exception> /// <exception cref="NGit.Errors.IncorrectObjectTypeException"></exception> /// <exception cref="System.IO.IOException"></exception> public override bool Include(TreeWalk walker) { count++; if (count % stepSize == 0) { if (count <= total) { monitor.Update(stepSize); } if (monitor.IsCancelled()) { throw StopWalkException.INSTANCE; } } return(true); }
/// <summary>Execute this batch update.</summary>
/// <remarks>
/// Execute this batch update.
/// <p>
/// The default implementation of this method performs a sequential reference
/// update over each reference.
/// </remarks>
/// <param name="walk">
/// a RevWalk to parse tags in case the storage system wants to
/// store them pre-peeled, a common performance optimization.
/// </param>
/// <param name="update">progress monitor to receive update status on.</param>
/// <exception cref="System.IO.IOException">
/// the database is unable to accept the update. Individual
/// command status must be tested to determine if there is a
/// partial failure, or a total failure.
/// </exception>
public virtual void Execute(RevWalk walk, ProgressMonitor update)
{
    update.BeginTask(JGitText.Get().updatingReferences, commands.Count);
    foreach (ReceiveCommand cmd in commands)
    {
        try
        {
            update.Update(1);
            if (cmd.GetResult() == ReceiveCommand.Result.NOT_ATTEMPTED)
            {
                cmd.UpdateType(walk);
                RefUpdate ru = NewUpdate(cmd);
                switch (cmd.GetType())
                {
                    case ReceiveCommand.Type.DELETE:
                    {
                        cmd.SetResult(ru.Delete(walk));
                        continue;
                    }
                    case ReceiveCommand.Type.CREATE:
                    case ReceiveCommand.Type.UPDATE:
                    case ReceiveCommand.Type.UPDATE_NONFASTFORWARD:
                    {
                        cmd.SetResult(ru.Update(walk));
                        continue;
                    }
                }
            }
        }
        catch (IOException err)
        {
            cmd.SetResult(ReceiveCommand.Result.REJECTED_OTHER_REASON,
                MessageFormat.Format(JGitText.Get().lockError, err.Message));
        }
    }
    update.EndTask();
}
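// Usage sketch for Execute(). Nothing below comes from the method above: the
// BatchRefUpdate factory (RefDatabase.NewBatchUpdate()), the accessor names,
// and the ref name/ids are assumptions based on the JGit API this code mirrors.
using NGit;
using NGit.Revwalk;
using NGit.Transport;

static void RunBatch(Repository repo, ObjectId oldId, ObjectId newId)
{
    BatchRefUpdate batch = repo.RefDatabase.NewBatchUpdate();
    batch.AddCommand(new ReceiveCommand(oldId, newId, "refs/heads/topic"));
    RevWalk walk = new RevWalk(repo);
    try
    {
        // Execute() reports per-command failures through each command's
        // Result rather than by throwing, so statuses are checked afterwards.
        batch.Execute(walk, NullProgressMonitor.INSTANCE);
    }
    finally
    {
        walk.Release();
    }
    foreach (ReceiveCommand cmd in batch.GetCommands())
    {
        System.Console.WriteLine(cmd.GetRefName() + ": " + cmd.GetResult());
    }
}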
/// <exception cref="System.IO.IOException"></exception> public override void WriteTo(OutputStream os, ProgressMonitor pm) { if (onDiskFile == null) { base.WriteTo(os, pm); return; } if (pm == null) { pm = NullProgressMonitor.INSTANCE; } FileInputStream @in = new FileInputStream(onDiskFile); try { int cnt; byte[] buf = new byte[TemporaryBuffer.Block.SZ]; while ((cnt = @in.Read(buf)) >= 0) { os.Write(buf, 0, cnt); pm.Update(cnt / 1024); } } finally { @in.Close(); } }
/// <summary>Send this buffer to an output stream.</summary>
/// <remarks>
/// Send this buffer to an output stream.
/// <p>
/// This method may only be invoked after
/// <see cref="Close()">Close()</see>
/// has completed normally, to ensure all data is completely transferred.
/// </remarks>
/// <param name="os">stream to send this buffer's complete content to.</param>
/// <param name="pm">
/// if not null, progress updates are sent here. The caller should
/// initialize the task and set the number of work units to <code>
/// <see cref="Length()">Length()</see>
/// /1024</code>.
/// </param>
/// <exception cref="System.IO.IOException">
/// an error occurred reading from a temporary file on the local
/// system, or writing to the output stream.
/// </exception>
public virtual void WriteTo(OutputStream os, ProgressMonitor pm)
{
    if (pm == null)
    {
        pm = NullProgressMonitor.INSTANCE;
    }
    foreach (TemporaryBuffer.Block b in blocks)
    {
        os.Write(b.buffer, 0, b.count);
        pm.Update(b.count / 1024);
    }
}
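// Caller-side sketch of the contract documented above: WriteTo() only calls
// Update(), so the caller owns BeginTask/EndTask and sizes the task in KiB.
// The task title and variable names are illustrative, not from the source.
static void Send(TemporaryBuffer buffer, OutputStream os, ProgressMonitor pm)
{
    // The buffer must have completed Close() before WriteTo() is invoked.
    pm.BeginTask("Writing buffer", (int)(buffer.Length() / 1024));
    buffer.WriteTo(os, pm);
    pm.EndTask();
}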
/// <exception cref="System.IO.IOException"></exception> /// <exception cref="NGit.Errors.MissingObjectException"></exception> public void SelectObjectRepresentation(PackWriter packer, ProgressMonitor monitor , Iterable<ObjectToPack> objects) { foreach (ObjectToPack otp in objects) { db.SelectObjectRepresentation(packer, otp, this); monitor.Update(1); } }
public static MergeCommandResult MergeTrees(NGit.ProgressMonitor monitor, NGit.Repository repo,
    RevCommit srcBase, RevCommit srcCommit, string sourceDisplayName, bool commitResult)
{
    RevCommit newHead = null;
    RevWalk revWalk = new RevWalk(repo);
    try
    {
        // Get the head commit.
        Ref headRef = repo.GetRef(Constants.HEAD);
        if (headRef == null)
        {
            throw new NoHeadException(JGitText.Get().commitOnRepoWithoutHEADCurrentlyNotSupported);
        }
        RevCommit headCommit = revWalk.ParseCommit(headRef.GetObjectId());
        ResolveMerger merger = (ResolveMerger)MergeStrategy.RESOLVE.NewMerger(repo);
        merger.SetWorkingTreeIterator(new FileTreeIterator(repo));
        merger.SetBase(srcBase);
        merger.SetCommitNames(new string[] { "BASE", "HEAD", sourceDisplayName });
        bool noProblems = merger.Merge(headCommit, srcCommit);
        IDictionary<string, MergeResult<NGit.Diff.Sequence>> lowLevelResults = merger.GetMergeResults();
        IList<string> modifiedFiles = merger.GetModifiedFiles();
        IDictionary<string, ResolveMerger.MergeFailureReason> failingPaths = merger.GetFailingPaths();
        if (monitor != null)
        {
            monitor.Update(50);
        }
        if (noProblems)
        {
            if (modifiedFiles != null && modifiedFiles.Count == 0)
            {
                return new MergeCommandResult(headCommit, null, new ObjectId[] { headCommit.Id,
                    srcCommit.Id }, MergeStatus.ALREADY_UP_TO_DATE, MergeStrategy.RESOLVE, null, null);
            }
            DirCacheCheckout dco = new DirCacheCheckout(repo, headCommit.Tree,
                repo.LockDirCache(), merger.GetResultTreeId());
            dco.SetFailOnConflict(true);
            dco.Checkout();
            if (commitResult)
            {
                newHead = new NGit.Api.Git(repo).Commit().SetMessage(srcCommit.GetFullMessage())
                    .SetAuthor(srcCommit.GetAuthorIdent()).Call();
                return new MergeCommandResult(newHead.Id, null, new ObjectId[] { headCommit.Id,
                    srcCommit.Id }, MergeStatus.MERGED, MergeStrategy.RESOLVE, null, null);
            }
            else
            {
                return new MergeCommandResult(headCommit, null, new ObjectId[] { headCommit.Id,
                    srcCommit.Id }, MergeStatus.MERGED, MergeStrategy.RESOLVE, null, null);
            }
        }
        else
        {
            if (failingPaths != null)
            {
                return new MergeCommandResult(null, merger.GetBaseCommit(0, 1), new ObjectId[] {
                    headCommit.Id, srcCommit.Id }, MergeStatus.FAILED, MergeStrategy.RESOLVE,
                    lowLevelResults, failingPaths, null);
            }
            else
            {
                return new MergeCommandResult(null, merger.GetBaseCommit(0, 1), new ObjectId[] {
                    headCommit.Id, srcCommit.Id }, MergeStatus.CONFLICTING, MergeStrategy.RESOLVE,
                    lowLevelResults, null);
            }
        }
    }
    finally
    {
        revWalk.Release();
    }
}
/// <exception cref="System.IO.IOException"></exception> private void ResolveDeltas(PackParser.DeltaVisit visit, int type, PackParser.ObjectTypeAndSize info, ProgressMonitor progress) { do { progress.Update(1); info = OpenDatabase(visit.delta, info); switch (info.type) { case Constants.OBJ_OFS_DELTA: case Constants.OBJ_REF_DELTA: { break; } default: { throw new IOException(MessageFormat.Format(JGitText.Get().unknownObjectType, Sharpen.Extensions.ValueOf (info.type))); } } byte[] delta = InflateAndReturn(PackParser.Source.DATABASE, info.size); CheckIfTooLarge(type, BinaryDelta.GetResultSize(delta)); visit.data = BinaryDelta.Apply(visit.parent.data, delta); delta = null; if (!CheckCRC(visit.delta.crc)) { throw new IOException(MessageFormat.Format(JGitText.Get().corruptionDetectedReReadingAt , Sharpen.Extensions.ValueOf(visit.delta.position))); } objectDigest.Update(Constants.EncodedTypeString(type)); objectDigest.Update(unchecked((byte)' ')); objectDigest.Update(Constants.EncodeASCII(visit.data.Length)); objectDigest.Update(unchecked((byte)0)); objectDigest.Update(visit.data); tempObjectId.FromRaw(objectDigest.Digest(), 0); VerifySafeObject(tempObjectId, type, visit.data); PackedObjectInfo oe; oe = NewInfo(tempObjectId, visit.delta, visit.parent.id); oe.SetOffset(visit.delta.position); OnInflatedObjectData(oe, type, visit.data); AddObjectAndTrack(oe); visit.id = oe; visit.nextChild = FirstChildOf(oe); visit = visit.Next(); } while (visit != null); }
/// <exception cref="System.IO.IOException"></exception>
private int BuildMatrix(ProgressMonitor pm)
{
    // Allocate for the worst-case scenario where every pair has a
    // score that we need to consider. We might not need that many.
    //
    matrix = new long[srcs.Count * dsts.Count];
    long[] srcSizes = new long[srcs.Count];
    long[] dstSizes = new long[dsts.Count];
    BitSet dstTooLarge = null;
    // Consider each pair of files. If the score is above the minimum
    // threshold we need to record that score in the matrix so we can
    // later find the best matches.
    //
    int mNext = 0;
    for (int srcIdx = 0; srcIdx < srcs.Count; srcIdx++)
    {
        DiffEntry srcEnt = srcs[srcIdx];
        if (!IsFile(srcEnt.oldMode))
        {
            pm.Update(dsts.Count);
            continue;
        }
        SimilarityIndex s = null;
        for (int dstIdx = 0; dstIdx < dsts.Count; dstIdx++)
        {
            DiffEntry dstEnt = dsts[dstIdx];
            if (!IsFile(dstEnt.newMode))
            {
                pm.Update(1);
                continue;
            }
            if (!RenameDetector.SameType(srcEnt.oldMode, dstEnt.newMode))
            {
                pm.Update(1);
                continue;
            }
            if (dstTooLarge != null && dstTooLarge.Get(dstIdx))
            {
                pm.Update(1);
                continue;
            }
            long srcSize = srcSizes[srcIdx];
            if (srcSize == 0)
            {
                srcSize = Size(DiffEntry.Side.OLD, srcEnt) + 1;
                srcSizes[srcIdx] = srcSize;
            }
            long dstSize = dstSizes[dstIdx];
            if (dstSize == 0)
            {
                dstSize = Size(DiffEntry.Side.NEW, dstEnt) + 1;
                dstSizes[dstIdx] = dstSize;
            }
            long max = Math.Max(srcSize, dstSize);
            long min = Math.Min(srcSize, dstSize);
            if (min * 100 / max < renameScore)
            {
                // Cannot possibly match, as the file sizes are so different.
                pm.Update(1);
                continue;
            }
            if (s == null)
            {
                try
                {
                    s = Hash(DiffEntry.Side.OLD, srcEnt);
                }
                catch (SimilarityIndex.TableFullException)
                {
                    tableOverflow = true;
                    goto SRC_continue;
                }
            }
            SimilarityIndex d;
            try
            {
                d = Hash(DiffEntry.Side.NEW, dstEnt);
            }
            catch (SimilarityIndex.TableFullException)
            {
                if (dstTooLarge == null)
                {
                    dstTooLarge = new BitSet(dsts.Count);
                }
                dstTooLarge.Set(dstIdx);
                tableOverflow = true;
                pm.Update(1);
                continue;
            }
            int contentScore = s.Score(d, 10000);
            // NameScore returns a value between 0 and 100, but we want it
            // to be in the same range as the content score. This allows it
            // to be dropped into the pretty formula for the final score.
            int nameScore = NameScore(srcEnt.oldPath, dstEnt.newPath) * 100;
            int score = (contentScore * 99 + nameScore * 1) / 10000;
            if (score < renameScore)
            {
                pm.Update(1);
                continue;
            }
            matrix[mNext++] = Encode(score, srcIdx, dstIdx);
            pm.Update(1);
        }
        SRC_continue: ;
    }
    // Sort everything in the range we populated, which might be the
    // entire matrix, or just a smaller slice if we had some bad low
    // scoring pairs.
    //
    Arrays.Sort(matrix, 0, mNext);
    return mNext;
}
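// Worked example of the score blend above. Score(d, 10000) returns a content
// similarity on [0, 10000]; the raw name score on [0, 100] is rescaled by 100
// into the same range; the 99:1 weighted average then maps onto [0, 100].
// Both input values below are invented, and 60 is JGit's customary default
// rename threshold, not a value taken from this section.
int contentScore = 9000;          // 90% similar content
int nameScore = 60 * 100;         // raw name score 60, rescaled to [0, 10000]
int score = (contentScore * 99 + nameScore * 1) / 10000;
// (891000 + 6000) / 10000 = 89, so this pair beats a renameScore of 60
// and would be recorded in the matrix.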
/// <exception cref="System.IO.IOException"></exception>
internal virtual void Compute(ProgressMonitor pm)
{
    if (pm == null)
    {
        pm = NullProgressMonitor.INSTANCE;
    }
    pm.BeginTask(JGitText.Get().renamesFindingByContent, 2 * srcs.Count * dsts.Count);
    int mNext = BuildMatrix(pm);
    @out = new AList<DiffEntry>(Math.Min(mNext, dsts.Count));
    // Match rename pairs on a first come, first serve basis until
    // we have looked at everything that is above our minimum score.
    //
    for (--mNext; mNext >= 0; mNext--)
    {
        long ent = matrix[mNext];
        int sIdx = SrcFile(ent);
        int dIdx = DstFile(ent);
        DiffEntry s = srcs[sIdx];
        DiffEntry d = dsts[dIdx];
        if (d == null)
        {
            // Destination was already matched earlier.
            pm.Update(1);
            continue;
        }
        DiffEntry.ChangeType type;
        if (s.changeType == DiffEntry.ChangeType.DELETE)
        {
            // First use of this source file. Tag it as a rename so we
            // later know it has already been used as a rename; other
            // matches (if any) will claim themselves as copies instead.
            //
            s.changeType = DiffEntry.ChangeType.RENAME;
            type = DiffEntry.ChangeType.RENAME;
        }
        else
        {
            type = DiffEntry.ChangeType.COPY;
        }
        @out.AddItem(DiffEntry.Pair(type, s, d, Score(ent)));
        dsts.Set(dIdx, null); // Claim the destination was matched.
        pm.Update(1);
    }
    srcs = CompactSrcList(srcs);
    dsts = CompactDstList(dsts);
    pm.EndTask();
}
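// Compute() can walk the sorted long[] backwards because Encode() stores the
// score in the most significant bits, making ascending numeric order equal
// score order. The real Encode/SrcFile/DstFile are not shown in this section;
// the packing below is an illustrative sketch and its bit widths are assumed.
const int BitsPerIndex = 28;
const long IndexMask = (1L << BitsPerIndex) - 1;

static long Encode(int score, int srcIdx, int dstIdx)
{
    // Score in the high bits, then source index, then destination index.
    return ((long)score << (2 * BitsPerIndex))
        | ((long)srcIdx << BitsPerIndex)
        | (long)dstIdx;
}

static int SrcFile(long ent)
{
    return (int)((ent >> BitsPerIndex) & IndexMask);
}

static int DstFile(long ent)
{
    return (int)(ent & IndexMask);
}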
/// <exception cref="System.NotSupportedException"></exception> /// <exception cref="NGit.Errors.TransportException"></exception> private void ExecuteImp(ProgressMonitor monitor, FetchResult result) { conn = transport.OpenFetch(); try { result.SetAdvertisedRefs(transport.GetURI(), conn.GetRefsMap()); ICollection<Ref> matched = new HashSet<Ref>(); foreach (RefSpec spec in toFetch) { if (spec.GetSource() == null) { throw new TransportException(MessageFormat.Format(JGitText.Get().sourceRefNotSpecifiedForRefspec , spec)); } if (spec.IsWildcard()) { ExpandWildcard(spec, matched); } else { ExpandSingle(spec, matched); } } ICollection<Ref> additionalTags = Sharpen.Collections.EmptyList<Ref>(); TagOpt tagopt = transport.GetTagOpt(); if (tagopt == TagOpt.AUTO_FOLLOW) { additionalTags = ExpandAutoFollowTags(); } else { if (tagopt == TagOpt.FETCH_TAGS) { ExpandFetchTags(); } } bool includedTags; if (!askFor.IsEmpty() && !AskForIsComplete()) { FetchObjects(monitor); includedTags = conn.DidFetchIncludeTags(); // Connection was used for object transfer. If we // do another fetch we must open a new connection. // CloseConnection(result); } else { includedTags = false; } if (tagopt == TagOpt.AUTO_FOLLOW && !additionalTags.IsEmpty()) { // There are more tags that we want to follow, but // not all were asked for on the initial request. // Sharpen.Collections.AddAll(have, askFor.Keys); askFor.Clear(); foreach (Ref r in additionalTags) { ObjectId id = r.GetPeeledObjectId(); if (id == null) { id = r.GetObjectId(); } if (transport.local.HasObject(id)) { WantTag(r); } } if (!askFor.IsEmpty() && (!includedTags || !AskForIsComplete())) { ReopenConnection(); if (!askFor.IsEmpty()) { FetchObjects(monitor); } } } } finally { CloseConnection(result); } RevWalk walk = new RevWalk(transport.local); try { if (monitor is BatchingProgressMonitor) { ((BatchingProgressMonitor)monitor).SetDelayStart(250, TimeUnit.MILLISECONDS); } monitor.BeginTask(JGitText.Get().updatingReferences, localUpdates.Count); if (transport.IsRemoveDeletedRefs()) { DeleteStaleTrackingRefs(result, walk); } foreach (TrackingRefUpdate u in localUpdates) { try { monitor.Update(1); u.Update(walk); result.Add(u); } catch (IOException err) { throw new TransportException(MessageFormat.Format(JGitText.Get().failureUpdatingTrackingRef , u.GetLocalName(), err.Message), err); } } monitor.EndTask(); } finally { walk.Release(); } if (!fetchHeadUpdates.IsEmpty()) { try { UpdateFETCH_HEAD(result); } catch (IOException err) { throw new TransportException(MessageFormat.Format(JGitText.Get().failureUpdatingFETCH_HEAD , err.Message), err); } } }
public Stash Create(NGit.ProgressMonitor monitor, string message)
{
    if (monitor != null)
    {
        monitor.Start(1);
        monitor.BeginTask("Stashing changes", 100);
    }
    UserConfig config = _repo.GetConfig().Get(UserConfig.KEY);
    RevWalk rw = new RevWalk(_repo);
    ObjectId headId = _repo.Resolve(Constants.HEAD);
    var parent = rw.ParseCommit(headId);
    PersonIdent author = new PersonIdent(config.GetAuthorName() ?? "unknown",
        config.GetAuthorEmail() ?? "unknown@(none).");
    if (string.IsNullOrEmpty(message))
    {
        // Use the commit summary as the message.
        message = parent.Abbreviate(7).ToString() + " " + parent.GetShortMessage();
        int i = message.IndexOfAny(new char[] { '\r', '\n' });
        if (i != -1)
        {
            message = message.Substring(0, i);
        }
    }
    // Create the index tree commit.
    ObjectInserter inserter = _repo.NewObjectInserter();
    DirCache dc = _repo.ReadDirCache();
    if (monitor != null)
    {
        monitor.Update(10);
    }
    var tree_id = dc.WriteTree(inserter);
    inserter.Release();
    if (monitor != null)
    {
        monitor.Update(10);
    }
    string commitMsg = "index on " + _repo.GetBranch() + ": " + message;
    ObjectId indexCommit = GitUtil.CreateCommit(_repo, commitMsg + "\n",
        new ObjectId[] { headId }, tree_id, author, author);
    if (monitor != null)
    {
        monitor.Update(20);
    }
    // Create the working dir commit.
    tree_id = WriteWorkingDirectoryTree(parent.Tree, dc);
    commitMsg = "WIP on " + _repo.GetBranch() + ": " + message;
    var wipCommit = GitUtil.CreateCommit(_repo, commitMsg + "\n",
        new ObjectId[] { headId, indexCommit }, tree_id, author, author);
    if (monitor != null)
    {
        monitor.Update(20);
    }
    string prevCommit = null;
    FileInfo sf = StashRefFile;
    if (sf.Exists)
    {
        prevCommit = File.ReadAllText(sf.FullName).Trim(' ', '\t', '\r', '\n');
    }
    Stash s = new Stash(prevCommit, wipCommit.Name, author, commitMsg);
    FileInfo stashLog = StashLogFile;
    File.AppendAllText(stashLog.FullName, s.FullLine + "\n");
    File.WriteAllText(sf.FullName, s.CommitId + "\n");
    if (monitor != null)
    {
        monitor.Update(5);
    }
    // Wipe all local changes.
    GitUtil.HardReset(_repo, Constants.HEAD);
    if (monitor != null)
    {
        monitor.EndTask();
    }
    s.StashCollection = this;
    return s;
}
// By default there is no locking.
/// <exception cref="System.IO.IOException"></exception>
private void ResolveDeltas(ProgressMonitor progress)
{
    progress.BeginTask(JGitText.Get().resolvingDeltas, deltaCount);
    int last = entryCount;
    for (int i = 0; i < last; i++)
    {
        int before = entryCount;
        ResolveDeltas(entries[i]);
        progress.Update(entryCount - before);
        if (progress.IsCancelled())
        {
            throw new IOException(JGitText.Get().downloadCancelledDuringIndexing);
        }
    }
    progress.EndTask();
}
/// <summary>Parse the pack stream.</summary>
/// <remarks>Parse the pack stream.</remarks>
/// <param name="receiving">
/// receives progress feedback during the initial receiving
/// objects phase. If null,
/// <see cref="NGit.NullProgressMonitor">NGit.NullProgressMonitor</see>
/// will be used.
/// </param>
/// <param name="resolving">receives progress feedback during the resolving objects phase.
/// </param>
/// <returns>
/// the pack lock, if one was requested by setting
/// <see cref="SetLockMessage(string)">SetLockMessage(string)</see>.
/// </returns>
/// <exception cref="System.IO.IOException">the stream is malformed, or contains corrupt objects.
/// </exception>
public virtual PackLock Parse(ProgressMonitor receiving, ProgressMonitor resolving)
{
    if (receiving == null)
    {
        receiving = NullProgressMonitor.INSTANCE;
    }
    if (resolving == null)
    {
        resolving = NullProgressMonitor.INSTANCE;
    }
    if (receiving == resolving)
    {
        receiving.Start(2);
    }
    try
    {
        ReadPackHeader();
        entries = new PackedObjectInfo[(int)objectCount];
        baseById = new ObjectIdOwnerMap<PackParser.DeltaChain>();
        baseByPos = new LongMap<PackParser.UnresolvedDelta>();
        deferredCheckBlobs = new BlockList<PackedObjectInfo>();
        receiving.BeginTask(JGitText.Get().receivingObjects, (int)objectCount);
        try
        {
            for (int done = 0; done < objectCount; done++)
            {
                IndexOneObject();
                receiving.Update(1);
                if (receiving.IsCancelled())
                {
                    throw new IOException(JGitText.Get().downloadCancelled);
                }
            }
            ReadPackFooter();
            EndInput();
        }
        finally
        {
            receiving.EndTask();
        }
        if (!deferredCheckBlobs.IsEmpty())
        {
            DoDeferredCheckBlobs();
        }
        if (deltaCount > 0)
        {
            if (resolving is BatchingProgressMonitor)
            {
                ((BatchingProgressMonitor)resolving).SetDelayStart(1000, TimeUnit.MILLISECONDS);
            }
            resolving.BeginTask(JGitText.Get().resolvingDeltas, deltaCount);
            ResolveDeltas(resolving);
            if (entryCount < objectCount)
            {
                if (!IsAllowThin())
                {
                    throw new IOException(MessageFormat.Format(JGitText.Get().packHasUnresolvedDeltas,
                        Sharpen.Extensions.ValueOf(objectCount - entryCount)));
                }
                ResolveDeltasWithExternalBases(resolving);
                if (entryCount < objectCount)
                {
                    throw new IOException(MessageFormat.Format(JGitText.Get().packHasUnresolvedDeltas,
                        Sharpen.Extensions.ValueOf(objectCount - entryCount)));
                }
            }
            resolving.EndTask();
        }
        packDigest = null;
        baseById = null;
        baseByPos = null;
    }
    finally
    {
        try
        {
            if (readCurs != null)
            {
                readCurs.Release();
            }
        }
        finally
        {
            readCurs = null;
        }
        try
        {
            inflater.Release();
        }
        finally
        {
            inflater = null;
        }
    }
    return null;
}
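// Usage sketch for Parse(). Pack parsers are normally created by an
// ObjectInserter; the factory and method names below are assumptions taken
// from the JGit API this code mirrors. Passing the same monitor for both
// phases triggers the receiving.Start(2) call seen above.
using NGit;
using NGit.Transport;
using Sharpen;

static void IndexPackStream(Repository repo, InputStream rawIn, ProgressMonitor pm)
{
    ObjectInserter ins = repo.NewObjectInserter();
    try
    {
        PackParser parser = ins.NewPackParser(rawIn);
        parser.SetAllowThin(true);      // permit deltas whose bases we already have
        parser.Parse(pm, pm);           // receiving phase, then resolving phase
        ins.Flush();
    }
    finally
    {
        ins.Release();
    }
}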
/// <exception cref="System.IO.IOException"></exception>
internal virtual void OpenIndex(ProgressMonitor pm)
{
    if (this.index != null)
    {
        return;
    }
    if (this.tmpIdx == null)
    {
        this.tmpIdx = FilePath.CreateTempFile("jgit-walk-", ".idx");
    }
    else if (this.tmpIdx.IsFile())
    {
        try
        {
            this.index = PackIndex.Open(this.tmpIdx);
            return;
        }
        catch (FileNotFoundException)
        {
            // Fall through and get the file.
        }
    }
    WalkRemoteObjectDatabase.FileStream s = this.connection.Open("pack/" + this.idxName);
    pm.BeginTask("Get " + Sharpen.Runtime.Substring(this.idxName, 0, 12) + "..idx",
        s.length < 0 ? ProgressMonitor.UNKNOWN : (int)(s.length / 1024));
    try
    {
        FileOutputStream fos = new FileOutputStream(this.tmpIdx);
        try
        {
            byte[] buf = new byte[2048];
            int cnt;
            while (!pm.IsCancelled() && (cnt = s.@in.Read(buf)) >= 0)
            {
                fos.Write(buf, 0, cnt);
                pm.Update(cnt / 1024);
            }
        }
        finally
        {
            fos.Close();
        }
    }
    catch (IOException)
    {
        FileUtils.Delete(this.tmpIdx);
        throw;
    }
    finally
    {
        s.@in.Close();
    }
    pm.EndTask();
    if (pm.IsCancelled())
    {
        FileUtils.Delete(this.tmpIdx);
        return;
    }
    try
    {
        this.index = PackIndex.Open(this.tmpIdx);
    }
    catch (IOException)
    {
        FileUtils.Delete(this.tmpIdx);
        throw;
    }
}
/// <exception cref="System.IO.IOException"></exception>
internal virtual void Search(ProgressMonitor monitor, ObjectToPack[] toSearch, int off, int cnt)
{
    try
    {
        for (int end = off + cnt; off < end; off++)
        {
            res = window[resSlot];
            if (0 < maxMemory)
            {
                Clear(res);
                int tail = Next(resSlot);
                long need = EstimateSize(toSearch[off]);
                while (maxMemory < loaded + need && tail != resSlot)
                {
                    Clear(window[tail]);
                    tail = Next(tail);
                }
            }
            res.Set(toSearch[off]);
            if (res.@object.IsEdge())
            {
                // We don't actually want to make a delta for
                // them, just need to push them into the window
                // so they can be read by other objects.
                //
                KeepInWindow();
            }
            else
            {
                // Search for a delta for the current window slot.
                //
                monitor.Update(1);
                Search();
            }
        }
    }
    finally
    {
        if (deflater != null)
        {
            deflater.Finish();
        }
    }
}
/// <summary>Consume data from the input stream until the packfile is indexed.</summary>
/// <remarks>Consume data from the input stream until the packfile is indexed.</remarks>
/// <param name="progress">progress feedback</param>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
public virtual void Index(ProgressMonitor progress)
{
    progress.Start(2);
    try
    {
        try
        {
            ReadPackHeader();
            entries = new PackedObjectInfo[(int)objectCount];
            baseById = new ObjectIdSubclassMap<IndexPack.DeltaChain>();
            baseByPos = new LongMap<IndexPack.UnresolvedDelta>();
            deferredCheckBlobs = new AList<PackedObjectInfo>();
            progress.BeginTask(JGitText.Get().receivingObjects, (int)objectCount);
            for (int done = 0; done < objectCount; done++)
            {
                IndexOneObject();
                progress.Update(1);
                if (progress.IsCancelled())
                {
                    throw new IOException(JGitText.Get().downloadCancelled);
                }
            }
            ReadPackFooter();
            EndInput();
            if (!deferredCheckBlobs.IsEmpty())
            {
                DoDeferredCheckBlobs();
            }
            progress.EndTask();
            if (deltaCount > 0)
            {
                if (packOut == null)
                {
                    throw new IOException(JGitText.Get().needPackOut);
                }
                ResolveDeltas(progress);
                if (entryCount < objectCount)
                {
                    if (!fixThin)
                    {
                        throw new IOException(MessageFormat.Format(
                            JGitText.Get().packHasUnresolvedDeltas, (objectCount - entryCount)));
                    }
                    FixThinPack(progress);
                }
            }
            if (packOut != null && (keepEmpty || entryCount > 0))
            {
                packOut.GetChannel().Force(true);
            }
            packDigest = null;
            baseById = null;
            baseByPos = null;
            if (dstIdx != null && (keepEmpty || entryCount > 0))
            {
                WriteIdx();
            }
        }
        finally
        {
            try
            {
                if (readCurs != null)
                {
                    readCurs.Release();
                }
            }
            finally
            {
                readCurs = null;
            }
            try
            {
                inflater.Release();
            }
            finally
            {
                inflater = null;
                objectDatabase.Close();
            }
            progress.EndTask();
            if (packOut != null)
            {
                packOut.Close();
            }
        }
        if (keepEmpty || entryCount > 0)
        {
            if (dstPack != null)
            {
                dstPack.SetReadOnly();
            }
            if (dstIdx != null)
            {
                dstIdx.SetReadOnly();
            }
        }
    }
    catch (IOException)
    {
        if (dstPack != null)
        {
            FileUtils.Delete(dstPack);
        }
        if (dstIdx != null)
        {
            FileUtils.Delete(dstIdx);
        }
        throw;
    }
}