/// <summary>
/// Prepare per-test state: an in-memory destination stream wrapped by a
/// PackOutputStream, the on-disk pack/index target paths, and a writer.
/// </summary>
public override void setUp()
{
    base.setUp();
    _os = new MemoryStream();
    _cos = new PackOutputStream(_os);
    _packBase = new FileInfo(Path.Combine(trash.FullName, "tmp_pack"));
    // BUGFIX: was "tmp_pack._pack" — pack files use the ".pack" suffix so the
    // pair matches the standard "<base>.pack"/"<base>.idx" naming that
    // _indexFile below already follows.
    _packFile = new FileInfo(Path.Combine(trash.FullName, "tmp_pack.pack"));
    _indexFile = new FileInfo(Path.Combine(trash.FullName, "tmp_pack.idx"));
    _writer = new PackWriter(db, new TextProgressMonitor());
}
/// <summary>
/// Search every pack known to this object database for a stored
/// representation of <paramref name="otp"/>, offering each hit to the
/// packer; afterwards, let any alternate databases do the same.
/// Restarts the scan from a fresh pack list if a pack mutates mid-search.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
internal override void SelectObjectRepresentation(PackWriter packer, ObjectToPack otp, WindowCursor curs)
{
    ObjectDirectory.PackList pList = packList.Get();
    for (; ;)
    {
        foreach (PackFile p in pList.packs)
        {
            try
            {
                LocalObjectRepresentation rep = p.Representation(curs, otp);
                if (rep != null)
                {
                    // Offer this candidate; the packer keeps the best one.
                    packer.Select(otp, rep);
                }
            }
            catch (PackMismatchException)
            {
                // Pack was modified; refresh the entire pack list.
                //
                pList = ScanPacks(pList);
                // Restart the outer for(;;) with the refreshed list.
                goto SEARCH_continue;
            }
            catch (IOException)
            {
                // Assume the pack is corrupted.
                //
                RemovePack(p);
            }
        }
        // All packs scanned without a mismatch; leave the retry loop.
        goto SEARCH_break;
        SEARCH_continue :;
    }
    SEARCH_break :;
    foreach (FileObjectDatabase.AlternateHandle h in MyAlternates())
    {
        h.db.SelectObjectRepresentation(packer, otp, curs);
    }
}
/// <summary>
/// Serialize this object into a two-entry pack ("Version" then "Data")
/// and return the raw bytes of the resulting stream.
/// </summary>
/// <returns>the packed byte image of this object.</returns>
public byte[] GetBytes()
{
    using (MemoryStream buffer = new MemoryStream())
    using (PackWriter packer = new PackWriter(buffer))
    {
        foreach (PPDPackStreamWriter entry in packer.Write(new string[] { "Version", "Data" }))
        {
            if (entry.Filename == "Version")
            {
                // Format version marker for the "Version" entry.
                entry.WriteByte(2);
            }
            else if (entry.Filename == "Data")
            {
                var payload = GetBytesImpl();
                entry.Write(payload, 0, payload.Length);
            }
        }
        return buffer.ToArray();
    }
}
/// <summary>
/// Create a bundle writer for the given repository, reporting pack
/// construction progress to <paramref name="monitor"/>.
/// </summary>
/// <param name="repo">repository the bundled objects are read from.</param>
/// <param name="monitor">progress monitor for pack generation.</param>
public BundleWriter(Repository repo, ProgressMonitor monitor)
{
    includeObjects = new Dictionary<string, ObjectId>();
    assumeCommits = new List<RevCommit>();
    packWriter = new PackWriter(repo, monitor);
}
/// <summary>
/// Overwrite files[0] with the pack stream and files[1] with its index,
/// then restore the parent directory's original modification time via
/// Touch so the directory snapshot appears unchanged.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private static void Write(FilePath[] files, PackWriter pw)
{
    // Capture the directory mtime before writing so it can be re-applied.
    long begin = files[0].GetParentFile().LastModified();
    NullProgressMonitor m = NullProgressMonitor.INSTANCE;
    OutputStream @out;
    // files[0] receives the pack data...
    @out = new BufferedOutputStream(new FileOutputStream(files[0]));
    try
    {
        pw.WritePack(m, m, @out);
    }
    finally
    {
        @out.Close();
    }
    // ...and files[1] receives the index.
    @out = new BufferedOutputStream(new FileOutputStream(files[1]));
    try
    {
        pw.WriteIndex(@out);
    }
    finally
    {
        @out.Close();
    }
    Touch(begin, files[0].GetParentFile());
}
/// <summary>
/// Verify that an object remains resolvable after the pack file that
/// contains it is rewritten in place with different contents.
/// </summary>
public virtual void TestObjectMovedWithinPack()
{
    // Create an object and pack it.
    //
    Repository eden = CreateBareRepository();
    RevObject o1 = WriteBlob(eden, "o1");
    FilePath[] out1 = Pack(eden, o1);
    NUnit.Framework.Assert.AreEqual(o1.Name, Parse(o1).Name);
    // Force close the old pack.
    //
    WhackCache();
    // Now overwrite the old pack in place. This method of creating a
    // different pack under the same file name is partially broken. We
    // should also have a different file name because the list of objects
    // within the pack has been modified.
    //
    RevObject o2 = WriteBlob(eden, "o2");
    PackWriter pw = new PackWriter(eden);
    pw.AddObject(o2);
    pw.AddObject(o1);
    Write(out1, pw);
    pw.Release();
    // Try the old name, then the new name. The old name should cause the
    // pack to reload when it opens and the index and pack mismatch.
    //
    NUnit.Framework.Assert.AreEqual(o1.Name, Parse(o1).Name);
    NUnit.Framework.Assert.AreEqual(o2.Name, Parse(o2).Name);
}
/// <summary>
/// Build and stream the pack for the negotiated want/have sets, optionally
/// multiplexed over side-band channels with progress reporting, and record
/// pack statistics when a logger is attached.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private void SendPack()
{
    bool sideband = options.Contains(OPTION_SIDE_BAND) || options.Contains(OPTION_SIDE_BAND_64K);
    ProgressMonitor pm = NullProgressMonitor.INSTANCE;
    OutputStream packOut = rawOut;
    SideBandOutputStream msgOut = null;
    if (sideband)
    {
        // Pack data goes out on channel 1; progress (when enabled) on channel 2.
        int bufsz = SideBandOutputStream.SMALL_BUF;
        if (options.Contains(OPTION_SIDE_BAND_64K))
        {
            bufsz = SideBandOutputStream.MAX_BUF;
        }
        packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, rawOut);
        if (!options.Contains(OPTION_NO_PROGRESS))
        {
            msgOut = new SideBandOutputStream(SideBandOutputStream.CH_PROGRESS, bufsz, rawOut);
            pm = new SideBandProgressMonitor(msgOut);
        }
    }
    PackConfig cfg = packConfig;
    if (cfg == null)
    {
        cfg = new PackConfig(db);
    }
    PackWriter pw = new PackWriter(cfg, walk.GetObjectReader());
    try
    {
        pw.SetUseCachedPacks(true);
        pw.SetReuseDeltaCommits(true);
        pw.SetDeltaBaseAsOffset(options.Contains(OPTION_OFS_DELTA));
        pw.SetThin(options.Contains(OPTION_THIN_PACK));
        pw.SetReuseValidatingObjects(false);
        if (commonBase.IsEmpty())
        {
            // No common base: hint the writer with peeled tag targets and
            // branch heads so tag contents delta well.
            ICollection<ObjectId> tagTargets = new HashSet<ObjectId>();
            foreach (Ref @ref in refs.Values)
            {
                if (@ref.GetPeeledObjectId() != null)
                {
                    tagTargets.AddItem(@ref.GetPeeledObjectId());
                }
                else
                {
                    if (@ref.GetObjectId() == null)
                    {
                        continue;
                    }
                    else
                    {
                        if (@ref.GetName().StartsWith(Constants.R_HEADS))
                        {
                            tagTargets.AddItem(@ref.GetObjectId());
                        }
                    }
                }
            }
            pw.SetTagTargets(tagTargets);
        }
        RevWalk rw = walk;
        if (wantAll.IsEmpty())
        {
            pw.PreparePack(pm, wantIds, commonBase);
        }
        else
        {
            walk.Reset();
            ObjectWalk ow = walk.ToObjectWalkWithSameObjects();
            pw.PreparePack(pm, ow, wantAll, commonBase);
            rw = ow;
        }
        if (options.Contains(OPTION_INCLUDE_TAG))
        {
            foreach (Ref vref in refs.Values)
            {
                Ref @ref = vref;
                ObjectId objectId = @ref.GetObjectId();
                // If the object was already requested, skip it.
                if (wantAll.IsEmpty())
                {
                    if (wantIds.Contains(objectId))
                    {
                        continue;
                    }
                }
                else
                {
                    RevObject obj = rw.LookupOrNull(objectId);
                    if (obj != null && obj.Has(WANT))
                    {
                        continue;
                    }
                }
                // BUGFIX: this condition had been corrupted to the literal
                // text "[email protected]()" (an autolink/redaction
                // artifact); restored to the peel check.
                if (!@ref.IsPeeled())
                {
                    @ref = db.Peel(@ref);
                }
                ObjectId peeledId = @ref.GetPeeledObjectId();
                if (peeledId == null)
                {
                    continue;
                }
                objectId = @ref.GetObjectId();
                // Ship the annotated tag only when its target is in the pack
                // but the tag object itself would otherwise be omitted.
                if (pw.WillInclude(peeledId) && !pw.WillInclude(objectId))
                {
                    pw.AddObject(rw.ParseAny(objectId));
                }
            }
        }
        pw.WritePack(pm, NullProgressMonitor.INSTANCE, packOut);
        statistics = pw.GetStatistics();
        if (msgOut != null)
        {
            string msg = pw.GetStatistics().GetMessage() + '\n';
            msgOut.Write(Constants.Encode(msg));
            msgOut.Flush();
        }
    }
    finally
    {
        pw.Release();
    }
    if (sideband)
    {
        pckOut.End();
    }
    if (logger != null && statistics != null)
    {
        logger.OnPackStatistics(statistics);
    }
}
/// <summary>
/// Stream the prepared pack to the client, optionally over side-band
/// channels, honoring the thin-pack, progress, offset-delta and
/// include-tag protocol options.
/// </summary>
private void SendPack()
{
    bool thin = _options.Contains(OptionThinPack);
    bool progress = !_options.Contains(OptionNoProgress);
    bool sideband = _options.Contains(OptionSideBand) || _options.Contains(OptionSideBand64K);
    ProgressMonitor pm = NullProgressMonitor.Instance;
    Stream _packOut = _rawOut;
    if (sideband)
    {
        // Pack bytes on the data channel; progress on its own channel.
        int bufsz = SideBandOutputStream.SMALL_BUF;
        if (_options.Contains(OptionSideBand64K))
        {
            bufsz = SideBandOutputStream.MAX_BUF;
        }
        _packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, _rawOut);
        if (progress)
            pm = new SideBandProgressMonitor(new SideBandOutputStream(SideBandOutputStream.CH_PROGRESS, bufsz, _rawOut));
    }
    var pw = new PackWriter(_db, pm, NullProgressMonitor.Instance)
    {
        DeltaBaseAsOffset = _options.Contains(OptionOfsDelta),
        Thin = thin
    };
    pw.preparePack(_wantAll, _commonBase);
    if (_options.Contains(OptionIncludeTag))
    {
        foreach (Ref r in _refs.Values)
        {
            RevObject o;
            try
            {
                o = _walk.parseAny(r.ObjectId);
            }
            catch (IOException)
            {
                // Unparseable ref target: skip it.
                continue;
            }
            // Only annotated tags that were not already wanted qualify.
            RevTag t = (o as RevTag);
            if (o.has(WANT) || (t == null))
                continue;
            // Add the tag when its target ships but the tag itself would not.
            if (!pw.willInclude(t) && pw.willInclude(t.getObject()))
                pw.addObject(t);
        }
    }
    pw.writePack(_packOut);
    if (sideband)
    {
        _packOut.Flush();
        _pckOut.End();
    }
    else
    {
        _rawOut.Flush();
    }
}
/// <summary>
/// Build and stream the pack for the negotiated want/have sets. Verifies
/// request EOF on uni-directional pipes, consults the pre-upload hook,
/// optionally multiplexes over side-band channels, and records statistics.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private void SendPack()
{
    bool sideband = options.Contains(OPTION_SIDE_BAND) || options.Contains(OPTION_SIDE_BAND_64K);
    if (!biDirectionalPipe)
    {
        // Ensure the request was fully consumed. Any remaining input must
        // be a protocol error. If we aren't at EOF the implementation is broken.
        int eof = rawIn.Read();
        if (0 <= eof)
        {
            throw new CorruptObjectException(MessageFormat.Format(JGitText.Get().expectedEOFReceived, "\\x" + Sharpen.Extensions.ToHexString(eof)));
        }
    }
    ProgressMonitor pm = NullProgressMonitor.INSTANCE;
    OutputStream packOut = rawOut;
    SideBandOutputStream msgOut = null;
    if (sideband)
    {
        // Pack data goes out on channel 1; progress (when enabled) on channel 2.
        int bufsz = SideBandOutputStream.SMALL_BUF;
        if (options.Contains(OPTION_SIDE_BAND_64K))
        {
            bufsz = SideBandOutputStream.MAX_BUF;
        }
        packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, rawOut);
        if (!options.Contains(OPTION_NO_PROGRESS))
        {
            msgOut = new SideBandOutputStream(SideBandOutputStream.CH_PROGRESS, bufsz, rawOut);
            pm = new SideBandProgressMonitor(msgOut);
        }
    }
    try
    {
        // Give the pre-upload hook a chance to veto or account for the send.
        if (wantAll.IsEmpty())
        {
            preUploadHook.OnSendPack(this, wantIds, commonBase);
        }
        else
        {
            preUploadHook.OnSendPack(this, wantAll, commonBase);
        }
    }
    catch (UploadPackMayNotContinueException noPack)
    {
        // Report the refusal on the error channel when side-band is active.
        if (sideband && noPack.Message != null)
        {
            noPack.SetOutput();
            SideBandOutputStream err = new SideBandOutputStream(SideBandOutputStream.CH_ERROR, SideBandOutputStream.SMALL_BUF, rawOut);
            err.Write(Constants.Encode(noPack.Message));
            err.Flush();
        }
        throw;
    }
    PackConfig cfg = packConfig;
    if (cfg == null)
    {
        cfg = new PackConfig(db);
    }
    PackWriter pw = new PackWriter(cfg, walk.GetObjectReader());
    try
    {
        pw.SetUseCachedPacks(true);
        pw.SetReuseDeltaCommits(true);
        pw.SetDeltaBaseAsOffset(options.Contains(OPTION_OFS_DELTA));
        pw.SetThin(options.Contains(OPTION_THIN_PACK));
        pw.SetReuseValidatingObjects(false);
        if (commonBase.IsEmpty())
        {
            // No common base: hint the writer with peeled tag targets and
            // branch heads so tag contents delta well.
            ICollection<ObjectId> tagTargets = new HashSet<ObjectId>();
            foreach (Ref @ref in refs.Values)
            {
                if (@ref.GetPeeledObjectId() != null)
                {
                    tagTargets.AddItem(@ref.GetPeeledObjectId());
                }
                else
                {
                    if (@ref.GetObjectId() == null)
                    {
                        continue;
                    }
                    else
                    {
                        if (@ref.GetName().StartsWith(Constants.R_HEADS))
                        {
                            tagTargets.AddItem(@ref.GetObjectId());
                        }
                    }
                }
            }
            pw.SetTagTargets(tagTargets);
        }
        RevWalk rw = walk;
        if (wantAll.IsEmpty())
        {
            pw.PreparePack(pm, wantIds, commonBase);
        }
        else
        {
            walk.Reset();
            ObjectWalk ow = walk.ToObjectWalkWithSameObjects();
            pw.PreparePack(pm, ow, wantAll, commonBase);
            rw = ow;
        }
        if (options.Contains(OPTION_INCLUDE_TAG))
        {
            foreach (Ref vref in refs.Values)
            {
                Ref @ref = vref;
                ObjectId objectId = @ref.GetObjectId();
                // If the object was already requested, skip it.
                if (wantAll.IsEmpty())
                {
                    if (wantIds.Contains(objectId))
                    {
                        continue;
                    }
                }
                else
                {
                    RevObject obj = rw.LookupOrNull(objectId);
                    if (obj != null && obj.Has(WANT))
                    {
                        continue;
                    }
                }
                // BUGFIX: this condition had been corrupted to the literal
                // text "[email protected]()" (an autolink/redaction
                // artifact); restored to the peel check.
                if (!@ref.IsPeeled())
                {
                    @ref = db.Peel(@ref);
                }
                ObjectId peeledId = @ref.GetPeeledObjectId();
                if (peeledId == null)
                {
                    continue;
                }
                objectId = @ref.GetObjectId();
                // Ship the annotated tag only when its target is in the pack
                // but the tag object itself would otherwise be omitted.
                if (pw.WillInclude(peeledId) && !pw.WillInclude(objectId))
                {
                    pw.AddObject(rw.ParseAny(objectId));
                }
            }
        }
        pw.WritePack(pm, NullProgressMonitor.INSTANCE, packOut);
        statistics = pw.GetStatistics();
        if (msgOut != null)
        {
            string msg = pw.GetStatistics().GetMessage() + '\n';
            msgOut.Write(Constants.Encode(msg));
            msgOut.Flush();
        }
    }
    finally
    {
        pw.Release();
    }
    if (sideband)
    {
        pckOut.End();
    }
    if (logger != null && statistics != null)
    {
        logger.OnPackStatistics(statistics);
    }
}
/// <summary>
/// Prepare and transmit a pack containing the objects the remote side is
/// missing, timing the transfer.
/// </summary>
/// <param name="refUpdates">updates being pushed to the remote.</param>
/// <param name="monitor">progress monitor for pack preparation.</param>
private void writePack(IDictionary<string, RemoteRefUpdate> refUpdates, ProgressMonitor monitor)
{
    PackWriter pw = new PackWriter(local, monitor);
    pw.Thin = _thinPack;
    pw.DeltaBaseAsOffset = _capableOfsDelta;
    // Objects the remote already has: every advertised ref tip, plus the
    // extra ".have" advertisements.
    List<ObjectId> have = new List<ObjectId>(Refs.Count);
    foreach (Ref advertised in Refs)
    {
        have.Add(advertised.ObjectId);
    }
    have.AddRange(additionalHaves);
    // Objects being pushed: the new tip of every update that is not a delete.
    List<ObjectId> need = new List<ObjectId>(refUpdates.Count);
    foreach (RemoteRefUpdate update in refUpdates.Values)
    {
        if (!ObjectId.ZeroId.Equals(update.NewObjectId))
        {
            need.Add(update.NewObjectId);
        }
    }
    pw.preparePack(need, have);
    long start = SystemReader.getInstance().getCurrentTime();
    pw.writePack(outStream);
    packTransferTime = SystemReader.getInstance().getCurrentTime() - start;
}
/// <summary>
/// Verify that an object remains resolvable after the pack file that
/// contains it is rewritten in place with different contents.
/// </summary>
public void testObjectMovedWithinPack()
{
    // Create an object and pack it.
    //
    Core.Repository eden = createNewEmptyRepo();
    RevObject o1 = WriteBlob(eden, "o1");
    FileInfo[] out1 = Pack(eden, o1);
    Assert.AreEqual(o1.Name, Parse(o1).Name);
    // Force close the old pack.
    //
    WhackCache();
    // Now overwrite the old pack in place. This method of creating a
    // different pack under the same file name is partially broken. We
    // should also have a different file name because the list of objects
    // within the pack has been modified.
    //
    RevObject o2 = WriteBlob(eden, "o2");
    var pw = new PackWriter(eden, NullProgressMonitor.Instance);
    pw.addObject(o2);
    pw.addObject(o1);
    Write(out1, pw);
    // NOTE(review): the writer is never released here — confirm this
    // PackWriter variant holds no resources that need explicit cleanup.
    // Try the old name, then the new name. The old name should cause the
    // pack to reload when it opens and the index and pack mismatch.
    //
    Assert.AreEqual(o1.Name, Parse(o1).Name);
    Assert.AreEqual(o2.Name, Parse(o2).Name);
}
/// <summary>
/// Prepare and send the pack of pushed objects over the connection's
/// output stream, timing the transfer.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private void WritePack(IDictionary<string, RemoteRefUpdate> refUpdates, ProgressMonitor monitor)
{
    IList<ObjectId> remoteObjects = new AList<ObjectId>(GetRefs().Count);
    IList<ObjectId> newObjects = new AList<ObjectId>(refUpdates.Count);
    long start;
    PackWriter writer = new PackWriter(transport.GetPackConfig(), local.NewObjectReader());
    try
    {
        // The remote is assumed to have every advertised ref tip plus the
        // extra ".have" advertisements.
        foreach (Ref r in GetRefs())
        {
            remoteObjects.AddItem(r.GetObjectId());
        }
        Sharpen.Collections.AddAll(remoteObjects, additionalHaves);
        // Push the new tip of every update that is not a deletion.
        foreach (RemoteRefUpdate r_1 in refUpdates.Values)
        {
            if (!ObjectId.ZeroId.Equals(r_1.GetNewObjectId()))
            {
                newObjects.AddItem(r_1.GetNewObjectId());
            }
        }
        writer.SetUseCachedPacks(true);
        writer.SetThin(thinPack);
        writer.SetDeltaBaseAsOffset(capableOfsDelta);
        writer.PreparePack(monitor, newObjects, remoteObjects);
        start = Runtime.CurrentTimeMillis();
        writer.WritePack(monitor, monitor, @out);
    }
    finally
    {
        writer.Release();
    }
    @out.Flush();
    packTransferTime = Runtime.CurrentTimeMillis() - start;
}
/// <summary>
/// Upload the objects required by <paramref name="updates"/> to the remote
/// as a new pack/index pair under its "pack/" directory, then refresh the
/// pack info list. On failure, partially-written files are deleted.
/// </summary>
private void Sendpack(IEnumerable<RemoteRefUpdate> updates, ProgressMonitor monitor)
{
    string pathPack = null;
    string pathIdx = null;
    try
    {
        var pw = new PackWriter(_local, monitor);
        var need = new List<ObjectId>();
        var have = new List<ObjectId>();
        foreach (RemoteRefUpdate r in updates)
        {
            need.Add(r.NewObjectId);
        }
        foreach (Ref r in Refs)
        {
            have.Add(r.ObjectId);
            if (r.PeeledObjectId != null)
            {
                have.Add(r.PeeledObjectId);
            }
        }
        pw.preparePack(need, have);
        // Empty pack means the remote already has everything it needs.
        if (pw.getObjectsNumber() == 0)
            return;
        _packNames = new Dictionary<string, string>();
        foreach (string n in _dest.getPackNames())
        {
            _packNames.Add(n, n);
        }
        string b = "pack-" + pw.computeName().Name;
        string packName = b + IndexPack.PackSuffix;
        pathPack = "pack/" + packName;
        pathIdx = "pack/" + b + IndexPack.IndexSuffix;
        if (_packNames.Remove(packName))
        {
            // The remote already contains a pack by this name; drop its
            // index before overwriting so readers never see a mismatch.
            _dest.writeInfoPacks(new List<string>(_packNames.Keys));
            _dest.deleteFile(pathIdx);
        }
        // Write the pack first, then the index; readers consult them in
        // the opposite order (index, then pack).
        string wt = "Put " + b.Slice(0, 12);
        Stream os = _dest.writeFile(pathPack, monitor, wt + "." + IndexPack.PackSuffix);
        try
        {
            pw.writePack(os);
        }
        finally
        {
            os.Close();
        }
        os = _dest.writeFile(pathIdx, monitor, wt + "..idx");
        try
        {
            pw.writeIndex(os);
        }
        finally
        {
            os.Close();
        }
        // List the new pack first so clients find fresh objects quickly.
        var infoPacks = new List<string> { packName };
        infoPacks.AddRange(_packNames.Keys);
        _dest.writeInfoPacks(infoPacks);
    }
    catch (IOException err)
    {
        SafeDelete(pathIdx);
        SafeDelete(pathPack);
        throw new TransportException(_uri, "cannot store objects", err);
    }
}
/// <summary>
/// Select a stored representation of <paramref name="otp"/> for the given
/// packer, if this database has one available.
/// </summary>
/// <param name="packer">pack writer performing the selection.</param>
/// <param name="otp">object whose representation is being chosen.</param>
/// <param name="curs">cursor used to access stored object data.</param>
/// <exception cref="System.IO.IOException"></exception>
internal abstract void SelectObjectRepresentation(PackWriter packer, ObjectToPack otp, WindowCursor curs);
/// <summary>
/// Upload the objects required by <paramref name="updates"/> to the remote
/// as a new pack/index pair under its "pack/" directory, then refresh the
/// pack info list. On failure, partially-written files are deleted.
/// </summary>
private void Sendpack(IEnumerable <RemoteRefUpdate> updates, ProgressMonitor monitor)
{
    string pathPack = null;
    string pathIdx = null;
    try
    {
        var pw = new PackWriter(_local, monitor);
        var need = new List <ObjectId>();
        var have = new List <ObjectId>();
        foreach (RemoteRefUpdate r in updates)
        {
            need.Add(r.NewObjectId);
        }
        foreach (Ref r in Refs)
        {
            have.Add(r.ObjectId);
            if (r.PeeledObjectId != null)
            {
                have.Add(r.PeeledObjectId);
            }
        }
        pw.preparePack(need, have);
        // We don't have to continue further if the pack will
        // be an empty pack, as the remote has all objects it
        // needs to complete this change.
        //
        if (pw.getObjectsNumber() == 0)
        {
            return;
        }
        _packNames = new Dictionary <string, string>();
        foreach (string n in _dest.getPackNames())
        {
            _packNames.put(n, n);
        }
        string b = "pack-" + pw.computeName().Name;
        string packName = b + IndexPack.PackSuffix;
        pathPack = "pack/" + packName;
        pathIdx = "pack/" + b + IndexPack.IndexSuffix;
        if (_packNames.remove(packName) != null)
        {
            // The remote already contains this pack. We should
            // remove the index before overwriting to prevent bad
            // offsets from appearing to clients.
            //
            _dest.writeInfoPacks(_packNames.Keys);
            _dest.deleteFile(pathIdx);
        }
        // Write the pack file, then the index, as readers look the
        // other direction (index, then pack file).
        //
        string wt = "Put " + b.Slice(0, 12);
        using (Stream os = _dest.writeFile(pathPack, monitor, wt + "." + IndexPack.PackSuffix))
        {
            pw.writePack(os);
        }
        using (Stream os = _dest.writeFile(pathIdx, monitor, wt + "." + IndexPack.IndexSuffix))
        {
            pw.writeIndex(os);
        }
        // Record the pack at the start of the pack info list. This
        // way clients are likely to consult the newest pack first,
        // and discover the most recent objects there.
        //
        var infoPacks = new List <string> { packName };
        infoPacks.AddRange(_packNames.Keys);
        _dest.writeInfoPacks(infoPacks);
    }
    catch (IOException err)
    {
        SafeDelete(pathIdx);
        SafeDelete(pathPack);
        throw new TransportException(_uri, "cannot store objects", err);
    }
}
/// <summary>
/// Upload the objects required by <paramref name="updates"/> to the remote
/// as a new pack/index pair under its "pack/" directory, then refresh the
/// pack info list. On failure, partially-written files are deleted.
/// </summary>
/// <exception cref="NGit.Errors.TransportException"></exception>
private void Sendpack(IList <RemoteRefUpdate> updates, ProgressMonitor monitor)
{
    string pathPack = null;
    string pathIdx = null;
    PackWriter writer = new PackWriter(transport.GetPackConfig(), local.NewObjectReader());
    try
    {
        ICollection <ObjectId> need = new HashSet <ObjectId>();
        ICollection <ObjectId> have = new HashSet <ObjectId>();
        foreach (RemoteRefUpdate r in updates)
        {
            need.AddItem(r.GetNewObjectId());
        }
        foreach (Ref r_1 in GetRefs())
        {
            have.AddItem(r_1.GetObjectId());
            if (r_1.GetPeeledObjectId() != null)
            {
                have.AddItem(r_1.GetPeeledObjectId());
            }
        }
        writer.PreparePack(monitor, need, have);
        // We don't have to continue further if the pack will
        // be an empty pack, as the remote has all objects it
        // needs to complete this change.
        //
        if (writer.GetObjectCount() == 0)
        {
            return;
        }
        packNames = new LinkedHashMap <string, string>();
        foreach (string n in dest.GetPackNames())
        {
            packNames.Put(n, n);
        }
        string @base = "pack-" + writer.ComputeName().Name;
        string packName = @base + ".pack";
        pathPack = "pack/" + packName;
        pathIdx = "pack/" + @base + ".idx";
        if (Sharpen.Collections.Remove(packNames, packName) != null)
        {
            // The remote already contains this pack. We should
            // remove the index before overwriting to prevent bad
            // offsets from appearing to clients.
            //
            dest.WriteInfoPacks(packNames.Keys);
            dest.DeleteFile(pathIdx);
        }
        // Write the pack file, then the index, as readers look the
        // other direction (index, then pack file).
        //
        string wt = "Put " + Sharpen.Runtime.Substring(@base, 0, 12);
        OutputStream os = dest.WriteFile(pathPack, monitor, wt + "..pack");
        try
        {
            os = new SafeBufferedOutputStream(os);
            writer.WritePack(monitor, monitor, os);
        }
        finally
        {
            os.Close();
        }
        os = dest.WriteFile(pathIdx, monitor, wt + "..idx");
        try
        {
            os = new SafeBufferedOutputStream(os);
            writer.WriteIndex(os);
        }
        finally
        {
            os.Close();
        }
        // Record the pack at the start of the pack info list. This
        // way clients are likely to consult the newest pack first,
        // and discover the most recent objects there.
        //
        AList <string> infoPacks = new AList <string>();
        infoPacks.AddItem(packName);
        Sharpen.Collections.AddAll(infoPacks, packNames.Keys);
        dest.WriteInfoPacks(infoPacks);
    }
    catch (IOException err)
    {
        SafeDelete(pathIdx);
        SafeDelete(pathPack);
        throw new TransportException(uri, JGitText.Get().cannotStoreObjects, err);
    }
    finally
    {
        writer.Release();
    }
}
/// <summary>
/// Stream the prepared pack to the client, optionally over side-band
/// channels, honoring the thin-pack, progress, offset-delta and
/// include-tag protocol options.
/// </summary>
private void sendPack()
{
    bool thin = options.Contains(OPTION_THIN_PACK);
    bool progress = !options.Contains(OPTION_NO_PROGRESS);
    bool sideband = options.Contains(OPTION_SIDE_BAND) || options.Contains(OPTION_SIDE_BAND_64K);
    ProgressMonitor pm = new NullProgressMonitor();
    Stream packOut = stream;
    if (sideband)
    {
        // Reserve room for the side-band frame header in the buffer size.
        int bufsz = SideBandOutputStream.SMALL_BUF;
        if (options.Contains(OPTION_SIDE_BAND_64K))
            bufsz = SideBandOutputStream.MAX_BUF;
        bufsz -= SideBandOutputStream.HDR_SIZE;
        packOut = new BufferedStream(new SideBandOutputStream(SideBandOutputStream.CH_DATA, pckOut), bufsz);
        if (progress)
            pm = new SideBandProgressMonitor(pckOut);
    }
    PackWriter pw;
    pw = new PackWriter(db, pm, new NullProgressMonitor());
    pw.DeltaBaseAsOffset = options.Contains(OPTION_OFS_DELTA);
    pw.Thin = thin;
    pw.preparePack(wantAll, commonBase);
    if (options.Contains(OPTION_INCLUDE_TAG))
    {
        foreach (Ref r in refs.Values)
        {
            RevObject o;
            try
            {
                o = walk.parseAny(r.ObjectId);
            }
            catch (IOException)
            {
                // Unparseable ref target: skip it.
                continue;
            }
            // Only annotated tags that were not already wanted qualify.
            if (o.has(WANT) || !(o is RevTag))
                continue;
            RevTag t = (RevTag) o;
            // Add the tag when its target ships but the tag itself would not.
            if (!pw.willInclude(t) && pw.willInclude(t.getObject()))
                pw.addObject(t);
        }
    }
    pw.writePack(packOut);
    if (sideband)
    {
        packOut.Flush();
        pckOut.End();
    }
    else
    {
        stream.Flush();
    }
}
/// <summary>
/// Search every pack known to this object database for a stored
/// representation of <paramref name="otp"/>, offering each hit to the
/// packer; afterwards, let any alternate databases do the same.
/// Restarts the scan from a fresh pack list if a pack mutates mid-search.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
internal override void SelectObjectRepresentation(PackWriter packer, ObjectToPack otp, WindowCursor curs)
{
    ObjectDirectory.PackList pList = packList.Get();
    for (; ; )
    {
        foreach (PackFile p in pList.packs)
        {
            try
            {
                LocalObjectRepresentation rep = p.Representation(curs, otp);
                if (rep != null)
                {
                    // Offer this candidate; the packer keeps the best one.
                    packer.Select(otp, rep);
                }
            }
            catch (PackMismatchException)
            {
                // Pack was modified; refresh the entire pack list.
                //
                pList = ScanPacks(pList);
                // Restart the outer for(;;) with the refreshed list.
                goto SEARCH_continue;
            }
            catch (IOException)
            {
                // Assume the pack is corrupted.
                //
                RemovePack(p);
            }
        }
        // All packs scanned without a mismatch; leave the retry loop.
        goto SEARCH_break;
        SEARCH_continue: ;
    }
    SEARCH_break: ;
    foreach (FileObjectDatabase.AlternateHandle h in MyAlternates())
    {
        h.db.SelectObjectRepresentation(packer, otp, curs);
    }
}
/// <summary>
/// Write the wanted objects (minus haves and excluded indexes) into a new
/// pack/index pair: data is first written to temporary files, fsynced,
/// then atomically renamed into place; temporaries are cleaned up on any
/// failure. Returns null when the pack would be empty or a rename loses a
/// race with a concurrent GC.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private PackFile WritePack <_T0, _T1>(ICollection <_T0> want, ICollection <_T1> have, ICollection <ObjectId> tagTargets, IList <PackIndex> excludeObjects)
    where _T0 : ObjectId
    where _T1 : ObjectId
{
    FilePath tmpPack = null;
    FilePath tmpIdx = null;
    PackWriter pw = new PackWriter(repo);
    try
    {
        // prepare the PackWriter
        pw.SetDeltaBaseAsOffset(true);
        pw.SetReuseDeltaCommits(false);
        if (tagTargets != null)
        {
            pw.SetTagTargets(tagTargets);
        }
        if (excludeObjects != null)
        {
            foreach (PackIndex idx in excludeObjects)
            {
                pw.ExcludeObjects(idx);
            }
        }
        pw.PreparePack(pm, want, have);
        if (pw.GetObjectCount() == 0)
        {
            return(null);
        }
        // create temporary files
        string id = pw.ComputeName().GetName();
        FilePath packdir = new FilePath(repo.ObjectsDirectory, "pack");
        tmpPack = FilePath.CreateTempFile("gc_", ".pack_tmp", packdir);
        tmpIdx = new FilePath(packdir, Sharpen.Runtime.Substring(tmpPack.GetName(), 0, tmpPack.GetName().LastIndexOf('.')) + ".idx_tmp");
        if (!tmpIdx.CreateNewFile())
        {
            throw new IOException(MessageFormat.Format(JGitText.Get().cannotCreateIndexfile, tmpIdx.GetPath()));
        }
        // write the packfile
        FileChannel channel = new FileOutputStream(tmpPack).GetChannel();
        OutputStream channelStream = Channels.NewOutputStream(channel);
        try
        {
            pw.WritePack(pm, pm, channelStream);
        }
        finally
        {
            // Force to disk before close so the rename below is durable.
            channel.Force(true);
            channelStream.Close();
            channel.Close();
        }
        // write the packindex
        FileChannel idxChannel = new FileOutputStream(tmpIdx).GetChannel();
        OutputStream idxStream = Channels.NewOutputStream(idxChannel);
        try
        {
            pw.WriteIndex(idxStream);
        }
        finally
        {
            idxChannel.Force(true);
            idxStream.Close();
            idxChannel.Close();
        }
        // rename the temporary files to real files
        FilePath realPack = NameFor(id, ".pack");
        tmpPack.SetReadOnly();
        FilePath realIdx = NameFor(id, ".idx");
        realIdx.SetReadOnly();
        bool delete = true;
        try
        {
            if (!tmpPack.RenameTo(realPack))
            {
                // Lost a race with another writer; caller retries or gives up.
                return(null);
            }
            // Pack is live now; keep it even if the index rename fails.
            delete = false;
            if (!tmpIdx.RenameTo(realIdx))
            {
                // Park the index under a ".new" name so it is not lost.
                FilePath newIdx = new FilePath(realIdx.GetParentFile(), realIdx.GetName() + ".new");
                if (!tmpIdx.RenameTo(newIdx))
                {
                    newIdx = tmpIdx;
                }
                throw new IOException(MessageFormat.Format(JGitText.Get().panicCantRenameIndexFile, newIdx, realIdx));
            }
        }
        finally
        {
            if (delete && tmpPack.Exists())
            {
                tmpPack.Delete();
            }
            if (delete && tmpIdx.Exists())
            {
                tmpIdx.Delete();
            }
        }
        return(((ObjectDirectory)repo.ObjectDatabase).OpenPack(realPack, realIdx));
    }
    finally
    {
        pw.Release();
        // Best-effort cleanup of any leftover temporaries.
        if (tmpPack != null && tmpPack.Exists())
        {
            tmpPack.Delete();
        }
        if (tmpIdx != null && tmpIdx.Exists())
        {
            tmpIdx.Delete();
        }
    }
}
/// <summary>
/// Upload the objects required by <paramref name="updates"/> to the remote
/// as a new pack/index pair under its "pack/" directory, then refresh the
/// pack info list. On failure, partially-written files are deleted.
/// </summary>
/// <exception cref="NGit.Errors.TransportException"></exception>
private void Sendpack(IList<RemoteRefUpdate> updates, ProgressMonitor monitor)
{
    string pathPack = null;
    string pathIdx = null;
    PackWriter writer = new PackWriter(transport.GetPackConfig(), local.NewObjectReader());
    try
    {
        IList<ObjectId> need = new AList<ObjectId>();
        IList<ObjectId> have = new AList<ObjectId>();
        foreach (RemoteRefUpdate r in updates)
        {
            need.AddItem(r.GetNewObjectId());
        }
        foreach (Ref r_1 in GetRefs())
        {
            have.AddItem(r_1.GetObjectId());
            if (r_1.GetPeeledObjectId() != null)
            {
                have.AddItem(r_1.GetPeeledObjectId());
            }
        }
        writer.PreparePack(monitor, need, have);
        // We don't have to continue further if the pack will
        // be an empty pack, as the remote has all objects it
        // needs to complete this change.
        //
        if (writer.GetObjectCount() == 0)
        {
            return;
        }
        packNames = new LinkedHashMap<string, string>();
        foreach (string n in dest.GetPackNames())
        {
            packNames.Put(n, n);
        }
        string @base = "pack-" + writer.ComputeName().Name;
        string packName = @base + ".pack";
        pathPack = "pack/" + packName;
        pathIdx = "pack/" + @base + ".idx";
        if (Sharpen.Collections.Remove(packNames, packName) != null)
        {
            // The remote already contains this pack. We should
            // remove the index before overwriting to prevent bad
            // offsets from appearing to clients.
            //
            dest.WriteInfoPacks(packNames.Keys);
            dest.DeleteFile(pathIdx);
        }
        // Write the pack file, then the index, as readers look the
        // other direction (index, then pack file).
        //
        string wt = "Put " + Sharpen.Runtime.Substring(@base, 0, 12);
        OutputStream os = dest.WriteFile(pathPack, monitor, wt + "..pack");
        try
        {
            os = new BufferedOutputStream(os);
            writer.WritePack(monitor, monitor, os);
        }
        finally
        {
            os.Close();
        }
        os = dest.WriteFile(pathIdx, monitor, wt + "..idx");
        try
        {
            os = new BufferedOutputStream(os);
            writer.WriteIndex(os);
        }
        finally
        {
            os.Close();
        }
        // Record the pack at the start of the pack info list. This
        // way clients are likely to consult the newest pack first,
        // and discover the most recent objects there.
        //
        AList<string> infoPacks = new AList<string>();
        infoPacks.AddItem(packName);
        Sharpen.Collections.AddAll(infoPacks, packNames.Keys);
        dest.WriteInfoPacks(infoPacks);
    }
    catch (IOException err)
    {
        SafeDelete(pathIdx);
        SafeDelete(pathPack);
        throw new TransportException(uri, JGitText.Get().cannotStoreObjects, err);
    }
    finally
    {
        writer.Release();
    }
}
/// <summary>
/// Delegate representation selection to the wrapped object database.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
internal override void SelectObjectRepresentation(PackWriter packer, ObjectToPack otp, WindowCursor curs)
{
    wrapped.SelectObjectRepresentation(packer, otp, curs);
}
/// <summary>
/// Prepare and send the pack of pushed objects over the connection's
/// output stream, recording the time spent writing.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private void WritePack(IDictionary<string, RemoteRefUpdate> refUpdates, ProgressMonitor monitor)
{
    ICollection<ObjectId> remoteObjects = new HashSet<ObjectId>();
    ICollection<ObjectId> newObjects = new HashSet<ObjectId>();
    PackWriter writer = new PackWriter(transport.GetPackConfig(), local.NewObjectReader());
    try
    {
        // The remote is assumed to have every advertised ref tip plus the
        // extra ".have" advertisements.
        foreach (Ref r in GetRefs())
        {
            remoteObjects.AddItem(r.GetObjectId());
        }
        Sharpen.Collections.AddAll(remoteObjects, additionalHaves);
        // Push the new tip of every update that is not a deletion.
        foreach (RemoteRefUpdate r_1 in refUpdates.Values)
        {
            if (!ObjectId.ZeroId.Equals(r_1.GetNewObjectId()))
            {
                newObjects.AddItem(r_1.GetNewObjectId());
            }
        }
        writer.SetUseCachedPacks(true);
        writer.SetThin(thinPack);
        writer.SetReuseValidatingObjects(false);
        writer.SetDeltaBaseAsOffset(capableOfsDelta);
        writer.PreparePack(monitor, newObjects, remoteObjects);
        writer.WritePack(monitor, monitor, @out);
    }
    finally
    {
        writer.Release();
    }
    // NOTE(review): statistics are read after Release() — confirm the
    // writer keeps them valid post-release.
    packTransferTime = writer.GetStatistics().GetTimeWriting();
}
/// <summary>
/// Construct a writer that will produce a bundle from the given
/// repository, reporting pack progress to <paramref name="monitor"/>.
/// </summary>
/// <param name="repo">repository the bundled objects are read from.</param>
/// <param name="monitor">progress monitor for pack generation.</param>
public BundleWriter(Repository repo, ProgressMonitor monitor)
{
    _include = new Dictionary<String, ObjectId>();
    _assume = new HashSet<RevCommit>();
    _packWriter = new PackWriter(repo, monitor);
}
/// <summary>Initialize a pack output stream.</summary>
/// <remarks>
/// Initialize a pack output stream.
/// <p>
/// Exposed only to support debugging the JGit library; application and
/// storage code should let
/// <see cref="PackWriter">PackWriter</see>
/// create the stream itself.
/// </remarks>
/// <param name="writeMonitor">monitor updated as objects are written.</param>
/// <param name="out">destination stream receiving all object contents.</param>
/// <param name="pw">packer performing the output.</param>
public PackOutputStream(ProgressMonitor writeMonitor, OutputStream @out, PackWriter pw)
{
    this.packWriter = pw;
    this.@out = @out;
    this.writeMonitor = writeMonitor;
    // First cancellation check happens after the initial write budget.
    this.checkCancelAt = BYTES_TO_WRITE_BEFORE_CANCEL_CHECK;
}
/// <summary>
/// Choose a stored representation for each queued object by delegating to
/// the underlying database, ticking the monitor once per object.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
/// <exception cref="NGit.Errors.MissingObjectException"></exception>
public void SelectObjectRepresentation(PackWriter packer, ProgressMonitor monitor, Iterable<ObjectToPack> objects)
{
    foreach (ObjectToPack candidate in objects)
    {
        db.SelectObjectRepresentation(packer, candidate, this);
        monitor.Update(1);
    }
}
/// <summary>
/// Build and stream the pack for the negotiated want/have sets. Verifies
/// request EOF on uni-directional pipes, consults the pre-upload hook,
/// optionally multiplexes over side-band channels, and records statistics.
/// </summary>
/// <exception cref="System.IO.IOException"></exception>
private void SendPack()
{
    bool sideband = options.Contains(OPTION_SIDE_BAND) || options.Contains(OPTION_SIDE_BAND_64K);
    if (!biDirectionalPipe)
    {
        // Ensure the request was fully consumed. Any remaining input must
        // be a protocol error. If we aren't at EOF the implementation is broken.
        int eof = rawIn.Read();
        if (0 <= eof)
        {
            throw new CorruptObjectException(MessageFormat.Format(JGitText.Get().expectedEOFReceived, "\\x" + Sharpen.Extensions.ToHexString(eof)));
        }
    }
    ProgressMonitor pm = NullProgressMonitor.INSTANCE;
    OutputStream packOut = rawOut;
    SideBandOutputStream msgOut = null;
    if (sideband)
    {
        // Pack data goes out on channel 1; progress (when enabled) on channel 2.
        int bufsz = SideBandOutputStream.SMALL_BUF;
        if (options.Contains(OPTION_SIDE_BAND_64K))
        {
            bufsz = SideBandOutputStream.MAX_BUF;
        }
        packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, rawOut);
        if (!options.Contains(OPTION_NO_PROGRESS))
        {
            msgOut = new SideBandOutputStream(SideBandOutputStream.CH_PROGRESS, bufsz, rawOut);
            pm = new SideBandProgressMonitor(msgOut);
        }
    }
    try
    {
        // Give the pre-upload hook a chance to veto or account for the send.
        if (wantAll.IsEmpty())
        {
            preUploadHook.OnSendPack(this, wantIds, commonBase);
        }
        else
        {
            preUploadHook.OnSendPack(this, wantAll, commonBase);
        }
    }
    catch (UploadPackMayNotContinueException noPack)
    {
        // Report the refusal on the error channel when side-band is active.
        if (sideband && noPack.Message != null)
        {
            noPack.SetOutput();
            SideBandOutputStream err = new SideBandOutputStream(SideBandOutputStream.CH_ERROR, SideBandOutputStream.SMALL_BUF, rawOut);
            err.Write(Constants.Encode(noPack.Message));
            err.Flush();
        }
        throw;
    }
    PackConfig cfg = packConfig;
    if (cfg == null)
    {
        cfg = new PackConfig(db);
    }
    PackWriter pw = new PackWriter(cfg, walk.GetObjectReader());
    try
    {
        pw.SetUseCachedPacks(true);
        pw.SetReuseDeltaCommits(true);
        pw.SetDeltaBaseAsOffset(options.Contains(OPTION_OFS_DELTA));
        pw.SetThin(options.Contains(OPTION_THIN_PACK));
        pw.SetReuseValidatingObjects(false);
        if (commonBase.IsEmpty())
        {
            // No common base: hint the writer with peeled tag targets and
            // branch heads so tag contents delta well.
            ICollection<ObjectId> tagTargets = new HashSet<ObjectId>();
            foreach (Ref @ref in refs.Values)
            {
                if (@ref.GetPeeledObjectId() != null)
                {
                    tagTargets.AddItem(@ref.GetPeeledObjectId());
                }
                else
                {
                    if (@ref.GetObjectId() == null)
                    {
                        continue;
                    }
                    else
                    {
                        if (@ref.GetName().StartsWith(Constants.R_HEADS))
                        {
                            tagTargets.AddItem(@ref.GetObjectId());
                        }
                    }
                }
            }
            pw.SetTagTargets(tagTargets);
        }
        RevWalk rw = walk;
        if (wantAll.IsEmpty())
        {
            pw.PreparePack(pm, wantIds, commonBase);
        }
        else
        {
            walk.Reset();
            ObjectWalk ow = walk.ToObjectWalkWithSameObjects();
            pw.PreparePack(pm, ow, wantAll, commonBase);
            rw = ow;
        }
        if (options.Contains(OPTION_INCLUDE_TAG))
        {
            foreach (Ref vref in refs.Values)
            {
                Ref @ref = vref;
                ObjectId objectId = @ref.GetObjectId();
                // If the object was already requested, skip it.
                if (wantAll.IsEmpty())
                {
                    if (wantIds.Contains(objectId))
                    {
                        continue;
                    }
                }
                else
                {
                    RevObject obj = rw.LookupOrNull(objectId);
                    if (obj != null && obj.Has(WANT))
                    {
                        continue;
                    }
                }
                // BUGFIX: this condition had been corrupted to the literal
                // text "[email protected]()" (an autolink/redaction
                // artifact); restored to the peel check.
                if (!@ref.IsPeeled())
                {
                    @ref = db.Peel(@ref);
                }
                ObjectId peeledId = @ref.GetPeeledObjectId();
                if (peeledId == null)
                {
                    continue;
                }
                objectId = @ref.GetObjectId();
                // Ship the annotated tag only when its target is in the pack
                // but the tag object itself would otherwise be omitted.
                if (pw.WillInclude(peeledId) && !pw.WillInclude(objectId))
                {
                    pw.AddObject(rw.ParseAny(objectId));
                }
            }
        }
        pw.WritePack(pm, NullProgressMonitor.INSTANCE, packOut);
        statistics = pw.GetStatistics();
        if (msgOut != null)
        {
            string msg = pw.GetStatistics().GetMessage() + '\n';
            msgOut.Write(Constants.Encode(msg));
            msgOut.Flush();
        }
    }
    finally
    {
        pw.Release();
    }
    if (sideband)
    {
        pckOut.End();
    }
    if (logger != null && statistics != null)
    {
        logger.OnPackStatistics(statistics);
    }
}
private void SendPack() { bool thin = _options.Contains(OptionThinPack); bool progress = !_options.Contains(OptionNoProgress); bool sideband = _options.Contains(OptionSideBand) || _options.Contains(OptionSideBand64K); ProgressMonitor pm = NullProgressMonitor.Instance; Stream _packOut = _rawOut; if (sideband) { int bufsz = SideBandOutputStream.SMALL_BUF; if (_options.Contains(OptionSideBand64K)) { bufsz = SideBandOutputStream.MAX_BUF; } _packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, _rawOut); if (progress) { pm = new SideBandProgressMonitor(new SideBandOutputStream(SideBandOutputStream.CH_PROGRESS, bufsz, _rawOut)); } } var pw = new PackWriter(_db, pm, NullProgressMonitor.Instance) { DeltaBaseAsOffset = _options.Contains(OptionOfsDelta), Thin = thin }; pw.preparePack(_wantAll, _commonBase); if (_options.Contains(OptionIncludeTag)) { foreach (Ref r in _refs.Values) { RevObject o; try { o = _walk.parseAny(r.ObjectId); } catch (IOException) { continue; } RevTag t = (o as RevTag); if (o.has(WANT) || (t == null)) { continue; } if (!pw.willInclude(t) && pw.willInclude(t.getObject())) { pw.addObject(t); } } } pw.writePack(_packOut); if (sideband) { _packOut.Flush(); _pckOut.End(); } else { _rawOut.Flush(); } }
private FileInfo[] Pack(Core.Repository src, params RevObject[] list) { var pw = new PackWriter(src, NullProgressMonitor.Instance); foreach (RevObject o in list) { pw.addObject(o); } ObjectId name = pw.computeName(); FileInfo packFile = FullPackFileName(name); FileInfo idxFile = FullIndexFileName(name); var files = new[] { packFile, idxFile }; Write(files, pw); return files; }
/// <exception cref="System.IO.IOException"></exception> private FilePath[] Pack(Repository src, params RevObject[] list) { PackWriter pw = new PackWriter(src); foreach (RevObject o in list) { pw.AddObject(o); } ObjectId name = pw.ComputeName(); FilePath packFile = FullPackFileName(name, ".pack"); FilePath idxFile = FullPackFileName(name, ".idx"); FilePath[] files = new FilePath[] { packFile, idxFile }; Write(files, pw); pw.Release(); return files; }
private static void Write(FileInfo[] files, PackWriter pw) { FileInfo file = files[0]; long begin = file.Directory.lastModified(); using (var stream = file.Create()) { pw.writePack(stream); } file = files[1]; using (var stream = file.Create()) { pw.writeIndex(stream); } Touch(begin, files[0].Directory); }
/// <summary>
/// Computes and streams the pack to the client, optionally multiplexing the
/// data, progress, and error channels over the side-band protocol and
/// honoring shallow-clone depth limits.
/// </summary>
/// <param name="sideband">true when output is multiplexed over side-band channels.</param>
/// <exception cref="System.IO.IOException"></exception>
private void SendPack(bool sideband)
{
    ProgressMonitor pm = NullProgressMonitor.INSTANCE;
    OutputStream packOut = rawOut;
    SideBandOutputStream msgOut = null;
    if (sideband)
    {
        // Side-band clients get pack data on CH_DATA; progress (if enabled)
        // is reported on CH_PROGRESS through a dedicated monitor.
        int bufsz = SideBandOutputStream.SMALL_BUF;
        if (options.Contains(OPTION_SIDE_BAND_64K))
        {
            bufsz = SideBandOutputStream.MAX_BUF;
        }
        packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, rawOut);
        if (!options.Contains(OPTION_NO_PROGRESS))
        {
            msgOut = new SideBandOutputStream(SideBandOutputStream.CH_PROGRESS, bufsz, rawOut);
            pm = new SideBandProgressMonitor(msgOut);
        }
    }
    try
    {
        // Give the pre-upload hook a chance to veto sending this pack.
        if (wantAll.IsEmpty())
        {
            preUploadHook.OnSendPack(this, wantIds, commonBase);
        }
        else
        {
            preUploadHook.OnSendPack(this, wantAll, commonBase);
        }
    }
    catch (ServiceMayNotContinueException noPack)
    {
        // Relay the refusal message to side-band clients before aborting.
        if (sideband && noPack.Message != null)
        {
            noPack.SetOutput();
            SideBandOutputStream err = new SideBandOutputStream(SideBandOutputStream.CH_ERROR, SideBandOutputStream.SMALL_BUF, rawOut);
            err.Write(Constants.Encode(noPack.Message));
            err.Flush();
        }
        throw;
    }
    PackConfig cfg = packConfig;
    if (cfg == null)
    {
        cfg = new PackConfig(db);
    }
    PackWriter pw = new PackWriter(cfg, walk.GetObjectReader());
    try
    {
        pw.SetUseCachedPacks(true);
        pw.SetReuseDeltaCommits(true);
        pw.SetDeltaBaseAsOffset(options.Contains(OPTION_OFS_DELTA));
        pw.SetThin(options.Contains(OPTION_THIN_PACK));
        pw.SetReuseValidatingObjects(false);
        if (commonBase.IsEmpty() && refs != null)
        {
            // Initial clone: collect the objects our refs point at so the
            // writer can prefer deltas against likely tag targets.
            ICollection<ObjectId> tagTargets = new HashSet<ObjectId>();
            foreach (Ref @ref in refs.Values)
            {
                if (@ref.GetPeeledObjectId() != null)
                {
                    tagTargets.AddItem(@ref.GetPeeledObjectId());
                }
                else
                {
                    if (@ref.GetObjectId() == null)
                    {
                        continue;
                    }
                    else
                    {
                        if (@ref.GetName().StartsWith(Constants.R_HEADS))
                        {
                            tagTargets.AddItem(@ref.GetObjectId());
                        }
                    }
                }
            }
            pw.SetTagTargets(tagTargets);
        }
        if (depth > 0)
        {
            // Shallow fetch: limit history and report the commits the
            // client must now treat as complete.
            pw.SetShallowPack(depth, unshallowCommits);
        }
        RevWalk rw = walk;
        if (wantAll.IsEmpty())
        {
            pw.PreparePack(pm, wantIds, commonBase);
        }
        else
        {
            walk.Reset();
            ObjectWalk ow = walk.ToObjectWalkWithSameObjects();
            pw.PreparePack(pm, ow, wantAll, commonBase);
            rw = ow;
        }
        if (options.Contains(OPTION_INCLUDE_TAG) && refs != null)
        {
            foreach (Ref vref in refs.Values)
            {
                Ref @ref = vref;
                ObjectId objectId = @ref.GetObjectId();
                // If the object was already requested, skip it.
                if (wantAll.IsEmpty())
                {
                    if (wantIds.Contains(objectId))
                    {
                        continue;
                    }
                }
                else
                {
                    RevObject obj = rw.LookupOrNull(objectId);
                    if (obj != null && obj.Has(WANT))
                    {
                        continue;
                    }
                }
                // Peel the ref if necessary so annotated tags expose their
                // target. (Restored from a mangled token in the original
                // source; matches upstream JGit's !ref.isPeeled().)
                if (!@ref.IsPeeled())
                {
                    @ref = db.Peel(@ref);
                }
                ObjectId peeledId = @ref.GetPeeledObjectId();
                if (peeledId == null)
                {
                    continue;
                }
                // Add the annotated tag when its target is already going
                // into the pack but the tag object itself is not.
                objectId = @ref.GetObjectId();
                if (pw.WillInclude(peeledId) && !pw.WillInclude(objectId))
                {
                    pw.AddObject(rw.ParseAny(objectId));
                }
            }
        }
        pw.WritePack(pm, NullProgressMonitor.INSTANCE, packOut);
        statistics = pw.GetStatistics();
        if (msgOut != null)
        {
            string msg = pw.GetStatistics().GetMessage() + '\n';
            msgOut.Write(Constants.Encode(msg));
            msgOut.Flush();
        }
    }
    finally
    {
        pw.Release();
    }
    if (sideband)
    {
        pckOut.End();
    }
    // Defensive null check on logger, consistent with the other SendPack
    // overload in this file.
    if (logger != null && statistics != null)
    {
        logger.OnPackStatistics(statistics);
    }
}
/// <exception cref="System.IO.IOException"></exception> private void SendPack() { bool sideband = options.Contains(OPTION_SIDE_BAND) || options.Contains(OPTION_SIDE_BAND_64K ); ProgressMonitor pm = NullProgressMonitor.INSTANCE; OutputStream packOut = rawOut; if (sideband) { int bufsz = SideBandOutputStream.SMALL_BUF; if (options.Contains(OPTION_SIDE_BAND_64K)) { bufsz = SideBandOutputStream.MAX_BUF; } packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, rawOut); if (!options.Contains(OPTION_NO_PROGRESS)) { pm = new SideBandProgressMonitor(new SideBandOutputStream(SideBandOutputStream.CH_PROGRESS , bufsz, rawOut)); } } PackConfig cfg = packConfig; if (cfg == null) { cfg = new PackConfig(db); } PackWriter pw = new PackWriter(cfg, walk.GetObjectReader()); try { pw.SetDeltaBaseAsOffset(options.Contains(OPTION_OFS_DELTA)); pw.SetThin(options.Contains(OPTION_THIN_PACK)); pw.PreparePack(pm, wantAll, commonBase); if (options.Contains(OPTION_INCLUDE_TAG)) { foreach (Ref r in refs.Values) { RevObject o; try { o = walk.ParseAny(r.GetObjectId()); } catch (IOException) { continue; } if (o.Has(WANT) || !(o is RevTag)) { continue; } RevTag t = (RevTag)o; if (!pw.WillInclude(t) && pw.WillInclude(t.GetObject())) { pw.AddObject(t); } } } pw.WritePack(pm, NullProgressMonitor.INSTANCE, packOut); } finally { pw.Release(); } packOut.Flush(); if (sideband) { pckOut.End(); } }
private void Sendpack(IEnumerable<RemoteRefUpdate> updates, ProgressMonitor monitor) { string pathPack = null; string pathIdx = null; try { var pw = new PackWriter(_local, monitor); var need = new List<ObjectId>(); var have = new List<ObjectId>(); foreach (RemoteRefUpdate r in updates) { need.Add(r.NewObjectId); } foreach (Ref r in Refs) { have.Add(r.ObjectId); if (r.PeeledObjectId != null) { have.Add(r.PeeledObjectId); } } pw.preparePack(need, have); // We don't have to continue further if the pack will // be an empty pack, as the remote has all objects it // needs to complete this change. // if (pw.getObjectsNumber() == 0) return; _packNames = new Dictionary<string, string>(); foreach (string n in _dest.getPackNames()) { _packNames.put(n, n); } string b = "pack-" + pw.computeName().Name; string packName = b + IndexPack.PackSuffix; pathPack = "pack/" + packName; pathIdx = "pack/" + b + IndexPack.IndexSuffix; if (_packNames.remove(packName) != null) { // The remote already contains this pack. We should // remove the index before overwriting to prevent bad // offsets from appearing to clients. // _dest.writeInfoPacks(_packNames.Keys); _dest.deleteFile(pathIdx); } // Write the pack file, then the index, as readers look the // other direction (index, then pack file). // string wt = "Put " + b.Slice(0, 12); using (Stream os = _dest.writeFile(pathPack, monitor, wt + "." + IndexPack.PackSuffix)) { pw.writePack(os); } using (Stream os = _dest.writeFile(pathIdx, monitor, wt + "." + IndexPack.IndexSuffix)) { pw.writeIndex(os); } // Record the pack at the start of the pack info list. This // way clients are likely to consult the newest pack first, // and discover the most recent objects there. // var infoPacks = new List<string> { packName }; infoPacks.AddRange(_packNames.Keys); _dest.writeInfoPacks(infoPacks); } catch (IOException err) { SafeDelete(pathIdx); SafeDelete(pathPack); throw new TransportException(_uri, "cannot store objects", err); } }
private void writePack(IDictionary<string, RemoteRefUpdate> refUpdates, ProgressMonitor monitor) { PackWriter writer = new PackWriter(local, monitor); List<ObjectId> remoteObjects = new List<ObjectId>(Refs.Count); List<ObjectId> newObjects = new List<ObjectId>(refUpdates.Count); foreach (Ref r in Refs) remoteObjects.Add(r.ObjectId); remoteObjects.AddRange(additionalHaves); foreach (RemoteRefUpdate r in refUpdates.Values) { if (!ObjectId.ZeroId.Equals(r.NewObjectId)) newObjects.Add(r.NewObjectId); } writer.Thin = _thinPack; writer.DeltaBaseAsOffset = _capableOfsDelta; writer.preparePack(newObjects, remoteObjects); writer.writePack(stream); }
public override void OnPackStatistics(PackWriter.Statistics stats) { }
/// <summary> /// Create a writer for a bundle. /// </summary> /// <param name="repo">repository where objects are stored.</param> /// <param name="monitor">operations progress monitor.</param> public BundleWriter(Repository repo, ProgressMonitor monitor) { _packWriter = new PackWriter(repo, monitor); _include = new Dictionary <String, ObjectId>(); _assume = new HashSet <RevCommit>(); }
// Do nothing. /// <summary>Notice to the logger after a pack has been sent.</summary> /// <remarks>Notice to the logger after a pack has been sent.</remarks> /// <param name="stats">the statistics after sending a pack to the client.</param> public abstract void OnPackStatistics(PackWriter.Statistics stats);
/// <summary>Generate and write the bundle to the output stream.</summary> /// <remarks> /// Generate and write the bundle to the output stream. /// <p> /// This method can only be called once per BundleWriter instance. /// </remarks> /// <param name="monitor">progress monitor to report bundle writing status to.</param> /// <param name="os"> /// the stream the bundle is written to. The stream should be /// buffered by the caller. The caller is responsible for closing /// the stream. /// </param> /// <exception cref="System.IO.IOException"> /// an error occurred reading a local object's data to include in /// the bundle, or writing compressed object data to the output /// stream. /// </exception> public virtual void WriteBundle(ProgressMonitor monitor, OutputStream os) { PackConfig pc = packConfig; if (pc == null) { pc = new PackConfig(db); } PackWriter packWriter = new PackWriter(pc, db.NewObjectReader()); try { HashSet<ObjectId> inc = new HashSet<ObjectId>(); HashSet<ObjectId> exc = new HashSet<ObjectId>(); Sharpen.Collections.AddAll(inc, include.Values); foreach (RevCommit r in assume) { exc.AddItem(r.Id); } packWriter.SetDeltaBaseAsOffset(true); packWriter.SetThin(exc.Count > 0); packWriter.SetReuseValidatingObjects(false); if (exc.Count == 0) { packWriter.SetTagTargets(tagTargets); } packWriter.PreparePack(monitor, inc, exc); TextWriter w = new OutputStreamWriter(os, Constants.CHARSET); w.Write(NGit.Transport.TransportBundleConstants.V2_BUNDLE_SIGNATURE); w.Write('\n'); char[] tmp = new char[Constants.OBJECT_ID_STRING_LENGTH]; foreach (RevCommit a in assume) { w.Write('-'); a.CopyTo(tmp, w); if (a.RawBuffer != null) { w.Write(' '); w.Write(a.GetShortMessage()); } w.Write('\n'); } foreach (KeyValuePair<string, ObjectId> e in include.EntrySet()) { e.Value.CopyTo(tmp, w); w.Write(' '); w.Write(e.Key); w.Write('\n'); } w.Write('\n'); w.Flush(); packWriter.WritePack(monitor, monitor, os); } finally { packWriter.Release(); } }
private void Sendpack(IEnumerable <RemoteRefUpdate> updates, ProgressMonitor monitor) { string pathPack = null; string pathIdx = null; try { var pw = new PackWriter(_local, monitor); var need = new List <ObjectId>(); var have = new List <ObjectId>(); foreach (RemoteRefUpdate r in updates) { need.Add(r.NewObjectId); } foreach (Ref r in Refs) { have.Add(r.ObjectId); if (r.PeeledObjectId != null) { have.Add(r.PeeledObjectId); } } pw.preparePack(need, have); if (pw.getObjectsNumber() == 0) { return; } _packNames = new Dictionary <string, string>(); foreach (string n in _dest.getPackNames()) { _packNames.Add(n, n); } string b = "pack-" + pw.computeName().Name; string packName = b + IndexPack.PackSuffix; pathPack = "pack/" + packName; pathIdx = "pack/" + b + IndexPack.IndexSuffix; if (_packNames.Remove(packName)) { _dest.writeInfoPacks(new List <string>(_packNames.Keys)); _dest.deleteFile(pathIdx); } string wt = "Put " + b.Slice(0, 12); Stream os = _dest.writeFile(pathPack, monitor, wt + "." + IndexPack.PackSuffix); try { pw.writePack(os); } finally { os.Close(); } os = _dest.writeFile(pathIdx, monitor, wt + "..idx"); try { pw.writeIndex(os); } finally { os.Close(); } var infoPacks = new List <string> { packName }; infoPacks.AddRange(_packNames.Keys); _dest.writeInfoPacks(infoPacks); } catch (IOException err) { SafeDelete(pathIdx); SafeDelete(pathPack); throw new TransportException(_uri, "cannot store objects", err); } }