public Ref(PackFile pack, long position, T v, Queue queue)
    : base(v)
{
    _queue = queue;
    this.pack = pack;
    this.position = position;
}
private static Dictionary<string, PackFile> ReuseMap(PackList old)
{
    var forReuse = new Dictionary<string, PackFile>();
    foreach (PackFile p in old.packs)
    {
        if (p.IsInvalid)
        {
            // The pack instance is corrupted, and cannot be safely used
            // again. Do not include it in our reuse map.
            //
            p.Close();
            continue;
        }

        PackFile prior;
        if (forReuse.TryGetValue(p.File.Name, out prior) && prior != null)
        {
            // This should never occur. It should be impossible for us
            // to have two pack files with the same name, as all of them
            // came out of the same directory. If it does, we promised to
            // close any PackFiles we did not reuse, so close the one we
            // just evicted out of the reuse map.
            //
            prior.Close();
        }
        forReuse[p.File.Name] = p;
    }
    return forReuse;
}
public override void Materialize(WindowCursor curs)
{
    if (CachedBytes != null)
    {
        return;
    }

    if (Type != ObjCommit)
    {
        UnpackedObjectCache.Entry cache = PackFile.readCache(DataOffset);
        if (cache != null)
        {
            curs.Release();
            CachedBytes = cache.data;
            return;
        }
    }

    try
    {
        CachedBytes = PackFile.decompress(DataOffset, Size, curs);
        curs.Release();
        if (Type != ObjCommit)
        {
            PackFile.saveCache(DataOffset, CachedBytes, Type);
        }
    }
    catch (IOException dfe)
    {
        throw new CorruptObjectException("object at " + DataOffset + " in " +
            PackFile.File.FullName + " has bad zlib stream", dfe);
    }
}
/// <summary>
/// Clear all entries related to a single file.
/// <para />
/// Typically this method is invoked during <see cref="PackFile.Close()"/>, when we
/// know the pack is never going to be useful to us again (for example, it no
/// longer exists on disk). A concurrent reader loading an entry from this
/// same pack may cause the pack to become stuck in the cache anyway.
/// </summary>
/// <param name="pack">the file to purge all entries of.</param>
internal void removeAll(PackFile pack)
{
    for (int s = 0; s < _tableSize; s++)
    {
        Entry<V> e1 = _table.get(s);
        bool hasDead = false;
        for (Entry<V> e = e1; e != null; e = e.Next)
        {
            if (e.Ref.pack == pack)
            {
                e.Kill();
                hasDead = true;
            }
            else if (e.Dead)
            {
                hasDead = true;
            }
        }

        if (hasDead)
        {
            _table.compareAndSet(s, e1, Clean(e1));
        }
    }
    Gc();
}
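// Usage sketch (illustrative only, not part of the original sources). `cache` stands
// for the cache instance exposing removeAll and `pack` for a PackFile that has just
// been found to be unusable; the helper name is an assumption made for illustration.
internal static void DropDeadPack(WindowCache cache, PackFile pack)
{
    // Kill every cached window belonging to the dead pack so the cache
    // does not keep the file pinned after it has been closed.
    cache.removeAll(pack);
}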
internal override WindowRef createRef(PackFile p, long o, ByteWindow v)
{
    var @ref = new WindowRef(p, o, v, queue);
    _openBytes.addAndGet(@ref.Size);
    return @ref;
}
public static void store(PackFile pack, long position, byte[] data, int objectType)
{
    if (data == null)
    {
        throw new ArgumentNullException("data");
    }

    lock (locker)
    {
        if (data.Length > _maxByteCount)
        {
            return; // Too large to cache.
        }

        Slot e = Cache[Hash(position)];
        ClearEntry(e);

        _openByteCount += data.Length;
        ReleaseMemory();

        e.provider = pack;
        e.position = position;
        e.sz = data.Length;
        e.data = new WeakReference<Entry>(new Entry(data, objectType));
        MoveToHead(e);
    }
}
internal override WindowRef createRef(PackFile p, long o, ByteWindow v)
{
    var @ref = new WindowRef(p, o, v, queue);

    // Retry until the compare-and-set succeeds, otherwise a concurrent
    // update would silently lose this increment.
    long c;
    do
    {
        c = _openBytes.get();
    } while (!_openBytes.compareAndSet(c, c + @ref.Size));

    return @ref;
}
private void Close(PackFile pack)
{
    if (!pack.endWindowCache())
    {
        return;
    }
    _openFiles.decrementAndGet();
}
public override PackedObjectLoader GetBaseLoader(WindowCursor curs)
{
    PackedObjectLoader or = PackFile.Get(curs, _deltaBase);
    if (or == null)
    {
        throw new MissingObjectException(_deltaBase, "delta base");
    }
    return or;
}
private void Close(PackFile pack)
{
    if (!pack.endWindowCache())
    {
        return;
    }

    // Retry until the compare-and-set succeeds so the decrement is not
    // lost when another thread updates the counter concurrently.
    int c;
    do
    {
        c = _openFiles.get();
    } while (!_openFiles.compareAndSet(c, c - 1));
}
private static int indexOf(PackFile[] list, PackFile pack)
{
    for (int i = 0; i < list.Length; i++)
    {
        if (list[i] == pack)
        {
            return i;
        }
    }
    return -1;
}
/// <summary>
/// Lookup a cached object, creating and loading it if it doesn't exist.
/// </summary>
/// <param name="pack">the pack that "contains" the cached object.</param>
/// <param name="position">offset within <paramref name="pack"/> of the object.</param>
/// <returns>The object reference.</returns>
/// <exception cref="Exception">
/// The object reference was not in the cache and could not be
/// obtained by <see cref="load(PackFile, long)"/>.
/// </exception>
internal V getOrLoad(PackFile pack, long position)
{
    int slot = this.Slot(pack, position);
    Entry<V> e1 = _table.get(slot);
    V v = Scan(e1, pack, position);
    if (v != null)
    {
        return v;
    }

    lock (Lock(pack, position))
    {
        Entry<V> e2 = _table.get(slot);
        if (e2 != e1)
        {
            v = Scan(e2, pack, position);
            if (v != null)
            {
                return v;
            }
        }

        v = load(pack, position);
        Ref<V> @ref = createRef(pack, position, v);
        Hit(@ref);
        while (true)
        {
            var n = new Entry<V>(Clean(e2), @ref);
            if (_table.compareAndSet(slot, e2, n))
            {
                break;
            }
            e2 = _table.get(slot);
        }
    }

    if (_evictLock.WaitOne())
    {
        try
        {
            Gc();
            Evict();
        }
        finally
        {
            _evictLock.Set();
        }
    }
    return v;
}
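// Minimal sketch (an assumption, not part of the cache above) of the lookup pattern
// getOrLoad follows: an unlocked scan first, then a re-check and a single load under
// a lock. It uses a plain Dictionary and one lock instead of the striped locks, CAS
// table and reference queue used by the real implementation.
private readonly object _sketchLock = new object();
private readonly System.Collections.Generic.Dictionary<long, byte[]> _sketchCache =
    new System.Collections.Generic.Dictionary<long, byte[]>();

private byte[] SketchGetOrLoad(long key, System.Func<long, byte[]> load)
{
    byte[] v;
    if (_sketchCache.TryGetValue(key, out v))
    {
        return v; // fast path: found without taking the lock
    }

    lock (_sketchLock)
    {
        if (!_sketchCache.TryGetValue(key, out v))
        {
            v = load(key);        // miss: load exactly once while holding the lock
            _sketchCache[key] = v;
        }
    }
    return v;
}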
private void InsertPack(PackFile pf)
{
    PackList o, n;
    do
    {
        o = _packList.get();
        PackFile[] oldList = o.packs;
        var newList = new PackFile[1 + oldList.Length];
        newList[0] = pf;
        Array.Copy(oldList, 0, newList, 1, oldList.Length);
        n = new PackList(o.lastRead, o.lastModified, newList);
    } while (!_packList.compareAndSet(o, n));
}
public static void purge(PackFile file)
{
    lock (locker)
    {
        foreach (Slot e in Cache)
        {
            if (e.provider == file)
            {
                ClearEntry(e);
                Unlink(e);
            }
        }
    }
}
/// <summary>
/// Copy bytes from the window to a caller supplied buffer.
/// </summary>
/// <param name="pack">The file the desired window is stored within.</param>
/// <param name="position">Position within the file to read from.</param>
/// <param name="dstbuf">Destination buffer to copy into.</param>
/// <param name="dstoff">Offset within <paramref name="dstbuf"/> to start copying into.</param>
/// <param name="cnt">
/// The number of bytes to copy. This value may exceed the number of
/// bytes remaining in the window starting at offset <paramref name="position"/>.
/// </param>
/// <returns>
/// Number of bytes actually copied; this may be less than
/// <paramref name="cnt"/> if <paramref name="cnt"/> exceeded the number of
/// bytes available.
/// </returns>
/// <exception cref="IOException">
/// This cursor does not match the provider or id and the proper
/// window could not be acquired through the provider's cache.
/// </exception>
public int Copy(PackFile pack, long position, byte[] dstbuf, int dstoff, int cnt)
{
    long length = pack.Length;
    int need = cnt;
    while (need > 0 && position < length)
    {
        Pin(pack, position);
        int r = _byteWindow.copy(position, dstbuf, dstoff, need);
        position += r;
        dstoff += r;
        need -= r;
    }
    return cnt - need;
}
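// Usage sketch (illustrative; `curs` and `pack` are assumed to be an existing
// WindowCursor and an open PackFile obtained elsewhere): read up to `count`
// bytes starting at `position` into a freshly allocated buffer.
private static byte[] CopyFromPack(WindowCursor curs, PackFile pack, long position, int count)
{
    var buf = new byte[count];
    int copied = curs.Copy(pack, position, buf, 0, count);

    // Copy() may return fewer bytes than requested if the pack ends first.
    if (copied != count)
    {
        System.Array.Resize(ref buf, copied);
    }
    return buf;
}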
private void Pin(PackFile pack, long position)
{
    ByteWindow w = _byteWindow;
    if (w == null || !w.contains(pack, position))
    {
        // If memory is low, we may need what is in our window field to
        // be cleaned up by the GC during the get for the next window.
        // So we always clear it, even though we are just going to set
        // it again.
        //
        _byteWindow = null;
        _byteWindow = WindowCache.get(pack, position);
    }
}
public static ByteWindow get(PackFile pack, long offset)
{
    WindowCache c = _cache;
    ByteWindow r = c.getOrLoad(pack, c.ToStart(offset));
    if (c != _cache)
    {
        // The cache was reconfigured while we were using the old one
        // to load this window. The window is still valid, but our
        // cache may think it is still live. Ensure the window is removed
        // from the old cache so resources can be released.
        //
        c.removeAll();
    }
    return r;
}
public static Entry get(PackFile pack, long position)
{
    lock (locker)
    {
        Slot e = Cache[Hash(position)];
        if (e.provider == pack && e.position == position)
        {
            Entry buf = e.data.get();
            if (buf != null)
            {
                MoveToHead(e);
                return buf;
            }
        }
        return null;
    }
}
public override void Materialize(WindowCursor curs)
{
    if (curs == null)
    {
        throw new System.ArgumentNullException("curs");
    }

    if (CachedBytes != null)
    {
        return;
    }

    if (Type != ObjCommit)
    {
        UnpackedObjectCache.Entry cache = PackFile.readCache(DataOffset);
        if (cache != null)
        {
            curs.Release();
            Type = cache.type;
            Size = cache.data.Length;
            CachedBytes = cache.data;
            return;
        }
    }

    try
    {
        PackedObjectLoader baseLoader = GetBaseLoader(curs);
        baseLoader.Materialize(curs);
        CachedBytes = BinaryDelta.Apply(baseLoader.CachedBytes,
            PackFile.decompress(DataOffset, _deltaSize, curs));
        curs.Release();
        Type = baseLoader.Type;
        Size = CachedBytes.Length;
        if (Type != ObjCommit)
        {
            PackFile.saveCache(DataOffset, CachedBytes, Type);
        }
    }
    catch (IOException dfe)
    {
        throw new CorruptObjectException("object at " + DataOffset + " in " +
            PackFile.File.FullName + " has bad zlib stream", dfe);
    }
}
private void RemovePack(PackFile deadPack)
{
    PackList o, n;
    do
    {
        o = _packList.get();
        PackFile[] oldList = o.packs;
        int j = indexOf(oldList, deadPack);
        if (j < 0)
        {
            break;
        }

        var newList = new PackFile[oldList.Length - 1];
        Array.Copy(oldList, 0, newList, 0, j);
        Array.Copy(oldList, j + 1, newList, j, newList.Length - j);
        n = new PackList(o.lastRead, o.lastModified, newList);
    } while (!_packList.compareAndSet(o, n));
    deadPack.Close();
}
private V Scan(Entry<V> n, PackFile pack, long position)
{
    for (; n != null; n = n.Next)
    {
        Ref<V> r = n.Ref;
        if (r.pack == pack && r.position == position)
        {
            V v = r.get();
            if (v != null)
            {
                Hit(r);
                return v;
            }
            n.Kill();
            break;
        }
    }
    return null;
}
internal override ByteWindow load(PackFile pack, long offset)
{
    if (pack.beginWindowCache())
    {
        _openFiles.incrementAndGet();
    }

    try
    {
        if (_memoryMap)
        {
            return pack.MemoryMappedByteWindow(offset, _windowSize);
        }
        return pack.Read(offset, _windowSize);
    }
    catch (Exception)
    {
        Close(pack);
        throw;
    }
}
internal override ByteWindow load(PackFile pack, long offset)
{
    if (pack.beginWindowCache())
    {
        // Retry until the compare-and-set succeeds so the increment is not
        // lost when another thread updates the counter concurrently.
        int c;
        do
        {
            c = _openFiles.get();
        } while (!_openFiles.compareAndSet(c, c + 1));
    }

    try
    {
        if (_memoryMap)
        {
            return pack.MemoryMappedByteWindow(offset, _windowSize);
        }
        return pack.Read(offset, _windowSize);
    }
    catch (Exception)
    {
        Close(pack);
        throw;
    }
}
/// <summary>
/// Pump bytes into the supplied inflater as input.
/// </summary>
/// <param name="pack">The file the desired window is stored within.</param>
/// <param name="position">Position within the file to read from.</param>
/// <param name="dstbuf">
/// Destination buffer the inflater should output decompressed
/// data to.
/// </param>
/// <param name="dstoff">Current offset within <paramref name="dstbuf"/> to inflate into.</param>
/// <returns>
/// Updated <paramref name="dstoff"/> based on the number of bytes
/// successfully inflated into <paramref name="dstbuf"/>.
/// </returns>
/// <exception cref="IOException">
/// This cursor does not match the provider or id and the proper
/// window could not be acquired through the provider's cache.
/// </exception>
public int Inflate(PackFile pack, long position, byte[] dstbuf, int dstoff)
{
    if (_inflater == null)
    {
        _inflater = InflaterCache.Instance.get();
    }
    else
    {
        _inflater.Reset();
    }

    while (true)
    {
        Pin(pack, position);
        dstoff = _byteWindow.Inflate(position, dstbuf, dstoff, _inflater);
        if (_inflater.IsFinished)
        {
            return dstoff;
        }
        position = _byteWindow.End;
    }
}
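// Usage sketch (illustrative; `curs`, `pack` and `expectedSize` are assumptions
// standing in for values normally taken from the object's pack entry header):
// inflate a compressed object starting at `position` into a buffer of known size.
private static byte[] InflateObject(WindowCursor curs, PackFile pack, long position, int expectedSize)
{
    var dst = new byte[expectedSize];
    int end = curs.Inflate(pack, position, dst, 0);

    // Inflate() returns the updated write offset once the zlib stream is finished,
    // so anything other than expectedSize indicates a truncated or oversized stream.
    if (end != expectedSize)
    {
        throw new System.IO.IOException("short inflate: got " + end + " of " + expectedSize + " bytes");
    }
    return dst;
}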
public void InflateVerify(PackFile pack, long position)
{
    if (_inflater == null)
    {
        _inflater = InflaterCache.Instance.get();
    }
    else
    {
        _inflater.Reset();
    }

    while (true)
    {
        Pin(pack, position);
        _byteWindow.inflateVerify(position, _inflater);
        if (_inflater.IsFinished)
        {
            return;
        }
        position = _byteWindow.End;
    }
}
protected PackedObjectLoader(PackFile packFile, long dataOffset, long objectOffset)
{
    _packFile = packFile;
    _dataOffset = dataOffset;
    _objectOffset = objectOffset;
}
protected DeltaPackedObjectLoader(PackFile pr, long dataOffset, long objectOffset, int deltaSz)
    : base(pr, dataOffset, objectOffset)
{
    Type = -1;
    _deltaSize = deltaSz;
}
public override PackedObjectLoader GetBaseLoader(WindowCursor windowCursor)
{
    return PackFile.ResolveBase(windowCursor, _deltaBase);
}
public DeltaOfsPackedObjectLoader(PackFile pr, long dataOffset, long objectOffset, int deltaSz, long @base)
    : base(pr, dataOffset, objectOffset, deltaSz)
{
    _deltaBase = @base;
}
internal ByteArrayWindow(PackFile pack, long o, byte[] b)
    : base(pack, o, b.Length)
{
    _array = b;
}