/// <summary>
/// Gets the latest BackupRecord in this BackupStore.
/// Returns null if no backups.
/// </summary>
/// <param name="bsname">Backup set to read the most recent record from.</param>
/// <returns>The latest BackupRecord, or null when the set has no backups.</returns>
// NOTE(review): this block appears truncated in this view — the closing braces and
// the "no backups" return path are not visible here; verify against the full file.
public BackupRecord GetBackupRecord(BackupSetReference bsname)
{
    var bset = LoadBackupSet(bsname);
    if (bset.Backups.Count > 0)
    {
        // Backups[^1] is the most recently appended entry; delegate to the hash-based overload.
        return(GetBackupRecord(bsname, bset.Backups[^ 1].hash));
/// <summary>
/// Constructs a browser over a single backup in the given backup set.
/// Loads the target backup record (the latest one when no hash is given) and
/// positions the browser at the root of that backup's metadata tree.
/// </summary>
/// <param name="backupset">Name of the backup set to browse.</param>
/// <param name="backuphash">Hash (or prefix) of the backup to open; null selects the most recent backup.</param>
/// <param name="bcore">Core engine providing access to destinations and blob storage.</param>
/// <param name="backupdst">Index of the destination to read from (defaults to 0).</param>
public BackupBrowser(string backupset, string? backuphash, BackupCore.Core bcore, int backupdst = 0)
{
    ContinueLoop = true;
    BCore = bcore;
    BackupDst = backupdst;
    // BUG FIX: previously hard-coded "test" as the set name (debug leftover);
    // use the caller-supplied backup set name.
    BackupSetReference backupSetReference = new BackupSetReference(backupset, false, false, false);
    if (!BCore.DestinationAvailable)
    {
        // Destination unreachable: fall back to browsing the local cache.
        backupSetReference = backupSetReference with { Cache = true };
    }
    (string hash, BackupCore.BackupRecord record) targetbackuphashandrecord;
    if (backuphash == null)
    {
        targetbackuphashandrecord = BCore.DefaultDstDependencies[BackupDst].Backups.GetBackupHashAndRecord(backupSetReference);
    }
    else
    {
        targetbackuphashandrecord = BCore.DefaultDstDependencies[BackupDst].Backups.GetBackupHashAndRecord(backupSetReference, backuphash, 0);
    }
    BackupHash = targetbackuphashandrecord.hash;
    BackupSet = backupset;
    BackupCore.BackupRecord backuprecord = targetbackuphashandrecord.record;
    BackupTree = BackupCore.MetadataNode.Load(BCore.DefaultDstDependencies[BackupDst].Blobs, backuprecord.MetadataTreeHash);
    CurrentNode = BackupTree;
}
/// <summary>
/// Commits reference counts for a newly added backup record and its metadata tree.
/// Counts are only incremented when the backup record / tree is not already
/// referenced by this backup set, preventing double counting on re-addition.
/// Subtrees that already carry a reference for this set are not descended into.
/// </summary>
/// <param name="bsname">Backup set the new references belong to.</param>
/// <param name="backuphash">Hash of the serialized backup record blob.</param>
/// <param name="mtreehash">Hash of the backup's metadata tree root blob.</param>
/// <param name="mtreereferences">Tree of child blob hashes reachable from the metadata tree.</param>
public void FinalizeBackupAddition(BackupSetReference bsname, byte[] backuphash, byte[] mtreehash, HashTreeNode mtreereferences)
{
    BlobLocation backupblocation = GetBlobLocation(backuphash);
    int? backupRefCount = backupblocation.GetBSetReferenceCount(bsname);
    if (!backupRefCount.HasValue || backupRefCount == 0)
    {
        // Backup record not yet counted for this set; consider the metadata tree.
        BlobLocation mtreeblocation = GetBlobLocation(mtreehash);
        int? mtreeRefCount = mtreeblocation.GetBSetReferenceCount(bsname);
        if (!mtreeRefCount.HasValue || mtreeRefCount == 0)
        {
            ISkippableChildrenIterator<byte[]> childReferences = mtreereferences.GetChildIterator();
            foreach (var blobhash in childReferences)
            {
                BlobLocation blocation = GetBlobLocation(blobhash);
                int? refCount = blocation.GetBSetReferenceCount(bsname);
                if (refCount.HasValue && refCount > 0) // This was already stored
                {
                    // Whole subtree was counted previously; skip its children.
                    childReferences.SkipChildrenOfCurrent();
                }
                else if (blocation.BlockHashes != null)
                {
                    // Multiblock reference: count each constituent block as well.
                    foreach (var mbref in blocation.BlockHashes)
                    {
                        IncrementReferenceCountNoRecurse(bsname, mbref, 1);
                    }
                }
                // Count the child itself (even when its subtree was skipped above).
                IncrementReferenceCountNoRecurse(bsname, blocation, blobhash, 1);
            }
            IncrementReferenceCountNoRecurse(bsname, mtreeblocation, mtreehash, 1);
        }
        IncrementReferenceCountNoRecurse(bsname, backupblocation, backuphash, 1);
    }
}
/// <summary>
/// Decrements this backup set's reference count on the blob and, when this set
/// holds the last reference, on every blob reachable from it. Children are
/// decremented post-order (before their parent is finally decremented below) so
/// the parent can still be loaded while its references are being iterated.
/// </summary>
/// <param name="backupsetname">Backup set whose references are being released.</param>
/// <param name="blobhash">Hash of the root blob to decrement.</param>
/// <param name="blobtype">Type of the root blob, used to walk its references.</param>
/// <param name="includefiles">True to also walk file-content references.</param>
public void DecrementReferenceCount(BackupSetReference backupsetname, byte[] blobhash, BlobLocation.BlobType blobtype, bool includefiles)
{
    BlobLocation rootBlobLocation = GetBlobLocation(blobhash);
    if (rootBlobLocation.GetBSetReferenceCount(backupsetname) == 1) // To be deleted?
    {
        IBlobReferenceIterator blobReferences = GetAllBlobReferences(blobhash, blobtype, includefiles, false);
        foreach (var reference in blobReferences)
        {
            BlobLocation blocation = GetBlobLocation(reference);
            // When we finish iterating over the children, decrement this blob
            blobReferences.PostOrderAction(() => IncrementReferenceCountNoRecurse(backupsetname, blocation, reference, -1));
            // FIX: removed a no-op try/catch (KeyNotFoundException) that only
            // rethrew; `throw;` with no other work is equivalent to no handler.
            if (blocation.GetBSetReferenceCount(backupsetname) != 1) // Not to be deleted?
            {
                // Dont need to decrement child references if this wont be deleted
                blobReferences.SkipChildrenOfCurrent();
            }
        }
    }
    // must delete parent last so parent can be loaded/used in GetAllBlobReferences()
    IncrementReferenceCountNoRecurse(backupsetname, rootBlobLocation, blobhash, -1);
}
/// <summary>
/// Looks up this backup set's reference count on the blob.
/// </summary>
/// <param name="backupSet">Backup set whose count is requested.</param>
/// <returns>The stored count, or null when this set holds no reference.</returns>
public int? GetBSetReferenceCount(BackupSetReference backupSet)
{
    var key = new BackupSetKey(backupSet.BackupSetName, backupSet.Shallow, backupSet.BlobListCache);
    return BSetReferenceCounts.TryGetValue(key, out int refCount) ? refCount : (int?)null;
}
/// <summary>
/// Checks whether the given hash is stored in the index AND referenced by the
/// given backup set.
/// </summary>
/// <param name="backupset">Backup set whose reference is required.</param>
/// <param name="hash">Blob hash to look up.</param>
/// <returns>True when the blob exists and the set holds a reference to it.</returns>
public bool ContainsHash(BackupSetReference backupset, byte[] hash)
{
    BlobLocation? blocation = IndexStore.GetRecord(hash);
    return blocation != null && blocation.GetBSetReferenceCount(backupset).HasValue;
}
// TODO: should we just have to iteratively remove each backup in the bset?
/// <summary>
/// Removes every reference held by the given backup set across the entire index,
/// deleting blob data that becomes unreferenced as a result.
/// </summary>
/// <param name="bsname">Backup set whose references are dropped.</param>
public void RemoveAllBackupSetReferences(BackupSetReference bsname)
{
    // FIX: snapshot the index before mutating. Decrementing a count to zero can
    // remove the entry from IndexStore (see IncrementReferenceCountNoRecurse),
    // which must not happen while IndexStore itself is being enumerated.
    var entries = new List<KeyValuePair<byte[], BlobLocation>>(IndexStore);
    foreach (KeyValuePair<byte[], BlobLocation> hashblob in entries)
    {
        int? refCount = hashblob.Value.GetBSetReferenceCount(bsname);
        if (refCount != null)
        {
            IncrementReferenceCountNoRecurse(bsname, hashblob.Key, -refCount.Value);
        }
    }
}
/// <summary>
/// Enumerates, as hex strings, every backup record hash in the set followed by
/// all blob hashes referenced by that backup record.
/// </summary>
/// <param name="bsname">Backup set to enumerate.</param>
/// <returns>Lazy sequence of hex-encoded hashes.</returns>
public IEnumerable<string> GetBackupsAndMetadataReferencesAsStrings(BackupSetReference bsname)
{
    var backupSet = LoadBackupSet(bsname);
    foreach (var (backupHash, _) in backupSet.Backups)
    {
        // First the backup record itself...
        yield return HashTools.ByteArrayToHexViaLookup32(backupHash);
        // ...then everything it references (structure only, no file contents).
        var references = Dependencies.Blobs.GetAllBlobReferences(backupHash, BlobLocation.BlobType.BackupRecord, false, false);
        foreach (byte[] childHash in references)
        {
            yield return HashTools.ByteArrayToHexViaLookup32(childHash);
        }
    }
}
/// <summary>
/// Serializes and stores a new BackupRecord, appending it to the backup set.
/// </summary>
/// <param name="bsname">Backup set the new backup belongs to; its Shallow flag is recorded with the entry.</param>
/// <param name="message">User-supplied message describing the backup.</param>
/// <param name="metadatatreehash">Hash of the backup's metadata tree root.</param>
/// <param name="backupTime">Timestamp stored in the backup record.</param>
/// <param name="bset">Already-loaded backup set to append to; loaded from bsname when null.</param>
/// <returns>The hash of the new backup</returns>
public byte[] AddBackup(BackupSetReference bsname, string message, byte[] metadatatreehash, DateTime backupTime, BackupSet? bset = null)
{
    // DOC FIX: the old XML doc described a nonexistent `shallow` parameter and
    // omitted `backupTime`/`bset`; documentation now matches the signature.
    bset ??= LoadBackupSet(bsname);
    BackupRecord newbackup = new(message, metadatatreehash, backupTime);
    byte[] brbytes = newbackup.serialize();
    byte[] backuphash = BlobStore.StoreData(new List<BlobStore>(1) { Dependencies.Blobs }, bsname, brbytes);
    bset.Backups.Add((backuphash, bsname.Shallow));
    return backuphash;
}
/// <summary>
/// Adjusts a single blob's reference count for one backup set, without touching
/// child references. Deletes the blob's data from disk when its last non-shallow
/// reference disappears, and drops the index entry when no references remain at all.
/// </summary>
/// <param name="backupset">Backup set whose count is adjusted.</param>
/// <param name="blocation">Location record for the blob; must correspond to blobhash.</param>
/// <param name="blobhash">Hash keying the blob in the index.</param>
/// <param name="amount">Signed delta; driving a count below zero is an error.</param>
private void IncrementReferenceCountNoRecurse(BackupSetReference backupset, BlobLocation blocation, byte[] blobhash, int amount)
{
    // Remember whether any non-shallow reference existed before the change: if
    // one did, data may be on disk and may need deleting after the update.
    bool originallyshallow = blocation.TotalNonShallowReferenceCount == 0;
    int? refCount = blocation.GetBSetReferenceCount(backupset);
    int newRefCount = refCount.GetValueOrDefault(0) + amount;
    blocation.SetBSetReferenceCount(backupset, newRefCount);
    if (newRefCount == 0)
    {
        blocation.RemoveBSetReference(backupset);
    }
    else if (newRefCount < 0)
    {
        throw new Exception("Negative reference count in blobstore");
    }
    if (blocation.BlockHashes == null) // Can't delete from disk if this is a multiblock reference (does not directly store data on disk)
    {
        if (blocation.TotalNonShallowReferenceCount == 0)
        {
            if (!originallyshallow)
            {
                // The last non-shallow reference just went away: remove the data file.
                try
                {
                    if (blocation.EncryptedHash == null)
                    {
                        throw new Exception("Hash should not be null");
                    }
                    Dependencies.DeleteBlob(blocation.EncryptedHash, blocation.RelativeFilePath);
                }
                catch (Exception e)
                {
                    throw new Exception("Error deleting unreferenced file.", e);
                }
            }
        }
    }
    if (blocation.TotalReferenceCount == 0)
    {
        // No backup set references this blob any more: drop the index entry.
        IndexStore.Remove(blobhash);
    }
}
/// <summary>
/// Removes a backup (matched by hash prefix) from the set and decrements the
/// references it held, then persists the updated backup set.
/// </summary>
/// <param name="bsname">Backup set to remove from.</param>
/// <param name="backuphashprefix">Hex prefix uniquely identifying the backup.</param>
/// <param name="dst_wo_cache">True when operating on a cached destination without its cache present.</param>
/// <param name="force_delete">Bypass the cache-consistency guard.</param>
/// <exception cref="KeyNotFoundException">No backup, or more than one, matches the prefix.</exception>
public void RemoveBackup(BackupSetReference bsname, string backuphashprefix, bool dst_wo_cache, bool force_delete = false)
{
    var bset = LoadBackupSet(bsname);
    if (bset.CacheUsed && dst_wo_cache && !force_delete)
    {
        // TODO: Do we need this check in more places?
        throw new Core.BackupRemoveException("Deleting a backup from a backup destination that uses a cache, " +
            "without that cache present may cause errors when merging cache.");
    }
    var match = HashByPrefix(bsname, backuphashprefix);
    // TODO: Better error messages depending on return value of HashByPrefix()
    // TODO: Cleanup usage of strings vs byte[] for hashes between backup store and Core
    if (match == null || match.Value.multiplematches == true)
    {
        throw new KeyNotFoundException();
    }
    byte[] backuphash = match.Value.singlematchhash
        ?? throw new Exception("HashByPrefix returned an incorrect type");
    // FIX: the old manual scan loop ran off the end when no entry matched and
    // then indexed Backups[Count]; fail explicitly instead.
    int i = bset.Backups.FindIndex(b => b.hash.SequenceEqual(backuphash));
    if (i < 0)
    {
        throw new KeyNotFoundException();
    }
    Dependencies.Blobs.DecrementReferenceCount(bsname, backuphash, BlobLocation.BlobType.BackupRecord, !bset.Backups[i].shallow);
    bset.Backups.RemoveAt(i);
    SaveBackupSet(bset, bsname);
}
/// <summary>
/// Commits reference counts for a newly added blob and everything reachable from
/// it. Subtrees whose root already carries a reference are not descended into.
/// </summary>
/// <param name="bsname">Backup set the references belong to.</param>
/// <param name="blobhash">Hash of the root blob being finalized.</param>
/// <param name="blobType">Type of the root blob, used to walk its references.</param>
public void FinalizeBlobAddition(BackupSetReference bsname, byte[] blobhash, BlobLocation.BlobType blobType)
{
    // Handle root blob
    BlobLocation rootblocation = GetBlobLocation(blobhash);
    if (rootblocation.TotalReferenceCount == 0)
    {
        IBlobReferenceIterator blobReferences = GetAllBlobReferences(blobhash, blobType, true, false);
        // Loop through children
        foreach (byte[] reference in blobReferences)
        {
            BlobLocation blocation = GetBlobLocation(reference);
            if (blocation.TotalReferenceCount > 0) // This was already stored
            {
                blobReferences.SkipChildrenOfCurrent();
            }
            // BUG FIX: previously passed the ROOT hash (blobhash) with the
            // CHILD's location; the hash/location pair must match (compare the
            // analogous loops in FinalizeBackupAddition and
            // DecrementReferenceCount, which both pass the loop variable).
            IncrementReferenceCountNoRecurse(bsname, blocation, reference, 1);
        }
    }
    // Increment root blob
    IncrementReferenceCountNoRecurse(bsname, rootblocation, blobhash, 1);
}
/// <summary>
/// Backup data synchronously. Splits the stream into blobs, stores each blob in
/// all given blob stores, and stores a multi-blob reference when the data spans
/// more than one blob.
/// </summary>
/// <param name="blobStores">Stores to save the data into (written in parallel).</param>
/// <param name="backupset">Backup set the data belongs to.</param>
/// <param name="readerbuffer">Stream of data to split and store.</param>
/// <returns>A hash representing the file contents.</returns>
public static byte[] StoreData(IEnumerable<BlobStore> blobStores, BackupSetReference backupset, Stream readerbuffer)
{
    BlockingCollection<HashBlobPair> fileblobqueue = new();
    byte[] filehash = new byte[20]; // Overall hash of file
    SplitData(readerbuffer, filehash, fileblobqueue);

    List<byte[]> blobshashes = new();
    // FIX: GetConsumingEnumerable blocks until items arrive or the collection is
    // completed; the previous `while (!IsCompleted) TryTake(...)` loop busy-waited,
    // spinning a core whenever the queue was momentarily empty.
    foreach (HashBlobPair blob in fileblobqueue.GetConsumingEnumerable())
    {
        blobStores.AsParallel().ForAll(bs => bs.AddBlob(backupset, blob));
        blobshashes.Add(blob.Hash);
    }
    if (blobshashes.Count > 1)
    {
        // Multiple blobs so create hashlist reference to reference them all together
        blobStores.AsParallel().ForAll(bs => bs.AddMultiBlobReferenceBlob(backupset, filehash, blobshashes));
    }
    return filehash;
}
/// <summary>
/// Drops this backup set's entry from the per-set reference-count table.
/// </summary>
/// <param name="backupSet">Backup set whose entry is removed.</param>
public void RemoveBSetReference(BackupSetReference backupSet)
{
    var key = new BackupSetKey(backupSet.BackupSetName, backupSet.Shallow, backupSet.BlobListCache);
    BSetReferenceCounts.Remove(key);
}
/// <summary>
/// Stores this backup set's reference count in the per-set count table,
/// creating or overwriting the entry.
/// </summary>
/// <param name="backupSet">Backup set whose count is set.</param>
/// <param name="count">New reference count value.</param>
public void SetBSetReferenceCount(BackupSetReference backupSet, int count)
{
    var key = new BackupSetKey(backupSet.BackupSetName, backupSet.Shallow, backupSet.BlobListCache);
    BSetReferenceCounts[key] = count;
}
// TODO: Update transfer logic with new reference counting logic
/// <summary>
/// Transfers a backup record blob and everything it references to another store.
/// </summary>
/// <param name="dst">Destination blob store.</param>
/// <param name="dstbackupset">Backup set in the destination that will own the references.</param>
/// <param name="bblobhash">Hash of the backup record blob to transfer.</param>
/// <param name="includefiles">True to transfer file contents, not just structure.</param>
public void TransferBackup(BlobStore dst, BackupSetReference dstbackupset, byte[] bblobhash, bool includefiles) =>
    TransferBlobAndReferences(dst, dstbackupset, bblobhash, BlobLocation.BlobType.BackupRecord, includefiles);
// TODO: If include files is false, should we require dstbackupset.EndsWith(Core.ShallowSuffix)?
/// <summary>
/// Copies a blob and (recursively) everything it references into another
/// BlobStore, incrementing destination reference counts post-order so parents
/// are counted only after their children. Blobs already present in the
/// destination are not re-transferred — unless they are shallow there and file
/// data is wanted (includefiles), in which case their data is filled in.
/// </summary>
/// <param name="dst">Destination blob store.</param>
/// <param name="dstbackupset">Backup set in the destination that will own the references.</param>
/// <param name="blobhash">Hash of the root blob to transfer.</param>
/// <param name="blobtype">Type of the root blob, used to walk its references.</param>
/// <param name="includefiles">True to transfer file contents, not just structure.</param>
public void TransferBlobAndReferences(BlobStore dst, BackupSetReference dstbackupset, byte[] blobhash, BlobLocation.BlobType blobtype, bool includefiles)
{
    bool refInDst;
    bool shallowInDst;
    BlobLocation? rootDstBlobLocation = null;
    try
    {
        rootDstBlobLocation = dst.GetBlobLocation(blobhash);
        refInDst = true;
        // "Shallow in dst" = record exists there but no non-shallow reference holds its data.
        shallowInDst = rootDstBlobLocation.TotalNonShallowReferenceCount == 0;
    }
    catch (KeyNotFoundException)
    {
        refInDst = false;
        shallowInDst = false; // Meaningless when ref not in dst
    }
    if (!refInDst || (shallowInDst && includefiles))
    {
        byte[]? blob;
        if (refInDst)
        {
            // Record already in dst (shallow): just load the data from this store.
            blob = RetrieveData(blobhash);
        }
        else
        {
            (rootDstBlobLocation, blob) = TransferBlobNoReferences(dst, dstbackupset, blobhash, GetBlobLocation(blobhash));
        }
        IBlobReferenceIterator blobReferences = GetAllBlobReferences(blobhash, blobtype, includefiles, false);
        blobReferences.SupplyData(blob);
        foreach (var reference in blobReferences)
        {
            // Same present/shallow probe as for the root, per child reference.
            bool iterRefInDst;
            bool iterShallowInDst;
            BlobLocation? dstBlobLocation = null;
            try
            {
                dstBlobLocation = dst.GetBlobLocation(reference);
                iterRefInDst = true;
                iterShallowInDst = dstBlobLocation.TotalNonShallowReferenceCount == 0;
            }
            catch (KeyNotFoundException)
            {
                iterRefInDst = false;
                iterShallowInDst = false; // Meaningless when ref not in dst
            }
            if (!iterRefInDst || (iterShallowInDst && includefiles))
            {
                if (iterRefInDst)
                {
                    blob = RetrieveData(reference);
                }
                else
                {
                    (dstBlobLocation, blob) = TransferBlobNoReferences(dst, dstbackupset, reference, GetBlobLocation(reference));
                }
                blobReferences.SupplyData(blob);
            }
            else
            {
                // Dont need to increment child references if this already exists
                blobReferences.SkipChildrenOfCurrent();
            }
            //if (!iterRefInDst) // Don't increment child reference if already present?
            //{
            // When we finish iterating over the children, increment this blob
            // (dstBlobLocation is set on every path that reaches here, hence the suppression).
#pragma warning disable CS8604 // Possible null reference argument.
            blobReferences.PostOrderAction(() => dst.IncrementReferenceCountNoRecurse(dstbackupset, dstBlobLocation, reference, 1));
#pragma warning restore CS8604 // Possible null reference argument.
            //}
        }
    }
    // Root is counted last, after all children have been transferred and counted.
#pragma warning disable CS8604 // Possible null reference argument.
    dst.IncrementReferenceCountNoRecurse(dstbackupset, rootDstBlobLocation, blobhash, 1);
#pragma warning restore CS8604 // Possible null reference argument.
}
/// <summary>
/// Adds a blob (data block or multi-blob hash list) to the store, writing its
/// data to disk only when no copy is stored yet. Reference counts are NOT
/// changed here; that happens at finalization.
/// </summary>
/// <param name="backupset">Backup set the blob is being added for.</param>
/// <param name="blob">Hash/data pair; Block may be null only for multi-reference blobs.</param>
/// <param name="blockreferences">Child hashes when this is a multi-blob reference; null for a plain data blob.</param>
/// <param name="shallow">True to record the blob without storing its data.</param>
/// <returns>The BlobLocation now indexed for this hash (newly added or pre-existing).</returns>
private BlobLocation AddBlob(BackupSetReference backupset, HashBlobPair blob, List<byte[]>? blockreferences, bool shallow = false)
{
    // We navigate down
    // Where we will put the blob data if we dont already have it stored
    BlobLocation posblocation;
    if (shallow)
    {
        posblocation = new BlobLocation(blockreferences);
    }
    else
    {
        if (blockreferences == null)
        {
            if (blob.Block == null)
            {
                throw new Exception("Block can only be null in multirefernce blob");
            }
            posblocation = new BlobLocation(null, "", blob.Block.Length);
        }
        else
        {
            posblocation = new BlobLocation(blockreferences);
        }
    }
    // Where the data is already stored if it exists
    (BlobLocation bloc, bool datastored)? existingblocstored;
    // NOTE(review): lock(this) is discouraged (external code could lock the same
    // instance and deadlock); a private lock object would be safer — requires a
    // field change outside this method, so left as-is here.
    lock (this)
    {
        // Have we already stored this?
        existingblocstored = AddHash(blob.Hash, posblocation);
    }
    if (existingblocstored == null) // ExistBloc == null means posbloc was just added
    {
        if (!shallow)
        {
            if (blockreferences == null)
            {
                if (blob.Block == null)
                {
                    throw new Exception("Block can only be null in multirefernce blob");
                }
                (posblocation.EncryptedHash, posblocation.RelativeFilePath) = WriteBlob(blob.Hash, blob.Block);
            }
        }
        else
        {
            // Shallow record: no data file; the plain hash stands in for the encrypted one.
            posblocation.RelativeFilePath = "";
            posblocation.EncryptedHash = blob.Hash;
        }
        // Dont change reference counts until finalization
        // IncrementReferenceCountNoRecurse(backupset, posblocation, blob.Hash, 1);
        return(posblocation);
    }
    else // Existbloc already stored at dst
    {
        (BlobLocation existingbloc, bool datastored) = existingblocstored.Value;
        // Is the data not already stored in the blobstore (are all references shallow thus far)?
        if (existingbloc.BlockHashes == null)
        {
            if (!datastored)
            {
                // Data is not already stored
                // Dont save if we are writing a bloblistcache
                if (!backupset.BlobListCache)
                {
                    // If we are saving to a cache and the bloblist cache indicates the destination has the data
                    // Then dont store, Else save
                    //BackupSetReference blobListCacheReference = backupset with { BlobListCache = true };
                    if (!(backupset.Cache && existingbloc.GetBSetReferenceCount(backupset).HasValue))
                    {
                        if (blob.Block == null)
                        {
                            throw new Exception("Block can only be null in multirefernce blob");
                        }
                        (existingbloc.EncryptedHash, existingbloc.RelativeFilePath) = WriteBlob(blob.Hash, blob.Block);
                    }
                }
            }
        }
        // Dont change reference counts until finalization
        // IncrementReferenceCountNoRecurse(backupset, existingbloc, blob.Hash, 1);
        return(existingbloc);
    }
}
/// <summary>
/// Copies a single blob record (and, for data blobs, its bytes) into another
/// store without touching reference counts.
/// </summary>
/// <param name="dst">Destination blob store.</param>
/// <param name="dstbackupset">Backup set in the destination.</param>
/// <param name="blobhash">Hash of the blob to transfer.</param>
/// <param name="blocation">Source location record for the blob.</param>
/// <returns>The destination BlobLocation, plus the raw bytes for data blobs (null for multi-blob references).</returns>
private (BlobLocation bloc, byte[]? blob) TransferBlobNoReferences(BlobStore dst, BackupSetReference dstbackupset, byte[] blobhash, BlobLocation blocation)
{
    if (blocation.BlockHashes != null)
    {
        // Multi-blob reference: only the child hash list needs to be copied.
        return (dst.AddMultiBlobReferenceBlob(dstbackupset, blobhash, blocation.BlockHashes), null);
    }
    byte[] data = LoadBlob(blocation, blobhash);
    return (dst.AddBlob(dstbackupset, new HashBlobPair(blobhash, data)), data);
}
/// <summary>
/// Stores a blob that holds only a list of child blob hashes (no data block).
/// </summary>
/// <param name="backupset">Backup set the reference blob belongs to.</param>
/// <param name="hash">Hash keying the reference blob.</param>
/// <param name="hashlist">Hashes of the constituent blobs.</param>
/// <returns>The BlobLocation the reference blob is saved to.</returns>
private BlobLocation AddMultiBlobReferenceBlob(BackupSetReference backupset, byte[] hash, List<byte[]> hashlist) =>
    AddBlob(backupset, new HashBlobPair(hash, null), hashlist);
/// <summary>
/// Overload that resolves the BlobLocation from the hash before adjusting the
/// count; see the four-argument overload for the actual bookkeeping.
/// </summary>
/// <param name="backupset">Backup set whose count is adjusted.</param>
/// <param name="blobhash">Hash of the blob to adjust.</param>
/// <param name="amount">Signed delta to apply.</param>
private void IncrementReferenceCountNoRecurse(BackupSetReference backupset, byte[] blobhash, int amount)
{
    BlobLocation blocation = GetBlobLocation(blobhash);
    IncrementReferenceCountNoRecurse(backupset, blocation, blobhash, amount);
}
/// <summary>
/// Stores an in-memory byte array; wraps it in a stream and delegates to the
/// Stream overload.
/// </summary>
/// <param name="blobStores">Stores to save the data into.</param>
/// <param name="backupset">Backup set the data belongs to.</param>
/// <param name="inputdata">Raw bytes to split and store.</param>
/// <returns>A hash representing the data.</returns>
public static byte[] StoreData(IEnumerable<BlobStore> blobStores, BackupSetReference backupset, byte[] inputdata) =>
    StoreData(blobStores, backupset, new MemoryStream(inputdata));
/// <summary>
/// Synchronously loads the serialized backup-set index file for the given set.
/// </summary>
/// <param name="backupsetname">Backup set whose index file is loaded.</param>
/// <returns>The raw index file bytes.</returns>
public byte[] LoadBackupSetData(BackupSetReference backupsetname)
{
    // FIX: GetAwaiter().GetResult() instead of .Result — faults surface as the
    // original exception rather than being wrapped in an AggregateException.
    // (Still sync-over-async; making this method async would change the interface.)
    return DstFSInterop.LoadIndexFileAsync(backupsetname.StringRepr(), IndexFileType.BackupSet)
        .GetAwaiter().GetResult();
}
/// <summary>
/// Synchronously writes the serialized backup-set index file for the given set.
/// </summary>
/// <param name="backupsetname">Backup set whose index file is written.</param>
/// <param name="bsdata">Serialized backup set bytes to store.</param>
public void StoreBackupSetData(BackupSetReference backupsetname, byte[] bsdata)
{
    // FIX: GetAwaiter().GetResult() instead of .Wait() — faults surface as the
    // original exception rather than being wrapped in an AggregateException.
    DstFSInterop.StoreIndexFileAsync(backupsetname.StringRepr(), IndexFileType.BackupSet, bsdata)
        .GetAwaiter().GetResult();
}
/// <summary>
/// Add a single blob to blobstore.
/// </summary>
/// <param name="backupset">Backup set the blob belongs to.</param>
/// <param name="blob">Hash/data pair to store.</param>
/// <returns>The BlobLocation the blob is saved to.</returns>
private BlobLocation AddBlob(BackupSetReference backupset, HashBlobPair blob) =>
    AddBlob(backupset, blob, null);