public static MetadataNode Load(BlobStore blobs, byte[] hash, MetadataNode? parent = null)
{
    var curmn = new MetadataNode();
    // Decode the stored node into its named, versioned components
    Dictionary<string, byte[]> savedobjects = BinaryEncoding.dict_decode(blobs.RetrieveData(hash));
    FileMetadata dirmetadata = FileMetadata.deserialize(savedobjects["DirMetadata-v1"]);
    curmn.DirMetadata = dirmetadata;
    // Rebuild the map of file name -> file metadata
    ConcurrentDictionary<string, FileMetadata> files = new ConcurrentDictionary<string, FileMetadata>();
    var encodedFiles = BinaryEncoding.enum_decode(savedobjects["Files-v1"]) ?? new List<byte[]?>();
    foreach (var binfm in encodedFiles)
    {
        if (binfm == null)
        {
            throw new NullReferenceException("Encoded file metadatas cannot be null");
        }
        FileMetadata newfm = FileMetadata.deserialize(binfm);
        files[newfm.FileName] = newfm;
    }
    curmn.Files = files;
    // Recursively load child directories, passing this node as their parent
    ConcurrentDictionary<string, MetadataNode> directories = new ConcurrentDictionary<string, MetadataNode>();
    var dirs = BinaryEncoding.enum_decode(savedobjects["Directories-v2"]) ?? new List<byte[]?>();
    for (int i = 0; i < dirs.Count; i++)
    {
        var dir = dirs[i] ?? throw new NullReferenceException("Encoded directory cannot be null");
        MetadataNode newmn = Load(blobs, dir, curmn);
        directories[newmn.DirMetadata.FileName] = newmn;
    }
    curmn.Parent = parent;
    curmn.Directories = directories;
    return curmn;
}
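// Usage sketch (illustrative, not from the original source): load a stored
// metadata tree and list its immediate subdirectories. `indexblobs` and
// `roothash` are hypothetical placeholders for an initialized BlobStore and
// the hash of a serialized root MetadataNode.
MetadataNode root = MetadataNode.Load(indexblobs, roothash);
foreach (var entry in root.Directories)
{
    Console.WriteLine($"{entry.Key}: {entry.Value.Files.Count} files");
}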
/// <summary>
/// Loads a lagern index.
/// </summary>
/// <returns>The blob store and backup store backing the index.</returns>
private (BlobStore blobs, BackupStore backups) LoadIndex()
{
    BlobStoreDependencies blobStoreDependencies = new BlobStoreDependencies(DstFSInterop);
    BlobStore blobs = BlobStore.deserialize(DstFSInterop.LoadIndexFileAsync(null, IndexFileType.BlobIndex).Result, blobStoreDependencies);
    BackupStoreDependencies backupStoreDependencies = new BackupStoreDependencies(DstFSInterop, blobs);
    BackupStore backups = new BackupStore(backupStoreDependencies);
    return (blobs, backups);
}
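// Usage sketch (hypothetical caller): since LoadIndex is private, code inside
// the same class would deconstruct the tuple before running an operation.
var (blobs, backups) = LoadIndex();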
public void CacheBlobList(string backupsetname, BlobStore cacheblobs)
{
    string bloblistcachebsname = backupsetname + Core.BlobListCacheSuffix;
    // Drop any stale cache references before repopulating
    cacheblobs.RemoveAllBackupSetReferences(bloblistcachebsname);
    foreach (KeyValuePair<byte[], BlobLocation> hashblob in GetAllHashesAndBlobLocations(backupsetname))
    {
        // Cache each hash and its location only; blob data itself is not copied
        cacheblobs.AddBlob(bloblistcachebsname, new HashBlobPair(hashblob.Key, null), hashblob.Value.BlockHashes, true);
    }
}
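// Usage sketch (names hypothetical): populate a local cache store with
// hash-and-location references for every blob in the "documents" backup set.
remoteblobs.CacheBlobList("documents", cacheblobs);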
/// <summary>
/// Transfers a single blob to the destination store without following its references.
/// </summary>
/// <param name="dst">The destination blob store.</param>
/// <param name="dstbackupset">The backup set in the destination to reference the blob from.</param>
/// <param name="blobhash">The hash of the blob to transfer.</param>
/// <param name="blocation">The blob's location in this (source) store.</param>
/// <returns>The blob's location in the destination, and the blob data
/// (null when the blob is stored as a list of block references).</returns>
private (BlobLocation bloc, byte[] blob) TransferBlobNoReferences(BlobStore dst, string dstbackupset, byte[] blobhash, BlobLocation blocation)
{
    if (blocation.BlockHashes == null)
    {
        // Single-block blob: copy the raw data across
        byte[] blob = LoadBlob(blocation, blobhash);
        return (dst.AddBlob(dstbackupset, new HashBlobPair(blobhash, blob)), blob);
    }
    else
    {
        // Multi-block blob: only the list of child block hashes is stored here
        return (dst.AddMultiBlobReferenceBlob(dstbackupset, blobhash, blocation.BlockHashes), null);
    }
}
/// <summary>
/// Adds a backup record to the given backup set.
/// </summary>
/// <param name="bsname">The backup set to add the backup to.</param>
/// <param name="message">The backup message.</param>
/// <param name="metadatatreehash">The hash of the backup's metadata tree.</param>
/// <param name="backupTime">The time of the backup.</param>
/// <param name="bset">The backup set, loaded from bsname when null.</param>
/// <returns>The hash of the new backup</returns>
public byte[] AddBackup(BackupSetReference bsname, string message, byte[] metadatatreehash, DateTime backupTime, BackupSet? bset = null)
{
    if (bset == null)
    {
        bset = LoadBackupSet(bsname);
    }
    BackupRecord newbackup = new(message, metadatatreehash, backupTime);
    byte[] brbytes = newbackup.serialize();
    byte[] backuphash = BlobStore.StoreData(new List<BlobStore>(1) { Dependencies.Blobs }, bsname, brbytes);
    bset.Backups.Add((backuphash, bsname.Shallow));
    return backuphash;
}
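// Usage sketch (names hypothetical): record a backup against a backup set and
// keep the returned hash as the handle to the new BackupRecord. `backups` is
// a BackupStore, `bsreference` a BackupSetReference, and `treehash` the hash
// of an already-stored metadata tree.
byte[] newbackuphash = backups.AddBackup(bsreference, "nightly snapshot", treehash, DateTime.UtcNow);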
public void TransferBlobAndReferences(BlobStore dst, string dstbackupset, byte[] blobhash, BlobLocation.BlobType blobtype, bool includefiles)
{
    BlobLocation rootDstBlobLocation;
    try
    {
        rootDstBlobLocation = dst.GetBlobLocation(blobhash);
    }
    catch (KeyNotFoundException)
    {
        // Root blob missing from the destination, so transfer it and everything it references
        byte[] blob;
        (rootDstBlobLocation, blob) = TransferBlobNoReferences(dst, dstbackupset, blobhash, GetBlobLocation(blobhash));
        IBlobReferenceIterator blobReferences = GetAllBlobReferences(blobhash, blobtype, includefiles, false);
        blobReferences.SupplyData(blob);
        foreach (var reference in blobReferences)
        {
            BlobLocation dstBlobLocation;
            try
            {
                dstBlobLocation = dst.GetBlobLocation(reference);
                // Don't need to increment child references if this already exists
                blobReferences.SkipChildren();
            }
            catch (KeyNotFoundException)
            {
                (dstBlobLocation, blob) = TransferBlobNoReferences(dst, dstbackupset, reference, GetBlobLocation(reference));
                blobReferences.SupplyData(blob);
            }
            // When we finish iterating over the children, increment this blob
            blobReferences.PostOrderAction(() => dst.IncrementReferenceCountNoRecurse(dstbackupset, dstBlobLocation, reference, 1));
        }
    }
    dst.IncrementReferenceCountNoRecurse(dstbackupset, rootDstBlobLocation, blobhash, 1);
}
public BackupStoreDependencies(IDstFSInterop cloudinterop, BlobStore blobs)
{
    DstFSInterop = cloudinterop;
    Blobs = blobs;
}
// TODO: Update transfer logic with new reference counting logic
public void TransferBackup(BlobStore dst, string dstbackupset, byte[] bblobhash, bool includefiles)
{
    TransferBlobAndReferences(dst, dstbackupset, bblobhash, BlobLocation.BlobType.BackupRecord, includefiles);
}
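// Usage sketch (names hypothetical): copy an entire backup, including file
// contents, from `srcblobs` into `dstblobs` under the "offsite" backup set.
// `backuphash` is the hash returned by AddBackup above.
srcblobs.TransferBackup(dstblobs, "offsite", backuphash, includefiles: true);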