public async Task FlushMeta()
{
	await metaSem.WaitAsync();
	try
	{
		// Take the commands from the transient cache and group them per path, keeping their original order.
		var cmds = db.CommandsInTransientCache();
		var groupedCmds = cmds.OrderBy(c => c.Index).GroupBy(c => c.Path);
		foreach (var group in groupedCmds)
		{
			var g = group.ToList();
			var path = group.Key;
			var seg = new Formats.MetaSegment { Commands = g.Select(e => e.ToProtoObject()).ToList() };
			var protoSegs = seg.ToListOfByteArrays();
			var isFile = seg.Commands[0].ToDBObject().MetaType == DB.SQLMap.CommandMetaType.File;

			// Running cutoff for marking cached commands as flushed: the first command's index
			// plus the number of commands contained in the pieces uploaded so far.
			var sum = g[0].Index;

			// Probe for the first meta index of this path that is not yet covered by an assurance.
			var nextIndex = 0;
			for (; ; nextIndex++)
			{
				var indexId = isFile
					? generator.GenerateMetaFileID((uint)nextIndex, path)
					: generator.GenerateMetaFolderID((uint)nextIndex, path);
				var exists = null != db.FindMatchingSegmentInAssurancesByIndexId(indexId);
				if (!exists)
					break;
			}

			// Upload the serialized segments at consecutive indices. The cached commands are marked
			// as flushed inside the same transaction that records the new assurance.
			foreach (var psi in protoSegs.Select((ps, i) => new { ps, i }))
			{
				sum += Formats.MetaSegment.FromByteArray(psi.ps).Commands.Count;
				var idx = nextIndex + psi.i;
				var indexId = isFile
					? generator.GenerateMetaFileID((uint)idx, path)
					: generator.GenerateMetaFolderID((uint)idx, path);
				await uploadChunk(psi.ps, psi.ps.SHA256(), indexId, _inAssuranceAdditionTransaction: () =>
				{
					db.CommandsFlushedForPath(path, indexSmallerThan: sum, _isAlreadyInTransaction: true);
				});
				// TODO: cache uploaded chunk
			}
		}
	}
	finally
	{
		metaSem.Release();
	}
}
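// Hypothetical shape of uploadChunk, inferred only from the call site above; the parameter names and
// types here are assumptions for illustration, not the project's actual declaration. The point of the
// _inAssuranceAdditionTransaction callback is that it runs inside the same database transaction that
// records the uploaded segment's assurance, so the "commands flushed" marker and the assurance are
// committed together or not at all.
async Task uploadChunk(byte[] data, byte[] hash, byte[] indexId, Action _inAssuranceAdditionTransaction = null)
{
	// ... encode and upload `data` identified by `indexId`/`hash`, then, while adding the
	// assurance to the local DB inside a transaction:
	// _inAssuranceAdditionTransaction?.Invoke();
}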
public static void TestMetaSegmentConversion()
{
	// Build a segment with one folder command and one block command.
	var ms = new Binsync.Core.Formats.MetaSegment();
	var c1 = new Binsync.Core.Formats.MetaSegment.Command
	{
		CMD = Binsync.Core.Formats.MetaSegment.Command.CMDV.ADD,
		TYPE = Binsync.Core.Formats.MetaSegment.Command.TYPEV.FOLDER,
		FOLDER_ORIGIN = new Binsync.Core.Formats.MetaSegment.Command.FolderOrigin { Name = "a", FileSize = 1 },
		FILE_ORIGIN = null,
	};
	ms.Commands.Add(c1);
	var c2 = new Binsync.Core.Formats.MetaSegment.Command
	{
		CMD = Binsync.Core.Formats.MetaSegment.Command.CMDV.ADD,
		TYPE = Binsync.Core.Formats.MetaSegment.Command.TYPEV.BLOCK,
		FILE_ORIGIN = new Binsync.Core.Formats.MetaSegment.Command.FileOrigin { Hash = new byte[] { 1 }, Size = 2, Start = 3 },
	};
	ms.Commands.Add(c2);

	// Round-trip: serialize, parse, then convert each command to the DB representation and back.
	var msRe = Binsync.Core.Formats.MetaSegment.FromByteArray(ms.ToByteArray());
	var c1re = msRe.Commands[0].ToDBObject().ToProtoObject();
	var c2re = msRe.Commands[1].ToDBObject().ToProtoObject();

	Assert.AreEqual(Binsync.Core.Formats.MetaSegment.Command.CMDV.ADD, c1re.CMD);
	Assert.AreEqual(Binsync.Core.Formats.MetaSegment.Command.TYPEV.FOLDER, c1re.TYPE);
	Assert.AreEqual("a", c1re.FOLDER_ORIGIN.Name);
	Assert.AreEqual(1, c1re.FOLDER_ORIGIN.FileSize);
	Assert.AreEqual(null, c1re.FILE_ORIGIN);

	Assert.AreEqual(Binsync.Core.Formats.MetaSegment.Command.CMDV.ADD, c2re.CMD);
	Assert.AreEqual(Binsync.Core.Formats.MetaSegment.Command.TYPEV.BLOCK, c2re.TYPE);
	Assert.AreEqual(null, c2re.FOLDER_ORIGIN);
	Assert.AreEqual(1, c2re.FILE_ORIGIN.Hash.Length);
	Assert.AreEqual(1, c2re.FILE_ORIGIN.Hash[0]);
	Assert.AreEqual(2, c2re.FILE_ORIGIN.Size);
	Assert.AreEqual(3, c2re.FILE_ORIGIN.Start);
}
// TODO: fill list with tuples (compressed array and plain hash). compress here
// optimize
static void fillListWithMetas(MetaSegment seg, List<byte[]> list, int customSize)
{
	// Serialize the segment; if it exceeds the size limit, split its commands into two halves
	// and recurse until every serialized piece fits, then collect the pieces in order.
	byte[] saved = seg.ToByteArray();
	if (saved.Length > customSize)
	{
		var segL = new MetaSegment();
		var segR = new MetaSegment();
		foreach (var element in seg.Commands.Select((obj, inc) => new { obj, inc }))
		{
			(element.inc < seg.Commands.Count / 2 ? segL : segR).Commands.Add(element.obj);
		}
		fillListWithMetas(segL, list, customSize);
		fillListWithMetas(segR, list, customSize);
	}
	else
	{
		list.Add(saved);
	}
}
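// A minimal usage sketch for the splitter above, assuming the same file scope and usings as the code
// it accompanies. The driver method name and the 256-byte threshold are illustrative assumptions, not
// part of the project. It shows the intended invariant: each resulting part parses back into a
// contiguous slice of the original command list, and parts only exceed the threshold if a single
// command already does.
static void FillListWithMetasExample()
{
	var seg = new MetaSegment();
	for (int i = 0; i < 100; i++)
	{
		seg.Commands.Add(new MetaSegment.Command
		{
			CMD = MetaSegment.Command.CMDV.ADD,
			TYPE = MetaSegment.Command.TYPEV.FOLDER,
			FOLDER_ORIGIN = new MetaSegment.Command.FolderOrigin { Name = "folder" + i, FileSize = 1 },
		});
	}

	var parts = new List<byte[]>();
	fillListWithMetas(seg, parts, customSize: 256);

	var total = 0;
	foreach (var part in parts)
	{
		// Each part deserializes on its own; concatenating the parts' command lists in order
		// reproduces seg.Commands.
		total += MetaSegment.FromByteArray(part).Commands.Count;
	}
	// total == seg.Commands.Count
}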