Code example #1
File: FileLoader.cs Project: wuyasec/inVtero.net
        public int LoadFromMem(byte[] Input)
        {
            int written = 0;
            var hashArr = FractHashTree.CreateRecsFromMemory(Input, MinHashSize, GetHP);
            var Count   = hashArr.Length;

            using (var fs = new FileStream(DBFile, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite, DB_READ_SIZE))
            {
                // we need 2 pages now since we're block reading and we might pick a hash whose scan
                // starts at the very end of a page
                byte[] buff = new byte[DB_READ_SIZE];
                byte[] zero = new byte[HASH_REC_BYTES];
                int    i = 0, firstIndex = 0, zeroIndex = 0;
                bool   WriteBack = false;

                do
                {
                    var Index = hashArr[i].Index;
                    // convert Index to PageIndex
                    var DBPage = (long)((Index & SortMask) & ~DB_PAGE_MASK);

                    // find block offset for this hash
                    fs.Seek(DBPage, SeekOrigin.Begin);
                    fs.Read(buff, 0, DB_READ_SIZE);
                    WriteBack = false;

                    do
                    {
                        // skip duplicates
                        if (i + 1 < Count &&
                            hashArr[i].Index == hashArr[i + 1].Index &&
                            hashArr[i].CompressedHash == hashArr[i + 1].CompressedHash)
                        {
                            i++;
                            continue;
                        }

                        if (i < Count)
                        {
                            // re-read Index since we could be on the inner loop
                            Index = hashArr[i].Index;
                            // Index inside of a page
                            var PageIndex = Index & DB_PAGE_MASK;

                            // Hash to populate the DB with
                            var toWrite = HashRec.ToByteArr(hashArr[i]);

                            // do we already have this hash from disk?
                            firstIndex = buff.SearchBytes(toWrite, (int)PageIndex, toWrite.Length);
                            if (firstIndex < 0)
                            {
                                zeroIndex = buff.SearchBytes(zero, (int)PageIndex, zero.Length);
                                if (zeroIndex >= 0)
                                {
                                    // we want the modified buffer to get written back
                                    WriteBack = true;
                                    int j, k;
                                    // update buff with new hash entry for write back
                                    //Array.Copy(toWrite, 0, buff, zeroIndex, toWrite.Length);
                                    for (j = zeroIndex, k = 0; j < zeroIndex + toWrite.Length; j++, k++)
                                    {
                                        buff[j] = toWrite[k];
                                    }

                                    written++;
                                    // set the original index bit; shift down since we're bit aligned
                                    HDB.SetIdxBit(Index);
                                }
                                else
                                {
                                    var strerr = "HASH TABLE SATURATED! YOU NEED TO MAKE THE DB LARGER!!";
                                    WriteColor(ConsoleColor.Red, strerr);
                                    throw new ApplicationException(strerr);
                                }
                            }
                        }
                        i++;

                        // continue to next entry if it's in the same block
                    } while (i < Count && (((hashArr[i].Index & SortMask) & ~DB_PAGE_MASK) == (ulong)DBPage));

                    if (WriteBack)
                    {
                        // reset seek position
                        fs.Seek(DBPage, SeekOrigin.Begin);
                        // only write back 1 page if we can help it
                        fs.Write(buff, 0, DB_READ_SIZE);
                    }
                } while (i < Count);
            }
            return(written);
        }
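
Note: both loaders in this listing rely on a SearchBytes extension whose implementation is not shown. A minimal managed sketch with the same shape (absolute offset of the first match at or after a starting offset, or -1) appears below. The class name and the linear-scan strategy are assumptions, the third parameter is read here as the number of needle bytes to compare (which matches the first call site above, though the real semantics may differ), and the actual project presumably uses a faster, possibly unsafe, implementation.

        public static class ByteSearchExtensions
        {
            // Hypothetical stand-in for the SearchBytes extension used above.
            // Scans haystack from 'start' for the first needleLen bytes of
            // 'needle'; returns the match offset, or -1 when not found.
            public static int SearchBytes(this byte[] haystack, byte[] needle, int start, int needleLen)
            {
                for (int i = start; i <= haystack.Length - needleLen; i++)
                {
                    int k = 0;
                    while (k < needleLen && haystack[i + k] == needle[k])
                        k++;
                    if (k == needleLen)
                        return i;
                }
                return -1;
            }
        }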
Code example #2
File: FractHashTree.cs Project: wuyasec/inVtero.net
        public static HashRec[] CreateRecsFromMemory(byte[] MemPage, int minBlockSize, Func <HashLib.IHash> getHP, int rID = 0, long VA = 0, int OnlySize = 0, bool PreSerialize = false)
        {
            if (MemPage == null)
            {
                return(null);
            }

            int RawSize = MemPage.Length;
            var topCnt  = BlockCount(RawSize, PAGE_SIZE);

            if (getHP == null)
            {
                getHP = () => HashLib.HashFactory.Crypto.CreateTiger2();
            }

            var  levelMap   = LevelMaps(RawSize, minBlockSize);
            int  LevelCount = levelMap.Count();
            long TotalHashs = levelMap[LevelCount - 1].Item1 + levelMap[LevelCount - 1].Item2;

            HashLib.IHash[] localHashProv = new HashLib.IHash[LevelCount];

            var sHash = new HashRec[TotalHashs];

            if (OnlySize != 0)
            {
                LevelCount   = 1;
                minBlockSize = OnlySize;
                TotalHashs   = BlockCount(RawSize, minBlockSize);

                sHash = new HashRec[TotalHashs];
            }

            // smallest to largest organization
            for (int i = 0; i < LevelCount; i++)
            {
                localHashProv[i] = getHP();
            }

            for (byte lvl = 0; lvl < LevelCount; lvl++)
            {
                var blockSize = minBlockSize << lvl;
                var blockCnt  = BlockCount(RawSize, blockSize);

                var hashLevelIndex = levelMap[lvl].Item1;

                localHashProv[lvl].Initialize();

                for (int arri = 0; arri < blockCnt; arri++)
                {
                    localHashProv[lvl].TransformBytes(MemPage, arri * blockSize, blockSize);
                    var hashBytes = localHashProv[lvl].TransformFinal().GetBytes();

                    sHash[hashLevelIndex + arri] = new HashRec(hashBytes, lvl, rID);

                    // trying to reduce some load in the DB commit path
                    if (PreSerialize)
                    {
                        sHash[hashLevelIndex + arri].Serialized = HashRec.ToByteArr(sHash[hashLevelIndex + arri]);
                    }
                }
            }
            return(sHash);
        }
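
This multi-level variant leans on two helpers, BlockCount and LevelMaps, that are not part of this listing. Reading from how they are used (the block size doubles per level, and TotalHashs is the last level's start index plus its count), a plausible sketch follows; treat the internals, especially the stopping condition, as assumptions.

        // Assumed helper: ceiling division of the input into fixed-size blocks.
        static int BlockCount(int rawSize, int blockSize)
        {
            return (rawSize + blockSize - 1) / blockSize;
        }

        // Assumed helper: one (startIndex, blockCount) pair per level, smallest
        // block size first, doubling until a single block covers the input.
        // Requires System and System.Collections.Generic.
        static List<Tuple<long, long>> LevelMaps(int rawSize, int minBlockSize)
        {
            var maps = new List<Tuple<long, long>>();
            long start = 0;
            for (int blockSize = minBlockSize; blockSize <= rawSize; blockSize <<= 1)
            {
                long cnt = BlockCount(rawSize, blockSize);
                maps.Add(Tuple.Create(start, cnt));
                start += cnt;
            }
            return maps;
        }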
Code example #3
        public static HashRec[] CreateRecsFromMemory(byte[] MemPage, int minBlockSize, Func <HashLib.IHash> getHP, int rID = 0, long VA = 0, int OnlySize = 0, bool PreSerialize = false, bool FullHashes = false)
        {
            if (MemPage == null)
            {
                return(null);
            }

            int RawSize    = MemPage.Length;
            var TotalHashs = BlockCount(RawSize, minBlockSize);

            var sHash = new HashRec[TotalHashs];

            if (OnlySize != 0)
            {
                minBlockSize = OnlySize;
                TotalHashs   = BlockCount(RawSize, minBlockSize);
                sHash        = new HashRec[TotalHashs];
            }

            if (getHP == null)
            {
                getHP = () => HashLib.HashFactory.Crypto.CreateTiger2();
            }

            // single-level variant: one hash provider, one pass over the page
            var localHashProv = getHP();
            localHashProv.Initialize();

            for (int arri = 0; arri < TotalHashs; arri++)
            {
                localHashProv.TransformBytes(MemPage, arri * minBlockSize, minBlockSize);
                var hashBytes = localHashProv.TransformFinal().GetBytes();

                sHash[arri] = new HashRec(hashBytes, 0, rID);

                if (VA != 0)
                {
                    sHash[arri].Address = VA + (arri * minBlockSize);
                }

                // trying to reduce some load in the DB commit path
                if (PreSerialize)
                {
                    sHash[arri].Serialized = HashRec.ToByteArr(sHash[arri]);
                }
            }
            return(sHash);
        }
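
For orientation, here is a hedged usage of this single-level variant; the concrete sizes and addresses are illustrative, not taken from the project.

        // Hash a 4 KiB page in 256-byte blocks: 16 single-level records.
        var page = new byte[0x1000];
        var recs = FractHashTree.CreateRecsFromMemory(page, 256, null, rID: 1, VA: 0x10000, PreSerialize: true);
        // recs.Length == 16; since VA != 0, recs[n].Address == 0x10000 + n * 256,
        // and each record already carries its serialized form for the DB commit path.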
Code example #4
        void DumpBufToDisk(ParallelOptions po)
        {
            Stopwatch sw;
            long      TotalDBWrites  = 0;
            long      TotalRequested = 0;
            long      DBPage         = 0;

            SortMask = HDB.DBEntriesMask << HASH_SHIFT;
            do
            {
                var hashArrTpl = ReadyQueue.Take(po.CancellationToken);
                var hashArr    = hashArrTpl.Item2;
                var Count      = hashArrTpl.Item1;

                ParallelAlgorithms.Sort <HashRec>(hashArr, 0, Count, GetICompareer <HashRec>(SortByDBSizeMask));
                TotalRequested += Count;

                if (Vtero.VerboseLevel >= 1)
                {
                    WriteColor(ConsoleColor.Cyan, $"Hash entries to store: {Count:N0}");
                }

                using (var fs = new FileStream(DBFile, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite, DB_READ_SIZE))
                {
                    // we need 2 pages now since we're block reading and we might pick a hash whose scan
                    // starts at the very end of a page
                    byte[] buff = new byte[DB_READ_SIZE];
                    byte[] zero = new byte[HASH_REC_BYTES];
                    int    i = 0, firstIndex = 0, zeroIndex = 0;
                    bool   WriteBack = false;

                    sw = Stopwatch.StartNew();
                    do
                    {
                        var Index = hashArr[i].Index;
                        // convert Index to PageIndex
                        DBPage = (long)((Index & SortMask) & ~DB_PAGE_MASK);

                        // find block offset for this hash
                        fs.Seek(DBPage, SeekOrigin.Begin);
                        fs.Read(buff, 0, DB_READ_SIZE);
                        WriteBack = false;
                        if (po.CancellationToken.IsCancellationRequested)
                        {
                            return;
                        }

                        do
                        {
                            // skip duplicates
                            if (i + 1 < Count &&
                                hashArr[i].Index == hashArr[i + 1].Index)
                            //&& UnsafeHelp.UnsafeCompare(hashArr[i].HashData, hashArr[i + 1].HashData))
                            {
                                i++;
                                continue;
                            }

                            if (i < Count)
                            {
                                // re-read Index since we could be on the inner loop
                                Index = hashArr[i].Index;
                                // Index inside of a page
                                var PageIndex = (int)(Index & DB_PAGE_MASK);

                                // Hash to populate the DB with
                                var toWrite = BitConverter.GetBytes(hashArr[i].CompressedHash);

                                // do we already have this hash from disk?
                                firstIndex = buff.SearchBytes(toWrite, PageIndex, HASH_REC_BYTES);
                                if (firstIndex < 0)
                                {
                                    zeroIndex = buff.SearchBytes(zero, PageIndex, HASH_REC_BYTES);
                                    if (zeroIndex >= 0)
                                    {
                                        // we want the modified buffer to get written back
                                        WriteBack = true;

                                        toWrite = HashRec.ToByteArr(hashArr[i]);

                                        // update buff with new hash entry for write back
                                        //Array.Copy(toWrite, 0, buff, zeroIndex, toWrite.Length);
                                        for (int j = zeroIndex, k = 0; j < zeroIndex + toWrite.Length; j++, k++)
                                        {
                                            buff[j] = toWrite[k];
                                        }

                                        TotalDBWrites++;

                                        // set the original index bit; shift down since we're bit aligned
                                        HDB.SetIdxBit(Index);
                                    }
                                    else
                                    {
                                        var strerr = $"HASH TABLE SATURATED!!! ({DBPage:X}:{PageIndex:X}) YOU NEED TO MAKE THE DB LARGER!!";
                                        WriteColor(ConsoleColor.Red, strerr);
                                        source.Cancel();
                                    }
                                }
                            }
                            i++;

                            if (i % 100000 == 0 && sw.Elapsed.TotalSeconds > 0)
                            {
                                WriteColor(ConsoleColor.Cyan, $"DB commit entries: {i:N0} - per second {(i / sw.Elapsed.TotalSeconds):N0}");
                            }

                            // continue to next entry if it's in the same block
                        } while (i < Count && (((hashArr[i].Index & SortMask) & ~DB_PAGE_MASK) == (ulong)DBPage));

                        if (WriteBack)
                        {
                            if (po.CancellationToken.IsCancellationRequested)
                            {
                                return;
                            }
                            // reset seek position
                            fs.Seek(DBPage, SeekOrigin.Begin);
                            // only write back 1 page if we can help it
                            fs.Write(buff, 0, DB_READ_SIZE);
                        }
                    } while (i < Count);

                    WriteColor(ConsoleColor.Cyan, $"DB entries: {i:N0} - per second {(i / sw.Elapsed.TotalSeconds):N0}");
                    //aPool.Return(hashArr);
                }
            } while (!DoneHashLoad || ReadyQueue.Count() > 0);

            WriteColor(ConsoleColor.Cyan, $"Finished DB write {TotalDBWrites:N0} NEW entries. Requsted {TotalRequested:N0} (reduced count reflects de-duplication). Task time: {sw.Elapsed}");
        }
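
DumpBufToDisk is the consumer half of a producer/consumer pair: it blocks on ReadyQueue.Take until the hash-load side sets DoneHashLoad and the queue drains. The producer is not shown in this listing; a hypothetical counterpart, assuming ReadyQueue is a BlockingCollection<Tuple<int, HashRec[]>> and reusing the MinHashSize/GetHP fields from example #1, might look like this:

        // Hypothetical producer feeding DumpBufToDisk (names assumed).
        void ProduceHashes(IEnumerable<byte[]> pages, ParallelOptions po)
        {
            foreach (var page in pages)
            {
                var recs = FractHashTree.CreateRecsFromMemory(page, MinHashSize, GetHP);
                // enqueue (count, records) for the DB commit thread
                ReadyQueue.Add(Tuple.Create(recs.Length, recs), po.CancellationToken);
            }
            DoneHashLoad = true; // lets the consumer exit once the queue is empty
        }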