public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
        {
            // take a file, return hashes for its data fork and resource fork
            dataOperations.hashValues[] hv = new dataOperations.hashValues[2];

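            // i is the enclosing class's open image stream (an absImageStream,
            // judging by the imageMap constructor below), giving these methods
            // access to the raw disk image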
            GPTScheme gpts = new GPTScheme(i);
            HFSPlus   hfsp = new HFSPlus(i, gpts.entries[file.partitionAssoc]);

            volumeStream        vs  = new volumeStream(hfsp);
            extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            if (file.dataFork != null && file.dataFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile hfspfile = new HFSPlusFile(file, eof);
                forkStream  fs       = new forkStream(vs, hfspfile, forkStream.forkType.data);

                hv[0] = dataOperations.getHashValues(fs, (long)hfspfile.dataLogicalSize);
            }

            if (file.resourceFork != null && file.resourceFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile hfspfile = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
                forkStream  fs       = new forkStream(vs, hfspfile, forkStream.forkType.resource);

                hv[1] = dataOperations.getHashValues(fs, (long)hfspfile.rsrcLogicalSize);
            }

            return(hv);
        }
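A minimal caller sketch; `browser` and `catalogFile` are illustrative names standing in for an instance of the surrounding class and a catalog lookup result:

        dataOperations.hashValues[] forkHashes = browser.hashFile(catalogFile);
        // forkHashes[0] holds the data fork hashes, forkHashes[1] the resource
        // fork hashes; an element keeps its default value when that fork is
        // empty or absent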
        public void showForkData(HFSPlusCatalogFile entry, uint block, forkStream.forkType type)
        {
            GPTScheme           gpts = new GPTScheme(i);
            HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
            volumeStream        vs   = new volumeStream(hfsp);
            extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
            forkStream  fs;

            if (type == forkStream.forkType.data)
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
            }
            else
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
            }

            contentDisplay = hexHeadLine + "\r\n";

            if (fs.Length > 0)
            {
                byte[] showBlock = new byte[hfsp.blockSize];

                fs.Seek((long)hfsp.blockSize * block, SeekOrigin.Begin);
                fs.Read(showBlock, 0, (int)hfsp.blockSize);

                rawDataDisplay(showBlock);
            }
        }
Example #3
        public absHFSPlusBTree(HFSPlusFile knownExtents, volumeStream hfsp)
        {
            extents = knownExtents;

            // grab a bunch of information to ensure the header node is captured
            byte[] firstBlock = new byte[hfsp.volume.blockSize];

            this.fs = new forkStream(hfsp, knownExtents, forkStream.forkType.data);

            fs.Read(firstBlock, 0, firstBlock.Length);

            // nodeSize sits 18 bytes into the header record, which immediately follows
            // the 14-byte node descriptor, so it is read at absolute offset 32
            this.nodeSize = dataOperations.convToLE(BitConverter.ToUInt16(firstBlock, 32));

            byte[] headerData = getNodeData(0, nodeSize);
            header = new headerNode(ref headerData);

            // check whether all of the data extents are known
            long treeSize = (long)header.headerInfo.totalNodes * header.headerInfo.nodeSize;

            if (fs.Length >= treeSize && fs.Length > 0)
            {
                isRawDataComplete = true;

                buildMap(fs);
            }
        }
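All multi-byte fields in HFS+ B-tree nodes are stored big-endian, so the convToLE call above presumably byte-swaps the value on little-endian hosts. A sketch of what such a UInt16 helper could look like (illustrative, not necessarily the project's actual implementation):

        static ushort swapUInt16(ushort bigEndianValue)
        {
            // exchange the high and low bytes
            return (ushort)((bigEndianValue >> 8) | (bigEndianValue << 8));
        }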
Example #4
        protected byte[] buildMap(forkStream fs)
        {
            List <byte[]> mapContent = new List <byte[]>();

            mapContent.Add(header.map.bitmapComponent);
            uint fLink = this.header.BTNodeDescriptor.fLink;

            uint mapSize = (uint)header.map.bitmapComponent.Length;

            // if fLink > 0, there are more map nodes with map data to be read
            while (fLink > 0)
            {
                byte[] nodeRawData = new byte[this.nodeSize];
                fs.Seek((long)fLink * this.nodeSize, System.IO.SeekOrigin.Begin);
                fs.Read(nodeRawData, 0, this.nodeSize);
                mapNode currentMap = new mapNode(ref nodeRawData);
                mapContent.Add(currentMap.bitmapComponent);

                mapSize += (uint)currentMap.bitmapComponent.Length;

                fLink = currentMap.BTNodeDescriptor.fLink;
            }

            byte[] mapData = new byte[mapSize];

            int position = 0;

            foreach (byte[] component in mapContent)
            {
                Array.Copy(component, 0, mapData, position, component.Length);
                position += component.Length;
            }

            return(mapData);
        }
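The concatenated bytes form the B-tree's node allocation bitmap. A hypothetical caller could test a node like this, assuming the HFS+ convention that bit 0 of the map is the most significant bit of byte 0:

        static bool isNodeInUse(byte[] mapData, uint nodeNumber)
        {
            byte b = mapData[nodeNumber / 8];

            // bit 0 is the MSB of byte 0, so the mask shifts down from 0x80
            return (b & (0x80 >> (int)(nodeNumber % 8))) != 0;
        }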
        private TreeNode iterateHashChildren(TreeNode parent, volumeStream vs)
        {
            TreeNode replaceParent = new TreeNode();

            replaceParent.Tag = parent.Tag;


            foreach (TreeNode child in parent.Nodes)
            {
                TreeNode replaceChild = new TreeNode();

                if (child.Tag is HFSPlusCatalogFolder)
                {
                    replaceChild     = iterateHashChildren(child, vs);
                    replaceChild.Tag = child.Tag;
                }
                else if (child.Tag is HFSPlusCatalogFile)
                {
                    HFSPlusCatalogFile        tag    = (HFSPlusCatalogFile)child.Tag;
                    dataOperations.hashValues hashes = new dataOperations.hashValues();

                    if (tag.dataFork != null && tag.dataFork.forkDataValues.logicalSize > 0)
                    {
                        HFSPlusFile theFileData = new HFSPlusFile(tag.dataFork, forkStream.forkType.data);

                        forkStream fs = new forkStream(vs, theFileData, forkStream.forkType.data);

                        dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileData.dataLogicalSize);

                        hashes.md5hash = hv.md5hash;
                    }

                    if (tag.resourceFork != null && tag.resourceFork.forkDataValues.logicalSize > 0)
                    {
                        // use the resource fork here, not the data fork again
                        HFSPlusFile theFileResource = new HFSPlusFile(tag.resourceFork, forkStream.forkType.resource);

                        forkStream fs = new forkStream(vs, theFileResource, forkStream.forkType.resource);

                        dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileResource.rsrcLogicalSize);

                        hashes.sha1hash = hv.sha1hash;
                    }

                    tag.hashes = hashes;

                    replaceChild.Tag = tag;
                }
                else
                {
                    replaceChild.Tag = child.Tag;
                }

                replaceChild.Text = child.Text;
                replaceParent.Nodes.Add(replaceChild);
            }

            replaceParent.Text = parent.Text;

            return(replaceParent);
        }
        public void exportFile(HFSPlusCatalogFile entry, forkStream.forkType type, string path)
        {
            if ((entry.dataFork != null && entry.dataFork.forkDataValues.logicalSize > 0)
                || (entry.resourceFork != null && entry.resourceFork.forkDataValues.logicalSize > 0))
            {
                GPTScheme           gpts = new GPTScheme(i);
                HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
                volumeStream        vs   = new volumeStream(hfsp);
                extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

                HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
                forkStream  fs;
                long        dataSize = 0;

                if (type == forkStream.forkType.data)
                {
                    fs       = new forkStream(vs, hfsp_file, forkStream.forkType.data);
                    dataSize = (long)entry.dataFork.forkDataValues.logicalSize;
                }
                else
                {
                    fs       = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
                    dataSize = (long)entry.resourceFork.forkDataValues.logicalSize;
                }

                fs.Position = 0;

                FileStream   writeStream = new FileStream(path, FileMode.Create);
                BinaryWriter bw          = new BinaryWriter(writeStream);

                long   bytesWritten = 0;
                byte[] buffer       = new byte[8192];

                while (bytesWritten < dataSize)
                {
                    // Read may return fewer bytes than requested, so write only
                    // what actually arrived and bail out on a truncated stream
                    int chunk = (int)Math.Min(buffer.Length, dataSize - bytesWritten);
                    int read  = fs.Read(buffer, 0, chunk);

                    if (read <= 0)
                    {
                        break;
                    }

                    bw.Write(buffer, 0, read);
                    bytesWritten += read;
                }

                bw.Close();
                writeStream.Close();
            }
        }
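Hypothetical usage, with `browser` and `catalogFile` again standing in for an instance of the surrounding class and a file found in the catalog:

        browser.exportFile(catalogFile, forkStream.forkType.data, @"C:\exports\datafork.bin");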
Example #8
        public HFSPlusFile(hfsPlusForkData forkEntry, forkStream.forkType type)
        {
            fileContent.dataExtents = new List<hfsPlusForkData.HFSPlusExtentRecord>();
            fileContent.resourceExtents = new List<hfsPlusForkData.HFSPlusExtentRecord>();

            switch (type)
            {
                case forkStream.forkType.data:
                    addDataFork(forkEntry);
                    break;
                case forkStream.forkType.resource:
                    addResourceFork(forkEntry);
                    break;
            }
        }
        public void showForkData(HFSPlusCatalogFile entry, forkStream.forkType type)
        {
            GPTScheme           gpts = new GPTScheme(i);
            HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
            volumeStream        vs   = new volumeStream(hfsp);
            extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
            forkStream  fs;

            if (type == forkStream.forkType.data)
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
            }
            else
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
            }

            throw new NotImplementedException();
        }
Example #10
        public dataOperations.hashes hashFileStream(forkStream fs)
        {
            MD5  md5sum  = new MD5CryptoServiceProvider();
            SHA1 sha1sum = new SHA1CryptoServiceProvider();

            byte[] md5result;
            byte[] sha1result;

            dataOperations.hashes result = new dataOperations.hashes();

            md5result = md5sum.ComputeHash(fs);

            StringBuilder sbmd5 = new StringBuilder();

            for (int i = 0; i < md5result.Length; i++)
            {
                sbmd5.Append(md5result[i].ToString("X2"));
            }

            result.md5sum = sbmd5.ToString();

            // rewind first: ComputeHash reads from the current position, and the
            // MD5 pass above has already consumed the whole stream
            fs.Position = 0;
            sha1result = sha1sum.ComputeHash(fs);

            StringBuilder sbsha1 = new StringBuilder();

            for (int i = 0; i < sha1result.Length; i++)
            {
                sbsha1.Append(sha1result[i].ToString("X2"));
            }
            }

            result.sha1sum = sbsha1.ToString();

            fs.Close();

            return(result);
        }
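Because ComputeHash consumes the stream, hashing twice means reading the whole fork twice (with a rewind in between). A single-pass alternative using the standard TransformBlock/TransformFinalBlock API (a sketch, not code from this project):

        static void hashBothInOnePass(Stream s, HashAlgorithm md5, HashAlgorithm sha1)
        {
            byte[] buffer = new byte[8192];
            int read;

            while ((read = s.Read(buffer, 0, buffer.Length)) > 0)
            {
                // feed the same chunk to both algorithms
                md5.TransformBlock(buffer, 0, read, null, 0);
                sha1.TransformBlock(buffer, 0, read, null, 0);
            }

            md5.TransformFinalBlock(buffer, 0, 0);
            sha1.TransformFinalBlock(buffer, 0, 0);
            // the digests are now available from md5.Hash and sha1.Hash
        }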
Example #12
        public imageMap(absImageStream ais)
        {
            if (ais.scheme == absImageStream.schemeType.GPT)
            {
                GPTScheme gpts = new GPTScheme(ais);

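                // note: one mapBlock variable is reused for every entry added
                // below, which only yields distinct list elements if mapBlock
                // is a value type (each Add then stores a copy)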
                mapBlock block = new mapBlock();
                block.location = 0;

                if (gpts.protectiveMBRExists)
                {
                    block.length = 1;
                    block.name = "MBR";
                    block.type = tileType.MBR;

                    partitionblocks.Add(block);
                }

                if (gpts.headerFound)
                {
                    block.location = 1;
                    block.length = 1;
                    block.name = "GPT Header";
                    block.type = tileType.GPT;

                    partitionblocks.Add(block);

                    block.location = gpts.tablestart;
                    block.length = gpts.tablelength / ais.sectorSize;
                    if (block.length < 1) block.length = 1;
                    block.name = "GPT Primary Table";
                    block.type = tileType.GPT;

                    partitionblocks.Add(block);
                }

                if (gpts.backupFound)
                {
                    block.location = gpts.backupHeader.mainheader;
                    block.length = 1;
                    block.name = "Backup GPT Header";
                    block.type = tileType.GPT;

                    partitionblocks.Add(block);

                    block.location = gpts.backupHeader.tablestart;
                    block.length = gpts.tablelength / ais.sectorSize;
                    if (block.length < 1) block.length = 1;
                    block.name = "GPT Backup Table";
                    block.type = tileType.GPT;

                    partitionblocks.Add(block);
                }

                foreach (GPTScheme.entry entry in gpts.entries)
                {
                    block.location = entry.partStartLBA;
                    block.length = entry.partLength;
                    block.name = entry.name;
                    block.type = tileType.vol_unknown;

                    if (gpts.findPartitionType(entry) == GPTScheme.partitionType.HFSPlus)
                    {
                        HFSPlus hfsp = new HFSPlus(ais, entry);

                        block.mapSectorsPerBlock = (int)hfsp.volHead.blockSize / ais.sectorSize;
                        forkStream fs = new forkStream(new volumeStream(hfsp), new HFSPlusFile(hfsp.volHead.allocationFile, forkStream.forkType.data), forkStream.forkType.data);

                        block.allocationMap = new byte[(int)fs.Length];
                        fs.Read(block.allocationMap, 0, (int)fs.Length);
                    }
                    else
                    {
                        block.allocationMap = null;
                    }

                    partitionblocks.Add(block);
                }

            }

            partitionblocks.Sort(CompareBlocksByPosition);
        }