Example No. 1
        public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
        {
            // take a file, return hashes for its data fork and resource fork
            dataOperations.hashValues[] hv = new dataOperations.hashValues[2];

            GPTScheme gpts = new GPTScheme(i);
            HFSPlus   hfsp = new HFSPlus(i, gpts.entries[file.partitionAssoc]);

            volumeStream        vs  = new volumeStream(hfsp);
            extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            if (file.dataFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile hfspfile = new HFSPlusFile(file, eof);
                forkStream  fs       = new forkStream(vs, hfspfile, forkStream.forkType.data);

                hv[0] = dataOperations.getHashValues(fs, (long)hfspfile.dataLogicalSize);
            }

            if (file.resourceFork != null)
            {
                if (file.resourceFork.forkDataValues.logicalSize > 0)
                {
                    HFSPlusFile hfspfile = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
                    forkStream  fs       = new forkStream(vs, hfspfile, forkStream.forkType.resource);

                    hv[1] = dataOperations.getHashValues(fs, (long)hfspfile.rsrcLogicalSize);
                }
            }

            return(hv);
        }
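A brief, hypothetical call site (someCatalogFile stands in for a catalog record obtained elsewhere); index 0 holds the data-fork hashes and index 1 the resource-fork hashes, which stay default-valued when the file has no resource fork:

        // hypothetical call site: hash both forks of one catalog file
        dataOperations.hashValues[] forkHashes = hashFile(someCatalogFile);

        dataOperations.hashValues dataForkHashes     = forkHashes[0]; // data fork
        dataOperations.hashValues resourceForkHashes = forkHashes[1]; // resource fork, if present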
Example No. 2
        public void showForkData(HFSPlusCatalogFile entry, uint block, forkStream.forkType type)
        {
            GPTScheme           gpts = new GPTScheme(i);
            HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
            volumeStream        vs   = new volumeStream(hfsp);
            extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
            forkStream  fs;

            if (type == forkStream.forkType.data)
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
            }
            else
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
            }

            contentDisplay = hexHeadLine + "\r\n";

            if (fs.Length > 0)
            {
                byte[] showBlock = new byte[hfsp.blockSize];

                fs.Seek((long)hfsp.blockSize * block, SeekOrigin.Begin); // cast avoids uint overflow on large offsets
                fs.Read(showBlock, 0, (int)hfsp.blockSize);

                rawDataDisplay(showBlock);
            }
        }
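rawDataDisplay is not shown in these examples. A minimal sketch of a compatible hex-dump helper, assuming it appends to the same contentDisplay member written above (the real formatting may differ):

        private void rawDataDisplay(byte[] showBlock)
        {
            // assumed stand-in: 16 bytes per row as offset, hex values, then printable ASCII
            StringBuilder sb = new StringBuilder();

            for (int offset = 0; offset < showBlock.Length; offset += 16)
            {
                int rowLength = Math.Min(16, showBlock.Length - offset);

                sb.Append(offset.ToString("X8")).Append("  ");

                for (int i = 0; i < rowLength; i++)
                {
                    sb.Append(showBlock[offset + i].ToString("X2")).Append(' ');
                }

                for (int i = 0; i < rowLength; i++)
                {
                    byte b = showBlock[offset + i];
                    sb.Append(b >= 0x20 && b < 0x7F ? (char)b : '.');
                }

                sb.Append("\r\n");
            }

            contentDisplay += sb.ToString();
        }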
Example No. 3
        public absHFSPlusBTree(HFSPlusFile knownExtents, volumeStream hfsp)
        {
            extents = knownExtents;

            // grab a bunch of information to ensure the header node is captured
            byte[] firstBlock = new byte[hfsp.volume.blockSize];

            this.fs = new forkStream(hfsp, knownExtents, forkStream.forkType.data);

            fs.Read(firstBlock, 0, firstBlock.Length);

            // nodeSize sits 18 bytes into the header record, which comes immediately after the 14-byte node descriptor (absolute offset 32)
            this.nodeSize = dataOperations.convToLE(BitConverter.ToUInt16(firstBlock, 32));

            byte[] headerData = getNodeData(0, nodeSize);
            header = new headerNode(ref headerData);

            // check whether all of the data extents are known
            long treeSize = header.headerInfo.totalNodes * header.headerInfo.nodeSize;

            if (fs.Length >= treeSize && fs.Length > 0)
            {
                isRawDataComplete = true;

                buildMap(fs);
            }
        }
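HFS+ stores its on-disk fields big-endian, so dataOperations.convToLE presumably byte-swaps values that BitConverter has read on a little-endian host. A minimal sketch of such a helper (the project's real implementation may differ):

        // assumed helper: swap the two bytes of a big-endian UInt16
        public static ushort convToLE(ushort value)
        {
            return (ushort)((value >> 8) | (value << 8));
        }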
Example No. 4
        protected byte[] buildMap(forkStream fs)
        {
            List <byte[]> mapContent = new List <byte[]>();

            mapContent.Add(header.map.bitmapComponent);
            uint fLink = this.header.BTNodeDescriptor.fLink;

            uint mapSize = (uint)header.map.bitmapComponent.Length;

            // if fLink > 0, there are more map nodes with map data to be read
            while (fLink > 0)
            {
                byte[] nodeRawData = new byte[this.nodeSize];
                fs.Seek(fLink * this.nodeSize, System.IO.SeekOrigin.Begin);
                fs.Read(nodeRawData, 0, this.nodeSize);
                mapNode currentMap = new mapNode(ref nodeRawData);
                mapContent.Add(currentMap.bitmapComponent);

                mapSize += (uint)currentMap.bitmapComponent.Length;

                fLink = currentMap.BTNodeDescriptor.fLink;
            }

            byte[] mapData = new byte[mapSize];

            int position = 0;

            foreach (byte[] component in mapContent)
            {
                Array.Copy(component, 0, mapData, position, component.Length);
                position += component.Length;
            }

            return(mapData);
        }
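The returned buffer is the B-tree allocation bitmap: one bit per node, with node 0 in the most significant bit of byte 0. A sketch of how mapData might be queried (isNodeAllocated is a hypothetical helper, not part of the project):

        // true if the given node number is marked allocated in the concatenated map
        private static bool isNodeAllocated(byte[] mapData, int nodeNumber)
        {
            return (mapData[nodeNumber / 8] & (0x80 >> (nodeNumber % 8))) != 0;
        }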
Example No. 5
        private TreeNode iterateHashChildren(TreeNode parent, volumeStream vs)
        {
            TreeNode replaceParent = new TreeNode();

            replaceParent.Tag = parent.Tag;

            foreach (TreeNode child in parent.Nodes)
            {
                TreeNode replaceChild = new TreeNode();

                if (child.Tag is HFSPlusCatalogFolder)
                {
                    replaceChild     = iterateHashChildren(child, vs);
                    replaceChild.Tag = child.Tag;
                }
                else if (child.Tag is HFSPlusCatalogFile)
                {
                    HFSPlusCatalogFile        tag    = (HFSPlusCatalogFile)child.Tag;
                    dataOperations.hashValues hashes = new dataOperations.hashValues();

                    if (tag.dataFork != null && tag.dataFork.forkDataValues.logicalSize > 0)
                    {
                        HFSPlusFile theFileData = new HFSPlusFile(tag.dataFork, forkStream.forkType.data);

                        forkStream fs = new forkStream(vs, theFileData, forkStream.forkType.data);

                        dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileData.dataLogicalSize);

                        hashes.md5hash = hv.md5hash;
                    }

                    if (tag.resourceFork != null && tag.resourceFork.forkDataValues.logicalSize > 0)
                    {
                        // hash the resource fork through its own extents and logical size
                        HFSPlusFile theFileResource = new HFSPlusFile(tag.resourceFork, forkStream.forkType.resource);

                        forkStream fs = new forkStream(vs, theFileResource, forkStream.forkType.resource);

                        dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileResource.rsrcLogicalSize);

                        hashes.sha1hash = hv.sha1hash;
                    }

                    tag.hashes = hashes;

                    replaceChild.Tag = tag;
                }
                else
                {
                    replaceChild.Tag = child.Tag;
                }

                replaceChild.Text = child.Text;
                replaceParent.Nodes.Add(replaceChild);
            }

            replaceParent.Text = parent.Text;

            return(replaceParent);
        }
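A brief, hypothetical call site; catalogRoot and vs stand in for the display tree and volume stream built elsewhere:

        // replace the displayed tree with a copy whose file tags carry hash values
        TreeNode hashedRoot = iterateHashChildren(catalogRoot, vs);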
Example No. 6
        public void exportFile(HFSPlusCatalogFile entry, forkStream.forkType type, string path)
        {
            if ((entry.dataFork != null && entry.dataFork.forkDataValues.logicalSize > 0) || (entry.resourceFork != null && entry.resourceFork.forkDataValues.logicalSize > 0))
            {
                GPTScheme           gpts = new GPTScheme(i);
                HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
                volumeStream        vs   = new volumeStream(hfsp);
                extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

                HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
                forkStream  fs;
                long        dataSize = 0;

                if (type == forkStream.forkType.data)
                {
                    fs       = new forkStream(vs, hfsp_file, forkStream.forkType.data);
                    dataSize = (long)entry.dataFork.forkDataValues.logicalSize;
                }
                else
                {
                    fs       = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
                    dataSize = (long)entry.resourceFork.forkDataValues.logicalSize;
                }

                fs.Position = 0;

                FileStream   writeStream = new FileStream(path, FileMode.Create);
                BinaryWriter bw          = new BinaryWriter(writeStream);

                long   bytesWritten = 0;
                byte[] buffer;

                while (bytesWritten < dataSize)
                {
                    if (bytesWritten + 8192 <= dataSize)
                    {
                        buffer = new byte[8192];
                        fs.Read(buffer, 0, 8192);

                        bw.Write(buffer, 0, 8192);

                        bytesWritten += 8192;
                    }
                    else
                    {
                        buffer = new byte[dataSize - bytesWritten];
                        fs.Read(buffer, 0, buffer.Length);

                        bw.Write(buffer, 0, buffer.Length);

                        bytesWritten += buffer.Length;
                    }
                }

                bw.Close();
                writeStream.Close();
            }
        }
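The loop writes exactly dataSize bytes rather than draining the stream, presumably because a fork stream spans whole allocation blocks and can run past the fork's logical size. A brief, hypothetical call site (selectedEntry and the output path stand in for caller-chosen values):

        // export the data fork of a catalog entry to a local file
        exportFile(selectedEntry, forkStream.forkType.data, @"C:\exports\datafork.bin");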
Example No. 7
        public HFSPlusFile(hfsPlusForkData forkEntry, forkStream.forkType type)
        {
            fileContent.dataExtents = new List<hfsPlusForkData.HFSPlusExtentRecord>();
            fileContent.resourceExtents = new List<hfsPlusForkData.HFSPlusExtentRecord>();

            switch (type)
            {
                case forkStream.forkType.data:
                    addDataFork(forkEntry);
                    break;
                case forkStream.forkType.resource:
                    addResourceFork(forkEntry);
                    break;
            }
        }
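Both fork types are exercised elsewhere in these examples; a short sketch of typical call sites (file and hfsp stand in for values built by the caller):

        // wrap a catalog file's resource fork, as in the hashFile example above
        HFSPlusFile rsrc = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);

        // wrap a special file described directly by the volume header
        HFSPlusFile extentsFile = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);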
Example No. 8
        public void showForkData(HFSPlusCatalogFile entry, forkStream.forkType type)
        {
            GPTScheme           gpts = new GPTScheme(i);
            HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
            volumeStream        vs   = new volumeStream(hfsp);
            extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
            forkStream  fs;

            if (type == forkStream.forkType.data)
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
            }
            else
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
            }

            throw new NotImplementedException();
        }
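The body above is a stub. A sketch of how it might be completed, following the block-display overload in Example No. 2 (using the same contentDisplay, hexHeadLine, and rawDataDisplay members):

            // hypothetical completion: dump the first block of the selected fork
            contentDisplay = hexHeadLine + "\r\n";

            if (fs.Length > 0)
            {
                byte[] showBlock = new byte[hfsp.blockSize];

                fs.Read(showBlock, 0, (int)hfsp.blockSize);

                rawDataDisplay(showBlock);
            }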
Example No. 9
        public dataOperations.hashes hashFileStream(forkStream fs)
        {
            MD5  md5sum  = new MD5CryptoServiceProvider();
            SHA1 sha1sum = new SHA1CryptoServiceProvider();

            byte[] md5result;
            byte[] sha1result;

            dataOperations.hashes result = new dataOperations.hashes();

            md5result = md5sum.ComputeHash(fs);

            StringBuilder sbmd5 = new StringBuilder();

            for (int i = 0; i < md5result.Length; i++)
            {
                sbmd5.Append(md5result[i].ToString("X2"));
            }

            result.md5sum = sbmd5.ToString();

            // rewind before the second pass; the MD5 hash left the stream at its end
            fs.Position = 0;

            sha1result = sha1sum.ComputeHash(fs);

            StringBuilder sbsha1 = new StringBuilder();

            for (int i = 0; i < sha1result.Length; i++)
            {
                sbsha1.Append(sha1result[i].ToString("X2"));
            }

            result.sha1sum = sbsha1.ToString();

            fs.Close();

            return(result);
        }
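A brief, hypothetical call site (someForkStream stands in for a stream built by the caller); note that the method consumes and closes the stream it is given:

        dataOperations.hashes sums = hashFileStream(someForkStream);
        // sums.md5sum and sums.sha1sum now hold uppercase hex digest strings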
Example No. 10
        public imageMap(absImageStream ais)
        {
            if (ais.scheme == absImageStream.schemeType.GPT)
            {
                GPTScheme gpts = new GPTScheme(ais);

                mapBlock block = new mapBlock();
                block.location = 0;

                if (gpts.protectiveMBRExists)
                {
                    block.length = 1;
                    block.name = "MBR";
                    block.type = tileType.MBR;

                    partitionblocks.Add(block);
                }

                if (gpts.headerFound)
                {
                    block.location = 1;
                    block.length = 1;
                    block.name = "GPT Header";
                    block.type = tileType.GPT;

                    partitionblocks.Add(block);

                    block.location = gpts.tablestart;
                    block.length = gpts.tablelength / ais.sectorSize;
                    if (block.length < 1) block.length = 1;
                    block.name = "GPT Primary Table";
                    block.type = tileType.GPT;

                    partitionblocks.Add(block);
                }

                if (gpts.backupFound)
                {
                    block.location = gpts.backupHeader.mainheader;
                    block.length = 1;
                    block.name = "Backup GPT Header";
                    block.type = tileType.GPT;

                    partitionblocks.Add(block);

                    block.location = gpts.backupHeader.tablestart;
                    block.length = gpts.tablelength / ais.sectorSize;
                    if (block.length < 1) block.length = 1;
                    block.name = "GPT Backup Table";
                    block.type = tileType.GPT;

                    partitionblocks.Add(block);
                }

                foreach (GPTScheme.entry entry in gpts.entries)
                {
                    block.location = entry.partStartLBA;
                    block.length = entry.partLength;
                    block.name = entry.name;
                    block.type = tileType.vol_unknown;

                    if (gpts.findPartitionType(entry) == GPTScheme.partitionType.HFSPlus)
                    {
                        HFSPlus hfsp = new HFSPlus(ais, entry);

                        block.mapSectorsPerBlock = (int)hfsp.volHead.blockSize / ais.sectorSize;
                        forkStream fs = new forkStream(new volumeStream(hfsp), new HFSPlusFile(hfsp.volHead.allocationFile, forkStream.forkType.data), forkStream.forkType.data);

                        block.allocationMap = new byte[(int)fs.Length];
                        fs.Read(block.allocationMap, 0, (int)fs.Length);
                    }
                    else
                    {
                        block.allocationMap = null;
                    }

                    partitionblocks.Add(block);
                }

            }

            partitionblocks.Sort(CompareBlocksByPosition);
        }
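CompareBlocksByPosition is referenced but not shown. A minimal sketch of a comparer consistent with sorting the blocks by their start sector (the real signature may differ):

        // order map blocks by their starting location on the image
        private static int CompareBlocksByPosition(mapBlock x, mapBlock y)
        {
            return x.location.CompareTo(y.location);
        }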