Пример #1
0
        private TreeNode iterateHashChildren(TreeNode parent, volumeStream vs)
        {
            // Rebuild the tree under <parent>, attaching fork hashes to every
            // HFSPlusCatalogFile node found at any depth. Returns the rebuilt copy.
            TreeNode replaceParent = new TreeNode();

            replaceParent.Tag = parent.Tag;


            foreach (TreeNode child in parent.Nodes)
            {
                TreeNode replaceChild = new TreeNode();

                if (child.Tag is HFSPlusCatalogFolder)
                {
                    // Recurse into sub-folders; the recursive call rebuilds the subtree.
                    replaceChild     = iterateHashChildren(child, vs);
                    replaceChild.Tag = child.Tag;
                }
                else if (child.Tag is HFSPlusCatalogFile)
                {
                    HFSPlusCatalogFile        tag    = (HFSPlusCatalogFile)child.Tag;
                    dataOperations.hashValues hashes = new dataOperations.hashValues();

                    if (tag.dataFork != null && tag.dataFork.forkDataValues.logicalSize > 0)
                    {
                        HFSPlusFile theFileData = new HFSPlusFile(tag.dataFork, forkStream.forkType.data);

                        forkStream fs = new forkStream(vs, theFileData, forkStream.forkType.data);

                        dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileData.dataLogicalSize);

                        hashes.md5hash = hv.md5hash;
                    }

                    if (tag.resourceFork != null && tag.resourceFork.forkDataValues.logicalSize > 0)
                    {
                        // BUG FIX: this branch previously re-hashed the DATA fork
                        // (tag.dataFork / forkType.data / dataLogicalSize) even though the
                        // guard above checks the resource fork. It now reads the resource
                        // fork, matching the pattern used by hashFile elsewhere in the project.
                        HFSPlusFile theFileResource = new HFSPlusFile(tag.resourceFork, forkStream.forkType.resource);

                        forkStream fs = new forkStream(vs, theFileResource, forkStream.forkType.resource);

                        dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileResource.rsrcLogicalSize);

                        hashes.sha1hash = hv.sha1hash;
                    }

                    // NOTE(review): only the data fork's md5 and the resource fork's sha1
                    // are kept — confirm this asymmetry is intentional.
                    tag.hashes = hashes;

                    replaceChild.Tag = tag;
                }
                else
                {
                    // Anything else (volume headers, attributes, ...) is copied through as-is.
                    replaceChild.Tag = child.Tag;
                }

                replaceChild.Text = child.Text;
                replaceParent.Nodes.Add(replaceChild);
            }

            replaceParent.Text = parent.Text;

            return(replaceParent);
        }
Пример #2
0
        private void exportFileToolStripMenuItem_Click(object sender, EventArgs e)
        {
            // Export the selected catalog file's data fork to a user-chosen path and,
            // when present, its resource fork to "<path>.rsrc".

            // BUG FIX: the menu item can fire with nothing selected; indexing
            // SelectedItems[0] unconditionally threw ArgumentOutOfRangeException.
            if (listView2.SelectedItems.Count == 0)
            {
                return;
            }

            if (listView2.SelectedItems[0].Tag is HFSPlusCatalogFile)
            {
                HFSPlusCatalogFile theFile = (HFSPlusCatalogFile)listView2.SelectedItems[0].Tag;

                // using: SaveFileDialog is IDisposable and was never disposed.
                using (SaveFileDialog sfd = new SaveFileDialog())
                {
                    sfd.FileName = listView2.SelectedItems[0].Text;
                    sfd.Title    = "Export file";

                    // BUG FIX: the dialog result was ignored, so pressing Cancel still
                    // exported whenever a file name happened to be filled in.
                    if (sfd.ShowDialog() == DialogResult.OK && sfd.FileName != "")
                    {
                        if (theFile.dataFork != null && theFile.dataFork.forkDataValues.logicalSize > 0)
                        {
                            dc.exportFile(theFile, forkStream.forkType.data, sfd.FileName);
                        }

                        if (theFile.resourceFork != null && theFile.resourceFork.forkDataValues.logicalSize > 0)
                        {
                            dc.exportFile(theFile, forkStream.forkType.resource, sfd.FileName + ".rsrc");
                        }
                    }
                }
            }
        }
Пример #3
0
        public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
        {
            // take a file, return hashes for its data fork ([0]) and resource fork ([1]);
            // an element stays default when the corresponding fork is absent or empty
            dataOperations.hashValues[] hv = new dataOperations.hashValues[2];

            GPTScheme gpts = new GPTScheme(i);
            HFSPlus   hfsp = new HFSPlus(i, gpts.entries[file.partitionAssoc]);

            volumeStream        vs  = new volumeStream(hfsp);
            extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            // BUG FIX: dataFork was dereferenced without a null check even though
            // resourceFork below is guarded — entries without a data fork crashed here.
            if (file.dataFork != null && file.dataFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile hfspfile = new HFSPlusFile(file, eof);
                forkStream  fs       = new forkStream(vs, hfspfile, forkStream.forkType.data);

                hv[0] = dataOperations.getHashValues(fs, (long)hfspfile.dataLogicalSize);
            }

            if (file.resourceFork != null && file.resourceFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile hfspfile = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
                forkStream  fs       = new forkStream(vs, hfspfile, forkStream.forkType.resource);

                hv[1] = dataOperations.getHashValues(fs, (long)hfspfile.rsrcLogicalSize);
            }

            return(hv);
        }
Пример #4
0
        public void showForkData(HFSPlusCatalogFile entry, uint block, forkStream.forkType type)
        {
            // Render one allocation-block-sized chunk of the requested fork into
            // contentDisplay / the raw hex view.
            GPTScheme           scheme    = new GPTScheme(i);
            HFSPlus             volume    = new HFSPlus(i, scheme.entries[entry.partitionAssoc]);
            volumeStream        volStream = new volumeStream(volume);
            extentsOverflowFile overflow  = new extentsOverflowFile(new HFSPlusFile(volume.volHead.extentsFile, forkStream.forkType.data), volStream);

            HFSPlusFile entryFile = new HFSPlusFile(entry, overflow);

            // Any fork type other than "data" is treated as a resource-fork request.
            forkStream stream = (type == forkStream.forkType.data)
                ? new forkStream(volStream, entryFile, forkStream.forkType.data)
                : new forkStream(volStream, entryFile, forkStream.forkType.resource);

            contentDisplay = hexHeadLine + "\r\n";

            if (stream.Length > 0)
            {
                byte[] blockBuffer = new byte[volume.blockSize];

                // Seek to the requested block and show its contents.
                stream.Seek(volume.blockSize * block, SeekOrigin.Begin);
                stream.Read(blockBuffer, 0, (int)volume.blockSize);

                rawDataDisplay(blockBuffer);
            }
        }
Пример #5
0
        public void exportFile(HFSPlusCatalogFile entry, forkStream.forkType type, string path)
        {
            // Write the requested fork of <entry> to <path> in 8 KiB chunks.

            // BUG FIX: the old guard dereferenced both forks unconditionally and threw
            // NullReferenceException when either fork was absent (other call sites in
            // this codebase null-check the forks before use).
            bool hasData     = entry.dataFork != null && entry.dataFork.forkDataValues.logicalSize > 0;
            bool hasResource = entry.resourceFork != null && entry.resourceFork.forkDataValues.logicalSize > 0;

            if (hasData || hasResource)
            {
                GPTScheme           gpts = new GPTScheme(i);
                HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
                volumeStream        vs   = new volumeStream(hfsp);
                extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

                HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
                forkStream  fs;
                long        dataSize;

                if (type == forkStream.forkType.data)
                {
                    fs       = new forkStream(vs, hfsp_file, forkStream.forkType.data);
                    dataSize = hasData ? (long)entry.dataFork.forkDataValues.logicalSize : 0;
                }
                else
                {
                    fs       = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
                    dataSize = hasResource ? (long)entry.resourceFork.forkDataValues.logicalSize : 0;
                }

                fs.Position = 0;

                // using: the output file is closed even if a read throws mid-copy
                // (the old code leaked both streams on failure).
                using (FileStream writeStream = new FileStream(path, FileMode.Create))
                using (BinaryWriter bw = new BinaryWriter(writeStream))
                {
                    long   bytesWritten = 0;
                    byte[] buffer       = new byte[8192];

                    while (bytesWritten < dataSize)
                    {
                        int want = (int)Math.Min(buffer.Length, dataSize - bytesWritten);
                        int got  = fs.Read(buffer, 0, want);

                        // BUG FIX: Read's return value was ignored; a short or zero read
                        // would previously write stale buffer bytes and keep looping.
                        if (got <= 0)
                        {
                            break;
                        }

                        bw.Write(buffer, 0, got);
                        bytesWritten += got;
                    }
                }
            }
        }
Пример #6
0
        public HFSPlusFile(HFSPlusCatalogFile catalogEntry, extentsOverflowFile eofInput)
        {
            // Start with empty extent lists for both forks.
            fileContent.dataExtents = new List<hfsPlusForkData.HFSPlusExtentRecord>();
            fileContent.resourceExtents = new List<hfsPlusForkData.HFSPlusExtentRecord>();

            // Register whichever forks the catalog entry actually carries.
            if (catalogEntry.dataFork != null)
            {
                addDataFork(catalogEntry.dataFork);
            }

            if (catalogEntry.resourceFork != null)
            {
                addResourceFork(catalogEntry.resourceFork);
            }

            // Pull in any extents recorded in the extents-overflow file for this file ID.
            getAllExtents(eofInput, catalogEntry.fileID);
        }
Пример #7
0
        private void getRecords()
        {
            // Decode every raw leaf record into a typed catalog record and file it
            // into fileRecords / folderRecords / threadRecords by its type tag.
            foreach (rawKeyAndRecord record in this.rawRecords)
            {
                // The record type lives in the first two bytes of the record data
                // (big-endian on disk; convToLE swaps to host order).
                short thisRecordType = dataOperations.convToLE(BitConverter.ToInt16(record.recordData, 0));

                catalogFile.HFSPlusCatalogKey key = new catalogFile.HFSPlusCatalogKey();

                // Catalog key layout: parent ID at offset 0 of the key data, node name
                // occupying the remaining keyLength - 6 bytes after the fixed header.
                key.keyLength = record.keyLength;
                key.parentID  = dataOperations.convToLE(BitConverter.ToUInt32(record.keyData, 0));
                byte[] nodeName = new byte[record.keyLength - 6];
                Array.Copy(record.keyData, 6, nodeName, 0, record.keyLength - 6);
                key.nodeName = nodeName;

                byte[] rawData = record.recordData;

                switch ((recordType)thisRecordType)
                {
                case recordType.kHFSFileRecord:
                    // File record: payload is parsed by the HFSPlusCatalogFile ctor.
                    HFSPlusCatalogFile fileRecord = new HFSPlusCatalogFile(ref rawData);

                    fileRecord.key = key;

                    fileRecords.Add(fileRecord);

                    break;

                case recordType.kHFSFolderRecord:
                    // Folder record: payload is parsed by the HFSPlusCatalogFolder ctor.
                    HFSPlusCatalogFolder folderRecord = new HFSPlusCatalogFolder(ref rawData);

                    folderRecord.key = key;

                    folderRecords.Add(folderRecord);

                    break;

                case recordType.kHFSFileThreadRecord:
                case recordType.kHFSFolderThreadRecord:
                    // Thread records are parsed inline: reserved at offset 2, parent ID
                    // at offset 4, node name in the bytes after the 8-byte fixed header.
                    HFSPlusCatalogThread threadRecord = new HFSPlusCatalogThread();

                    threadRecord.key  = key;
                    threadRecord.type = (recordType)thisRecordType;

                    threadRecord.reserved = dataOperations.convToLE(BitConverter.ToInt16(rawData, 2));
                    threadRecord.parentID = dataOperations.convToLE(BitConverter.ToUInt32(rawData, 4));

                    threadRecord.nodeName = new byte[rawData.Length - 8];

                    Array.Copy(rawData, 8, threadRecord.nodeName, 0, rawData.Length - 8);

                    threadRecords.Add(threadRecord);

                    break;
                }
                // NOTE(review): unknown record types are silently dropped — confirm
                // that is intentional.
            }
        }
Пример #8
0
        private void getRecords()
        {
            // Decode every raw leaf record into a typed file / folder / thread record.
            foreach (rawKeyAndRecord raw in this.rawRecords)
            {
                // Record type tag: first two bytes of the record data, byte-swapped.
                short typeTag = dataOperations.convToLE(BitConverter.ToInt16(raw.recordData, 0));

                catalogFile.HFSPlusCatalogKey recordKey = new catalogFile.HFSPlusCatalogKey();

                recordKey.keyLength = raw.keyLength;
                recordKey.parentID = dataOperations.convToLE(BitConverter.ToUInt32(raw.keyData, 0));

                // The node name occupies the key bytes after the 6-byte fixed header.
                byte[] name = new byte[raw.keyLength - 6];
                Array.Copy(raw.keyData, 6, name, 0, raw.keyLength - 6);
                recordKey.nodeName = name;

                byte[] payload = raw.recordData;

                switch ((recordType)typeTag)
                {
                    case recordType.kHFSFileRecord:
                    {
                        HFSPlusCatalogFile file = new HFSPlusCatalogFile(ref payload);
                        file.key = recordKey;
                        fileRecords.Add(file);
                        break;
                    }

                    case recordType.kHFSFolderRecord:
                    {
                        HFSPlusCatalogFolder folder = new HFSPlusCatalogFolder(ref payload);
                        folder.key = recordKey;
                        folderRecords.Add(folder);
                        break;
                    }

                    case recordType.kHFSFileThreadRecord:
                    case recordType.kHFSFolderThreadRecord:
                    {
                        // Thread records: reserved at offset 2, parent ID at offset 4,
                        // node name in the bytes after the 8-byte fixed header.
                        HFSPlusCatalogThread thread = new HFSPlusCatalogThread();
                        thread.key = recordKey;
                        thread.type = (recordType)typeTag;

                        thread.reserved = dataOperations.convToLE(BitConverter.ToInt16(payload, 2));
                        thread.parentID = dataOperations.convToLE(BitConverter.ToUInt32(payload, 4));

                        thread.nodeName = new byte[payload.Length - 8];
                        Array.Copy(payload, 8, thread.nodeName, 0, payload.Length - 8);

                        threadRecords.Add(thread);
                        break;
                    }
                }
            }
        }
Пример #9
0
        public HFSPlusFile(HFSPlusCatalogFile catalogEntry, extentsOverflowFile eofInput)
        {
            // Build a file view from a catalog entry: seed empty extent lists, record
            // the forks present on the entry, then resolve overflow extents by file ID.
            fileContent.dataExtents = new List<hfsPlusForkData.HFSPlusExtentRecord>();
            fileContent.resourceExtents = new List<hfsPlusForkData.HFSPlusExtentRecord>();

            if (catalogEntry.dataFork != null) { addDataFork(catalogEntry.dataFork); }
            if (catalogEntry.resourceFork != null) { addResourceFork(catalogEntry.resourceFork); }

            getAllExtents(eofInput, catalogEntry.fileID);
        }
Пример #10
0
        public TreeNode getDirectoryChildren(HFSPlusCatalogFolder folderRecord, catalogFile cf, extentsOverflowFile eof, attributesFile af)
        {
            // get every file and directory inside the current one
            TreeNode returnDir = cf.getDirectoryAndChildren(folderRecord, eof, this.partitionNo);

            // BUG FIX: the original added attribute nodes to returnDir.Nodes while
            // foreach-enumerating that same collection. Iterate over a snapshot so
            // the additions cannot disturb (or be re-visited by) the enumeration.
            TreeNode[] children = new TreeNode[returnDir.Nodes.Count];
            returnDir.Nodes.CopyTo(children, 0);

            foreach (TreeNode child in children)
            {
                // check if there are any alternate data streams for the files
                if (child.Tag is HFSPlusCatalogFile)
                {
                    HFSPlusCatalogFile data = (HFSPlusCatalogFile)child.Tag;

                    attributesFile.HFSPlusAttrKey attrKey = new attributesFile.HFSPlusAttrKey();

                    attrKey.fileID     = data.fileID;
                    attrKey.startBlock = 0;
                    attributesLeafNode.attributesDataForFile allAttributes = af.getAttrFileDataWithKey(attrKey);

                    // One tree node per named fork attribute ...
                    foreach (attributesLeafNode.HFSPlusAttrForkData fork in allAttributes.forks)
                    {
                        TreeNode attribute = new TreeNode();

                        attributesLeafNode.HFSPlusAttrForkData tag = fork;
                        tag.partitionAssoc = folderRecord.partitionAssoc;

                        attribute.Text = child.Text + " > " + System.Text.Encoding.BigEndianUnicode.GetString(fork.key.attrName);
                        attribute.Tag  = tag;

                        returnDir.Nodes.Add(attribute);
                    }

                    // ... and one per inline-data attribute.
                    foreach (attributesLeafNode.HFSPlusAttrInlineData inline in allAttributes.inline)
                    {
                        TreeNode attribute = new TreeNode();

                        attributesLeafNode.HFSPlusAttrInlineData tag = inline;
                        tag.partitionAssoc = folderRecord.partitionAssoc;

                        attribute.Text = child.Text + " > " + System.Text.Encoding.BigEndianUnicode.GetString(inline.key.attrName);
                        attribute.Tag  = tag;
                        returnDir.Nodes.Add(attribute);
                    }
                }
            }

            return(returnDir);
        }
Пример #11
0
        public void showForkData(HFSPlusCatalogFile entry, forkStream.forkType type)
        {
            // Open a stream over the requested fork of <entry>; the actual display
            // logic has not been written yet, so this still ends in
            // NotImplementedException exactly like the original.
            GPTScheme           partitions = new GPTScheme(i);
            HFSPlus             volume     = new HFSPlus(i, partitions.entries[entry.partitionAssoc]);
            volumeStream        volStream  = new volumeStream(volume);
            extentsOverflowFile overflow   = new extentsOverflowFile(new HFSPlusFile(volume.volHead.extentsFile, forkStream.forkType.data), volStream);

            HFSPlusFile entryFile = new HFSPlusFile(entry, overflow);

            // Non-data requests fall through to the resource fork.
            forkStream fs = (type == forkStream.forkType.data)
                ? new forkStream(volStream, entryFile, forkStream.forkType.data)
                : new forkStream(volStream, entryFile, forkStream.forkType.resource);

            throw new NotImplementedException();
        }
Пример #12
0
        private void addMetaFilesToTree(ref TreeNode result)
        {
            // Add the five HFS+ special (metadata) files to the tree, each tagged
            // with a synthetic HFSPlusCatalogFile carrying its fork and fixed file ID.
            TreeNode catalog = new TreeNode();
            TreeNode extents = new TreeNode();
            TreeNode startup = new TreeNode();
            TreeNode attributes = new TreeNode();
            TreeNode allocation = new TreeNode();

            HFSPlusCatalogFile catalogProperties = new HFSPlusCatalogFile();
            HFSPlusCatalogFile extentsProperties = new HFSPlusCatalogFile();
            HFSPlusCatalogFile startupProperties = new HFSPlusCatalogFile();
            HFSPlusCatalogFile attributesProperties = new HFSPlusCatalogFile();
            HFSPlusCatalogFile allocationProperties = new HFSPlusCatalogFile();

            catalog.Text = "$CATALOG";
            catalogProperties.dataFork = this.volHead.catalogFile;
            catalogProperties.accessDate = FromHFSPlusTime(0);
            catalogProperties.attributeModDate = FromHFSPlusTime(0);
            catalogProperties.backupDate = FromHFSPlusTime(0);
            catalogProperties.contentModDate = FromHFSPlusTime(0);
            catalogProperties.createDate = FromHFSPlusTime(0);
            catalogProperties.fileID = 4;
            catalogProperties.path = this.volHead.path + "\\" + catalog.Text;
            catalog.Tag = catalogProperties;

            extents.Text = "$EXTENTSOVERFLOW";
            extentsProperties.dataFork = this.volHead.extentsFile;
            extentsProperties.accessDate = FromHFSPlusTime(0);
            extentsProperties.attributeModDate = FromHFSPlusTime(0);
            extentsProperties.backupDate = FromHFSPlusTime(0);
            extentsProperties.contentModDate = FromHFSPlusTime(0);
            extentsProperties.createDate = FromHFSPlusTime(0);
            extentsProperties.fileID = 3;
            extentsProperties.path = this.volHead.path + "\\" + extents.Text;
            extents.Tag = extentsProperties;

            startup.Text = "$STARTUP";
            startupProperties.dataFork = this.volHead.startupFile;
            startupProperties.accessDate = FromHFSPlusTime(0);
            startupProperties.attributeModDate = FromHFSPlusTime(0);
            startupProperties.backupDate = FromHFSPlusTime(0);
            startupProperties.contentModDate = FromHFSPlusTime(0);
            startupProperties.createDate = FromHFSPlusTime(0);
            startupProperties.fileID = 7;
            startupProperties.path = this.volHead.path + "\\" + startup.Text;
            startup.Tag = startupProperties;

            attributes.Text = "$ATTRIBUTES";
            attributesProperties.dataFork = this.volHead.attributesFile;
            // BUG FIX: these five dates were assigned to catalogProperties (copy-paste
            // error), leaving the attributes node's dates unset.
            attributesProperties.accessDate = FromHFSPlusTime(0);
            attributesProperties.attributeModDate = FromHFSPlusTime(0);
            attributesProperties.backupDate = FromHFSPlusTime(0);
            attributesProperties.contentModDate = FromHFSPlusTime(0);
            attributesProperties.createDate = FromHFSPlusTime(0);
            attributesProperties.fileID = 8;
            attributesProperties.path = this.volHead.path + "\\" + attributes.Text;
            attributes.Tag = attributesProperties;

            allocation.Text = "$ALLOCATION";
            allocationProperties.dataFork = this.volHead.allocationFile;
            allocationProperties.accessDate = FromHFSPlusTime(0);
            allocationProperties.attributeModDate = FromHFSPlusTime(0);
            allocationProperties.backupDate = FromHFSPlusTime(0);
            allocationProperties.contentModDate = FromHFSPlusTime(0);
            allocationProperties.createDate = FromHFSPlusTime(0);
            allocationProperties.fileID = 6;
            allocationProperties.path = this.volHead.path + "\\" + allocation.Text;
            allocation.Tag = allocationProperties;

            result.Nodes.Add(catalog);
            result.Nodes.Add(extents);
            result.Nodes.Add(startup);
            result.Nodes.Add(attributes);
            result.Nodes.Add(allocation);
        }
Пример #13
0
        private void addMetaFilesToTree(ref TreeNode result)
        {
            // Add the five HFS+ special (metadata) files to the tree, each tagged
            // with a synthetic HFSPlusCatalogFile carrying its fork and fixed file ID.
            TreeNode catalog    = new TreeNode();
            TreeNode extents    = new TreeNode();
            TreeNode startup    = new TreeNode();
            TreeNode attributes = new TreeNode();
            TreeNode allocation = new TreeNode();

            HFSPlusCatalogFile catalogProperties    = new HFSPlusCatalogFile();
            HFSPlusCatalogFile extentsProperties    = new HFSPlusCatalogFile();
            HFSPlusCatalogFile startupProperties    = new HFSPlusCatalogFile();
            HFSPlusCatalogFile attributesProperties = new HFSPlusCatalogFile();
            HFSPlusCatalogFile allocationProperties = new HFSPlusCatalogFile();


            catalog.Text = "$CATALOG";
            catalogProperties.dataFork         = this.volHead.catalogFile;
            catalogProperties.accessDate       = FromHFSPlusTime(0);
            catalogProperties.attributeModDate = FromHFSPlusTime(0);
            catalogProperties.backupDate       = FromHFSPlusTime(0);
            catalogProperties.contentModDate   = FromHFSPlusTime(0);
            catalogProperties.createDate       = FromHFSPlusTime(0);
            catalogProperties.fileID           = 4;
            catalogProperties.path             = this.volHead.path + "\\" + catalog.Text;
            catalog.Tag = catalogProperties;

            extents.Text = "$EXTENTSOVERFLOW";
            extentsProperties.dataFork         = this.volHead.extentsFile;
            extentsProperties.accessDate       = FromHFSPlusTime(0);
            extentsProperties.attributeModDate = FromHFSPlusTime(0);
            extentsProperties.backupDate       = FromHFSPlusTime(0);
            extentsProperties.contentModDate   = FromHFSPlusTime(0);
            extentsProperties.createDate       = FromHFSPlusTime(0);
            extentsProperties.fileID           = 3;
            extentsProperties.path             = this.volHead.path + "\\" + extents.Text;
            extents.Tag = extentsProperties;

            startup.Text = "$STARTUP";
            startupProperties.dataFork         = this.volHead.startupFile;
            startupProperties.accessDate       = FromHFSPlusTime(0);
            startupProperties.attributeModDate = FromHFSPlusTime(0);
            startupProperties.backupDate       = FromHFSPlusTime(0);
            startupProperties.contentModDate   = FromHFSPlusTime(0);
            startupProperties.createDate       = FromHFSPlusTime(0);
            startupProperties.fileID           = 7;
            startupProperties.path             = this.volHead.path + "\\" + startup.Text;
            startup.Tag = startupProperties;

            attributes.Text = "$ATTRIBUTES";
            attributesProperties.dataFork      = this.volHead.attributesFile;
            // BUG FIX: these five dates were assigned to catalogProperties (copy-paste
            // error), leaving the attributes node's dates unset.
            attributesProperties.accessDate       = FromHFSPlusTime(0);
            attributesProperties.attributeModDate = FromHFSPlusTime(0);
            attributesProperties.backupDate       = FromHFSPlusTime(0);
            attributesProperties.contentModDate   = FromHFSPlusTime(0);
            attributesProperties.createDate       = FromHFSPlusTime(0);
            attributesProperties.fileID        = 8;
            attributesProperties.path          = this.volHead.path + "\\" + attributes.Text;
            attributes.Tag = attributesProperties;

            allocation.Text = "$ALLOCATION";
            allocationProperties.dataFork         = this.volHead.allocationFile;
            allocationProperties.accessDate       = FromHFSPlusTime(0);
            allocationProperties.attributeModDate = FromHFSPlusTime(0);
            allocationProperties.backupDate       = FromHFSPlusTime(0);
            allocationProperties.contentModDate   = FromHFSPlusTime(0);
            allocationProperties.createDate       = FromHFSPlusTime(0);
            allocationProperties.fileID           = 6;
            allocationProperties.path             = this.volHead.path + "\\" + allocation.Text;
            allocation.Tag = allocationProperties;

            result.Nodes.Add(catalog);
            result.Nodes.Add(extents);
            result.Nodes.Add(startup);
            result.Nodes.Add(attributes);
            result.Nodes.Add(allocation);
        }
Пример #14
0
        public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
        {
            // take a file, return hashes for its data fork ([0]) and resource fork ([1]);
            // an element stays default when the corresponding fork is absent or empty
            dataOperations.hashValues[] hv = new dataOperations.hashValues[2];

            GPTScheme gpts = new GPTScheme(i);
            HFSPlus hfsp = new HFSPlus(i, gpts.entries[file.partitionAssoc]);

            volumeStream vs = new volumeStream(hfsp);
            extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data),vs);

            // BUG FIX: dataFork was dereferenced without a null check even though
            // resourceFork below is guarded — entries without a data fork crashed here.
            if (file.dataFork != null && file.dataFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile hfspfile = new HFSPlusFile(file, eof);
                forkStream fs = new forkStream(vs, hfspfile, forkStream.forkType.data);

                hv[0] = dataOperations.getHashValues(fs, (long)hfspfile.dataLogicalSize);
            }

            if (file.resourceFork != null && file.resourceFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile hfspfile = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
                forkStream fs = new forkStream(vs, hfspfile, forkStream.forkType.resource);

                hv[1] = dataOperations.getHashValues(fs, (long)hfspfile.rsrcLogicalSize);
            }

            return hv;
        }
Пример #15
0
        public void showForkData(HFSPlusCatalogFile entry, uint block, forkStream.forkType type)
        {
            // Show one allocation block of the chosen fork as hex output.
            GPTScheme layout = new GPTScheme(i);
            HFSPlus fileSystem = new HFSPlus(i, layout.entries[entry.partitionAssoc]);
            volumeStream underlying = new volumeStream(fileSystem);
            extentsOverflowFile overflowTree = new extentsOverflowFile(new HFSPlusFile(fileSystem.volHead.extentsFile, forkStream.forkType.data), underlying);

            HFSPlusFile target = new HFSPlusFile(entry, overflowTree);

            forkStream fork;

            // Everything except an explicit data request reads the resource fork.
            if (type != forkStream.forkType.data)
            {
                fork = new forkStream(underlying, target, forkStream.forkType.resource);
            }
            else
            {
                fork = new forkStream(underlying, target, forkStream.forkType.data);
            }

            contentDisplay = hexHeadLine + "\r\n";

            if (fork.Length > 0)
            {
                byte[] raw = new byte[fileSystem.blockSize];

                fork.Seek(fileSystem.blockSize * block, SeekOrigin.Begin);
                fork.Read(raw, 0, (int)fileSystem.blockSize);

                rawDataDisplay(raw);
            }
        }
Пример #16
0
        private void onRowClick(object sender, ListViewItemSelectionChangedEventArgs e)
        {
            // Populate the property grid / hex view for whichever kind of item was clicked.
            comboBox1.Visible = false;
            if (e.Item.Tag is HFSPlusCatalogFolder)
            {
                HFSPlusCatalogFolder details = (HFSPlusCatalogFolder)e.Item.Tag;
                propertyGrid1.SelectedObject = details;
                resetHex();
            }
            else if (e.Item.Tag is HFSPlusCatalogFile)
            {
                resetHex();
                HFSPlusCatalogFile details = (HFSPlusCatalogFile)e.Item.Tag;
                propertyGrid1.SelectedObject = details;

                // BUG FIX: dataFork/resourceFork were dereferenced without null checks;
                // catalog entries may lack either fork (other code paths in this
                // codebase guard both before use).
                bool hasData     = details.dataFork != null && details.dataFork.forkDataValues.logicalSize > 0;
                bool hasResource = details.resourceFork != null && details.resourceFork.forkDataValues.logicalSize > 0;

                if (hasData || hasResource)
                {
                    comboBox1.Visible = true;
                }
                if (hasData)
                {
                    // Show the first block of the data fork.
                    dc.showForkData(details, 0, forkStream.forkType.data);
                    dc.selectedFile = details;

                    hexText.Text         = dc.contentDisplay;
                    showTotalBlocks.Text = details.dataFork.forkDataValues.totalBlocks.ToString();

                    // Block navigation only makes sense with more than one block.
                    if (details.dataFork.forkDataValues.totalBlocks > 1)
                    {
                        nextBlock.Enabled   = true;
                        goToBlock.Enabled   = true;
                        blockNumBox.Enabled = true;
                    }

                    dc.fileDataBlock       = 1;
                    showCurrentBlock.Text  = dc.fileDataBlock.ToString();
                    comboBox1.SelectedItem = dc.forkview[0];
                }
                else if (hasResource)
                {
                    // No data fork content: fall back to the resource fork.
                    dc.showForkData(details, 0, forkStream.forkType.resource);
                    dc.selectedFile = details;

                    hexText.Text         = dc.contentDisplay;
                    showTotalBlocks.Text = details.resourceFork.forkDataValues.totalBlocks.ToString();

                    if (details.resourceFork.forkDataValues.totalBlocks > 1)
                    {
                        nextBlock.Enabled   = true;
                        goToBlock.Enabled   = true;
                        blockNumBox.Enabled = true;
                    }

                    dc.fileDataBlock       = 1;
                    showCurrentBlock.Text  = dc.fileDataBlock.ToString();
                    comboBox1.SelectedItem = dc.forkview[1];
                }
                else
                {
                    hexText.Text          = "";
                    showCurrentBlock.Text = "";
                }
                comboBox1.DataSource = dc.forkview;
            }
            else if (e.Item.Tag is HFSPlus.volumeHeader)
            {
                HFSPlus.volumeHeader details = (HFSPlus.volumeHeader)e.Item.Tag;
                propertyGrid1.SelectedObject = details;
                resetHex();
            }
            else if (e.Item.Tag is attributesLeafNode.HFSPlusAttrInlineData)
            {
                resetHex();

                attributesLeafNode.HFSPlusAttrInlineData attrDetails = (attributesLeafNode.HFSPlusAttrInlineData)e.Item.Tag;
                propertyGrid1.SelectedObject = attrDetails;

                if (attrDetails.otherData.Length > 0)
                {
                    dc.showInlineAttrData((attributesLeafNode.HFSPlusAttrInlineData)e.Item.Tag);
                    hexText.Text = dc.contentDisplay;
                }
            }
        }
Пример #17
0
        public void showForkData(HFSPlusCatalogFile entry, forkStream.forkType type)
        {
            // Build a stream over the requested fork of <entry>. The display step is
            // not implemented yet, so this throws after constructing the stream —
            // exactly matching the original behavior.
            GPTScheme layout = new GPTScheme(i);
            HFSPlus fileSystem = new HFSPlus(i, layout.entries[entry.partitionAssoc]);
            volumeStream underlying = new volumeStream(fileSystem);
            extentsOverflowFile overflowTree = new extentsOverflowFile(new HFSPlusFile(fileSystem.volHead.extentsFile, forkStream.forkType.data), underlying);

            HFSPlusFile target = new HFSPlusFile(entry, overflowTree);

            // Non-data requests fall through to the resource fork.
            forkStream fork = (type == forkStream.forkType.data)
                ? new forkStream(underlying, target, forkStream.forkType.data)
                : new forkStream(underlying, target, forkStream.forkType.resource);

            throw new NotImplementedException();
        }
Пример #18
0
        // Builds a TreeNode for 'folderRecord' and attaches one child node per
        // catalog record (folder or file) whose parent is this folder. Only the
        // immediate children are added here — child folders are not recursed into.
        // Reads catalog leaf nodes through the instance's 'fs' stream, following
        // fLink pointers until the folder's valence (expected child count) is met.
        public TreeNode getDirectoryAndChildren(HFSPlusCatalogFolder folderRecord, extentsOverflowFile eof, int partitionAssoc)
        {
            TreeNode returnDir = new TreeNode();

            if (folderRecord.key.nodeName != null)
            {
                // Catalog node names are big-endian UTF-16; embedded NULs are
                // swapped for spaces so they display cleanly.
                returnDir.Text = System.Text.Encoding.BigEndianUnicode.GetString(folderRecord.key.nodeName);
                returnDir.Text = returnDir.Text.Replace('\0', ' ');
            }
            folderRecord.partitionAssoc = partitionAssoc;
            returnDir.Tag = folderRecord;

            HFSPlusCatalogKey matchParentDir = new HFSPlusCatalogKey();

            // find the first HFSPlusFileRecord for whom the current directory is the parent
            matchParentDir.parentID = folderRecord.folderID;
            if (folderRecord.key.nodeName != null)
            {
                // NOTE(review): the search key's nodeName is set to this folder's OWN
                // name, while its parentID is this folder's ID — confirm this is what
                // getLeafNodeContainingRecord expects rather than an empty nodeName.
                matchParentDir.nodeName = folderRecord.key.nodeName;
            }
            uint readThisNode = getLeafNodeContainingRecord(matchParentDir);
            bool nextLeaf     = true;

            // records with the same parent are stored sequentially in the file,
            // but may continue over into the next node
            while (nextLeaf)
            {
                byte[] leafData = new byte[this.nodeSize];

                // Catalog leaves are addressed by node number * node size.
                fs.Seek(readThisNode * this.nodeSize, SeekOrigin.Begin);
                fs.Read(leafData, 0, this.nodeSize);

                catalogLeafNode currentLeaf = new catalogLeafNode(ref leafData);

                // Pass 1: child folders in this leaf.
                foreach (HFSPlusCatalogFolder folder in currentLeaf.folderRecords)
                {
                    if (folder.key.parentID == folderRecord.folderID)
                    {
                        TreeNode childDir = new TreeNode();
                        if (folder.key.nodeName != null)
                        {
                            childDir.Text = System.Text.Encoding.BigEndianUnicode.GetString(folder.key.nodeName);
                            childDir.Text = childDir.Text.Replace('\0', ' ');
                        }

                        // set the treenode data for the child item
                        folder.path           = folderRecord.path + "\\" + childDir.Text;
                        folder.partitionAssoc = partitionAssoc;

                        childDir.Tag = folder;
                        returnDir.Nodes.Add(childDir);
                    }
                }

                // Pass 2: child files in this leaf.
                foreach (HFSPlusCatalogFile file in currentLeaf.fileRecords)
                {
                    if (file.key.parentID == folderRecord.folderID)
                    {
                        TreeNode childFile = new TreeNode();

                        HFSPlusCatalogFile eachFile = file;
                        eachFile.partitionAssoc = partitionAssoc;

                        // HFSPlusFile should be able to get all of a file's blocks as part of the constructor
                        HFSPlusFile blockFinder = new HFSPlusFile(eachFile, eof);

                        //add the discovered extents back into the return object
                        //eachFile.dataFork.forkDataValues.extents.Clear();
                        //eachFile.resourceFork.forkDataValues.extents.Clear();
                        //foreach (hfsPlusForkData.HFSPlusExtentRecord extent in blockFinder.fileContent.dataExtents)
                        //{
                        //    eachFile.dataFork.forkDataValues.extents.Add(extent);
                        //}
                        //foreach (hfsPlusForkData.HFSPlusExtentRecord extent in blockFinder.fileContent.resourceExtents)
                        //{
                        //    eachFile.resourceFork.forkDataValues.extents.Add(extent);
                        //}

                        // if it can't... cry?
                        if (!(blockFinder.allDataBlocksKnown && blockFinder.allResourceBlocksKnown))
                        {
                            throw new Exception("Disk_Reader.HFSPlusFile class failed to get all blocks.");
                        }

                        // a handful of volume metadata files have highly specialised permissions
                        // (parentID == 2 means the parent is the volume's root folder)
                        HFSPlusCatalogFolder tag = (HFSPlusCatalogFolder)returnDir.Tag;
                        if (tag.key.parentID == 2)
                        {
                            if (returnDir.Text == "    HFS+ Private Data")
                            {
                                // Files under the private-data directory store an inode
                                // number in the special permission field.
                                HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = new HFSPlusCatalogRecord.HFSPlusPermissions();
                                resetPermissions      = eachFile.permissions;
                                resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.iNodeNum;

                                eachFile.permissions = resetPermissions;
                            }
                        }
                        else if (eachFile.userInfo.fileType == 0x686C6E6B && eachFile.userInfo.fileCreator == 0x6866732B)
                        {
                            // 0x686C6E6B = 'hlnk', 0x6866732B = 'hfs+' in ASCII —
                            // presumably an HFS+ hard-link file, whose special field
                            // holds a link count.
                            HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = new HFSPlusCatalogRecord.HFSPlusPermissions();
                            resetPermissions      = eachFile.permissions;
                            resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.linkCount;

                            eachFile.permissions = resetPermissions;
                        }
                        else if (eachFile.permissions.fileMode.blockSpecial || eachFile.permissions.fileMode.charSpecial)
                        {
                            // Block/character device nodes store a raw device number.
                            HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = new HFSPlusCatalogRecord.HFSPlusPermissions();
                            resetPermissions      = eachFile.permissions;
                            resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.rawDevice;

                            eachFile.permissions = resetPermissions;
                        }
                        else
                        {
                            // Ordinary files: the special field is reserved/unused.
                            HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = new HFSPlusCatalogRecord.HFSPlusPermissions();
                            resetPermissions      = eachFile.permissions;
                            resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.reserved;

                            eachFile.permissions = resetPermissions;
                        }

                        // NOTE(review): this call runs even when file.key.nodeName is
                        // null (GetString would throw), and the guard below re-checks
                        // folderRecord's nodeName, not this file's — confirm intent.
                        childFile.Text = System.Text.Encoding.BigEndianUnicode.GetString(file.key.nodeName);
                        if (folderRecord.key.nodeName != null)
                        {
                            childFile.Text = System.Text.Encoding.BigEndianUnicode.GetString(file.key.nodeName);
                            childFile.Text = childFile.Text.Replace('\0', ' ');
                        }

                        // set the treenode data for the child item
                        eachFile.path = folderRecord.path + "\\" + childFile.Text;

                        childFile.Tag = eachFile;

                        returnDir.Nodes.Add(childFile);
                    }
                }

                // NOTE(review): lastRecordMatchesKey is computed but never used —
                // the loop continuation below relies on valence instead. Confirm
                // whether this was meant to drive the nextLeaf decision.
                bool lastRecordMatchesKey =
                    matchParentDir.parentID == dataOperations.convToLE(
                        BitConverter.ToUInt32(
                            currentLeaf.rawRecords[currentLeaf.rawRecords.Count() - 1].keyData, 0));

                // if the last record in the current leaf is within the parent directory,
                // the records may continue in the next leaf, so skip to the node in flink
                // in the next instance of the loop
                if (returnDir.Nodes.Count < folderRecord.valence)
                {
                    readThisNode = currentLeaf.BTNodeDescriptor.fLink;
                }
                else
                {
                    nextLeaf = false;
                }
            }

            return(returnDir);
        }
Пример #19
0
        // Exports one fork (data or resource) of the given catalog file entry to a
        // new file at 'path'. Does nothing when both forks are empty or absent.
        // The requested fork may itself be empty, in which case an empty file is
        // still created (same as the original behaviour).
        public void exportFile(HFSPlusCatalogFile entry, forkStream.forkType type, string path)
        {
            // BUGFIX: volume metadata files have no resource fork (see the null
            // checks elsewhere in this class) — guard both forks before
            // dereferencing them instead of throwing NullReferenceException.
            bool hasData     = entry.dataFork != null && entry.dataFork.forkDataValues.logicalSize > 0;
            bool hasResource = entry.resourceFork != null && entry.resourceFork.forkDataValues.logicalSize > 0;

            if (hasData || hasResource)
            {
                GPTScheme gpts = new GPTScheme(i);
                HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
                volumeStream vs = new volumeStream(hfsp);
                extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

                HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
                forkStream fs;
                long dataSize;

                if (type == forkStream.forkType.data)
                {
                    fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
                    dataSize = hasData ? (long)entry.dataFork.forkDataValues.logicalSize : 0;
                }
                else
                {
                    fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
                    dataSize = hasResource ? (long)entry.resourceFork.forkDataValues.logicalSize : 0;
                }

                fs.Position = 0;

                // BUGFIX: the output streams are now disposed even if a read or
                // write throws, and Read()'s return value is honoured — the old
                // loop assumed every Read filled the whole buffer.
                using (FileStream writeStream = new FileStream(path, FileMode.Create))
                using (BinaryWriter bw = new BinaryWriter(writeStream))
                {
                    byte[] buffer = new byte[8192];
                    long bytesWritten = 0;

                    while (bytesWritten < dataSize)
                    {
                        int toRead = (int)Math.Min(buffer.Length, dataSize - bytesWritten);
                        int bytesRead = fs.Read(buffer, 0, toRead);

                        if (bytesRead <= 0)
                        {
                            // Source ended early; stop rather than loop forever.
                            break;
                        }

                        bw.Write(buffer, 0, bytesRead);
                        bytesWritten += bytesRead;
                    }
                }
            }
        }
Пример #20
0
        // Builds the ListView row (one cell per detail column) for a single tree
        // node, dispatching on the runtime type name of the node's Tag. Every
        // branch must add cells in the same column order:
        // CNID, create, content-mod, attr-mod, backup, access, permissions,
        // data size, rsrc size, data start LBA, rsrc start LBA, data frags,
        // rsrc frags, data MD5, data SHA1, rsrc MD5, rsrc SHA1, is-deleted, path.
        private ListViewItem getNodeRowContents(TreeNode theTree)
        {
            ListViewItem row = new ListViewItem(theTree.Text);

            if (theTree.Tag != null)
            {
                string tagType = theTree.Tag.GetType().ToString();

                switch (tagType)
                {
                case "Disk_Reader.HFSPlusCatalogFolder":
                    HFSPlusCatalogFolder folderTag = (HFSPlusCatalogFolder)theTree.Tag;
                    row.Tag = folderTag;

                    row.SubItems.Add(folderTag.folderID.ToString());

                    // An HFS+ timestamp of zero means "not set" — show it blank.
                    row.SubItems.Add(folderTag.createDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.createDate.ToString() : "");
                    row.SubItems.Add(folderTag.contentModDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.contentModDate.ToString() : "");
                    row.SubItems.Add(folderTag.attributeModDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.attributeModDate.ToString() : "");
                    row.SubItems.Add(folderTag.backupDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.backupDate.ToString() : "");
                    row.SubItems.Add(folderTag.accessDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.accessDate.ToString() : "");

                    string folderPermissions =
                        rwx(folderTag.permissions.fileMode.owner.read, folderTag.permissions.fileMode.owner.write, folderTag.permissions.fileMode.owner.execute) + "/" +
                        rwx(folderTag.permissions.fileMode.group.read, folderTag.permissions.fileMode.group.write, folderTag.permissions.fileMode.group.execute) + "/" +
                        rwx(folderTag.permissions.fileMode.other.read, folderTag.permissions.fileMode.other.write, folderTag.permissions.fileMode.other.execute);
                    row.SubItems.Add(folderPermissions);

                    // Folders have no forks or hashes — pad those columns.
                    row.SubItems.Add("");               // data fork size
                    row.SubItems.Add("");               // resource fork size
                    row.SubItems.Add("");               // data start sector LBA
                    row.SubItems.Add("");               // rsrc start sector
                    row.SubItems.Add("");               // data fragments count
                    row.SubItems.Add("");               // rsrc fragments count
                    row.SubItems.Add("");               // data fork MD5
                    row.SubItems.Add("");               // data fork SHA1
                    row.SubItems.Add("");               // resource fork MD5
                    row.SubItems.Add("");               // resource fork SHA1
                    row.SubItems.Add("");               // is deleted
                    row.SubItems.Add(folderTag.path);

                    break;

                case "Disk_Reader.HFSPlus+volumeHeader":
                    HFSPlus.volumeHeader headerTag = (HFSPlus.volumeHeader)theTree.Tag;
                    row.Tag = headerTag;

                    row.SubItems.Add("");               // CNID
                    row.SubItems.Add(headerTag.createDate > HFSPlus.FromHFSPlusTime(0) ? headerTag.createDate.ToString() : "");
                    row.SubItems.Add(headerTag.modifyDate > HFSPlus.FromHFSPlusTime(0) ? headerTag.modifyDate.ToString() : "");
                    row.SubItems.Add("");               // attribute mod date
                    row.SubItems.Add(headerTag.backupDate > HFSPlus.FromHFSPlusTime(0) ? headerTag.backupDate.ToString() : "");
                    row.SubItems.Add("");               // access date
                    row.SubItems.Add("");               // permissions
                    row.SubItems.Add("");               // data fork size
                    row.SubItems.Add("");               // resource fork size
                    row.SubItems.Add("");               // data start sector LBA
                    row.SubItems.Add("");               // rsrc start sector
                    row.SubItems.Add("");               // data fragments count
                    row.SubItems.Add("");               // rsrc fragments count
                    row.SubItems.Add("");               // data fork MD5
                    row.SubItems.Add("");               // data fork SHA1
                    row.SubItems.Add("");               // resource fork MD5
                    row.SubItems.Add("");               // resource fork SHA1
                    row.SubItems.Add("");               // is deleted
                    row.SubItems.Add(headerTag.path);

                    break;

                case "Disk_Reader.HFSPlusCatalogFile":

                    HFSPlusCatalogFile fileTag = (HFSPlusCatalogFile)theTree.Tag;
                    row.Tag = fileTag;

                    row.SubItems.Add(fileTag.fileID.ToString());
                    row.SubItems.Add(fileTag.createDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.createDate.ToString() : "");                // creation date
                    row.SubItems.Add(fileTag.contentModDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.contentModDate.ToString() : "");       // content mod date
                    row.SubItems.Add(fileTag.attributeModDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.attributeModDate.ToString() : "");   // attributes mod date
                    row.SubItems.Add(fileTag.backupDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.backupDate.ToString() : "");               // backup date
                    // access date - Mac OS X does not use this - only POSIX implementations
                    row.SubItems.Add(fileTag.accessDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.accessDate.ToString() : "");

                    string filePermissions =
                        rwx(fileTag.permissions.fileMode.owner.read, fileTag.permissions.fileMode.owner.write, fileTag.permissions.fileMode.owner.execute) + "/" +
                        rwx(fileTag.permissions.fileMode.group.read, fileTag.permissions.fileMode.group.write, fileTag.permissions.fileMode.group.execute) + "/" +
                        rwx(fileTag.permissions.fileMode.other.read, fileTag.permissions.fileMode.other.write, fileTag.permissions.fileMode.other.execute);
                    row.SubItems.Add(filePermissions);                                                      // file permissions

                    row.SubItems.Add(fileTag.dataFork.forkDataValues.logicalSize.ToString());               // data fork size

                    int rsrccount = 0;
                    if (fileTag.resourceFork != null)
                    {
                        row.SubItems.Add(fileTag.resourceFork.forkDataValues.logicalSize.ToString());

                        // only try to iterate through resource fork extents if a resource fork exists
                        // (volume metadata files do not have a resource fork)
                        // BUGFIX: this loop previously ran to the DATA fork's extent
                        // count while indexing the RESOURCE fork's extents.
                        for (int idx = 0; idx < fileTag.resourceFork.forkDataValues.extents.Count(); idx++)
                        {
                            if (fileTag.resourceFork.forkDataValues.extents[idx].blockCount > 0)
                            {
                                rsrccount++;
                            }
                        }
                    }
                    else
                    {
                        row.SubItems.Add("0");                                                              // resource fork size
                    }

                    if (fileTag.dataFork.forkDataValues.extents[0].startBlock > 0)
                    {
                        row.SubItems.Add(fileTag.dataFork.forkDataValues.extents[0].startBlock.ToString());
                    }
                    else
                    {
                        row.SubItems.Add("");                                                               // start sector LBA
                    }

                    if (fileTag.resourceFork != null)
                    {
                        if (fileTag.resourceFork.forkDataValues.extents[0].startBlock > 0)
                        {
                            row.SubItems.Add(fileTag.resourceFork.forkDataValues.extents[0].startBlock.ToString());
                        }
                        else
                        {
                            row.SubItems.Add("");                                                           // resource start sector
                        }
                    }
                    else
                    {
                        // BUGFIX: previously no cell was added here when there was no
                        // resource fork, shifting every later column left by one.
                        row.SubItems.Add("");                                                               // resource start sector
                    }

                    int datacount = 0;
                    for (int idx = 0; idx < fileTag.dataFork.forkDataValues.extents.Count(); idx++)
                    {
                        if (fileTag.dataFork.forkDataValues.extents[idx].blockCount > 0)
                        {
                            datacount++;
                        }
                    }
                    row.SubItems.Add(datacount.ToString());                                                 // data fragments count

                    row.SubItems.Add(rsrccount.ToString());                                                 // resource fragments count
                    row.SubItems.Add("");                                                                   // data fork MD5
                    row.SubItems.Add("");                                                                   // data fork SHA1
                    row.SubItems.Add("");                                                                   // resource fork MD5
                    row.SubItems.Add("");                                                                   // resource fork SHA1
                    row.SubItems.Add("");                                                                   // is deleted
                    row.SubItems.Add(fileTag.path);

                    break;

                case "Disk_Reader.attributesLeafNode+HFSPlusAttrForkData":
                    // Fork-data attributes currently contribute no detail columns.
                    break;

                case "Disk_Reader.attributesLeafNode+HFSPlusAttrInlineData":
                    attributesLeafNode.HFSPlusAttrInlineData inlineTag = (attributesLeafNode.HFSPlusAttrInlineData)theTree.Tag;
                    row.Tag = inlineTag;

                    row.SubItems.Add(inlineTag.key.fileID.ToString());
                    row.SubItems.Add("");                                    // creation date
                    row.SubItems.Add("");                                    // content mod date
                    row.SubItems.Add("");                                    // attributes mod date
                    row.SubItems.Add("");                                    // backup date
                    row.SubItems.Add("");                                    // access date
                    row.SubItems.Add("");                                    // file permissions
                    row.SubItems.Add(inlineTag.otherData.Length.ToString()); // data fork size
                    row.SubItems.Add("");                                    // resource fork size
                    row.SubItems.Add("");                                    // data start sector LBA
                    row.SubItems.Add("");                                    // rsrc start sector LBA
                    row.SubItems.Add("");                                    // data fragments count
                    row.SubItems.Add("");                                    // rsrc fragments count
                    row.SubItems.Add("");                                    // data fork MD5
                    row.SubItems.Add("");                                    // data fork SHA1
                    row.SubItems.Add("");                                    // resource fork MD5
                    row.SubItems.Add("");                                    // resource fork SHA1
                    row.SubItems.Add("");                                    // is deleted
                    row.SubItems.Add("");                                    // path
                    break;
                }
            }
            return(row);
        }

        // Formats one owner/group/other permission triplet, e.g. "rwx" or "r--".
        private static string rwx(bool canRead, bool canWrite, bool canExecute)
        {
            return (canRead ? "r" : "-") + (canWrite ? "w" : "-") + (canExecute ? "x" : "-");
        }