Example No. 1
        public TreeNode getFullDirectoryList()
        {
            TreeNode result = new TreeNode();

            HFSPlusFile rawExtentsOverflow = new HFSPlusFile(volHead.extentsFile, forkStream.forkType.data);
            HFSPlusFile rawCatalog         = new HFSPlusFile(volHead.catalogFile, forkStream.forkType.data);
            HFSPlusFile rawAttributesFile  = new HFSPlusFile(volHead.attributesFile, forkStream.forkType.data);

            volumeStream        hfsp_vs = new volumeStream(this);
            catalogFile         cf      = new catalogFile(rawCatalog, hfsp_vs);
            extentsOverflowFile eof     = new extentsOverflowFile(rawExtentsOverflow, hfsp_vs);
            attributesFile      af      = new attributesFile(rawAttributesFile, hfsp_vs);

            addMetaFilesToTree(ref result);

            result = buildDirectoryTree(result, cf, eof, af);

            // addMetaFilesToTree presumably adds the five HFS+ special files
            // (allocation, extents overflow, catalog, attributes, startup),
            // which the volume header's fileCount does not include
            if (filecount == volHead.fileCount + 5)
            {
                volumeHeader vh = this.volHead;
                vh.fileCountVerified = true;
                this.volHead         = vh;
            }

            if (foldercount == volHead.folderCount)
            {
                volumeHeader vh = this.volHead;
                vh.folderCountVerified = true;
                this.volHead           = vh;
            }

            return(result);
        }
        public void showForkData(HFSPlusCatalogFile entry, uint block, forkStream.forkType type)
        {
            GPTScheme           gpts = new GPTScheme(i);
            HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
            volumeStream        vs   = new volumeStream(hfsp);
            extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
            forkStream  fs;

            if (type == forkStream.forkType.data)
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
            }
            else
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
            }

            contentDisplay = hexHeadLine + "\r\n";

            if (fs.Length > 0)
            {
                byte[] showBlock = new byte[hfsp.blockSize];

                // cast to long before multiplying so block offsets past 4 GiB
                // don't overflow 32-bit arithmetic
                fs.Seek((long)hfsp.blockSize * block, SeekOrigin.Begin);
                fs.Read(showBlock, 0, (int)hfsp.blockSize);

                rawDataDisplay(showBlock);
            }
        }
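
A possible call site for the block viewer above: paging through a fork one allocation block at a time. The currentBlock and selectedEntry fields and the button handler are illustrative assumptions, not part of the examples.

        private uint currentBlock = 0;
        private HFSPlusCatalogFile selectedEntry;   // hypothetical: set when a file node is picked

        private void nextBlockButton_Click(object sender, EventArgs e)
        {
            // hypothetical handler: advance the hex viewer by one allocation block
            currentBlock++;
            showForkData(selectedEntry, currentBlock, forkStream.forkType.data);
        }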
        public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
        {
            // take a file, return hashes for its data fork and resource fork
            dataOperations.hashValues[] hv = new dataOperations.hashValues[2];

            GPTScheme gpts = new GPTScheme(i);
            HFSPlus   hfsp = new HFSPlus(i, gpts.entries[file.partitionAssoc]);

            volumeStream        vs  = new volumeStream(hfsp);
            extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            if (file.dataFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile hfspfile = new HFSPlusFile(file, eof);
                forkStream  fs       = new forkStream(vs, hfspfile, forkStream.forkType.data);

                hv[0] = dataOperations.getHashValues(fs, (long)hfspfile.dataLogicalSize);
            }

            if (file.resourceFork != null)
            {
                if (file.resourceFork.forkDataValues.logicalSize > 0)
                {
                    // note: built from the catalog record's resource fork directly,
                    // without consulting the extents overflow file
                    HFSPlusFile hfspfile = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
                    forkStream  fs       = new forkStream(vs, hfspfile, forkStream.forkType.resource);

                    hv[1] = dataOperations.getHashValues(fs, (long)hfspfile.rsrcLogicalSize);
                }
            }

            return(hv);
        }
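
A sketch of driving hashFile over a whole directory tree built by the examples above; the method name and the results list are illustrative assumptions.

        private void hashAllFiles(TreeNode dir, List<dataOperations.hashValues[]> results)
        {
            // hypothetical recursive walk: hash every file record in the tree
            foreach (TreeNode child in dir.Nodes)
            {
                if (child.Tag is HFSPlusCatalogFile)
                {
                    results.Add(hashFile((HFSPlusCatalogFile)child.Tag));
                }
                else if (child.Tag is HFSPlusCatalogFolder)
                {
                    hashAllFiles(child, results);
                }
            }
        }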
        public void exportFile(HFSPlusCatalogFile entry, forkStream.forkType type, string path)
        {
            if (entry.dataFork.forkDataValues.logicalSize > 0 || entry.resourceFork.forkDataValues.logicalSize > 0)
            {
                GPTScheme           gpts = new GPTScheme(i);
                HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
                volumeStream        vs   = new volumeStream(hfsp);
                extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

                HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
                forkStream  fs;
                long        dataSize = 0;

                if (type == forkStream.forkType.data)
                {
                    fs       = new forkStream(vs, hfsp_file, forkStream.forkType.data);
                    dataSize = (long)entry.dataFork.forkDataValues.logicalSize;
                }
                else
                {
                    fs       = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
                    dataSize = (long)entry.resourceFork.forkDataValues.logicalSize;
                }

                fs.Position = 0;

                FileStream   writeStream = new FileStream(path, FileMode.Create);
                BinaryWriter bw          = new BinaryWriter(writeStream);

                long   bytesWritten = 0;
                byte[] buffer;

                // note: this copy assumes forkStream.Read fills the requested
                // count in one call; a general Stream would need a read loop
                while (bytesWritten < dataSize)
                {
                    if (bytesWritten + 8192 <= dataSize)
                    {
                        buffer = new byte[8192];
                        fs.Read(buffer, 0, 8192);

                        bw.Write(buffer, 0, 8192);

                        bytesWritten += 8192;
                    }
                    else
                    {
                        buffer = new byte[dataSize - bytesWritten];
                        fs.Read(buffer, 0, buffer.Length);

                        bw.Write(buffer, 0, buffer.Length);

                        bytesWritten += buffer.Length;
                    }
                }

                bw.Close();
                writeStream.Close();
            }
        }
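
For context, a minimal sketch of calling exportFile from a tree selection; the directoryTree control and the output path are assumptions.

        TreeNode selected = directoryTree.SelectedNode;   // hypothetical TreeView
        if (selected != null && selected.Tag is HFSPlusCatalogFile)
        {
            exportFile((HFSPlusCatalogFile)selected.Tag,
                       forkStream.forkType.data,
                       @"C:\exports\recovered.bin");      // hypothetical destination
        }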
Example No. 6
        public HFSPlusFile(HFSPlusCatalogFile catalogEntry, extentsOverflowFile eofInput)
        {
            fileContent.dataExtents     = new List<hfsPlusForkData.HFSPlusExtentRecord>();
            fileContent.resourceExtents = new List<hfsPlusForkData.HFSPlusExtentRecord>();

            if (catalogEntry.dataFork != null)
            {
                addDataFork(catalogEntry.dataFork);
            }
            if (catalogEntry.resourceFork != null)
            {
                addResourceFork(catalogEntry.resourceFork);
            }
            getAllExtents(eofInput, catalogEntry.fileID);
        }
        public TreeNode getSubDirectories(TreeNode tn)
        {
            TreeNode  result = tn;
            GPTScheme gpts   = new GPTScheme(i);

            if (tn.Tag is HFSPlusCatalogFolder)
            {
                HFSPlusCatalogFolder folder = (HFSPlusCatalogFolder)tn.Tag;
                HFSPlus             hfsp    = new HFSPlus(i, gpts.getValidTable()[folder.partitionAssoc]);
                volumeStream        vs      = new volumeStream(hfsp);
                extentsOverflowFile eof     = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
                catalogFile         cf      = new catalogFile(new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data), vs);

                result     = hfsp.getDirectoryChildren(folder, cf, eof);
                result.Tag = tn.Tag;
            }
            return(result);
        }
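
Several methods below add an empty-string child when a folder has subfolders; that is the usual WinForms placeholder trick, making the TreeView draw an expander so children can be fetched lazily. A sketch of the matching BeforeExpand handler, assuming getSubDirectories as defined above (the event wiring is an assumption):

        private void directoryTree_BeforeExpand(object sender, TreeViewCancelEventArgs e)
        {
            // hypothetical lazy-expansion handler: swap the placeholder for real children
            if (e.Node.Nodes.Count == 1 && e.Node.Nodes[0].Text == "")
            {
                e.Node.Nodes.Clear();
                TreeNode populated = getSubDirectories(e.Node);
                foreach (TreeNode child in populated.Nodes)
                {
                    e.Node.Nodes.Add((TreeNode)child.Clone());
                }
            }
        }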
Example No. 8
        public TreeNode getRootDirectoryContents(catalogFile cf, extentsOverflowFile eof, attributesFile af)
        {
            HFSPlusCatalogFolder rootFolderParentRecord = new HFSPlusCatalogFolder();

            rootFolderParentRecord.folderID       = 1;    // CNID 1 is the parent of the root folder
            rootFolderParentRecord.partitionAssoc = this.partitionNo;

            TreeNode rootDirParent = getDirectoryChildren(rootFolderParentRecord, cf, eof);

            HFSPlusCatalogFolder rootFolderRecord = (HFSPlusCatalogFolder)rootDirParent.Nodes[0].Tag;

            rootFolderRecord.path = this.volHead.path;

            TreeNode rootDir = getDirectoryChildren(rootFolderRecord, cf, eof, af);

            addMetaFilesToTree(ref rootDir);

            foreach (TreeNode child in rootDir.Nodes)
            {
                if (child.Tag is HFSPlusCatalogFolder)
                {
                    TreeNode tn      = getDirectoryChildren((HFSPlusCatalogFolder)child.Tag, cf, eof);
                    int      counter = 0;
                    foreach (TreeNode childNode in tn.Nodes)
                    {
                        if (childNode.Tag is HFSPlusCatalogFolder)
                        {
                            counter++;
                        }
                    }

                    if (counter > 0)
                    {
                        // if there are children, add a placeholder
                        child.Nodes.Add("");
                    }
                }
            }

            return(rootDir);
        }
        public void showForkData(HFSPlusCatalogFile entry, forkStream.forkType type)
        {
            GPTScheme           gpts = new GPTScheme(i);
            HFSPlus             hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
            volumeStream        vs   = new volumeStream(hfsp);
            extentsOverflowFile eof  = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

            HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);
            forkStream  fs;

            if (type == forkStream.forkType.data)
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
            }
            else
            {
                fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
            }

            throw new NotImplementedException();
        }
Example No. 10
        private TreeNode getHFSPTree(HFSPlus hfsp, HFSPlusCatalogFolder folderID)
        {
            volumeStream hfsp_vs = new volumeStream(hfsp);

            HFSPlusFile rawCatalog         = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
            HFSPlusFile rawAttributes      = new HFSPlusFile(hfsp.volHead.attributesFile, forkStream.forkType.data);
            HFSPlusFile rawExtentsOverflow = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);
            // need to get all attributes files

            HFSPlusCatalogFolder folderRecord = folderID;

            catalogFile         catalog    = new catalogFile(rawCatalog, hfsp_vs);
            attributesFile      attributes = new attributesFile(rawAttributes, hfsp_vs);
            extentsOverflowFile eof        = new extentsOverflowFile(rawExtentsOverflow, hfsp_vs);

            displayTree = hfsp.getFullDirectoryList(folderRecord, catalog, eof, attributes);

            return(displayTree);
        }
Example No. 11
        private void getAllExtents(extentsOverflowFile eofInput, uint CNID)
        {
            extentsOverflowFile.HFSPlusExtentKey extentKey = new extentsOverflowFile.HFSPlusExtentKey();
            extentsOverflowLeafNode.extentsOverflowLeafRecord record;

            extentKey.fileID = CNID;
            extentKey.type   = extentsOverflowFile.forkType.data;

            // the extents overflow B-tree is keyed by (fileID, fork type, startBlock),
            // and each leaf record holds up to eight extents, so every lookup resumes
            // at the number of blocks recovered so far
            while (this.knownDataBlocks < this.totalDataBlocks)
            {
                extentKey.startBlock = knownDataBlocks;
                record = eofInput.getExtentRecordWithKey(extentKey);

                int i = 0;
                while (i < 8 && record.extents[i].blockCount > 0)
                {
                    this.fileContent.dataExtents.Add(record.extents[i]);
                    this.knownDataBlocks += record.extents[i].blockCount;
                    i++;
                }
                this.allDataBlocksKnown = knownDataBlocks == totalDataBlocks;
            }

            extentKey.type = extentsOverflowFile.forkType.resource;
            while (this.knownResourceBlocks < this.totalResourceBlocks)
            {
                extentKey.startBlock = knownResourceBlocks;
                record = eofInput.getExtentRecordWithKey(extentKey);
                int i = 0;
                // the bounds check must come first to avoid reading past the
                // eight-extent record
                while (i < 8 && record.extents[i].blockCount > 0)
                {
                    this.fileContent.resourceExtents.Add(record.extents[i]);
                    this.knownResourceBlocks += record.extents[i].blockCount;
                    i++;
                }
                this.allResourceBlocksKnown = knownResourceBlocks == totalResourceBlocks;
            }
        }
Example No. 12
        private TreeNode getVolumeTree(GPTScheme.entry partition, GPTScheme.partitionType type)
        {
            TreeNode tn = new TreeNode();

            if (type == GPTScheme.partitionType.HFSPlus)
            {
                HFSPlus      hfsp    = new HFSPlus(i, partition);
                volumeStream hfsp_vs = new volumeStream(hfsp);

                HFSPlusFile rawCatalog    = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
                HFSPlusFile rawAttributes = new HFSPlusFile(hfsp.volHead.attributesFile, forkStream.forkType.data);
                HFSPlusFile rawExtents    = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);

                extentsOverflowFile extentsOverflow = new extentsOverflowFile(rawExtents, hfsp_vs);
                catalogFile         catalog         = new catalogFile(rawCatalog, hfsp_vs);
                attributesFile      attributes      = new attributesFile(rawAttributes, hfsp_vs);

                tn     = hfsp.getRootDirectoryContents(catalog, extentsOverflow, attributes);
                tn.Tag = hfsp.volHead;
            }

            return(tn);
        }
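
A sketch of how getVolumeTree might be driven from the partition table. The getPartitionType helper is a hypothetical stand-in for however the caller classifies entries; only getValidTable and entries appear in these examples.

        GPTScheme gpts     = new GPTScheme(i);
        TreeNode  diskRoot = new TreeNode("disk");

        foreach (GPTScheme.entry partition in gpts.getValidTable())
        {
            // getPartitionType(...) is hypothetical, not shown in these examples
            TreeNode volumeTree = getVolumeTree(partition, gpts.getPartitionType(partition));
            if (volumeTree.Nodes.Count > 0)
            {
                diskRoot.Nodes.Add(volumeTree);
            }
        }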
Example No. 13
        private TreeNode buildDirectoryTree(TreeNode parent, catalogFile cf, extentsOverflowFile eof, attributesFile af)
        {
            TreeNode replaceParent = new TreeNode();

            replaceParent.Tag  = parent.Tag;
            replaceParent.Text = parent.Text;

            foreach (TreeNode childItem in parent.Nodes)
            {
                if (childItem.Tag is HFSPlusCatalogFolder)
                {
                    HFSPlusCatalogFolder childDirectoryRecord = (HFSPlusCatalogFolder)childItem.Tag;

                    TreeNode contents = getDirectoryChildren(childDirectoryRecord, cf, eof, af);

                    contents = buildDirectoryTree(contents, cf, eof, af);

                    replaceParent.Nodes.Add(contents);
                    foldercount++;
                }
                else if (childItem.Tag is HFSPlusCatalogFile)
                {
                    replaceParent.Nodes.Add(childItem);
                    filecount++;
                }
                else if (childItem.Tag is attributesLeafNode.HFSPlusAttrInlineData)
                {
                    replaceParent.Nodes.Add(childItem);
                }
                else if (childItem.Tag is attributesLeafNode.HFSPlusAttrForkData)
                {
                    replaceParent.Nodes.Add(childItem);
                }
            }

            return(replaceParent);
        }
Example No. 15
        public TreeNode getDirectoryChildren(HFSPlusCatalogFolder folderRecord, catalogFile cf, extentsOverflowFile eof, attributesFile af)
        {
            // get every file and directory inside the current one
            TreeNode returnDir = cf.getDirectoryAndChildren(folderRecord, eof, this.partitionNo);

            foreach (TreeNode child in returnDir.Nodes)
            {
                // check if there are any alternate data streams for the files
                if (child.Tag is HFSPlusCatalogFile)
                {
                    HFSPlusCatalogFile data = (HFSPlusCatalogFile)child.Tag;

                    attributesFile.HFSPlusAttrKey attrKey = new attributesFile.HFSPlusAttrKey();

                    attrKey.fileID     = data.fileID;
                    attrKey.startBlock = 0;
                    attributesLeafNode.attributesDataForFile allAttributes = af.getAttrFileDataWithKey(attrKey);

                    foreach (attributesLeafNode.HFSPlusAttrForkData fork in allAttributes.forks)
                    {
                        TreeNode attribute = new TreeNode();

                        attributesLeafNode.HFSPlusAttrForkData tag = fork;
                        tag.partitionAssoc = folderRecord.partitionAssoc;

                        attribute.Text = child.Text + " > " + System.Text.Encoding.BigEndianUnicode.GetString(fork.key.attrName);
                        attribute.Tag  = tag;

                        returnDir.Nodes.Add(attribute);
                    }
                    foreach (attributesLeafNode.HFSPlusAttrInlineData inline in allAttributes.inline)
                    {
                        TreeNode attribute = new TreeNode();

                        attributesLeafNode.HFSPlusAttrInlineData tag = inline;
                        tag.partitionAssoc = folderRecord.partitionAssoc;

                        attribute.Text = child.Text + " > " + System.Text.Encoding.BigEndianUnicode.GetString(inline.key.attrName);
                        attribute.Tag  = tag;
                        returnDir.Nodes.Add(attribute);
                    }
                }
            }

            return(returnDir);
        }
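
Attribute nodes end up as siblings of the file nodes they belong to, so a consumer can only tell them apart by the type of the Tag; a short sketch of that dispatch, using returnDir as in the method above:

        foreach (TreeNode node in returnDir.Nodes)
        {
            if (node.Tag is attributesLeafNode.HFSPlusAttrForkData ||
                node.Tag is attributesLeafNode.HFSPlusAttrInlineData)
            {
                // extended attribute / alternate data stream of a file above
            }
            else if (node.Tag is HFSPlusCatalogFile)
            {
                // ordinary file record
            }
        }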
Example No. 16
        public TreeNode getDirectoryChildren(HFSPlusCatalogFolder folderRecord, catalogFile cf, extentsOverflowFile eof)
        {
            // get every file and directory inside the current one
            TreeNode returnDir = cf.getDirectoryAndChildren(folderRecord, eof, this.partitionNo);

            foreach (TreeNode child in returnDir.Nodes)
            {
                if (child.Tag is HFSPlusCatalogFolder)
                {
                    TreeNode tn      = cf.getDirectoryAndChildren((HFSPlusCatalogFolder)child.Tag, eof, this.partitionNo);
                    int      counter = 0;
                    foreach (TreeNode childNode in tn.Nodes)
                    {
                        if (childNode.Tag is HFSPlusCatalogFolder)
                        {
                            counter++;
                        }
                    }

                    if (counter > 0)
                    {
                        // if there are children, add a placeholder
                        child.Nodes.Add("");
                    }
                }
            }

            return(returnDir);
        }
Example No. 17
        public TreeNode getFullDirectoryList(HFSPlusCatalogFolder folderRecord, catalogFile cf, extentsOverflowFile eof, attributesFile af)
        {
            // getDirectoryChildren already sets the returned node's Tag and Text
            // from folderRecord, so no separate initialisation is needed
            TreeNode returnDir = getDirectoryChildren(folderRecord, cf, eof, af);

            returnDir = buildDirectoryTree(returnDir, cf, eof, af);

            return returnDir;
        }
Example No. 27
        public TreeNode getDirectoryAndChildren(HFSPlusCatalogFolder folderRecord, extentsOverflowFile eof, int partitionAssoc)
        {
            TreeNode returnDir = new TreeNode();

            if (folderRecord.key.nodeName != null)
            {
                returnDir.Text = System.Text.Encoding.BigEndianUnicode.GetString(folderRecord.key.nodeName);
                returnDir.Text = returnDir.Text.Replace('\0', ' ');
            }
            folderRecord.partitionAssoc = partitionAssoc;
            returnDir.Tag = folderRecord;

            HFSPlusCatalogKey matchParentDir = new HFSPlusCatalogKey();

            // find the first HFSPlusFileRecord for whom the current directory is the parent
            matchParentDir.parentID = folderRecord.folderID;
            if (folderRecord.key.nodeName != null)
            {
                matchParentDir.nodeName = folderRecord.key.nodeName;
            }
            uint readThisNode = getLeafNodeContainingRecord(matchParentDir);
            bool nextLeaf     = true;

            // records with the same parent are stored sequentially in the file,
            // but may continue over into the next node
            while (nextLeaf)
            {
                byte[] leafData = new byte[this.nodeSize];

                fs.Seek(readThisNode * this.nodeSize, SeekOrigin.Begin);
                fs.Read(leafData, 0, this.nodeSize);

                catalogLeafNode currentLeaf = new catalogLeafNode(ref leafData);

                foreach (HFSPlusCatalogFolder folder in currentLeaf.folderRecords)
                {
                    if (folder.key.parentID == folderRecord.folderID)
                    {
                        TreeNode childDir = new TreeNode();
                        if (folder.key.nodeName != null)
                        {
                            childDir.Text = System.Text.Encoding.BigEndianUnicode.GetString(folder.key.nodeName);
                            childDir.Text = childDir.Text.Replace('\0', ' ');
                        }

                        // set the treenode data for the child item
                        folder.path           = folderRecord.path + "\\" + childDir.Text;
                        folder.partitionAssoc = partitionAssoc;

                        childDir.Tag = folder;
                        returnDir.Nodes.Add(childDir);
                    }
                }

                foreach (HFSPlusCatalogFile file in currentLeaf.fileRecords)
                {
                    if (file.key.parentID == folderRecord.folderID)
                    {
                        TreeNode childFile = new TreeNode();

                        HFSPlusCatalogFile eachFile = file;
                        eachFile.partitionAssoc = partitionAssoc;

                        // HFSPlusFile should be able to get all of a file's blocks as part of the constructor
                        HFSPlusFile blockFinder = new HFSPlusFile(eachFile, eof);

                        //add the discovered extents back into the return object
                        //eachFile.dataFork.forkDataValues.extents.Clear();
                        //eachFile.resourceFork.forkDataValues.extents.Clear();
                        //foreach (hfsPlusForkData.HFSPlusExtentRecord extent in blockFinder.fileContent.dataExtents)
                        //{
                        //    eachFile.dataFork.forkDataValues.extents.Add(extent);
                        //}
                        //foreach (hfsPlusForkData.HFSPlusExtentRecord extent in blockFinder.fileContent.resourceExtents)
                        //{
                        //    eachFile.resourceFork.forkDataValues.extents.Add(extent);
                        //}

                        // if it can't... cry?
                        if (!(blockFinder.allDataBlocksKnown && blockFinder.allResourceBlocksKnown))
                        {
                            throw new Exception("Disk_Reader.HFSPlusFile class failed to get all blocks.");
                        }

                        // a handful of volume metadata files have highly specialised permissions
                        HFSPlusCatalogFolder tag = (HFSPlusCatalogFolder)returnDir.Tag;
                        if (tag.key.parentID == 2)
                        {
                            // CNID 2 is the root directory; the private-data folder's
                            // real name begins with four NUL characters, which the
                            // Replace('\0', ' ') above turned into spaces
                            if (returnDir.Text == "    HFS+ Private Data")
                            {
                                HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = eachFile.permissions;
                                resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.iNodeNum;

                                eachFile.permissions = resetPermissions;
                            }
                        }
                        // 0x686C6E6B and 0x6866732B are the four-character codes
                        // 'hlnk' and 'hfs+', which mark the record as a hard link
                        else if (eachFile.userInfo.fileType == 0x686C6E6B && eachFile.userInfo.fileCreator == 0x6866732B)
                        {
                            HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = eachFile.permissions;
                            resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.linkCount;

                            eachFile.permissions = resetPermissions;
                        }
                        else if (eachFile.permissions.fileMode.blockSpecial || eachFile.permissions.fileMode.charSpecial)
                        {
                            HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = eachFile.permissions;
                            resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.rawDevice;

                            eachFile.permissions = resetPermissions;
                        }
                        else
                        {
                            HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = eachFile.permissions;
                            resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.reserved;

                            eachFile.permissions = resetPermissions;
                        }

                        if (file.key.nodeName != null)
                        {
                            childFile.Text = System.Text.Encoding.BigEndianUnicode.GetString(file.key.nodeName);
                            childFile.Text = childFile.Text.Replace('\0', ' ');
                        }

                        // set the treenode data for the child item
                        eachFile.path = folderRecord.path + "\\" + childFile.Text;

                        childFile.Tag = eachFile;

                        returnDir.Nodes.Add(childFile);
                    }
                }

                // note: computed but currently unused; the valence check below
                // decides whether to follow the leaf chain instead
                bool lastRecordMatchesKey =
                    matchParentDir.parentID == dataOperations.convToLE(
                        BitConverter.ToUInt32(
                            currentLeaf.rawRecords[currentLeaf.rawRecords.Count() - 1].keyData, 0));

                // if the last record in the current leaf is within the parent directory,
                // the records may continue in the next leaf, so skip to the node in flink
                // in the next instance of the loop
                if (returnDir.Nodes.Count < folderRecord.valence)
                {
                    readThisNode = currentLeaf.BTNodeDescriptor.fLink;
                }
                else
                {
                    nextLeaf = false;
                }
            }

            return(returnDir);
        }