public absHFSPlusBTree(HFSPlusFile knownExtents, volumeStream hfsp)
{
    extents = knownExtents;

    // read enough of the fork to ensure the header node is captured
    byte[] firstBlock = new byte[hfsp.volume.blockSize];

    this.fs = new forkStream(hfsp, knownExtents, forkStream.forkType.data);
    fs.Read(firstBlock, 0, firstBlock.Length);

    // nodeSize sits 18 bytes into the header record, which immediately follows
    // the 14-byte node descriptor, i.e. at offset 32 within the header node
    this.nodeSize = dataOperations.convToLE(BitConverter.ToUInt16(firstBlock, 32));

    byte[] headerData = getNodeData(0, nodeSize);
    header = new headerNode(ref headerData);

    // check whether all of the data extents are known
    long treeSize = header.headerInfo.totalNodes * header.headerInfo.nodeSize;
    if (fs.Length >= treeSize && fs.Length > 0)
    {
        isRawDataComplete = true;
        buildMap(fs);
    }
}
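// Illustration only: a minimal, self-contained sketch of the offset arithmetic used above,
// assuming a hypothetical byte[] `headerNodeBytes` holding the raw header node. The 14-byte
// BTNodeDescriptor is followed by the BTHeaderRec, whose nodeSize field is 18 bytes in,
// hence 14 + 18 = 32. Values are big-endian on disk, so the bytes are reversed before the
// little-endian BitConverter reads them (this is what dataOperations.convToLE presumably does).
static ushort ReadNodeSize(byte[] headerNodeBytes)
{
    byte[] raw = new byte[2];
    Array.Copy(headerNodeBytes, 32, raw, 0, 2);   // 14-byte descriptor + 18 bytes into BTHeaderRec
    Array.Reverse(raw);                           // big-endian on disk -> little-endian in memory
    return BitConverter.ToUInt16(raw, 0);
}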
public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
{
    // take a file, return hashes for its data fork and resource fork
    dataOperations.hashValues[] hv = new dataOperations.hashValues[2];

    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[file.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

    if (file.dataFork.forkDataValues.logicalSize > 0)
    {
        HFSPlusFile hfspfile = new HFSPlusFile(file, eof);
        forkStream fs = new forkStream(vs, hfspfile, forkStream.forkType.data);

        hv[0] = dataOperations.getHashValues(fs, (long)hfspfile.dataLogicalSize);
    }

    if (file.resourceFork != null && file.resourceFork.forkDataValues.logicalSize > 0)
    {
        HFSPlusFile hfspfile = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
        forkStream fs = new forkStream(vs, hfspfile, forkStream.forkType.resource);

        hv[1] = dataOperations.getHashValues(fs, (long)hfspfile.rsrcLogicalSize);
    }

    return hv;
}
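// Side note (assumption): dataOperations.getHashValues is not shown in this listing. With the
// standard System.Security.Cryptography API, hashing a fork exposed as a Stream looks roughly
// like the sketch below; unlike getHashValues, it reads to the end of the stream rather than
// stopping at a supplied logical size, so allocated-but-unused tail blocks would be included.
static string Md5Hex(System.IO.Stream fork)
{
    fork.Position = 0;
    using (var md5 = System.Security.Cryptography.MD5.Create())
    {
        byte[] digest = md5.ComputeHash(fork);
        return BitConverter.ToString(digest).Replace("-", "");
    }
}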
public void showForkData(HFSPlusCatalogFile entry, uint block, forkStream.forkType type)
{
    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
    HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

    forkStream fs;
    if (type == forkStream.forkType.data)
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
    }
    else
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
    }

    contentDisplay = hexHeadLine + "\r\n";

    if (fs.Length > 0)
    {
        byte[] showBlock = new byte[hfsp.blockSize];

        fs.Seek(hfsp.blockSize * block, SeekOrigin.Begin);
        fs.Read(showBlock, 0, (int)hfsp.blockSize);

        rawDataDisplay(showBlock);
    }
}
private TreeNode iterateHashChildren(TreeNode parent, volumeStream vs)
{
    TreeNode replaceParent = new TreeNode();
    replaceParent.Tag = parent.Tag;

    foreach (TreeNode child in parent.Nodes)
    {
        TreeNode replaceChild = new TreeNode();

        if (child.Tag is HFSPlusCatalogFolder)
        {
            replaceChild = iterateHashChildren(child, vs);
            replaceChild.Tag = child.Tag;
        }
        else if (child.Tag is HFSPlusCatalogFile)
        {
            HFSPlusCatalogFile tag = (HFSPlusCatalogFile)child.Tag;
            dataOperations.hashValues hashes = new dataOperations.hashValues();

            // MD5 is taken over the data fork
            if (tag.dataFork != null && tag.dataFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile theFileData = new HFSPlusFile(tag.dataFork, forkStream.forkType.data);
                forkStream fs = new forkStream(vs, theFileData, forkStream.forkType.data);

                dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileData.dataLogicalSize);
                hashes.md5hash = hv.md5hash;
            }

            // SHA-1 is taken over the resource fork
            if (tag.resourceFork != null && tag.resourceFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile theFileResource = new HFSPlusFile(tag.resourceFork, forkStream.forkType.resource);
                forkStream fs = new forkStream(vs, theFileResource, forkStream.forkType.resource);

                dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileResource.rsrcLogicalSize);
                hashes.sha1hash = hv.sha1hash;
            }

            tag.hashes = hashes;
            replaceChild.Tag = tag;
        }
        else
        {
            replaceChild.Tag = child.Tag;
        }

        replaceChild.Text = child.Text;
        replaceParent.Nodes.Add(replaceChild);
    }

    replaceParent.Text = parent.Text;
    return replaceParent;
}
public TreeNode getFullDirectoryList()
{
    TreeNode result = new TreeNode();

    HFSPlusFile rawExtentsOverflow = new HFSPlusFile(volHead.extentsFile, forkStream.forkType.data);
    HFSPlusFile rawCatalog = new HFSPlusFile(volHead.catalogFile, forkStream.forkType.data);
    HFSPlusFile rawAttributesFile = new HFSPlusFile(volHead.attributesFile, forkStream.forkType.data);

    volumeStream hfsp_vs = new volumeStream(this);

    catalogFile cf = new catalogFile(rawCatalog, hfsp_vs);
    extentsOverflowFile eof = new extentsOverflowFile(rawExtentsOverflow, hfsp_vs);
    attributesFile af = new attributesFile(rawAttributesFile, hfsp_vs);

    addMetaFilesToTree(ref result);

    result = buildDirectoryTree(result, cf, eof, af);

    // the volume header's fileCount excludes the HFS+ special files, so allow for
    // the five metadata files added by addMetaFilesToTree when verifying the count
    if (filecount == volHead.fileCount + 5)
    {
        volumeHeader vh = this.volHead;
        vh.fileCountVerified = true;
        this.volHead = vh;
    }

    if (foldercount == volHead.folderCount)
    {
        volumeHeader vh = this.volHead;
        vh.folderCountVerified = true;
        this.volHead = vh;
    }

    return result;
}
public void exportFile(HFSPlusCatalogFile entry, forkStream.forkType type, string path)
{
    // null-guard both forks: either may be absent on a given catalog record
    if ((entry.dataFork != null && entry.dataFork.forkDataValues.logicalSize > 0) ||
        (entry.resourceFork != null && entry.resourceFork.forkDataValues.logicalSize > 0))
    {
        GPTScheme gpts = new GPTScheme(i);
        HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
        volumeStream vs = new volumeStream(hfsp);
        extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
        HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

        forkStream fs;
        long dataSize;

        if (type == forkStream.forkType.data)
        {
            fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
            dataSize = (long)entry.dataFork.forkDataValues.logicalSize;
        }
        else
        {
            fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
            dataSize = (long)entry.resourceFork.forkDataValues.logicalSize;
        }

        fs.Position = 0;

        FileStream writeStream = new FileStream(path, FileMode.Create);
        BinaryWriter bw = new BinaryWriter(writeStream);

        // copy the fork out in 8 KiB chunks, trimming the final buffer to the logical size
        long bytesWritten = 0;
        byte[] buffer;

        while (bytesWritten < dataSize)
        {
            if (bytesWritten + 8192 <= dataSize)
            {
                buffer = new byte[8192];
                fs.Read(buffer, 0, 8192);
                bw.Write(buffer, 0, 8192);
                bytesWritten += 8192;
            }
            else
            {
                buffer = new byte[dataSize - bytesWritten];
                fs.Read(buffer, 0, buffer.Length);
                bw.Write(buffer, 0, buffer.Length);
                bytesWritten += buffer.Length;
            }
        }

        bw.Close();
        writeStream.Close();
    }
}
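// Design note: the 8192-byte chunking above keeps memory bounded for large forks, but it
// trusts Read() to fill the whole buffer in one call. A standalone sketch of the same copy
// that honours the count Read() actually returns (CopyExact is an illustrative helper,
// not part of this codebase; requires System and System.IO):
static void CopyExact(Stream source, Stream destination, long bytesToCopy)
{
    byte[] buffer = new byte[8192];
    while (bytesToCopy > 0)
    {
        int want = (int)Math.Min(buffer.Length, bytesToCopy);
        int got = source.Read(buffer, 0, want);
        if (got <= 0)
        {
            throw new EndOfStreamException("fork ended before the expected logical size");
        }
        destination.Write(buffer, 0, got);
        bytesToCopy -= got;
    }
}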
public forkStream(volumeStream inner, HFSPlusFile theFork, forkType type)
{
    this.inner = inner;

    switch (type)
    {
        case forkType.data:
            SetLength((long)theFork.dataLogicalSize);
            this.fork = theFork.fileContent.dataExtents;
            break;
        case forkType.resource:
            SetLength((long)theFork.rsrcLogicalSize);
            this.fork = theFork.fileContent.resourceExtents;
            break;
    }
}
public void showForkData(HFSPlusCatalogFile entry, forkStream.forkType type)
{
    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
    HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

    forkStream fs;
    if (type == forkStream.forkType.data)
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
    }
    else
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
    }

    // the fork stream is built, but display logic for this overload is not implemented yet
    throw new NotImplementedException();
}
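// If this overload is ever completed, it could follow the block-based showForkData above.
// Sketch only; HexDumpFirstBlock is a hypothetical helper, not part of this codebase, and
// uses only standard System / System.IO calls:
static string HexDumpFirstBlock(Stream fs, int blockSize)
{
    byte[] block = new byte[blockSize];
    fs.Seek(0, SeekOrigin.Begin);
    int read = fs.Read(block, 0, blockSize);
    // "AB-CD-EF-..." -> "AB CD EF ..."
    return BitConverter.ToString(block, 0, read).Replace('-', ' ');
}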
private TreeNode getHFSPTree(HFSPlus hfsp, HFSPlusCatalogFolder folderID)
{
    TreeNode tn = new TreeNode();

    volumeStream hfsp_vs = new volumeStream(hfsp);

    HFSPlusFile rawCatalog = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
    HFSPlusFile rawAttributes = new HFSPlusFile(hfsp.volHead.attributesFile, forkStream.forkType.data);
    HFSPlusFile rawExtentsOverflow = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);

    // need to get all attributes files
    HFSPlusCatalogFolder folderRecord = folderID;

    catalogFile catalog = new catalogFile(rawCatalog, hfsp_vs);
    attributesFile attributes = new attributesFile(rawAttributes, hfsp_vs);
    extentsOverflowFile eof = new extentsOverflowFile(rawExtentsOverflow, hfsp_vs);

    displayTree = hfsp.getFullDirectoryList(folderRecord, catalog, eof, attributes);

    tn = displayTree;

    return tn;
}
private TreeNode getVolumeTree(GPTScheme.entry partition, GPTScheme.partitionType type)
{
    TreeNode tn = new TreeNode();

    if (type == GPTScheme.partitionType.HFSPlus)
    {
        HFSPlus hfsp = new HFSPlus(i, partition);
        volumeStream hfsp_vs = new volumeStream(hfsp);

        HFSPlusFile rawCatalog = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
        HFSPlusFile rawAttributes = new HFSPlusFile(hfsp.volHead.attributesFile, forkStream.forkType.data);
        HFSPlusFile rawExtents = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);

        extentsOverflowFile extentsOverflow = new extentsOverflowFile(rawExtents, hfsp_vs);
        catalogFile catalog = new catalogFile(rawCatalog, hfsp_vs);
        attributesFile attributes = new attributesFile(rawAttributes, hfsp_vs);

        tn = hfsp.getRootDirectoryContents(catalog, extentsOverflow, attributes);
        tn.Tag = hfsp.volHead;
    }

    return tn;
}
public extentsOverflowFile(HFSPlusFile knownExtents, volumeStream vs) : base(knownExtents, vs) { }
public catalogFile(HFSPlusFile knownExtents, volumeStream vs) : base(knownExtents, vs) { }
public attributesFile(HFSPlusFile knownExtents, volumeStream vs) : base(knownExtents, vs) { }
public TreeNode getDirectoryAndChildren(HFSPlusCatalogFolder folderRecord, extentsOverflowFile eof, int partitionAssoc)
{
    TreeNode returnDir = new TreeNode();

    if (folderRecord.key.nodeName != null)
    {
        returnDir.Text = System.Text.Encoding.BigEndianUnicode.GetString(folderRecord.key.nodeName);
        returnDir.Text = returnDir.Text.Replace('\0', ' ');
    }

    folderRecord.partitionAssoc = partitionAssoc;
    returnDir.Tag = folderRecord;

    HFSPlusCatalogKey matchParentDir = new HFSPlusCatalogKey();

    // find the first HFSPlusFileRecord for which the current directory is the parent
    matchParentDir.parentID = folderRecord.folderID;
    if (folderRecord.key.nodeName != null)
    {
        matchParentDir.nodeName = folderRecord.key.nodeName;
    }

    uint readThisNode = getLeafNodeContainingRecord(matchParentDir);
    bool nextLeaf = true;

    // records with the same parent are stored sequentially in the file,
    // but may continue over into the next node
    while (nextLeaf)
    {
        byte[] leafData = new byte[this.nodeSize];

        fs.Seek(readThisNode * this.nodeSize, SeekOrigin.Begin);
        fs.Read(leafData, 0, this.nodeSize);

        catalogLeafNode currentLeaf = new catalogLeafNode(ref leafData);

        foreach (HFSPlusCatalogFolder folder in currentLeaf.folderRecords)
        {
            if (folder.key.parentID == folderRecord.folderID)
            {
                TreeNode childDir = new TreeNode();

                if (folder.key.nodeName != null)
                {
                    childDir.Text = System.Text.Encoding.BigEndianUnicode.GetString(folder.key.nodeName);
                    childDir.Text = childDir.Text.Replace('\0', ' ');
                }

                // set the treenode data for the child item
                folder.path = folderRecord.path + "\\" + childDir.Text;
                folder.partitionAssoc = partitionAssoc;
                childDir.Tag = folder;

                returnDir.Nodes.Add(childDir);
            }
        }

        foreach (HFSPlusCatalogFile file in currentLeaf.fileRecords)
        {
            if (file.key.parentID == folderRecord.folderID)
            {
                TreeNode childFile = new TreeNode();

                HFSPlusCatalogFile eachFile = file;
                eachFile.partitionAssoc = partitionAssoc;

                // HFSPlusFile should be able to get all of a file's blocks as part of the constructor
                HFSPlusFile blockFinder = new HFSPlusFile(eachFile, eof);

                //add the discovered extents back into the return object
                //eachFile.dataFork.forkDataValues.extents.Clear();
                //eachFile.resourceFork.forkDataValues.extents.Clear();
                //foreach (hfsPlusForkData.HFSPlusExtentRecord extent in blockFinder.fileContent.dataExtents)
                //{
                //    eachFile.dataFork.forkDataValues.extents.Add(extent);
                //}
                //foreach (hfsPlusForkData.HFSPlusExtentRecord extent in blockFinder.fileContent.resourceExtents)
                //{
                //    eachFile.resourceFork.forkDataValues.extents.Add(extent);
                //}

                // if it can't... cry?
                if (!(blockFinder.allDataBlocksKnown && blockFinder.allResourceBlocksKnown))
                {
                    throw new Exception("Disk_Reader.HFSPlusFile class failed to get all blocks.");
                }

                // a handful of volume metadata files have highly specialised permissions
                HFSPlusCatalogFolder tag = (HFSPlusCatalogFolder)returnDir.Tag;
                if (tag.key.parentID == 2) // CNID 2 is the root folder
                {
                    if (returnDir.Text == " HFS+ Private Data")
                    {
                        HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = eachFile.permissions;
                        resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.iNodeNum;
                        eachFile.permissions = resetPermissions;
                    }
                }
                // 'hlnk' / 'hfs+': the type and creator codes carried by HFS+ hard link files
                else if (eachFile.userInfo.fileType == 0x686C6E6B && eachFile.userInfo.fileCreator == 0x6866732B)
                {
                    HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = eachFile.permissions;
                    resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.linkCount;
                    eachFile.permissions = resetPermissions;
                }
                else if (eachFile.permissions.fileMode.blockSpecial || eachFile.permissions.fileMode.charSpecial)
                {
                    HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = eachFile.permissions;
                    resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.rawDevice;
                    eachFile.permissions = resetPermissions;
                }
                else
                {
                    HFSPlusCatalogRecord.HFSPlusPermissions resetPermissions = eachFile.permissions;
                    resetPermissions.type = HFSPlusCatalogRecord.HFSPlusPermissions.specialType.reserved;
                    eachFile.permissions = resetPermissions;
                }

                if (file.key.nodeName != null)
                {
                    childFile.Text = System.Text.Encoding.BigEndianUnicode.GetString(file.key.nodeName);
                    childFile.Text = childFile.Text.Replace('\0', ' ');
                }

                // set the treenode data for the child item
                eachFile.path = folderRecord.path + "\\" + childFile.Text;
                childFile.Tag = eachFile;

                returnDir.Nodes.Add(childFile);
            }
        }

        // computed but not yet used; the valence check below decides whether to read the next leaf
        bool lastRecordMatchesKey = matchParentDir.parentID ==
            dataOperations.convToLE(BitConverter.ToUInt32(currentLeaf.rawRecords[currentLeaf.rawRecords.Count() - 1].keyData, 0));

        // if the last record in the current leaf is within the parent directory,
        // the records may continue in the next leaf, so skip to the node in fLink
        // in the next instance of the loop
        if (returnDir.Nodes.Count < folderRecord.valence)
        {
            readThisNode = currentLeaf.BTNodeDescriptor.fLink;
        }
        else
        {
            nextLeaf = false;
        }
    }

    return returnDir;
}
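// Side note on the constants above: catalog node names are stored as big-endian UTF-16,
// hence Encoding.BigEndianUnicode, and the hard-link type/creator values are four-character
// codes. A tiny standalone check (FourCC is an illustrative helper, not part of this codebase):
static string FourCC(uint value)
{
    return new string(new[]
    {
        (char)((value >> 24) & 0xFF),
        (char)((value >> 16) & 0xFF),
        (char)((value >> 8) & 0xFF),
        (char)(value & 0xFF),
    });
}

// FourCC(0x686C6E6B) == "hlnk"   (hard link file type)
// FourCC(0x6866732B) == "hfs+"   (hard link creator)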