public HFSPlusCatalogFolder(ref byte[] rawData)
    : base(ref rawData)
{
    // catalog folder records are stored big-endian; convToLE byte-swaps each field
    this.flags = dataOperations.convToLE(BitConverter.ToUInt16(rawData, 2));
    this.valence = dataOperations.convToLE(BitConverter.ToUInt32(rawData, 4));
    this.folderID = dataOperations.convToLE(BitConverter.ToUInt32(rawData, 8));

    this.createDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 12)));
    this.contentModDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 16)));
    this.attributeModDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 20)));
    this.accessDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 24)));
    this.backupDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 28)));

    // fixed 16-byte sub-structures: BSD permissions, Finder user info and Finder info
    byte[] folderPermissions = new byte[16];
    Array.Copy(rawData, 32, folderPermissions, 0, 16);
    byte[] folderUserInfo = new byte[16];
    Array.Copy(rawData, 48, folderUserInfo, 0, 16);
    byte[] folderFinderInfo = new byte[16];
    Array.Copy(rawData, 64, folderFinderInfo, 0, 16);

    this.userInfo = HFSPlusFinderInfo.getFolderUserInfo(ref folderUserInfo);
    this.finderInfo = HFSPlusFinderInfo.getFolderFinderInfo(ref folderFinderInfo);
    this.permissions = getHFSPlusPermissions(ref folderPermissions);

    this.textEncoding = dataOperations.convToLE(BitConverter.ToUInt32(rawData, 80));
    this.reserved = dataOperations.convToLE(BitConverter.ToUInt32(rawData, 84));
}
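HFSPlus.FromHFSPlusTime is not shown in this listing; below is a minimal standalone sketch of the conversion it is assumed to perform. HFS+ stores dates as unsigned 32-bit seconds since midnight, 1 January 1904, GMT (the class name here is hypothetical):

using System;

static class HFSPlusTimeSketch
{
    // HFS+ epoch: midnight, 1 January 1904, GMT
    static readonly DateTime hfsPlusEpoch = new DateTime(1904, 1, 1, 0, 0, 0, DateTimeKind.Utc);

    public static DateTime FromHFSPlusTime(uint seconds)
    {
        // a raw value of 0 maps to the epoch itself, which the UI treats as "no date set"
        return hfsPlusEpoch.AddSeconds(seconds);
    }
}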
public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
{
    // take a file, return hashes for its data fork and resource fork
    dataOperations.hashValues[] hv = new dataOperations.hashValues[2];

    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[file.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

    if (file.dataFork.forkDataValues.logicalSize > 0)
    {
        HFSPlusFile hfspfile = new HFSPlusFile(file, eof);
        forkStream fs = new forkStream(vs, hfspfile, forkStream.forkType.data);

        hv[0] = dataOperations.getHashValues(fs, (long)hfspfile.dataLogicalSize);
    }

    if (file.resourceFork != null && file.resourceFork.forkDataValues.logicalSize > 0)
    {
        HFSPlusFile hfspfile = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
        forkStream fs = new forkStream(vs, hfspfile, forkStream.forkType.resource);

        hv[1] = dataOperations.getHashValues(fs, (long)hfspfile.rsrcLogicalSize);
    }

    return hv;
}
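dataOperations.getHashValues is not shown in this listing; the following standalone sketch illustrates one way a fork could be hashed over exactly its logical size, producing MD5 and SHA-1 hex digests (the class and method names are hypothetical and the real helper may differ):

using System;
using System.IO;
using System.Security.Cryptography;

static class ForkHashSketch
{
    // returns [md5Hex, sha1Hex] computed over the first logicalSize bytes of the fork stream
    public static string[] hashFork(Stream fork, long logicalSize)
    {
        using (MD5 md5 = MD5.Create())
        using (SHA1 sha1 = SHA1.Create())
        {
            byte[] buffer = new byte[8192];
            long remaining = logicalSize;

            while (remaining > 0)
            {
                int read = fork.Read(buffer, 0, (int)Math.Min(buffer.Length, remaining));
                if (read <= 0) break;   // fork ended early; hash whatever was readable

                md5.TransformBlock(buffer, 0, read, null, 0);
                sha1.TransformBlock(buffer, 0, read, null, 0);
                remaining -= read;
            }

            md5.TransformFinalBlock(new byte[0], 0, 0);
            sha1.TransformFinalBlock(new byte[0], 0, 0);

            return new string[]
            {
                BitConverter.ToString(md5.Hash).Replace("-", ""),
                BitConverter.ToString(sha1.Hash).Replace("-", "")
            };
        }
    }
}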
public void hashAll()
{
    TreeNode replaceFileTree = new TreeNode();

    setFileTreeFromImage(i);

    foreach (TreeNode child in fileTree.Nodes)
    {
        if (child.Tag is HFSPlus.volumeHeader)
        {
            HFSPlus.volumeHeader vh = (HFSPlus.volumeHeader)child.Tag;
            GPTScheme gpts = new GPTScheme(i);
            HFSPlus hfsp = new HFSPlus(i, gpts.entries[vh.partitionNo]);
            volumeStream vs = new volumeStream(hfsp);

            replaceFileTree.Nodes.Add(iterateHashChildren(child, vs));
        }
        else
        {
            replaceFileTree.Nodes.Add(child);
        }
    }

    replaceFileTree.Tag = displayTree.Tag;
    this.fileTree = replaceFileTree;
}
public void showForkData(HFSPlusCatalogFile entry, uint block, forkStream.forkType type)
{
    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
    HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

    forkStream fs;
    if (type == forkStream.forkType.data)
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
    }
    else
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
    }

    contentDisplay = hexHeadLine + "\r\n";

    if (fs.Length > 0)
    {
        // read the requested allocation block from the fork and hand it to the hex display
        byte[] showBlock = new byte[hfsp.blockSize];

        fs.Seek(hfsp.blockSize * block, SeekOrigin.Begin);
        fs.Read(showBlock, 0, (int)hfsp.blockSize);

        rawDataDisplay(showBlock);
    }
}
public HFSPlusCatalogFile(ref byte[] rawData)
    : base(ref rawData)
{
    // decode the record flags, then expose the individual flag bits
    ushort flags = dataOperations.convToLE(BitConverter.ToUInt16(rawData, 2));
    this.fileLockedBit = (fileFlags.kHFSFileLockedBit & (fileFlags)flags) == fileFlags.kHFSFileLockedBit;
    this.fileLockedMask = (fileFlags.kHFSFileLockedMask & (fileFlags)flags) == fileFlags.kHFSFileLockedMask;
    this.threadExistsBit = (fileFlags.kHFSThreadExistsBit & (fileFlags)flags) == fileFlags.kHFSThreadExistsBit;
    this.threadExistsMask = (fileFlags.kHFSThreadExistsMask & (fileFlags)flags) == fileFlags.kHFSThreadExistsMask;

    this.reserved1 = dataOperations.convToLE(BitConverter.ToUInt32(rawData, 4));
    this.fileID = dataOperations.convToLE(BitConverter.ToUInt32(rawData, 8));

    this.createDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 12)));
    this.contentModDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 16)));
    this.attributeModDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 20)));
    this.accessDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 24)));
    this.backupDate = HFSPlus.FromHFSPlusTime(dataOperations.convToLE(BitConverter.ToUInt32(rawData, 28)));

    // fixed 16-byte sub-structures: BSD permissions, Finder user info and Finder info
    byte[] filePermissions = new byte[16];
    Array.Copy(rawData, 32, filePermissions, 0, 16);
    byte[] fileUserInfo = new byte[16];
    Array.Copy(rawData, 48, fileUserInfo, 0, 16);
    byte[] fileFinderInfo = new byte[16];
    Array.Copy(rawData, 64, fileFinderInfo, 0, 16);

    this.userInfo = HFSPlusFinderInfo.getFileUserInfo(ref fileUserInfo);
    this.finderInfo = HFSPlusFinderInfo.getFileFinderInfo(ref fileFinderInfo);
    this.permissions = getHFSPlusPermissions(ref filePermissions);

    this.textEncoding = dataOperations.convToLE(BitConverter.ToUInt32(rawData, 80));
    this.reserved2 = dataOperations.convToLE(BitConverter.ToUInt32(rawData, 84));

    // fork data structures follow the fixed fields: data fork at offset 88, resource fork at 168
    this.dataFork = new hfsPlusForkData(ref rawData, 88);
    this.resourceFork = new hfsPlusForkData(ref rawData, 168);
}
public void exportFile(HFSPlusCatalogFile entry, forkStream.forkType type, string path)
{
    if (entry.dataFork.forkDataValues.logicalSize > 0 || entry.resourceFork.forkDataValues.logicalSize > 0)
    {
        GPTScheme gpts = new GPTScheme(i);
        HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
        volumeStream vs = new volumeStream(hfsp);
        extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
        HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

        forkStream fs;
        long dataSize = 0;

        if (type == forkStream.forkType.data)
        {
            fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
            dataSize = (long)entry.dataFork.forkDataValues.logicalSize;
        }
        else
        {
            fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
            dataSize = (long)entry.resourceFork.forkDataValues.logicalSize;
        }

        fs.Position = 0;

        FileStream writeStream = new FileStream(path, FileMode.Create);
        BinaryWriter bw = new BinaryWriter(writeStream);

        // copy the fork to disk in 8 KiB chunks until the logical size has been written
        long bytesWritten = 0;
        byte[] buffer;

        while (bytesWritten < dataSize)
        {
            if (bytesWritten + 8192 <= dataSize)
            {
                buffer = new byte[8192];
                fs.Read(buffer, 0, 8192);
                bw.Write(buffer, 0, 8192);
                bytesWritten += 8192;
            }
            else
            {
                buffer = new byte[dataSize - bytesWritten];
                fs.Read(buffer, 0, buffer.Length);
                bw.Write(buffer, 0, buffer.Length);
                bytesWritten += buffer.Length;
            }
        }

        bw.Close();
        writeStream.Close();
    }
}
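For comparison, a minimal stream-to-stream sketch of the same 8 KiB copy loop, written against plain System.IO streams and checking the return value of Read (names are illustrative; the method above reads from a forkStream instead):

using System;
using System.IO;

static class ForkCopySketch
{
    // copy exactly dataSize bytes (or fewer, if the source ends early) in 8 KiB chunks
    public static void copyFork(Stream source, Stream destination, long dataSize)
    {
        byte[] buffer = new byte[8192];
        long bytesWritten = 0;

        while (bytesWritten < dataSize)
        {
            int toRead = (int)Math.Min(buffer.Length, dataSize - bytesWritten);
            int read = source.Read(buffer, 0, toRead);
            if (read <= 0) break;   // source ran out before the logical size was reached

            destination.Write(buffer, 0, read);
            bytesWritten += read;
        }
    }
}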
public TreeNode getSubDirectories(TreeNode tn)
{
    TreeNode result = tn;
    GPTScheme gpts = new GPTScheme(i);

    if (tn.Tag is HFSPlusCatalogFolder)
    {
        HFSPlusCatalogFolder folder = (HFSPlusCatalogFolder)tn.Tag;
        HFSPlus hfsp = new HFSPlus(i, gpts.getValidTable()[folder.partitionAssoc]);
        volumeStream vs = new volumeStream(hfsp);
        extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
        catalogFile cf = new catalogFile(new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data), vs);

        result = hfsp.getDirectoryChildren(folder, cf, eof);
        result.Tag = tn.Tag;
    }

    return result;
}
public void showForkData(HFSPlusCatalogFile entry, forkStream.forkType type)
{
    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
    HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

    forkStream fs;
    if (type == forkStream.forkType.data)
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
    }
    else
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
    }

    // displaying an entire fork (rather than a single block) has not been implemented yet
    throw new NotImplementedException();
}
private TreeNode getHFSPTree(HFSPlus hfsp, HFSPlusCatalogFolder folderID)
{
    TreeNode tn = new TreeNode();
    volumeStream hfsp_vs = new volumeStream(hfsp);

    HFSPlusFile rawCatalog = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
    HFSPlusFile rawAttributes = new HFSPlusFile(hfsp.volHead.attributesFile, forkStream.forkType.data);
    HFSPlusFile rawExtentsOverflow = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);

    // need to get all attributes files
    HFSPlusCatalogFolder folderRecord = folderID;

    catalogFile catalog = new catalogFile(rawCatalog, hfsp_vs);
    attributesFile attributes = new attributesFile(rawAttributes, hfsp_vs);
    extentsOverflowFile eof = new extentsOverflowFile(rawExtentsOverflow, hfsp_vs);

    displayTree = hfsp.getFullDirectoryList(folderRecord, catalog, eof, attributes);

    tn = displayTree;

    return tn;
}
private TreeNode getVolumeTree(GPTScheme.entry partition, GPTScheme.partitionType type)
{
    TreeNode tn = new TreeNode();

    if (type == GPTScheme.partitionType.HFSPlus)
    {
        HFSPlus hfsp = new HFSPlus(i, partition);
        volumeStream hfsp_vs = new volumeStream(hfsp);

        HFSPlusFile rawCatalog = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
        HFSPlusFile rawAttributes = new HFSPlusFile(hfsp.volHead.attributesFile, forkStream.forkType.data);
        HFSPlusFile rawExtents = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);

        extentsOverflowFile extentsOverflow = new extentsOverflowFile(rawExtents, hfsp_vs);
        catalogFile catalog = new catalogFile(rawCatalog, hfsp_vs);
        attributesFile attributes = new attributesFile(rawAttributes, hfsp_vs);

        tn = hfsp.getRootDirectoryContents(catalog, extentsOverflow, attributes);
        tn.Tag = hfsp.volHead;
    }

    return tn;
}
private TreeNode getVolumeTree(GPTScheme.entry partition, GPTScheme.partitionType type, HFSPlusCatalogFolder folderID)
{
    TreeNode tn = new TreeNode();

    try
    {
        if (type == GPTScheme.partitionType.HFSPlus)
        {
            HFSPlus hfsp = new HFSPlus(this.i, partition);
            tn = getHFSPTree(hfsp, folderID);
        }
    }
    catch (OutOfMemoryException)
    {
        // the list view is truncated when there are too many items to fit in system memory;
        // return the partial tree instead - viewing a sub directory avoids the truncation
        return tn;
    }

    return tn;
}
private ListViewItem getNodeRowContents(TreeNode theTree)
{
    ListViewItem row = new ListViewItem(theTree.Text);

    if (theTree.Tag != null)
    {
        string tagType = theTree.Tag.GetType().ToString();

        switch (tagType)
        {
            case "Disk_Reader.HFSPlusCatalogFolder":
                HFSPlusCatalogFolder folderTag = (HFSPlusCatalogFolder)theTree.Tag;
                row.Tag = folderTag;

                row.SubItems.Add(folderTag.folderID.ToString());
                // dates equal to the HFS+ epoch (raw value 0) are shown as blank
                row.SubItems.Add(folderTag.createDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.createDate.ToString() : "");
                row.SubItems.Add(folderTag.contentModDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.contentModDate.ToString() : "");
                row.SubItems.Add(folderTag.attributeModDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.attributeModDate.ToString() : "");
                row.SubItems.Add(folderTag.backupDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.backupDate.ToString() : "");
                row.SubItems.Add(folderTag.accessDate > HFSPlus.FromHFSPlusTime(0) ? folderTag.accessDate.ToString() : "");

                // build an owner/group/other permission string such as rwx/r-x/r--
                string folderPermissions = "";
                folderPermissions += folderTag.permissions.fileMode.owner.read ? "r" : "-";
                folderPermissions += folderTag.permissions.fileMode.owner.write ? "w" : "-";
                folderPermissions += folderTag.permissions.fileMode.owner.execute ? "x" : "-";
                folderPermissions += "/";
                folderPermissions += folderTag.permissions.fileMode.group.read ? "r" : "-";
                folderPermissions += folderTag.permissions.fileMode.group.write ? "w" : "-";
                folderPermissions += folderTag.permissions.fileMode.group.execute ? "x" : "-";
                folderPermissions += "/";
                folderPermissions += folderTag.permissions.fileMode.other.read ? "r" : "-";
                folderPermissions += folderTag.permissions.fileMode.other.write ? "w" : "-";
                folderPermissions += folderTag.permissions.fileMode.other.execute ? "x" : "-";
                row.SubItems.Add(folderPermissions);

                row.SubItems.Add("");   // data fork size
                row.SubItems.Add("");   // resource fork size
                row.SubItems.Add("");   // data start sector LBA
                row.SubItems.Add("");   // rsrc start sector
                row.SubItems.Add("");   // data fragments count
                row.SubItems.Add("");   // rsrc fragments count
                row.SubItems.Add("");   // data fork MD5
                row.SubItems.Add("");   // data fork SHA1
                row.SubItems.Add("");   // resource fork MD5
                row.SubItems.Add("");   // resource fork SHA1
                row.SubItems.Add("");   // is deleted
                row.SubItems.Add(folderTag.path);
                break;

            case "Disk_Reader.HFSPlus+volumeHeader":
                HFSPlus.volumeHeader headerTag = (HFSPlus.volumeHeader)theTree.Tag;
                row.Tag = headerTag;

                row.SubItems.Add("");   // CNID
                row.SubItems.Add(headerTag.createDate > HFSPlus.FromHFSPlusTime(0) ? headerTag.createDate.ToString() : "");
                row.SubItems.Add(headerTag.modifyDate > HFSPlus.FromHFSPlusTime(0) ? headerTag.modifyDate.ToString() : "");
                row.SubItems.Add("");   // attribute mod date
                row.SubItems.Add(headerTag.backupDate > HFSPlus.FromHFSPlusTime(0) ? headerTag.backupDate.ToString() : "");
                row.SubItems.Add("");   // access date
                row.SubItems.Add("");   // permissions
                row.SubItems.Add("");   // data fork size
                row.SubItems.Add("");   // resource fork size
                row.SubItems.Add("");   // data start sector LBA
                row.SubItems.Add("");   // rsrc start sector
                row.SubItems.Add("");   // data fragments count
                row.SubItems.Add("");   // rsrc fragments count
                row.SubItems.Add("");   // data fork MD5
                row.SubItems.Add("");   // data fork SHA1
                row.SubItems.Add("");   // resource fork MD5
                row.SubItems.Add("");   // resource fork SHA1
                row.SubItems.Add("");   // is deleted
                row.SubItems.Add(headerTag.path);
                break;

            case "Disk_Reader.HFSPlusCatalogFile":
                HFSPlusCatalogFile fileTag = (HFSPlusCatalogFile)theTree.Tag;
                row.Tag = fileTag;

                row.SubItems.Add(fileTag.fileID.ToString());
                row.SubItems.Add(fileTag.createDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.createDate.ToString() : "");                 // creation date
                row.SubItems.Add(fileTag.contentModDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.contentModDate.ToString() : "");         // content mod date
                row.SubItems.Add(fileTag.attributeModDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.attributeModDate.ToString() : "");     // attributes mod date
                row.SubItems.Add(fileTag.backupDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.backupDate.ToString() : "");                 // backup date
                // access date - Mac OS X does not use this - only POSIX implementations
                row.SubItems.Add(fileTag.accessDate > HFSPlus.FromHFSPlusTime(0) ? fileTag.accessDate.ToString() : "");

                string filePermissions = "";
                filePermissions += fileTag.permissions.fileMode.owner.read ? "r" : "-";
                filePermissions += fileTag.permissions.fileMode.owner.write ? "w" : "-";
                filePermissions += fileTag.permissions.fileMode.owner.execute ? "x" : "-";
                filePermissions += "/";
                filePermissions += fileTag.permissions.fileMode.group.read ? "r" : "-";
                filePermissions += fileTag.permissions.fileMode.group.write ? "w" : "-";
                filePermissions += fileTag.permissions.fileMode.group.execute ? "x" : "-";
                filePermissions += "/";
                filePermissions += fileTag.permissions.fileMode.other.read ? "r" : "-";
                filePermissions += fileTag.permissions.fileMode.other.write ? "w" : "-";
                filePermissions += fileTag.permissions.fileMode.other.execute ? "x" : "-";
                row.SubItems.Add(filePermissions);                                          // file permissions

                row.SubItems.Add(fileTag.dataFork.forkDataValues.logicalSize.ToString());   // data fork size

                int rsrccount = 0;
                if (fileTag.resourceFork != null)
                {
                    row.SubItems.Add(fileTag.resourceFork.forkDataValues.logicalSize.ToString());   // resource fork size

                    // only try to iterate through resource fork extents if a resource fork exists
                    // (volume metadata files do not have a resource fork)
                    for (int i = 0; i < fileTag.resourceFork.forkDataValues.extents.Count(); i++)
                    {
                        if (fileTag.resourceFork.forkDataValues.extents[i].blockCount > 0)
                        {
                            rsrccount++;
                        }
                    }
                }
                else
                {
                    row.SubItems.Add("0");  // resource fork size
                }

                if (fileTag.dataFork.forkDataValues.extents[0].startBlock > 0)
                {
                    row.SubItems.Add(fileTag.dataFork.forkDataValues.extents[0].startBlock.ToString());
                }
                else
                {
                    row.SubItems.Add("");   // data start sector LBA
                }

                if (fileTag.resourceFork != null && fileTag.resourceFork.forkDataValues.extents[0].startBlock > 0)
                {
                    row.SubItems.Add(fileTag.resourceFork.forkDataValues.extents[0].startBlock.ToString());
                }
                else
                {
                    row.SubItems.Add("");   // resource start sector
                }

                int datacount = 0;
                for (int i = 0; i < fileTag.dataFork.forkDataValues.extents.Count(); i++)
                {
                    if (fileTag.dataFork.forkDataValues.extents[i].blockCount > 0)
                    {
                        datacount++;
                    }
                }

                row.SubItems.Add(datacount.ToString());   // data fragments count
                row.SubItems.Add(rsrccount.ToString());   // resource fragments count
                row.SubItems.Add("");                     // data fork MD5
                row.SubItems.Add("");                     // data fork SHA1
                row.SubItems.Add("");                     // resource fork MD5
                row.SubItems.Add("");                     // resource fork SHA1
                row.SubItems.Add("");                     // is deleted
                row.SubItems.Add(fileTag.path);
                break;

            case "Disk_Reader.attributesLeafNode+HFSPlusAttrForkData":
                break;

            case "Disk_Reader.attributesLeafNode+HFSPlusAttrInlineData":
                attributesLeafNode.HFSPlusAttrInlineData inlineTag = (attributesLeafNode.HFSPlusAttrInlineData)theTree.Tag;
                row.Tag = inlineTag;

                row.SubItems.Add(inlineTag.key.fileID.ToString());
                row.SubItems.Add("");   // creation date
                row.SubItems.Add("");   // content mod date
                row.SubItems.Add("");   // attributes mod date
                row.SubItems.Add("");   // backup date
                row.SubItems.Add("");   // access date
                row.SubItems.Add("");   // file permissions
                row.SubItems.Add(inlineTag.otherData.Length.ToString());   // data fork size
                row.SubItems.Add("");   // resource fork size
                row.SubItems.Add("");   // data start sector LBA
                row.SubItems.Add("");   // rsrc start sector LBA
                row.SubItems.Add("");   // data fragments count
                row.SubItems.Add("");   // rsrc fragments count
                row.SubItems.Add("");   // data fork MD5
                row.SubItems.Add("");   // data fork SHA1
                row.SubItems.Add("");   // resource fork MD5
                row.SubItems.Add("");   // resource fork SHA1
                row.SubItems.Add("");   // is deleted
                row.SubItems.Add("");   // path
                break;
        }
    }

    return row;
}
public imageMap(absImageStream ais)
{
    if (ais.scheme == absImageStream.schemeType.GPT)
    {
        GPTScheme gpts = new GPTScheme(ais);
        mapBlock block = new mapBlock();
        block.location = 0;

        if (gpts.protectiveMBRExists)
        {
            block.length = 1;
            block.name = "MBR";
            block.type = tileType.MBR;
            partitionblocks.Add(block);
        }

        if (gpts.headerFound)
        {
            block.location = 1;
            block.length = 1;
            block.name = "GPT Header";
            block.type = tileType.GPT;
            partitionblocks.Add(block);

            block.location = gpts.tablestart;
            block.length = gpts.tablelength / ais.sectorSize;
            if (block.length < 1)
            {
                block.length = 1;
            }
            block.name = "GPT Primary Table";
            block.type = tileType.GPT;
            partitionblocks.Add(block);
        }

        if (gpts.backupFound)
        {
            block.location = gpts.backupHeader.mainheader;
            block.length = 1;
            block.name = "Backup GPT Header";
            block.type = tileType.GPT;
            partitionblocks.Add(block);

            block.location = gpts.backupHeader.tablestart;
            block.length = gpts.tablelength / ais.sectorSize;
            if (block.length < 1)
            {
                block.length = 1;
            }
            block.name = "GPT Backup Table";
            block.type = tileType.GPT;
            partitionblocks.Add(block);
        }

        foreach (GPTScheme.entry entry in gpts.entries)
        {
            block.location = entry.partStartLBA;
            block.length = entry.partLength;
            block.name = entry.name;
            block.type = tileType.vol_unknown;

            if (gpts.findPartitionType(entry) == GPTScheme.partitionType.HFSPlus)
            {
                // for HFS+ volumes, read the allocation file so the map can show used and free blocks
                HFSPlus hfsp = new HFSPlus(ais, entry);
                block.mapSectorsPerBlock = (int)hfsp.volHead.blockSize / ais.sectorSize;

                forkStream fs = new forkStream(new volumeStream(hfsp),
                    new HFSPlusFile(hfsp.volHead.allocationFile, forkStream.forkType.data),
                    forkStream.forkType.data);

                block.allocationMap = new byte[(int)fs.Length];
                fs.Read(block.allocationMap, 0, (int)fs.Length);
            }
            else
            {
                block.allocationMap = null;
            }

            partitionblocks.Add(block);
        }
    }

    partitionblocks.Sort(CompareBlocksByPosition);
}
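The bytes read into block.allocationMap are the HFS+ allocation bitmap: one bit per allocation block, most significant bit first, with a set bit marking the block as in use. A minimal sketch of querying such a map (the helper class and method are hypothetical):

static class AllocationMapSketch
{
    // true when the given allocation block is marked as in use in the bitmap
    public static bool isBlockAllocated(byte[] allocationMap, uint blockNumber)
    {
        byte b = allocationMap[blockNumber / 8];
        return (b & (0x80 >> (int)(blockNumber % 8))) != 0;
    }
}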