public HFSPlus(absImageStream fileSet, GPTScheme.entry partition) : base(fileSet, partition)
{
    // Default allocation block size until the volume header has been parsed.
    this.blockSize = 4096;
    this.partitionNo = partition.partitionNo;
    this.path = fileSet.bf.F.Name + "\\" + partition.name;

    // One reusable 512-byte buffer for both header reads.
    byte[] headerSector = new byte[512];

    // The primary HFS+ volume header lives 1024 bytes into the partition.
    ais.Seek(this.volumeStart + 1024, SeekOrigin.Begin);
    ais.Read(headerSector, 0, 512);

    // 0x48 0x2B == "H+", the HFS+ volume signature.
    if (headerSector[0] == 0x48 && headerSector[1] == 0x2B)
    {
        volHead = getVolumeHeader(headerSector);
    }

    // NOTE(review): volEnd includes one extra sector beyond partLength *
    // sectorSize — presumably because partLength is an inclusive LBA count;
    // confirm against GPTScheme.entry's definition.
    long volEnd = this.volumeStart + (partition.partLength * ais.sectorSize) + ais.sectorSize;
    this.totalSize = partition.partLength * ais.sectorSize;

    // The backup volume header sits 1024 bytes before the end of the volume.
    ais.Seek(volEnd - 1024, SeekOrigin.Begin);
    ais.Read(headerSector, 0, 512);

    if (headerSector[0] == 0x48 && headerSector[1] == 0x2B)
    {
        backupVolHead = getVolumeHeader(headerSector);
    }
}
public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
{
    // Hash a file's contents: slot 0 holds the data-fork hashes, slot 1 the
    // resource-fork hashes; a slot stays default when that fork is empty.
    dataOperations.hashValues[] result = new dataOperations.hashValues[2];

    GPTScheme partitions = new GPTScheme(i);
    HFSPlus volume = new HFSPlus(i, partitions.entries[file.partitionAssoc]);
    volumeStream volStream = new volumeStream(volume);
    extentsOverflowFile overflow = new extentsOverflowFile(
        new HFSPlusFile(volume.volHead.extentsFile, forkStream.forkType.data), volStream);

    if (file.dataFork.forkDataValues.logicalSize > 0)
    {
        HFSPlusFile dataFile = new HFSPlusFile(file, overflow);
        forkStream dataStream = new forkStream(volStream, dataFile, forkStream.forkType.data);
        result[0] = dataOperations.getHashValues(dataStream, (long)dataFile.dataLogicalSize);
    }

    // The resource fork is optional; && short-circuits the size check.
    if (file.resourceFork != null && file.resourceFork.forkDataValues.logicalSize > 0)
    {
        HFSPlusFile rsrcFile = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
        forkStream rsrcStream = new forkStream(volStream, rsrcFile, forkStream.forkType.resource);
        result[1] = dataOperations.getHashValues(rsrcStream, (long)rsrcFile.rsrcLogicalSize);
    }

    return result;
}
private void setFileTreeFromImage(absImageStream i)
{
    // Rebuild the cached file tree from the valid partitions in the image.
    // Only the GPT scheme is handled; other schemes leave an empty tree.
    fileTree = new TreeNode();

    if (i.scheme == absImageStream.schemeType.GPT)
    {
        GPTScheme ps = new GPTScheme(i);
        foreach (GPTScheme.entry partition in ps.getValidTable())
        {
            GPTScheme.partitionType type = ps.findPartitionType(partition);
            TreeNode partitionTN = getVolumeTree(partition, type);
            partitionTN.Text = partition.name;
            fileTree.Nodes.Add(partitionTN);
        }
    }
}
public void showForkData(HFSPlusCatalogFile entry, uint block, forkStream.forkType type)
{
    // Render one allocation block of the chosen fork as a hex dump.
    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(
        new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
    HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

    forkStream fs = (type == forkStream.forkType.data)
        ? new forkStream(vs, hfsp_file, forkStream.forkType.data)
        : new forkStream(vs, hfsp_file, forkStream.forkType.resource);

    // Start the display with the hex dump column header.
    contentDisplay = hexHeadLine + "\r\n";

    if (fs.Length > 0)
    {
        byte[] showBlock = new byte[hfsp.blockSize];
        fs.Seek(hfsp.blockSize * block, SeekOrigin.Begin);
        fs.Read(showBlock, 0, (int)hfsp.blockSize);
        rawDataDisplay(showBlock);
    }
}
public void hashAll()
{
    // Rebuild the file tree, replacing every HFS+ volume node with a copy
    // whose children carry computed hashes.
    TreeNode hashedTree = new TreeNode();
    setFileTreeFromImage(i);

    foreach (TreeNode child in fileTree.Nodes)
    {
        if (child.Tag is HFSPlus.volumeHeader)
        {
            HFSPlus.volumeHeader vh = (HFSPlus.volumeHeader)child.Tag;
            GPTScheme gpts = new GPTScheme(i);
            HFSPlus hfsp = new HFSPlus(i, gpts.entries[vh.partitionNo]);
            volumeStream vs = new volumeStream(hfsp);
            hashedTree.Nodes.Add(iterateHashChildren(child, vs));
        }
        else
        {
            // Non-HFS+ nodes are carried over unchanged.
            hashedTree.Nodes.Add(child);
        }
    }

    // NOTE(review): the tag is copied from displayTree rather than from the
    // fileTree that was just rebuilt — confirm this is intentional.
    hashedTree.Tag = displayTree.Tag;
    this.fileTree = hashedTree;
}
public void exportFile(HFSPlusCatalogFile entry, forkStream.forkType type, string path)
{
    // Write the requested fork of a catalog file to the file at 'path'.
    // Fixes vs. the original:
    //  - entry.dataFork / entry.resourceFork were dereferenced unconditionally
    //    in the guard, throwing NullReferenceException for files lacking a
    //    resource fork; null-safe checks are used instead.
    //  - the FileStream/BinaryWriter leaked if a read or write threw; they are
    //    now wrapped in using blocks.
    //  - the return value of fs.Read was ignored; a short read now terminates
    //    the loop instead of writing stale buffer bytes or looping forever.
    bool hasData = entry.dataFork != null && entry.dataFork.forkDataValues.logicalSize > 0;
    bool hasResource = entry.resourceFork != null && entry.resourceFork.forkDataValues.logicalSize > 0;

    if (!hasData && !hasResource)
    {
        return; // nothing to export
    }

    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(
        new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
    HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

    forkStream fs;
    long dataSize;
    if (type == forkStream.forkType.data)
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
        dataSize = (long)entry.dataFork.forkDataValues.logicalSize;
    }
    else
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
        dataSize = (long)entry.resourceFork.forkDataValues.logicalSize;
    }

    fs.Position = 0;

    using (FileStream writeStream = new FileStream(path, FileMode.Create))
    using (BinaryWriter bw = new BinaryWriter(writeStream))
    {
        byte[] buffer = new byte[8192];
        long bytesWritten = 0;
        while (bytesWritten < dataSize)
        {
            int want = (int)Math.Min(buffer.Length, dataSize - bytesWritten);
            int got = fs.Read(buffer, 0, want);
            if (got <= 0)
            {
                break; // source exhausted early; avoid an infinite loop
            }
            bw.Write(buffer, 0, got);
            bytesWritten += got;
        }
    }
}
public absVolume(absImageStream fileSet, GPTScheme.entry partition)
{
    // Base initialisation shared by all volume types: remember the image
    // stream and compute the volume's byte offset and length from the
    // partition entry's LBA values.
    // Fixed: removed an unused local copy of the partition entry (partEntry).
    ais = fileSet;
    blockSize = 512; // provisional; derived classes overwrite from the volume header
    volumeStart = (long)partition.partStartLBA * ais.sectorSize;
    volumeLength = (long)partition.partLength * ais.sectorSize;
}
public TreeNode getSubDirectories(TreeNode tn)
{
    // For a node tagged with an HFS+ folder record, return a node listing
    // that folder's direct children; any other node passes through untouched.
    GPTScheme gpts = new GPTScheme(i);

    if (!(tn.Tag is HFSPlusCatalogFolder))
    {
        return tn;
    }

    HFSPlusCatalogFolder folder = (HFSPlusCatalogFolder)tn.Tag;
    HFSPlus hfsp = new HFSPlus(i, gpts.getValidTable()[folder.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);

    // The extents-overflow and catalog special files drive directory walking.
    HFSPlusFile rawExtents = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);
    HFSPlusFile rawCatalog = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
    extentsOverflowFile eof = new extentsOverflowFile(rawExtents, vs);
    catalogFile cf = new catalogFile(rawCatalog, vs);

    TreeNode children = hfsp.getDirectoryChildren(folder, cf, eof);
    children.Tag = tn.Tag;
    return children;
}
public void showForkData(HFSPlusCatalogFile entry, forkStream.forkType type)
{
    // Builds a stream over the requested fork, but the display step has not
    // been implemented: this overload always ends by throwing.
    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(
        new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
    HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

    forkStream fs = (type == forkStream.forkType.data)
        ? new forkStream(vs, hfsp_file, forkStream.forkType.data)
        : new forkStream(vs, hfsp_file, forkStream.forkType.resource);

    throw new NotImplementedException();
}
private TreeNode getVolumeTree(GPTScheme.entry partition, GPTScheme.partitionType type, HFSPlusCatalogFolder folderID)
{
    // Build a directory tree for the volume rooted at folderID. Only HFS+
    // partitions are handled; others yield an empty node.
    // Fixed: the original catch block contained an unreachable
    // 'throw new OutOfMemoryException(...)' placed after 'return tn;' —
    // dead code that could never execute. The reachable behavior (return
    // the partially built, truncated tree) is kept.
    TreeNode tn = new TreeNode();
    try
    {
        if (type == GPTScheme.partitionType.HFSPlus)
        {
            HFSPlus hfsp = new HFSPlus(this.i, partition);
            tn = getHFSPTree(hfsp, folderID);
        }
    }
    catch (OutOfMemoryException)
    {
        // Too many items to fit in system memory: the list view is
        // truncated; callers can try viewing a sub directory instead.
        return tn;
    }
    return tn;
}
public void generateListViewContent(TreeNode startDirectory)
{
    // Populate the list view with rows for the selected tree node: image
    // root -> partitions; folder -> its children; volume header -> volume root.
    TreeNode partitionTN = new TreeNode();

    if (startDirectory.Tag is absImageStream.imageProperties)
    {
        if (i.scheme == absImageStream.schemeType.GPT)
        {
            GPTScheme ps = new GPTScheme(i);
            foreach (GPTScheme.entry partition in ps.getValidTable())
            {
                GPTScheme.partitionType type = ps.findPartitionType(partition);
                partitionTN = getVolumeTree(partition, type);
                partitionTN.Text = partition.name;
            }
        }
    }
    else if (startDirectory.Tag is HFSPlusCatalogFolder)
    {
        HFSPlusCatalogFolder tag = (HFSPlusCatalogFolder)startDirectory.Tag;
        if (i.scheme == absImageStream.schemeType.GPT)
        {
            // 'ps' and 'tag' are retained for the commented-out recursive
            // variant below (which would display the whole branch).
            GPTScheme ps = new GPTScheme(i);
            // Display only the direct children of the selected directory.
            partitionTN = getSubDirectories(startDirectory);
            // partitionTN = getVolumeTree(ps.getValidTable()[tag.partitionAssoc], GPTScheme.partitionType.HFSPlus, tag);
            partitionTN.Text = startDirectory.Text;
        }
    }
    else if (startDirectory.Tag is HFSPlus.volumeHeader)
    {
        HFSPlus.volumeHeader tag = (HFSPlus.volumeHeader)startDirectory.Tag;
        if (i.scheme == absImageStream.schemeType.GPT)
        {
            GPTScheme ps = new GPTScheme(i);
            partitionTN = getVolumeTree(ps.getValidTable()[tag.partitionNo], GPTScheme.partitionType.HFSPlus);
            partitionTN.Text = startDirectory.Text;
        }
    }

    if (startDirectory.Tag != null)
    {
        addRowsToList(partitionTN);
    }
}
public TreeNode getSubDirectories(TreeNode tn)
{
    // Expand a folder-tagged node into a node containing its direct children;
    // nodes with any other tag are returned unchanged.
    GPTScheme scheme = new GPTScheme(i);
    TreeNode expanded = tn;

    if (tn.Tag is HFSPlusCatalogFolder)
    {
        HFSPlusCatalogFolder folderRecord = (HFSPlusCatalogFolder)tn.Tag;
        HFSPlus volume = new HFSPlus(i, scheme.getValidTable()[folderRecord.partitionAssoc]);
        volumeStream volStream = new volumeStream(volume);

        extentsOverflowFile overflow = new extentsOverflowFile(
            new HFSPlusFile(volume.volHead.extentsFile, forkStream.forkType.data), volStream);
        catalogFile catalog = new catalogFile(
            new HFSPlusFile(volume.volHead.catalogFile, forkStream.forkType.data), volStream);

        expanded = volume.getDirectoryChildren(folderRecord, catalog, overflow);
        expanded.Tag = tn.Tag; // keep the original folder record on the new node
    }

    return expanded;
}
public void generateListViewContent(TreeNode startDirectory)
{
    // Fill the list view from the node selected in the tree. The node's Tag
    // decides the view: image properties -> partition list, folder record ->
    // direct children, volume header -> volume root contents.
    TreeNode listRoot = new TreeNode();

    if (startDirectory.Tag is absImageStream.imageProperties)
    {
        switch (i.scheme)
        {
            case absImageStream.schemeType.GPT:
                GPTScheme ps = new GPTScheme(i);
                foreach (GPTScheme.entry partition in ps.getValidTable())
                {
                    GPTScheme.partitionType type = ps.findPartitionType(partition);
                    listRoot = getVolumeTree(partition, type);
                    listRoot.Text = partition.name;
                }
                break;
            default:
                break;
        }
    }
    else if (startDirectory.Tag is HFSPlusCatalogFolder)
    {
        HFSPlusCatalogFolder tag = (HFSPlusCatalogFolder)startDirectory.Tag;
        switch (i.scheme)
        {
            case absImageStream.schemeType.GPT:
                GPTScheme ps = new GPTScheme(i);
                // Show only the direct children of the selected directory.
                listRoot = getSubDirectories(startDirectory);
                // Recursive alternative (displays the whole branch):
                // listRoot = getVolumeTree(ps.getValidTable()[tag.partitionAssoc], GPTScheme.partitionType.HFSPlus, tag);
                listRoot.Text = startDirectory.Text;
                break;
            default:
                break;
        }
    }
    else if (startDirectory.Tag is HFSPlus.volumeHeader)
    {
        HFSPlus.volumeHeader tag = (HFSPlus.volumeHeader)startDirectory.Tag;
        switch (i.scheme)
        {
            case absImageStream.schemeType.GPT:
                GPTScheme ps = new GPTScheme(i);
                listRoot = getVolumeTree(ps.getValidTable()[tag.partitionNo], GPTScheme.partitionType.HFSPlus);
                listRoot.Text = startDirectory.Text;
                break;
            default:
                break;
        }
    }

    if (startDirectory.Tag != null)
    {
        addRowsToList(listRoot);
    }
}
// Build a map of the image's layout: protective MBR, GPT headers/tables and
// every partition entry, sorted by position. For HFS+ partitions the volume's
// allocation bitmap is read in so used/free blocks can be rendered.
// NOTE(review): the single 'block' local is reused for every entry — this is
// only correct if mapBlock is a value type (struct) so each Add() stores a
// copy; confirm mapBlock's declaration.
public imageMap(absImageStream ais)
{
    if (ais.scheme == absImageStream.schemeType.GPT)
    {
        GPTScheme gpts = new GPTScheme(ais);
        mapBlock block = new mapBlock();
        block.location = 0;

        // Protective MBR occupies sector 0 when present.
        if (gpts.protectiveMBRExists)
        {
            block.length = 1;
            block.name = "MBR";
            block.type = tileType.MBR;
            partitionblocks.Add(block);
        }

        // Primary GPT header (sector 1) followed by the partition table.
        if (gpts.headerFound)
        {
            block.location = 1;
            block.length = 1;
            block.name = "GPT Header";
            block.type = tileType.GPT;
            partitionblocks.Add(block);

            block.location = gpts.tablestart;
            // Integer division; clamp so a sub-sector table still maps one tile.
            block.length = gpts.tablelength / ais.sectorSize;
            if (block.length < 1) block.length = 1;
            block.name = "GPT Primary Table";
            block.type = tileType.GPT;
            partitionblocks.Add(block);
        }

        // Backup GPT header and table near the end of the disk.
        if (gpts.backupFound)
        {
            // NOTE(review): 'mainheader' as the backup header's location looks
            // odd — verify this field really holds the backup header's LBA.
            block.location = gpts.backupHeader.mainheader;
            block.length = 1;
            block.name = "Backup GPT Header";
            block.type = tileType.GPT;
            partitionblocks.Add(block);

            block.location = gpts.backupHeader.tablestart;
            block.length = gpts.tablelength / ais.sectorSize;
            if (block.length < 1) block.length = 1;
            block.name = "GPT Backup Table";
            block.type = tileType.GPT;
            partitionblocks.Add(block);
        }

        // One map block per partition entry; HFS+ volumes also get their
        // allocation bitmap loaded from the allocation special file.
        foreach (GPTScheme.entry entry in gpts.entries)
        {
            block.location = entry.partStartLBA;
            block.length = entry.partLength;
            block.name = entry.name;
            block.type = tileType.vol_unknown;

            if (gpts.findPartitionType(entry) == GPTScheme.partitionType.HFSPlus)
            {
                HFSPlus hfsp = new HFSPlus(ais, entry);
                // How many image sectors one HFS+ allocation block spans.
                block.mapSectorsPerBlock = (int)hfsp.volHead.blockSize / ais.sectorSize;
                forkStream fs = new forkStream(new volumeStream(hfsp), new HFSPlusFile(hfsp.volHead.allocationFile, forkStream.forkType.data), forkStream.forkType.data);
                block.allocationMap = new byte[(int)fs.Length];
                fs.Read(block.allocationMap, 0, (int)fs.Length);
            }
            else
            {
                block.allocationMap = null;
            }

            partitionblocks.Add(block);
        }
    }

    partitionblocks.Sort(CompareBlocksByPosition);
}
public partitionType findPartitionType(GPTScheme.entry entry)
{
    // Identify the filesystem inside a partition by the two-byte signature
    // located 1024 bytes past the partition start: "H+" for HFS+, "HX" for
    // HFS+ inside a case-sensitive wrapper.
    byte[] sig = new byte[2];
    i.Seek(entry.partStartLBA * i.sectorSize + 1024, SeekOrigin.Begin);
    i.Read(sig, 0, 2);

    string sigText = System.Text.Encoding.UTF8.GetString(sig);
    switch (sigText)
    {
        case "H+":
            return partitionType.HFSPlus;
        case "HX":
            return partitionType.HFSPlusCS;
        default:
            return partitionType.unknown;
    }
}
public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
{
    // Return a two-element array of hashes for the file: index 0 is the data
    // fork, index 1 the resource fork; empty forks leave their slot default.
    dataOperations.hashValues[] hv = new dataOperations.hashValues[2];

    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[file.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    HFSPlusFile rawExtents = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);
    extentsOverflowFile eof = new extentsOverflowFile(rawExtents, vs);

    if (file.dataFork.forkDataValues.logicalSize > 0)
    {
        HFSPlusFile dataFork = new HFSPlusFile(file, eof);
        forkStream dataStream = new forkStream(vs, dataFork, forkStream.forkType.data);
        hv[0] = dataOperations.getHashValues(dataStream, (long)dataFork.dataLogicalSize);
    }

    if (file.resourceFork != null)
    {
        if (file.resourceFork.forkDataValues.logicalSize > 0)
        {
            HFSPlusFile rsrcFork = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
            forkStream rsrcStream = new forkStream(vs, rsrcFork, forkStream.forkType.resource);
            hv[1] = dataOperations.getHashValues(rsrcStream, (long)rsrcFork.rsrcLogicalSize);
        }
    }

    return hv;
}
private TreeNode getVolumeTree(GPTScheme.entry partition, GPTScheme.partitionType type)
{
    // Build the root directory tree of an HFS+ partition; any other
    // partition type yields an empty, untagged node.
    TreeNode tn = new TreeNode();
    if (type != GPTScheme.partitionType.HFSPlus)
    {
        return tn;
    }

    HFSPlus hfsp = new HFSPlus(i, partition);
    volumeStream hfsp_vs = new volumeStream(hfsp);

    // Wrap the three special files needed to walk the directory structure.
    HFSPlusFile rawCatalog = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
    HFSPlusFile rawAttributes = new HFSPlusFile(hfsp.volHead.attributesFile, forkStream.forkType.data);
    HFSPlusFile rawExtents = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);

    extentsOverflowFile extentsOverflow = new extentsOverflowFile(rawExtents, hfsp_vs);
    catalogFile catalog = new catalogFile(rawCatalog, hfsp_vs);
    attributesFile attributes = new attributesFile(rawAttributes, hfsp_vs);

    tn = hfsp.getRootDirectoryContents(catalog, extentsOverflow, attributes);
    tn.Tag = hfsp.volHead; // the volume header identifies this tree's volume
    return tn;
}
// Construct the image layout map (MBR, GPT structures, partitions) sorted by
// position; HFS+ partitions additionally load their allocation bitmap.
// NOTE(review): 'block' is a single local reused for every Add() — correct
// only if mapBlock is a struct (each Add stores a copy); verify.
public imageMap(absImageStream ais)
{
    if (ais.scheme == absImageStream.schemeType.GPT)
    {
        GPTScheme gpts = new GPTScheme(ais);
        mapBlock block = new mapBlock();
        block.location = 0;

        // Sector 0: protective MBR, when the image has one.
        if (gpts.protectiveMBRExists)
        {
            block.length = 1;
            block.name = "MBR";
            block.type = tileType.MBR;
            partitionblocks.Add(block);
        }

        // Primary GPT header at sector 1, then the primary partition table.
        if (gpts.headerFound)
        {
            block.location = 1;
            block.length = 1;
            block.name = "GPT Header";
            block.type = tileType.GPT;
            partitionblocks.Add(block);

            block.location = gpts.tablestart;
            // Integer division of the table's byte length; floor to one sector.
            block.length = gpts.tablelength / ais.sectorSize;
            if (block.length < 1)
            {
                block.length = 1;
            }
            block.name = "GPT Primary Table";
            block.type = tileType.GPT;
            partitionblocks.Add(block);
        }

        // Backup GPT header/table near the end of the disk.
        if (gpts.backupFound)
        {
            // NOTE(review): location taken from backupHeader.mainheader —
            // confirm this field holds the backup header's own LBA.
            block.location = gpts.backupHeader.mainheader;
            block.length = 1;
            block.name = "Backup GPT Header";
            block.type = tileType.GPT;
            partitionblocks.Add(block);

            block.location = gpts.backupHeader.tablestart;
            block.length = gpts.tablelength / ais.sectorSize;
            if (block.length < 1)
            {
                block.length = 1;
            }
            block.name = "GPT Backup Table";
            block.type = tileType.GPT;
            partitionblocks.Add(block);
        }

        // One map block per partition; HFS+ volumes get their allocation
        // bitmap read out of the allocation special file.
        foreach (GPTScheme.entry entry in gpts.entries)
        {
            block.location = entry.partStartLBA;
            block.length = entry.partLength;
            block.name = entry.name;
            block.type = tileType.vol_unknown;

            if (gpts.findPartitionType(entry) == GPTScheme.partitionType.HFSPlus)
            {
                HFSPlus hfsp = new HFSPlus(ais, entry);
                // Image sectors covered by one HFS+ allocation block.
                block.mapSectorsPerBlock = (int)hfsp.volHead.blockSize / ais.sectorSize;
                forkStream fs = new forkStream(new volumeStream(hfsp), new HFSPlusFile(hfsp.volHead.allocationFile, forkStream.forkType.data), forkStream.forkType.data);
                block.allocationMap = new byte[(int)fs.Length];
                fs.Read(block.allocationMap, 0, (int)fs.Length);
            }
            else
            {
                block.allocationMap = null;
            }

            partitionblocks.Add(block);
        }
    }

    partitionblocks.Sort(CompareBlocksByPosition);
}