public dataOperations.hashValues[] hashFile(HFSPlusCatalogFile file)
{
    // take a file, return hashes for its data fork and resource fork
    dataOperations.hashValues[] hv = new dataOperations.hashValues[2];

    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[file.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);

    if (file.dataFork.forkDataValues.logicalSize > 0)
    {
        HFSPlusFile hfspfile = new HFSPlusFile(file, eof);
        forkStream fs = new forkStream(vs, hfspfile, forkStream.forkType.data);
        hv[0] = dataOperations.getHashValues(fs, (long)hfspfile.dataLogicalSize);
    }

    if (file.resourceFork != null && file.resourceFork.forkDataValues.logicalSize > 0)
    {
        // note the asymmetry: the resource fork is built from the catalog record
        // alone, without consulting the extents overflow file
        HFSPlusFile hfspfile = new HFSPlusFile(file.resourceFork, forkStream.forkType.resource);
        forkStream fs = new forkStream(vs, hfspfile, forkStream.forkType.resource);
        hv[1] = dataOperations.getHashValues(fs, (long)hfspfile.rsrcLogicalSize);
    }

    return hv;
}
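// Usage sketch (assumed, not from the original code): index 0 of the returned
// array holds the data-fork hashes and index 1 the resource-fork hashes; an
// entry is left at its default when that fork is absent or empty, so callers
// should check before use. reportHashes is a hypothetical caller:
//
//     dataOperations.hashValues[] hv = hashFile(selectedFile);
//     reportHashes(hv[0]);   // data fork
//     reportHashes(hv[1]);   // resource fork, may be empty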
public void hashAll()
{
    TreeNode replaceFileTree = new TreeNode();
    setFileTreeFromImage(i);

    foreach (TreeNode child in fileTree.Nodes)
    {
        if (child.Tag is HFSPlus.volumeHeader)
        {
            HFSPlus.volumeHeader vh = (HFSPlus.volumeHeader)child.Tag;
            GPTScheme gpts = new GPTScheme(i);
            HFSPlus hfsp = new HFSPlus(i, gpts.entries[vh.partitionNo]);
            volumeStream vs = new volumeStream(hfsp);

            replaceFileTree.Nodes.Add(iterateHashChildren(child, vs));
        }
        else
        {
            replaceFileTree.Nodes.Add(child);
        }
    }

    replaceFileTree.Tag = displayTree.Tag;
    this.fileTree = replaceFileTree;
}
public void showForkData(HFSPlusCatalogFile entry, uint block, forkStream.forkType type)
{
    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
    HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

    forkStream fs;
    if (type == forkStream.forkType.data)
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
    }
    else
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
    }

    contentDisplay = hexHeadLine + "\r\n";

    if (fs.Length > 0)
    {
        byte[] showBlock = new byte[hfsp.blockSize];

        // cast before multiplying so a large block number cannot overflow uint arithmetic
        fs.Seek((long)hfsp.blockSize * block, SeekOrigin.Begin);
        fs.Read(showBlock, 0, (int)hfsp.blockSize);

        rawDataDisplay(showBlock);
    }
}
private TreeNode iterateHashChildren(TreeNode parent, volumeStream vs)
{
    TreeNode replaceParent = new TreeNode();
    replaceParent.Tag = parent.Tag;

    foreach (TreeNode child in parent.Nodes)
    {
        TreeNode replaceChild = new TreeNode();

        if (child.Tag is HFSPlusCatalogFolder)
        {
            replaceChild = iterateHashChildren(child, vs);
            replaceChild.Tag = child.Tag;
        }
        else if (child.Tag is HFSPlusCatalogFile)
        {
            HFSPlusCatalogFile tag = (HFSPlusCatalogFile)child.Tag;
            dataOperations.hashValues hashes = new dataOperations.hashValues();

            if (tag.dataFork != null && tag.dataFork.forkDataValues.logicalSize > 0)
            {
                HFSPlusFile theFileData = new HFSPlusFile(tag.dataFork, forkStream.forkType.data);
                forkStream fs = new forkStream(vs, theFileData, forkStream.forkType.data);
                dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileData.dataLogicalSize);
                hashes.md5hash = hv.md5hash;
            }

            if (tag.resourceFork != null && tag.resourceFork.forkDataValues.logicalSize > 0)
            {
                // this branch previously hashed tag.dataFork again via forkType.data,
                // a copy-paste bug; it must read the resource fork
                HFSPlusFile theFileResource = new HFSPlusFile(tag.resourceFork, forkStream.forkType.resource);
                forkStream fs = new forkStream(vs, theFileResource, forkStream.forkType.resource);
                dataOperations.hashValues hv = dataOperations.getHashValues(fs, (long)theFileResource.rsrcLogicalSize);
                hashes.sha1hash = hv.sha1hash;
            }

            tag.hashes = hashes;
            replaceChild.Tag = tag;
        }
        else
        {
            replaceChild.Tag = child.Tag;
        }

        replaceChild.Text = child.Text;
        replaceParent.Nodes.Add(replaceChild);
    }

    replaceParent.Text = parent.Text;
    return replaceParent;
}
public absHFSPlusBTree(HFSPlusFile knownExtents, volumeStream hfsp)
{
    extents = knownExtents;

    // read the first allocation block to ensure the header node is captured
    byte[] firstBlock = new byte[hfsp.volume.blockSize];
    this.fs = new forkStream(hfsp, knownExtents, forkStream.forkType.data);
    fs.Read(firstBlock, 0, firstBlock.Length);

    // nodeSize sits 18 bytes into the header record, which follows the
    // 14-byte node descriptor, i.e. at offset 32 from the start of the node
    this.nodeSize = dataOperations.convToLE(BitConverter.ToUInt16(firstBlock, 32));

    byte[] headerData = getNodeData(0, nodeSize);
    header = new headerNode(ref headerData);

    // check whether all of the data extents are known
    long treeSize = (long)header.headerInfo.totalNodes * header.headerInfo.nodeSize;
    if (fs.Length >= treeSize && fs.Length > 0)
    {
        isRawDataComplete = true;
        buildMap(fs);
    }
}
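// The offset 32 above follows from the on-disk layout of an HFS+ B-tree header
// node (Apple TN1150): a 14-byte BTNodeDescriptor, then the BTHeaderRec, whose
// nodeSize field comes after treeDepth (2), rootNode (4), leafRecords (4),
// firstLeafNode (4) and lastLeafNode (4). A sketch with illustrative constant
// names (not part of the original code):
private const int nodeDescriptorLength = 14;      // BTNodeDescriptor
private const int nodeSizeOffsetInHeaderRec = 18; // 2 + 4 + 4 + 4 + 4
private const int nodeSizeOffsetInNode =
    nodeDescriptorLength + nodeSizeOffsetInHeaderRec; // == 32, as read above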
public TreeNode getFullDirectoryList()
{
    TreeNode result = new TreeNode();

    HFSPlusFile rawExtentsOverflow = new HFSPlusFile(volHead.extentsFile, forkStream.forkType.data);
    HFSPlusFile rawCatalog = new HFSPlusFile(volHead.catalogFile, forkStream.forkType.data);
    HFSPlusFile rawAttributesFile = new HFSPlusFile(volHead.attributesFile, forkStream.forkType.data);
    volumeStream hfsp_vs = new volumeStream(this);

    catalogFile cf = new catalogFile(rawCatalog, hfsp_vs);
    extentsOverflowFile eof = new extentsOverflowFile(rawExtentsOverflow, hfsp_vs);
    attributesFile af = new attributesFile(rawAttributesFile, hfsp_vs);

    addMetaFilesToTree(ref result);
    result = buildDirectoryTree(result, cf, eof, af);

    // the tree includes the five special metadata files added above, which the
    // volume header's fileCount does not count
    if (filecount == volHead.fileCount + 5)
    {
        volumeHeader vh = this.volHead;
        vh.fileCountVerified = true;
        this.volHead = vh;
    }

    if (foldercount == volHead.folderCount)
    {
        volumeHeader vh = this.volHead;
        vh.folderCountVerified = true;
        this.volHead = vh;
    }

    return result;
}
public void exportFile(HFSPlusCatalogFile entry, forkStream.forkType type, string path)
{
    if (entry.dataFork.forkDataValues.logicalSize > 0 || entry.resourceFork.forkDataValues.logicalSize > 0)
    {
        GPTScheme gpts = new GPTScheme(i);
        HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
        volumeStream vs = new volumeStream(hfsp);
        extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
        HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

        forkStream fs;
        long dataSize;

        if (type == forkStream.forkType.data)
        {
            fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
            dataSize = (long)entry.dataFork.forkDataValues.logicalSize;
        }
        else
        {
            fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
            dataSize = (long)entry.resourceFork.forkDataValues.logicalSize;
        }

        fs.Position = 0;

        // using blocks close the output even if a read or write throws;
        // a single reusable buffer replaces the per-iteration allocations
        using (FileStream writeStream = new FileStream(path, FileMode.Create))
        using (BinaryWriter bw = new BinaryWriter(writeStream))
        {
            long bytesWritten = 0;
            byte[] buffer = new byte[8192];

            while (bytesWritten < dataSize)
            {
                int chunk = (int)Math.Min(buffer.Length, dataSize - bytesWritten);
                fs.Read(buffer, 0, chunk);
                bw.Write(buffer, 0, chunk);
                bytesWritten += chunk;
            }
        }
    }
}
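// Stream.Read may legally return fewer bytes than requested, which the loop
// above does not check. A defensive variant (a sketch; copyBytes is not part
// of the original code) would honor the return value:
private static void copyBytes(Stream source, Stream destination, long count)
{
    byte[] buffer = new byte[8192];
    while (count > 0)
    {
        int wanted = (int)Math.Min(buffer.Length, count);
        int got = source.Read(buffer, 0, wanted);
        if (got <= 0)
        {
            break; // fork data ended early; stop rather than loop forever
        }
        destination.Write(buffer, 0, got);
        count -= got;
    }
}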
public forkStream(volumeStream inner, HFSPlusFile theFork, forkType type)
{
    this.inner = inner;

    switch (type)
    {
        case forkType.data:
            SetLength((long)theFork.dataLogicalSize);
            this.fork = theFork.fileContent.dataExtents;
            break;
        case forkType.resource:
            SetLength((long)theFork.rsrcLogicalSize);
            this.fork = theFork.fileContent.resourceExtents;
            break;
    }
}
public TreeNode getSubDirectories(TreeNode tn)
{
    TreeNode result = tn;
    GPTScheme gpts = new GPTScheme(i);

    if (tn.Tag is HFSPlusCatalogFolder)
    {
        HFSPlusCatalogFolder folder = (HFSPlusCatalogFolder)tn.Tag;
        HFSPlus hfsp = new HFSPlus(i, gpts.getValidTable()[folder.partitionAssoc]);
        volumeStream vs = new volumeStream(hfsp);
        extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
        catalogFile cf = new catalogFile(new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data), vs);

        result = hfsp.getDirectoryChildren(folder, cf, eof);
        result.Tag = tn.Tag;
    }

    return result;
}
public void showForkData(HFSPlusCatalogFile entry, forkStream.forkType type)
{
    GPTScheme gpts = new GPTScheme(i);
    HFSPlus hfsp = new HFSPlus(i, gpts.entries[entry.partitionAssoc]);
    volumeStream vs = new volumeStream(hfsp);
    extentsOverflowFile eof = new extentsOverflowFile(new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data), vs);
    HFSPlusFile hfsp_file = new HFSPlusFile(entry, eof);

    forkStream fs;
    if (type == forkStream.forkType.data)
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.data);
    }
    else
    {
        fs = new forkStream(vs, hfsp_file, forkStream.forkType.resource);
    }

    // the fork stream is set up, but displaying the whole fork is still a stub;
    // only the block-based overload above is functional
    throw new NotImplementedException();
}
private TreeNode getHFSPTree(HFSPlus hfsp, HFSPlusCatalogFolder folderID)
{
    TreeNode tn = new TreeNode();
    volumeStream hfsp_vs = new volumeStream(hfsp);

    HFSPlusFile rawCatalog = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
    HFSPlusFile rawAttributes = new HFSPlusFile(hfsp.volHead.attributesFile, forkStream.forkType.data);
    HFSPlusFile rawExtentsOverflow = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);

    // need to get all attributes files
    HFSPlusCatalogFolder folderRecord = folderID;

    catalogFile catalog = new catalogFile(rawCatalog, hfsp_vs);
    attributesFile attributes = new attributesFile(rawAttributes, hfsp_vs);
    extentsOverflowFile eof = new extentsOverflowFile(rawExtentsOverflow, hfsp_vs);

    displayTree = hfsp.getFullDirectoryList(folderRecord, catalog, eof, attributes);
    tn = displayTree;

    return tn;
}
private TreeNode getVolumeTree(GPTScheme.entry partition, GPTScheme.partitionType type)
{
    TreeNode tn = new TreeNode();

    if (type == GPTScheme.partitionType.HFSPlus)
    {
        HFSPlus hfsp = new HFSPlus(i, partition);
        volumeStream hfsp_vs = new volumeStream(hfsp);

        HFSPlusFile rawCatalog = new HFSPlusFile(hfsp.volHead.catalogFile, forkStream.forkType.data);
        HFSPlusFile rawAttributes = new HFSPlusFile(hfsp.volHead.attributesFile, forkStream.forkType.data);
        HFSPlusFile rawExtents = new HFSPlusFile(hfsp.volHead.extentsFile, forkStream.forkType.data);

        extentsOverflowFile extentsOverflow = new extentsOverflowFile(rawExtents, hfsp_vs);
        catalogFile catalog = new catalogFile(rawCatalog, hfsp_vs);
        attributesFile attributes = new attributesFile(rawAttributes, hfsp_vs);

        tn = hfsp.getRootDirectoryContents(catalog, extentsOverflow, attributes);
        tn.Tag = hfsp.volHead;
    }

    return tn;
}
public extentsOverflowFile(HFSPlusFile knownExtents, volumeStream vs) : base(knownExtents, vs) { }
public attributesFile(HFSPlusFile knownExtents, volumeStream vs) : base(knownExtents, vs) { }
public catalogFile(HFSPlusFile knownExtents, volumeStream vs) : base(knownExtents, vs) { }