// Creates the background read buffer for the given CHD and immediately
// starts the worker thread, which blocks on _waitEvent until work arrives.
public CHDThreadReadBuffer(hard_disk_info hardDisk)
{
    // Handshake events: _waitEvent hands work to the worker, _outEvent signals results back.
    _waitEvent = new AutoResetEvent(false);
    _outEvent = new AutoResetEvent(false);

    _hd = hardDisk;
    _finished = false;
    errorState = false;

    // Launch the worker last, once all shared state above is initialized.
    _tWorker = new Thread(MainLoop);
    _tWorker.Start();
}
/// <summary>
/// Runs a full local verification pass over an opened CHD image.
/// </summary>
/// <param name="progress">Callback invoked with human-readable progress text.</param>
/// <param name="hdi">Parsed CHD header plus the open file stream to verify.</param>
/// <param name="result">Receives error text; set from the _result field, which starts
/// empty and may be populated as a side effect while verification runs.</param>
/// <returns>The verification status, or HDERR_DECOMPRESSION_ERROR on any exception.</returns>
internal hdErr ChdCheck(Message progress, hard_disk_info hdi, out string result)
{
    try
    {
        _progress = progress;
        _result = ""; // cleared before the run; verification may append details
        _resultType = hard_disk_verify(hdi, progress);
        result = _result;
        return _resultType;
    }
    catch (Exception e)
    {
        // Any unexpected failure (corrupt data, I/O error) is reported as a
        // decompression error with the full exception text for diagnostics.
        result = e.ToString();
        return hdErr.HDERR_DECOMPRESSION_ERROR;
    }
}
// Reads the hunk map that immediately follows the CHD header into info.map.
// Supports the v1/v2 packed 64-bit entry format and the v3/v4 16-byte format.
// Returns HDERR_NONE on success.
private static hdErr read_sector_map(hard_disk_info info)
{
    info.map = new mapentry[info.totalblocks];
    // The map starts right after the header; info.length is the header size.
    info.file.Seek(info.length, SeekOrigin.Begin);
    using (BinaryReader br = new BinaryReader(info.file, Encoding.UTF8, true))
    {
        if (info.version <= 2)
        {
            // V1/V2: each entry is one big-endian 64-bit word:
            // low 44 bits = file offset, high 20 bits = stored length.
            for (int i = 0; i < info.totalblocks; i++)
            {
                UInt64 tmpu = br.ReadUInt64BE();
                mapentry me = new mapentry()
                {
                    offset = (tmpu << 20) >> 20, // keep low 44 bits
                    crc = 0,
                    length = (tmpu >> 44),       // top 20 bits
                    UseCount = 0
                };
                // V1/V2 entries carry no per-block CRC. A stored length equal to the
                // block size means the block was written uncompressed.
                me.flags = mapFlags.MAP_ENTRY_FLAG_NO_CRC | ((me.length == info.blocksize) ? mapFlags.MAP_ENTRY_TYPE_UNCOMPRESSED : mapFlags.MAP_ENTRY_TYPE_COMPRESSED);
                info.map[i] = me;
            }
            // V1/V2 have no explicit self-hunk entry type: detect duplicates by file
            // offset. A later entry pointing at an already-seen offset is rewritten as
            // a self-hunk reference, with 'offset' repurposed to hold the block INDEX
            // of the first occurrence. (Entries already rewritten hold small indices,
            // which cannot collide with real file offsets still being looked up,
            // because the dictionary is keyed only by original offsets.)
            Dictionary<ulong, int> selfhunkMap = new Dictionary<ulong, int>();
            for (int i = 0; i < info.totalblocks; i++)
            {
                if (selfhunkMap.TryGetValue(info.map[i].offset, out int index))
                {
                    info.map[i].offset = (ulong)index;
                    info.map[i].flags = mapFlags.MAP_ENTRY_FLAG_NO_CRC | mapFlags.MAP_ENTRY_TYPE_SELF_HUNK;
                }
                else
                    selfhunkMap.Add(info.map[i].offset, i);
            }
        }
        else
        {
            // V3/V4: 16-byte big-endian entries: offset(8) + crc(4) + length(2) + flags(2).
            for (int i = 0; i < info.totalblocks; i++)
            {
                mapentry me = new mapentry()
                {
                    offset = br.ReadUInt64BE(),
                    crc = br.ReadUInt32BE(),
                    length = br.ReadUInt16BE(),
                    flags = (mapFlags)br.ReadUInt16BE(),
                    UseCount = 0
                };
                info.map[i] = me;
            }
        }
        // Count how many self-hunk entries reference each source block, so the
        // reader knows which decompressed blocks are worth keeping cached.
        for (int i = 0; i < info.totalblocks; i++)
        {
            if ((info.map[i].flags & mapFlags.MAP_ENTRY_FLAG_TYPE_MASK) == mapFlags.MAP_ENTRY_TYPE_SELF_HUNK)
            {
                info.map[info.map[i].offset].UseCount += 1;
            }
        }
    }
    return hdErr.HDERR_NONE;
}
/// <summary>
/// Reads one CHD block into the instance 'cache' buffer, decompressing,
/// copying, synthesizing (mini) or chasing self-hunk references as the map
/// entry dictates, then verifies the block CRC where the map provides one.
/// </summary>
/// <param name="info">Parsed CHD header, open stream and hunk map.</param>
/// <param name="block">Zero-based block index to read.</param>
/// <returns>HDERR_NONE on success, or a read/decompression error code.</returns>
private hdErr read_block_into_cache(hard_disk_info info, int block)
{
    bool checkCrc = true;
    mapentry mapEntry = info.map[block];
    switch (mapEntry.flags & mapFlags.MAP_ENTRY_FLAG_TYPE_MASK)
    {
        case mapFlags.MAP_ENTRY_TYPE_COMPRESSED:
        {
            if (mapEntry.BlockCache != null)
            {
                Buffer.BlockCopy(mapEntry.BlockCache, 0, cache, 0, (int)info.blocksize);
                // Already checked CRC for this block when the cache was made.
                checkCrc = false;
                break;
            }
            info.file.Seek((long)mapEntry.offset, SeekOrigin.Begin);
            switch (info.compression)
            {
                case HDCOMPRESSION_ZLIB:
                case HDCOMPRESSION_ZLIB_PLUS:
                {
                    using (var st = new System.IO.Compression.DeflateStream(info.file, System.IO.Compression.CompressionMode.Decompress, true))
                    {
                        // BUG FIX: DeflateStream.Read may legally return fewer bytes
                        // than requested even when more data is available, so loop
                        // until the whole block has been decompressed.
                        if (!ReadExactly(st, cache, (int)info.blocksize))
                            return hdErr.HDERR_READ_ERROR;
                        if (mapEntry.UseCount > 0)
                        {
                            // Block is referenced again by self-hunk entries; keep a copy.
                            mapEntry.BlockCache = new byte[(int)info.blocksize];
                            Buffer.BlockCopy(cache, 0, mapEntry.BlockCache, 0, (int)info.blocksize);
                        }
                    }
                    break;
                }
                default:
                {
                    Console.WriteLine("Unknown compression");
                    return hdErr.HDERR_DECOMPRESSION_ERROR;
                }
            }
            break;
        }
        case mapFlags.MAP_ENTRY_TYPE_UNCOMPRESSED:
        {
            info.file.Seek((long)mapEntry.offset, SeekOrigin.Begin);
            // BUG FIX: Stream.Read may return a short count; loop until complete.
            if (!ReadExactly(info.file, cache, (int)info.blocksize))
                return hdErr.HDERR_READ_ERROR;
            break;
        }
        case mapFlags.MAP_ENTRY_TYPE_MINI:
        {
            // Mini blocks store an 8-byte pattern in the offset field (big-endian);
            // the pattern is repeated to fill the whole block.
            byte[] tmp = BitConverter.GetBytes(info.map[block].offset);
            for (int i = 0; i < 8; i++)
            {
                cache[i] = tmp[7 - i];
            }
            for (int i = 8; i < info.blocksize; i++)
            {
                cache[i] = cache[i - 8];
            }
            break;
        }
        case mapFlags.MAP_ENTRY_TYPE_SELF_HUNK:
        {
            // 'offset' holds the index of the source block; recurse to load it.
            hdErr ret = read_block_into_cache(info, (int)mapEntry.offset);
            if (ret != hdErr.HDERR_NONE)
                return ret;
            // CRC was checked inside the recursive call.
            checkCrc = false;
            break;
        }
        default:
            return hdErr.HDERR_DECOMPRESSION_ERROR;
    }
    if (checkCrc && (mapEntry.flags & mapFlags.MAP_ENTRY_FLAG_NO_CRC) == 0)
    {
        if (!CRC.VerifyDigest(mapEntry.crc, cache, 0, info.blocksize))
            return hdErr.HDERR_DECOMPRESSION_ERROR;
    }
    return hdErr.HDERR_NONE;
}

// Fills buffer[0..count) from the stream, tolerating short reads.
// Returns false if the stream ends before 'count' bytes are delivered.
private static bool ReadExactly(Stream stream, byte[] buffer, int count)
{
    int total = 0;
    while (total < count)
    {
        int read = stream.Read(buffer, total, count - total);
        if (read <= 0)
            return false;
        total += read;
    }
    return true;
}
/// <summary>
/// Verifies a CHD image (v1-v4, zlib compression only) by decompressing every
/// block and comparing the whole-image MD5/SHA1 against the header values.
/// </summary>
/// <param name="hardDisk">Parsed header plus open stream for the image.</param>
/// <param name="progress">Optional progress-message callback.</param>
/// <returns>HDERR_NONE if all hashes match; otherwise an error code.</returns>
public hdErr hard_disk_verify(hard_disk_info hardDisk, Message progress)
{
    /* if this is a writeable disk image, we can't verify */
    if ((hardDisk.flags & HDFLAGS_IS_WRITEABLE) != 0)
        return hdErr.HDERR_CANT_VERIFY;

    // This in-process verifier only understands v1-v4 headers with zlib.
    if (hardDisk.version >= 5 || hardDisk.compression > 2)
        return hdErr.HDERR_UNSUPPORTED;

    hdErr err = read_sector_map(hardDisk);
    if (err != hdErr.HDERR_NONE)
        return hdErr.HDERR_INVALID_FILE;

    // Only hash with the algorithms the header actually carries.
    // (FIX: removed redundant self-assignment in the ternary initializers.)
    MD5 md5 = (hardDisk.md5 != null) ? MD5.Create() : null;
    SHA1 sha1 = (hardDisk.sha1 != null) ? SHA1.Create() : null;
    try
    {
        /* loop over source blocks until we run out */
        ulong sizetoGo = hardDisk.totalbytes;
        cache = new byte[hardDisk.blocksize];
        int block = 0;
        while (sizetoGo > 0)
        {
            /* progress */
            if ((block % 1000) == 0)
                progress?.Invoke($"Verifying, {(100 - sizetoGo * 100 / hardDisk.totalbytes):N1}% complete...\r");

            /* read the block into the cache */
            err = read_block_into_cache(hardDisk, block);
            if (err != hdErr.HDERR_NONE)
                return err;

            // The final block may be only partially used by the image.
            int sizenext = sizetoGo > (ulong)hardDisk.blocksize ? (int)hardDisk.blocksize : (int)sizetoGo;
            md5?.TransformBlock(cache, 0, sizenext, null, 0);
            sha1?.TransformBlock(cache, 0, sizenext, null, 0);

            /* prepare for the next block */
            block++;
            sizetoGo -= (ulong)sizenext;
        }

        /* compute the final hashes */
        byte[] tmp = new byte[0];
        md5?.TransformFinalBlock(tmp, 0, 0);
        sha1?.TransformFinalBlock(tmp, 0, 0);

        if (hardDisk.md5 != null && !ByteArrCompare(hardDisk.md5, md5.Hash))
            return hdErr.HDERR_DECOMPRESSION_ERROR;

        if (hardDisk.sha1 != null)
        {
            // v4 headers store the raw-data SHA1 separately from the combined SHA1;
            // the data-only hash we computed must be compared against rawsha1.
            byte[] expectedSha1 = (hardDisk.version == 4) ? hardDisk.rawsha1 : hardDisk.sha1;
            if (!ByteArrCompare(expectedSha1, sha1.Hash))
                return hdErr.HDERR_DECOMPRESSION_ERROR;
        }
        return hdErr.HDERR_NONE;
    }
    finally
    {
        // FIX: MD5/SHA1 are IDisposable and were previously never released.
        md5?.Dispose();
        sha1?.Dispose();
    }
}
/// <summary>
/// Validates a CHD file: reads its header, extracts version/MD5/SHA1, and
/// optionally performs a deep (full data) verification, either in-process
/// (v1-v3 zlib) or via the external chdman tool.
/// </summary>
/// <param name="file">File name relative to <paramref name="directory"/>.</param>
/// <param name="directory">Directory containing the file.</param>
/// <param name="isLinux">Passed through to the chdman launcher.</param>
/// <param name="deepCheck">In: whether to verify data. Out: cleared if chdman is missing.</param>
/// <param name="chdVersion">Receives the CHD header version, or null on failure.</param>
/// <param name="chdSHA1">Receives the header SHA1, or null.</param>
/// <param name="chdMD5">Receives the header MD5, or null.</param>
/// <param name="fileErrorAbort">Set true on file-system errors that should abort the scan.</param>
/// <returns>The resulting status code.</returns>
public static hdErr CheckFile(string file, string directory, bool isLinux, ref bool deepCheck, out uint? chdVersion, out byte[] chdSHA1, out byte[] chdMD5, ref bool fileErrorAbort)
{
    chdSHA1 = null;
    chdMD5 = null;
    chdVersion = null;

    string filename = Path.Combine(directory, file);
    fileProcess?.Invoke(filename);

    if (!File.Exists(filename))
    {
        fileSystemError?.Invoke("File: " + filename + " Error: File Could not be opened.");
        fileErrorAbort = true;
        return hdErr.HDERR_CANNOT_OPEN_FILE;
    }

    Stream s;
    int retval = FileStream.OpenFileRead(filename, out s);
    if (retval != 0 || s == null)
    {
        fileSystemError?.Invoke("File: " + filename + " Error: File Could not be opened.");
        fileErrorAbort = true;
        return hdErr.HDERR_CANNOT_OPEN_FILE;
    }

    if (s.Length < MaxHeader)
    {
        s.Close();
        s.Dispose();
        return hdErr.HDERR_INVALID_FILE;
    }

    hard_disk_info hdi = new hard_disk_info();
    hdErr res = ReadCHDHeader(s, ref hdi);
    if (res != hdErr.HDERR_NONE)
    {
        // BUG FIX: the stream was previously leaked on a bad header.
        s.Close();
        s.Dispose();
        return res;
    }

    chdVersion = hdi.version;
    chdMD5 = hdi.md5;
    chdSHA1 = hdi.sha1;

    if (!deepCheck)
    {
        s.Close();
        s.Dispose();
        return res;
    }

    string error = null;
    if (hdi.version < 4 && hdi.compression < 3)
    {
        // Old format + zlib: verify in-process while our stream is still open.
        hdi.file = s;
        CHDLocalCheck clc = new CHDLocalCheck();
        res = clc.ChdCheck(fileProgress, hdi, out error);
        s.Close();
        s.Dispose();
    }
    else
    {
        // Newer formats: release our handle first so chdman can open the file.
        s.Close();
        s.Dispose();
        CHDManCheck cmc = new CHDManCheck();
        res = cmc.ChdCheck(fileProgress, isLinux, filename, out error);
    }

    switch (res)
    {
        case hdErr.HDERR_NONE:
            break;
        case hdErr.HDERR_CHDMAN_NOT_FOUND:
            // chdman not installed: disable deep checking for the rest of the scan
            // and treat this file as OK at the header level.
            deepCheck = false;
            res = hdErr.HDERR_NONE;
            break;
        case hdErr.HDERR_DECOMPRESSION_ERROR:
            fileError?.Invoke(filename, error);
            break;
        case hdErr.HDERR_FILE_NOT_FOUND:
            fileSystemError?.Invoke("File: " + filename + " Error: Not Found scan Aborted.");
            fileErrorAbort = true;
            break;
        default:
            generalError?.Invoke(res + " " + error);
            break;
    }
    return res;
}
/// <summary>
/// Parses a CHD header (versions 1-5) from the start of the stream into
/// <paramref name="hardDisk"/>. All multi-byte fields are big-endian.
/// </summary>
/// <param name="file">Stream positioned at the start of the CHD file.</param>
/// <param name="hardDisk">Receives the parsed header fields.</param>
/// <returns>HDERR_NONE, or an error code for bad magic/length/version.</returns>
private static hdErr ReadCHDHeader(Stream file, ref hard_disk_info hardDisk)
{
    // Verify the magic tag byte-by-byte. ReadByte returns -1 at EOF, which can
    // never match a tag byte, so a truncated file also reports an invalid file.
    for (int i = 0; i < id.Length; i++)
    {
        int b = file.ReadByte();
        if (b != id[i])
            return hdErr.HDERR_INVALID_FILE;
    }

    using (BinaryReader br = new BinaryReader(file, Encoding.UTF8, true))
    {
        hardDisk.length = br.ReadUInt32BE();
        hardDisk.version = br.ReadUInt32BE();

        // BUG FIX: guard the table lookup — a corrupt or future version value
        // previously threw IndexOutOfRangeException instead of returning a code.
        if (hardDisk.version >= HeaderLengths.Length)
            return hdErr.HDERR_UNSUPPORTED;
        if (HeaderLengths[hardDisk.version] != hardDisk.length)
            return hdErr.HDERR_INVALID_DATA;

        switch (hardDisk.version)
        {
            case 1:
            {
                hardDisk.flags = br.ReadUInt32BE();
                hardDisk.compression = br.ReadUInt32BE();
                UInt32 blocksize = br.ReadUInt32BE();     // hunk size, in sectors
                hardDisk.totalblocks = br.ReadUInt32BE(); // total number of CHD blocks
                UInt32 cylinders = br.ReadUInt32BE();
                UInt32 heads = br.ReadUInt32BE();
                UInt32 sectors = br.ReadUInt32BE();
                hardDisk.md5 = br.ReadBytes(16);
                hardDisk.parentmd5 = br.ReadBytes(16);
                const int HARD_DISK_SECTOR_SIZE = 512;
                // BUG FIX: compute in 64 bits — the uint product overflows for
                // geometries describing 4 GB or more of data.
                hardDisk.totalbytes = (ulong)cylinders * heads * sectors * HARD_DISK_SECTOR_SIZE;
                hardDisk.blocksize = blocksize * HARD_DISK_SECTOR_SIZE;
                break;
            }
            case 2:
            {
                hardDisk.flags = br.ReadUInt32BE();
                hardDisk.compression = br.ReadUInt32BE();
                UInt32 blocksize = br.ReadUInt32BE();
                hardDisk.totalblocks = br.ReadUInt32BE(); // total number of CHD blocks
                UInt32 cylinders = br.ReadUInt32BE();
                UInt32 heads = br.ReadUInt32BE();
                UInt32 sectors = br.ReadUInt32BE();
                hardDisk.md5 = br.ReadBytes(16);
                hardDisk.parentmd5 = br.ReadBytes(16);
                hardDisk.blocksize = br.ReadUInt32BE();   // v2 stores block size in bytes
                const int HARD_DISK_SECTOR_SIZE = 512;
                // BUG FIX: 64-bit multiply, as in the v1 case above.
                hardDisk.totalbytes = (ulong)cylinders * heads * sectors * HARD_DISK_SECTOR_SIZE;
                break;
            }
            case 3:
                hardDisk.flags = br.ReadUInt32BE();
                hardDisk.compression = br.ReadUInt32BE();
                hardDisk.totalblocks = br.ReadUInt32BE(); // total number of CHD blocks
                hardDisk.totalbytes = br.ReadUInt64BE();  // total byte size of the image
                hardDisk.metaoffset = br.ReadUInt64BE();
                hardDisk.md5 = br.ReadBytes(16);
                hardDisk.parentmd5 = br.ReadBytes(16);
                hardDisk.blocksize = br.ReadUInt32BE();   // length of a CHD block
                hardDisk.sha1 = br.ReadBytes(20);
                hardDisk.parentsha1 = br.ReadBytes(20);
                break;
            case 4:
                hardDisk.flags = br.ReadUInt32BE();
                hardDisk.compression = br.ReadUInt32BE();
                hardDisk.totalblocks = br.ReadUInt32BE(); // total number of CHD blocks
                hardDisk.totalbytes = br.ReadUInt64BE();  // total byte size of the image
                hardDisk.metaoffset = br.ReadUInt64BE();
                hardDisk.blocksize = br.ReadUInt32BE();   // length of a CHD block
                hardDisk.sha1 = br.ReadBytes(20);
                hardDisk.parentsha1 = br.ReadBytes(20);
                hardDisk.rawsha1 = br.ReadBytes(20);      // SHA1 of the raw data only
                break;
            case 5:
                // V5 header: up to four compression codecs instead of one.
                hardDisk.compressions = new UInt32[4];
                for (int i = 0; i < 4; i++)
                {
                    hardDisk.compressions[i] = br.ReadUInt32BE();
                }
                hardDisk.totalbytes = br.ReadUInt64BE();  // total byte size of the image
                hardDisk.mapoffset = br.ReadUInt64BE();   // offset to the map
                hardDisk.metaoffset = br.ReadUInt64BE();
                hardDisk.blocksize = br.ReadUInt32BE();   // length of a CHD block
                hardDisk.unitbytes = br.ReadUInt32BE();   // bytes per unit within each hunk
                hardDisk.rawsha1 = br.ReadBytes(20);
                hardDisk.sha1 = br.ReadBytes(20);
                hardDisk.parentsha1 = br.ReadBytes(20);
                return hdErr.HDERR_NONE;
            default:
                return hdErr.HDERR_UNSUPPORTED;
        }
        return hdErr.HDERR_NONE;
    }
}