private static unsafe PatchInfoHeader ReadPatchInfoHeader(MpqArchive archive, long offset)
{
    // Always get a buffer big enough, even if the extra bytes are not present…
    // As of now (09/2011), the header should always be 28 bytes long, but this may change in the future…
    var sharedBuffer = CommonMethods.GetSharedBuffer(sizeof(PatchInfoHeader));
    // No buffer should ever be smaller than 28 bytes… right?
    if (archive.ReadArchiveData(sharedBuffer, 0, offset, 28) != 28) // It would be weird if we could not read the whole 28 bytes… (At worst, we should have read trash data.)
    {
        throw new EndOfStreamException(ErrorMessages.GetString("PatchInfoHeaderEndOfStream"));
    }

    var patchInfoHeader = new PatchInfoHeader();

    // Decode the little-endian fields manually, to stay endianness-independent.
    patchInfoHeader.HeaderLength = (uint)sharedBuffer[0] | (uint)sharedBuffer[1] << 8 | (uint)sharedBuffer[2] << 16 | (uint)sharedBuffer[3] << 24;
    patchInfoHeader.Flags = (uint)sharedBuffer[4] | (uint)sharedBuffer[5] << 8 | (uint)sharedBuffer[6] << 16 | (uint)sharedBuffer[7] << 24;
    patchInfoHeader.PatchLength = (uint)sharedBuffer[8] | (uint)sharedBuffer[9] << 8 | (uint)sharedBuffer[10] << 16 | (uint)sharedBuffer[11] << 24;

    // Let's assume the MD5 is not mandatory…
    if (patchInfoHeader.HeaderLength >= 28)
    {
        for (int i = 0; i < 16; i++)
        {
            patchInfoHeader.PatchMD5[i] = sharedBuffer[12 + i];
        }
    }

    return patchInfoHeader;
}
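// For reference, a minimal sketch of the 28-byte patch information header decoded above.
// The field names match their usage in ReadPatchInfoHeader, but the real struct is defined
// elsewhere in the codebase, so treat this as an illustrative layout, not the actual type.
// Requires: using System.Runtime.InteropServices;
[StructLayout(LayoutKind.Sequential, Pack = 1)]
internal unsafe struct PatchInfoHeaderSketch // hypothetical name
{
    public uint HeaderLength;       // Offset 0: length of this header (28 as of 09/2011)
    public uint Flags;              // Offset 4: patch flags
    public uint PatchLength;        // Offset 8: real length of the patch data
    public fixed byte PatchMD5[16]; // Offset 12: optional MD5 of the patch data
}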
/// <summary>Opens the file for reading.</summary>
/// <returns>A <see cref="MpqFileStream"/> object which can be used to read data from the file.</returns>
/// <remarks>Files can only be opened once, so don't forget to close the stream after you've used it.</remarks>
public MpqFileStream Open()
{
    if (IsDeleted)
    {
        throw new InvalidOperationException(ErrorMessages.GetString("MpqFileDeleted"));
    }
    return new MpqFileStream(this);
}
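// A short usage sketch for Open(), assuming an already-opened MpqArchive that exposes a
// FindFile lookup (FindFile is used internally in OpenInternal below); the null-on-miss
// behavior and the file name are assumptions for illustration only.
static byte[] ReadWholeFile(MpqArchive archive, string name)
{
    var file = archive.FindFile(name); // hypothetical public lookup; assumed to return null when not found
    if (file == null) return null;
    using (MpqFileStream stream = file.Open()) // close the stream once done, as the remarks above advise
    {
        var buffer = new byte[stream.Length];
        stream.Read(buffer, 0, buffer.Length);
        return buffer;
    }
}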
private MpqCompressionNotSupportedException(string errorMessageName, byte methodId, string methodName)
    : base(string.Format(ErrorMessages.GetString(errorMessageName), methodId))
{
    CompressionMethodId = methodId;
    CompressionMethodName = methodName;
}
internal MpqCompressionNotSupportedException(byte methodId, string methodName)
    : base(string.Format(ErrorMessages.GetString("CompressionNotSupported_Name"), methodName))
{
    // Unlike the private constructor above, this one formats the message with the method's
    // name, but it should still expose both the method ID and name to callers.
    CompressionMethodId = methodId;
    CompressionMethodName = methodName;
}
internal MpqFileStream(MpqFile file, Stream baseStream = null)
{
    try
    {
        PatchInfoHeader? patchInfoHeader; // Used to differentiate between regular files and patch files. Also contains the patch header :p

        // Store bits of information as local variables, in order to adjust them later.
        bool singleUnit = (file.Flags & MpqFileFlags.SingleBlock) != 0;
        bool compressed = file.IsCompressed;
        uint compressedSize = (uint)file.CompressedSize;

        this.file = file;
        this.offset = file.Offset;

        // Process the patch information header first
        if (file.IsPatch)
        {
            // Resolving the base file this early may be a waste if the patch ever happens to be a COPY patch… Anyway, it allows for checking the base file's integrity.
            // But seriously, what's the point of COPY patches anyway? Aren't those just like regular MPQ files, only with added (useless) weight?
            if ((baseStream = baseStream ?? file.Archive.ResolveBaseFileInternal(file)) == null)
            {
                throw new FileNotFoundException(string.Format(ErrorMessages.GetString("PatchBaseFileNotFound"), file.Name));
            }

            patchInfoHeader = ReadPatchInfoHeader(file.Archive, file.Offset);

            offset += patchInfoHeader.Value.HeaderLength;
            length = patchInfoHeader.Value.PatchLength; // No matter what crap may be written in the block table, it seems that this field is always right. (I had to update the decompression method just for that…)

            if (patchInfoHeader.Value.PatchLength <= file.CompressedSize)
            {
                // As it seems, there are some bogus entries in the block table of MPQ patch archives. (Only for patch files, though.)
                // If you browse the list of DBC files, I'd say about 10% of them have a bogus block table entry.
                // So, for detecting them, we'll use the same method as in StormLib: try to read the patch header to know whether the patch is compressed or not.
                // By the way, we cannot detect whether the patch is compressed or not if it is encrypted.
                if (file.IsEncrypted)
                {
                    throw new InvalidDataException(ErrorMessages.GetString("PatchInfoHeaderInvalidData"));
                }

                // Try to read the patch header in the data following the information header and adjust the compressed size depending on the result:
                // Since we are “sure” of the uncompressed size (given in the patch header), there is no point in compression if the compressed data isn't even one byte less.
                // Thus, we can mostly safely decrease the compressed size by 1, which, by the way, is necessary to make decompression work in UpdateBuffer()…
                compressedSize = patchInfoHeader.Value.PatchLength - ((compressed = !TestPatchHeader(file.Archive, offset)) ? (uint)1 : 0);

                // It appears that the single unit flag is also lying on some patch entries. Files reported as blocky (such as some of the Cataclysm MP3) are in fact single unit…
                // Forcing the single unit flag to true when the file is compressed seems to be a good solution. Also, we may (or may not :p) save a bit of memory by using blocks for uncompressed files.
                singleUnit = compressed;
            }
        }
        else
        {
            patchInfoHeader = null;
            length = checked((uint)file.Size);
        }

        // Set up the stream the same way for both patches and regular files…
        if (file.IsEncrypted)
        {
            if (file.Seed == 0)
            {
                throw new SeedNotFoundException(file.BlockIndex);
            }
            else
            {
                this.seed = file.Seed;
            }

            if ((file.Flags & MpqFileFlags.PositionEncrypted) != 0)
            {
                this.seed = (this.seed + (uint)file.Offset) ^ (uint)this.length;
            }
        }

        if (singleUnit)
        {
            this.fileHeader = new uint[] { 0, compressedSize };
        }
        else if (compressed)
        {
            this.fileHeader = ReadBlockOffsets(file.Archive, this.seed, this.offset, (int)((length + file.Archive.BlockSize - 1) / file.Archive.BlockSize + 1));
        }
        else
        {
            this.fileHeader = new uint[(int)(length + file.Archive.BlockSize - 1) / file.Archive.BlockSize + 1];
            this.fileHeader[0] = 0;
            for (int i = 1; i < this.fileHeader.Length; i++)
            {
                this.fileHeader[i] = this.fileHeader[i - 1] + (uint)file.Archive.BlockSize;
                if (this.fileHeader[i] > length)
                {
                    this.fileHeader[i] = (uint)this.length;
                }
            }
        }

        // Treat files smaller than the block size as single unit. (But only now that we've read the file header.)
        singleUnit |= length <= file.Archive.BlockSize;

        this.blockBuffer = new byte[singleUnit ? length : (uint)file.Archive.BlockSize];
        if (compressed)
        {
            this.compressedBuffer = new byte[singleUnit ? compressedSize : (uint)file.Archive.BlockSize];
        }
        this.lastBlockLength = this.length > 0 ? this.length % (uint)this.blockBuffer.Length : 0;
        if (this.lastBlockLength == 0)
        {
            this.lastBlockLength = (uint)this.blockBuffer.Length;
        }
        this.currentBlock = -1;

        UpdateBuffer();

        // If we finished initializing a stream to patch data, all that is left is to apply the patch.
        if (patchInfoHeader != null)
        {
            // The patching methods will read from this stream instance (whose constructor has yet to finish…!) and return the patched data.
            this.blockBuffer = ApplyPatch(patchInfoHeader.Value, baseStream);
            // Once the patch has been applied, transform this stream into a mere memory stream. (The same as with single unit files, in fact.)
            this.compressedBuffer = null;
            this.fileHeader = new uint[] { 0, (uint)this.blockBuffer.Length };
            this.position = 0;
            this.currentBlock = 0;
            this.readBufferOffset = 0;
            this.length = (uint)this.blockBuffer.Length;
        }
    }
    finally
    {
        if (baseStream != null)
        {
            baseStream.Dispose();
        }
    }
}
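// A standalone sketch of the block offset table ("fileHeader") built above for the
// uncompressed, non-single-unit case: entry i holds the offset of block i, and the final
// entry holds the total length, so block i spans [table[i], table[i + 1]). The helper name
// and the sample values are illustrative only.
static uint[] BuildUncompressedBlockOffsets(uint length, uint blockSize)
{
    var table = new uint[(length + blockSize - 1) / blockSize + 1];
    for (int i = 1; i < table.Length; i++)
    {
        // Each block starts one blockSize further, except the last one, which is clamped to the file length.
        table[i] = Math.Min(table[i - 1] + blockSize, length);
    }
    return table;
}
// Example: BuildUncompressedBlockOffsets(10000, 4096) => { 0, 4096, 8192, 10000 }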
private unsafe byte[] ApplyBsd0Patch(ref PatchInfoHeader patchInfoHeader, ref PatchHeader patchHeader, uint patchLength, byte[] originalData)
{
    byte[] patchData;

    // If the data is shorter than advertised, it is RLE-packed; otherwise read it as-is.
    if (patchLength < patchHeader.PatchLength)
    {
        patchData = UnpackRle(patchLength);
    }
    else
    {
        patchData = new byte[patchLength];
        if (Read(patchData, 0, checked((int)patchLength)) != patchLength)
        {
            throw new EndOfStreamException();
        }
    }

    fixed (byte* patchDataPointer = patchData)
    {
        var bsdiffHeader = (PatchBsdiff40Header*)patchDataPointer;

        if (!BitConverter.IsLittleEndian)
        {
            CommonMethods.SwapBytes((ulong*)patchDataPointer, sizeof(PatchBsdiff40Header) >> 3);
        }

        if (bsdiffHeader->Signature != 0x3034464649445342 /* 'BSDIFF40' */)
        {
            throw new InvalidDataException(ErrorMessages.GetString("Bsd0PatchHeaderInvalidSignature"));
        }

        var controlBlock = (uint*)(patchDataPointer + sizeof(PatchBsdiff40Header));
        var differenceBlock = (byte*)controlBlock + bsdiffHeader->ControlBlockLength;
        var extraBlock = differenceBlock + bsdiffHeader->DifferenceBlockLength;

        if (!BitConverter.IsLittleEndian)
        {
            CommonMethods.SwapBytes(controlBlock, bsdiffHeader->ControlBlockLength >> 2);
        }

        var patchBuffer = new byte[bsdiffHeader->PatchedFileSize];

        fixed (byte* originalDataPointer = originalData)
        fixed (byte* patchBufferPointer = patchBuffer)
        {
            var sourcePointer = originalDataPointer;
            var destinationPointer = patchBufferPointer;
            int sourceCount = originalData.Length;
            int destinationCount = patchBuffer.Length;

            while (destinationCount != 0)
            {
                uint differenceLength = *controlBlock++;
                uint extraLength = *controlBlock++;
                uint sourceOffset = *controlBlock++;

                if (differenceLength > destinationCount)
                {
                    throw new InvalidDataException(ErrorMessages.GetString("Bsd0PatchInvalidData"));
                }
                destinationCount = (int)(destinationCount - differenceLength);

                // Apply the difference patch (Patched Data = Original data + Difference data)
                for (; differenceLength-- != 0; destinationPointer++, sourcePointer++)
                {
                    *destinationPointer = *differenceBlock++;
                    if (sourcePointer < originalDataPointer + sourceCount) // Stay within the original data; past its end, the difference bytes are used as-is.
                    {
                        *destinationPointer += *sourcePointer;
                    }
                }

                if (extraLength > destinationCount)
                {
                    throw new InvalidDataException(ErrorMessages.GetString("Bsd0PatchInvalidData"));
                }
                destinationCount = (int)(destinationCount - extraLength);

                // Apply the extra data patch (New data)
                while (extraLength-- != 0)
                {
                    *destinationPointer++ = *extraBlock++;
                }

                // The source offset is stored as sign and magnitude: the high bit set means a negative offset.
                sourcePointer += (sourceOffset & 0x80000000) != 0 ? unchecked((int)(0x80000000 - sourceOffset)) : (int)sourceOffset;
            }
        }

        return patchBuffer;
    }
}
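// A minimal sketch of the BSDIFF40 header decoded above. All four fields appear to be
// 64-bit little-endian values (which is why the byte swap above treats the header as
// ulongs); the real struct is defined elsewhere in the codebase, so this layout is an
// assumption derived from how the fields are used.
// Requires: using System.Runtime.InteropServices;
[StructLayout(LayoutKind.Sequential, Pack = 1)]
internal struct PatchBsdiff40HeaderSketch // hypothetical name
{
    public ulong Signature;             // 'BSDIFF40' (0x3034464649445342 read as little-endian)
    public ulong ControlBlockLength;    // Size in bytes of the control block (triples of 32-bit values)
    public ulong DifferenceBlockLength; // Size in bytes of the difference block
    public ulong PatchedFileSize;       // Size in bytes of the patched output
}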
private unsafe byte[] ApplyPatch(PatchInfoHeader patchInfoHeader, Stream baseStream)
{
    PatchHeader patchHeader;

    Read((byte*)&patchHeader, sizeof(PatchHeader));
    if (!BitConverter.IsLittleEndian)
    {
        CommonMethods.SwapBytes((uint*)&patchHeader, sizeof(PatchHeader) >> 2);
    }

    if (patchHeader.Signature != 0x48435450 /* 'PTCH' */)
    {
        throw new InvalidDataException(ErrorMessages.GetString("PatchHeaderInvalidSignature"));
    }
    if (patchHeader.PatchedFileSize != file.Size)
    {
        throw new InvalidDataException(ErrorMessages.GetString("PatchHeaderInvalidFileSize"));
    }
    if (baseStream.Length != patchHeader.OriginalFileSize)
    {
        throw new InvalidDataException(ErrorMessages.GetString("PatchHeaderInvalidBaseFileSize"));
    }

    // Once the initial tests have passed, we can load the whole patch in memory.
    // This will take a big amount of memory, but will avoid having to unpack the file twice…
    var originalData = new byte[baseStream.Length];
    if (baseStream.Read(originalData, 0, originalData.Length) != originalData.Length)
    {
        throw new EndOfStreamException();
    }

    var md5 = CommonMethods.SharedMD5;
    var originalHash = md5.ComputeHash(originalData);

    PatchMD5ChunkData md5ChunkData = default(PatchMD5ChunkData);
    bool hasMD5 = false;

    while (true)
    {
        long chunkPosition = Position;
        var chunkHeader = stackalloc uint[2];

        if (Read((byte*)chunkHeader, 8) != 8)
        {
            throw new EndOfStreamException();
        }
        if (!BitConverter.IsLittleEndian)
        {
            CommonMethods.SwapBytes(chunkHeader, 2);
        }

        if (chunkHeader[0] == 0x5F35444D /* 'MD5_' */)
        {
            if (Read((byte*)&md5ChunkData, sizeof(PatchMD5ChunkData)) != sizeof(PatchMD5ChunkData))
            {
                throw new EndOfStreamException();
            }

            if (!CommonMethods.CompareData(originalHash, md5ChunkData.OrginialFileMD5))
            {
                throw new InvalidDataException(ErrorMessages.GetString("PatchBaseFileMD5Failed"));
            }

            hasMD5 = true;
        }
        else if (chunkHeader[0] == 0x4D524658 /* 'XFRM' */)
        {
            // This may not be a real problem; however, let's not handle this case for now… (May fail because of the stupid bogus patches…)
            if (chunkPosition + chunkHeader[1] != Length)
            {
                throw new InvalidDataException(ErrorMessages.GetString("PatchXfrmChunkError"));
            }

            uint patchType;
            if (Read((byte*)&patchType, 4) != 4)
            {
                throw new EndOfStreamException();
            }
            if (!BitConverter.IsLittleEndian)
            {
                patchType = CommonMethods.SwapBytes(patchType);
            }

            uint patchLength = chunkHeader[1] - 12;
            byte[] patchedData;

            if (patchType == 0x59504F43 /* 'COPY' */)
            {
                patchedData = ApplyCopyPatch(ref patchInfoHeader, ref patchHeader, patchLength, originalData);
            }
            else if (patchType == 0x30445342 /* 'BSD0' */)
            {
                patchedData = ApplyBsd0Patch(ref patchInfoHeader, ref patchHeader, patchLength, originalData);
            }
            else
            {
                throw new NotSupportedException("Unsupported patch type: '" + CommonMethods.FourCCToString(patchType) + "'");
            }

            if (hasMD5)
            {
                var patchedHash = md5.ComputeHash(patchedData);

                if (!CommonMethods.CompareData(patchedHash, md5ChunkData.PatchedFileMD5))
                {
                    throw new InvalidDataException(ErrorMessages.GetString("PatchFinalFileMD5Failed"));
                }
            }

            return patchedData;
        }
        else
        {
            throw new InvalidDataException(string.Format(ErrorMessages.GetString("PatchUnknownChunk"), CommonMethods.FourCCToString(chunkHeader[0])));
        }

        Seek(chunkPosition + chunkHeader[1], SeekOrigin.Begin);
    }
}
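// A minimal sketch of the 'PTCH' header read above. The fields are 32-bit little-endian
// values (hence the uint-wise byte swap); the names follow their usage in ApplyPatch and
// ApplyBsd0Patch, but the real struct is defined elsewhere, so this layout is an assumption.
// Requires: using System.Runtime.InteropServices;
[StructLayout(LayoutKind.Sequential, Pack = 1)]
internal struct PatchHeaderSketch // hypothetical name
{
    public uint Signature;        // 'PTCH' (0x48435450 read as little-endian)
    public uint PatchLength;      // Size of the patch data; compared against the XFRM payload length
    public uint OriginalFileSize; // Expected size of the base file
    public uint PatchedFileSize;  // Expected size of the file once patched
}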
private void OpenInternal(Stream stream, bool shouldParseListFile)
{
    // MPQ offsets can be 32 bits, 48 bits or 64 bits depending on the MPQ version used…
    long hashTableOffset, hashTableCompressedSize, hashTableSize;
    long blockTableOffset, blockTableCompressedSize, blockTableSize;
    long highBlockTableOffset, highBlockTableCompressedSize, highBlockTableSize;
    long enhancedHashTableOffset, enhancedHashTableCompressedSize;
    long enhancedBlockTableOffset, enhancedBlockTableCompressedSize;
    uint hashTableLength, blockTableLength;
    uint rawChunkSize;
    uint signature;
    byte[] hashes;

    if (!stream.CanSeek)
    {
        throw new InvalidOperationException(ErrorMessages.GetString("SeekableStreamRequired"));
    }
    if (stream.Length - stream.Position < 0x20)
    {
        throw new InvalidDataException(ErrorMessages.GetString("NotEnoughData"));
    }

    // We use a lot of "long" and "int" variables here, but the data is likely stored as ulong and uint…
    // So better test for overflow… Who knows what might happen in the future… ;)
    // The "safe" pattern is to read as unsigned and cast to signed where overflow is possible.
    checked
    {
        this.stream = stream;

        archiveDataOffset = stream.Position; // We don't handle searching for MPQ data. The stream must already be positioned at the beginning of the MPQ data.
        signature = stream.ReadUInt32();

        // The first part of the file may be MPQ user data. The next part should be regular MPQ archive data.
        if (signature == MpqUserDataSignature)
        {
            userDataOffset = archiveDataOffset;
            userDataLength = stream.ReadUInt32();
            stream.Seek(stream.ReadUInt32() - 3 * sizeof(uint), SeekOrigin.Current);
            archiveDataOffset = stream.Position;
            signature = stream.ReadUInt32();
        }

        // Check for the MPQ archive signature
        if (signature != MpqArchiveSignature)
        {
            throw new InvalidDataException(ErrorMessages.GetString("InvalidData"));
        }

        headerSize = stream.ReadUInt32();
        archiveDataLength = stream.ReadUInt32();

        // MPQ format detection
        // An unknown MPQ version will raise an error… This seems like a safe idea.
        ushort mpqVersion = stream.ReadUInt16(); // Read the MPQ format

        switch (mpqVersion)
        {
            case 0: // Original MPQ format
                archiveFormat = MpqFormat.Original;
                if (headerSize < 0x20)
                {
                    throw new InvalidDataException(ErrorMessages.GetString("InvalidArchiveHeader"));
                }
                break;
            case 1: // Extended MPQ format (WoW Burning Crusade)
                archiveFormat = MpqFormat.BurningCrusade;
                if (headerSize < 0x2C)
                {
                    throw new InvalidDataException(ErrorMessages.GetString("InvalidArchiveHeader"));
                }
                break;
            case 2: // Enhanced MPQ format (Take 1)
                archiveFormat = MpqFormat.CataclysmFirst;
                // The header may contain no field beyond those of the BC extended MPQ format.
                // However, if additional fields are present, the header should be at least 68 bytes long.
                if (headerSize < 0x2C || (headerSize > 0x2C && headerSize < 0x44))
                {
                    throw new InvalidDataException(ErrorMessages.GetString("InvalidArchiveHeader"));
                }
                break;
            case 3: // Enhanced MPQ format (Take 2)
                archiveFormat = MpqFormat.CataclysmSecond;
                if (headerSize < 0xD0)
                {
                    throw new InvalidDataException(ErrorMessages.GetString("InvalidArchiveHeader"));
                }
                break;
            default:
                throw new MpqVersionNotSupportedException(mpqVersion); // Newer MPQ versions can probably be read just as well by the existing code… But we can't know for sure…
        }

        blockSize = 0x200 << stream.ReadUInt16(); // Calculate the block size

        hashTableOffset = stream.ReadUInt32(); // Get the hash table offset
        blockTableOffset = stream.ReadUInt32(); // Get the block table offset
        hashTableLength = stream.ReadUInt32(); // Get the hash table size
        blockTableLength = stream.ReadUInt32(); // Get the block table size

        // Assign the compressed size for the various tables.
        // Since compression was non-existent in V1 & V2, we know the compressed size is the uncompressed size.
        // If V4 specifies a different compressed size, this value will be overwritten later.
        hashTableCompressedSize = 4 * sizeof(uint) * hashTableLength;
        if (blockTableOffset > hashTableOffset && blockTableOffset - hashTableOffset < hashTableCompressedSize) // Compute the compressed hash table length if needed
        {
            hashTableCompressedSize = blockTableOffset - hashTableOffset;
        }
        blockTableCompressedSize = 4 * sizeof(uint) * blockTableLength;

        // Process the additional values of the "Burning Crusade" MPQ format
        if (archiveFormat >= MpqFormat.BurningCrusade)
        {
            ushort hashTableOffsetHigh, blockTableOffsetHigh;

            // Read the extended information
            highBlockTableOffset = (long)stream.ReadUInt64();
            highBlockTableCompressedSize = highBlockTableOffset != 0 ? sizeof(ushort) * blockTableLength : 0;
            hashTableOffsetHigh = stream.ReadUInt16();
            blockTableOffsetHigh = stream.ReadUInt16();
            // Modify the offsets accordingly
            hashTableOffset |= (long)hashTableOffsetHigh << 32;
            blockTableOffset |= (long)blockTableOffsetHigh << 32;

            // Handle MPQ version 3 (Cataclysm First) and newer
            if (archiveFormat >= MpqFormat.CataclysmFirst && headerSize >= 0x44)
            {
                archiveDataLength = (long)stream.ReadUInt64();
                enhancedBlockTableOffset = (long)stream.ReadUInt64();
                enhancedHashTableOffset = (long)stream.ReadUInt64();

                // Handle MPQ version 4 (Cataclysm Second)
                if (archiveFormat >= MpqFormat.CataclysmSecond)
                {
                    hashTableCompressedSize = (long)stream.ReadUInt64();
                    blockTableCompressedSize = (long)stream.ReadUInt64();
                    highBlockTableCompressedSize = (long)stream.ReadUInt64();
                    enhancedHashTableCompressedSize = (long)stream.ReadUInt64();
                    enhancedBlockTableCompressedSize = (long)stream.ReadUInt64();
                    rawChunkSize = stream.ReadUInt32();

                    hashes = new byte[6 * 16];
                    if (stream.Read(hashes, 0, hashes.Length) != hashes.Length)
                    {
                        throw new EndOfStreamException();
                    }
                }
                else
                {
                    // TODO: Compute the uncompressed size for the new enhanced tables of version 3… (Will have to check how to do that…)
                    highBlockTableCompressedSize = highBlockTableOffset > 0 ? sizeof(ushort) * blockTableLength : 0;
                }
            }
            else
            {
#if DEBUG
                long oldArchiveSize = archiveDataLength;
#endif
                // Compute the 64 bit archive size. (We don't really need it for MPQ reading, but for integrity checks, we do…)
                if (highBlockTableOffset > hashTableOffset && highBlockTableOffset > blockTableOffset)
                {
                    archiveDataLength = highBlockTableOffset + sizeof(ushort) * blockTableLength;
                }
                else if (blockTableOffset > hashTableOffset)
                {
                    archiveDataLength = blockTableOffset + 4 * sizeof(uint) * blockTableLength;
                }
                else
                {
                    archiveDataLength = hashTableOffset + 4 * sizeof(uint) * hashTableLength;
                }
#if DEBUG
                Debug.Assert(oldArchiveSize >= archiveDataLength);
#endif
            }
        }
        else
        {
            highBlockTableOffset = 0;
            highBlockTableCompressedSize = 0;
        }

        if (!CheckOffset((uint)headerSize) || !CheckOffset(hashTableOffset) || !CheckOffset(blockTableOffset) || hashTableLength < blockTableLength)
        {
            throw new InvalidDataException(ErrorMessages.GetString("InvalidArchiveHeader"));
        }

        hashTableSize = 4 * sizeof(uint) * hashTableLength;
        blockTableSize = 4 * sizeof(uint) * blockTableLength;
        highBlockTableSize = highBlockTableOffset != 0 ? sizeof(ushort) * blockTableLength : 0;

        // Check for the presence of a strong signature
        if (stream.Length >= archiveDataOffset + archiveDataLength + 2052)
        {
            stream.Seek(archiveDataOffset + archiveDataLength, SeekOrigin.Begin);
            hasStrongSignature = stream.ReadUInt32() == MpqStrongSignatureSignature;
        }
    }

    // Create the buffers for table reading
    var tableReadBuffer = hashTableSize < hashTableCompressedSize || blockTableSize < blockTableCompressedSize || highBlockTableCompressedSize < highBlockTableSize ?
        new byte[Math.Max(Math.Max(hashTableCompressedSize, blockTableCompressedSize), highBlockTableCompressedSize)] :
        null;
    var tableBuffer = new byte[Math.Max(hashTableSize, blockTableSize)];

    // Read the hash table
    ReadHashTable(tableBuffer, hashTableLength, hashTableOffset, hashTableCompressedSize, tableReadBuffer);
    // Read the block table
    ReadBlockTable(tableBuffer, blockTableLength, blockTableOffset, blockTableCompressedSize, highBlockTableOffset, highBlockTableCompressedSize, tableReadBuffer);

    // When possible, find and parse the listfile…
    listFile = FindFile(ListFileName);
    if (listFile == null)
    {
        return;
    }
    if (shouldParseListFile)
    {
        ParseListFile();
    }
}
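// A compact summary of the minimum header sizes enforced in the switch above, written as a
// standalone helper. The method name is hypothetical; the thresholds come straight from the
// parsing code (0x20 for v0, 0x2C for v1, 0x2C or at least 0x44 for v2, 0xD0 for v3).
private static bool IsValidHeaderSize(ushort mpqVersion, uint headerSize)
{
    switch (mpqVersion)
    {
        case 0: return headerSize >= 0x20;                        // Original
        case 1: return headerSize >= 0x2C;                        // Burning Crusade
        case 2: return headerSize == 0x2C || headerSize >= 0x44;  // Cataclysm (take 1): plain BC-sized, or with the extra fields
        case 3: return headerSize >= 0xD0;                        // Cataclysm (take 2)
        default: return false;                                    // Unknown versions are rejected
    }
}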
private unsafe byte[] ApplyBsd0Patch(ref PatchInfoHeader patchInfoHeader, ref PatchHeader patchHeader, uint patchLength, byte[] originalData)
{
    byte[] patchData;

    if (patchLength < patchHeader.PatchLength)
    {
        patchData = UnpackRle(patchLength);
    }
    else
    {
        patchData = new byte[patchLength];
        if (Read(patchData, 0, checked((int)patchLength)) != patchLength)
        {
            throw new EndOfStreamException();
        }
    }

    fixed (byte* patchDataPointer = patchData)
    {
        var bsdiffHeader = (PatchBsdiff40Header*)patchDataPointer;

        if (!BitConverter.IsLittleEndian)
        {
            CommonMethods.SwapBytes((ulong*)patchDataPointer, sizeof(PatchBsdiff40Header) / sizeof(ulong));
        }

        if (bsdiffHeader->Signature != 0x3034464649445342 /* 'BSDIFF40' */)
        {
            throw new InvalidDataException(ErrorMessages.GetString("Bsd0PatchHeaderInvalidSignature"));
        }

        var controlBlock = (uint*)(patchDataPointer + sizeof(PatchBsdiff40Header));
        var differenceBlock = (byte*)controlBlock + bsdiffHeader->ControlBlockLength;
        var extraBlock = differenceBlock + bsdiffHeader->DifferenceBlockLength;

        if (!BitConverter.IsLittleEndian)
        {
            CommonMethods.SwapBytes(controlBlock, bsdiffHeader->ControlBlockLength / sizeof(uint));
        }

        var patchedBuffer = new byte[bsdiffHeader->PatchedFileSize];

        uint o = 0; // Index into the original data
        uint n = 0; // Index into the patched buffer

        try
        {
            while (n < patchedBuffer.Length)
            {
                uint differenceLength = *controlBlock++;
                uint extraLength = *controlBlock++;
                uint sourceOffset = *controlBlock++;

                // Apply the difference patch (Patched Data = Original data + Difference data)
                for (uint i = 0; i < differenceLength; i++, n++, o++)
                {
                    patchedBuffer[n] = differenceBlock[i];
                    if (o < originalData.Length)
                    {
                        patchedBuffer[n] += originalData[o];
                    }
                }
                differenceBlock += differenceLength;

                // Apply the extra data patch (New data)
                for (int e = 0; e < extraLength; e++)
                {
                    patchedBuffer[n++] = extraBlock[e];
                }
                extraBlock += extraLength;

                // The source offset is stored as sign and magnitude: when the high bit is set,
                // the wrapping addition below amounts to subtracting the magnitude.
                unchecked { o += (sourceOffset & 0x80000000) != 0 ? (0x80000000 - sourceOffset) : sourceOffset; }
            }
        }
        catch (IndexOutOfRangeException ex)
        {
            // Malformed control data walked out of one of the buffers.
            throw new InvalidDataException(ErrorMessages.GetString("Bsd0PatchInvalidData"), ex);
        }

        return patchedBuffer;
    }
}
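// A tiny worked example of the BSDIFF40 control triples handled above, using plain arrays
// and no MPQ types. The helper name and all sample data are made up for illustration.
static byte[] ApplyControlTriples(byte[] original, uint[] control, byte[] diff, byte[] extra, int patchedSize)
{
    var output = new byte[patchedSize];
    int c = 0, d = 0, e = 0, o = 0, n = 0;
    while (n < output.Length)
    {
        uint diffLen = control[c++], extraLen = control[c++], seek = control[c++];
        for (uint i = 0; i < diffLen; i++, n++, o++) // Difference bytes are added to the original data
        {
            output[n] = (byte)(diff[d++] + (o < original.Length ? original[o] : 0));
        }
        for (uint i = 0; i < extraLen; i++) // Extra bytes are copied verbatim
        {
            output[n++] = extra[e++];
        }
        o += (seek & 0x80000000) != 0 ? -(int)(seek & 0x7FFFFFFF) : (int)seek; // Sign-and-magnitude seek in the original data
    }
    return output;
}
// Example: original = { 10, 20, 30 }, control = { 3, 1, 0 }, diff = { 1, 1, 1 }, extra = { 99 }
// => ApplyControlTriples(original, control, diff, extra, 4) yields { 11, 21, 31, 99 }.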
/// <summary>Initializes a new instance of the <see cref="MpqVersionNotSupportedException"/> class.</summary>
internal MpqVersionNotSupportedException(ushort version)
    : base(string.Format(ErrorMessages.GetString("MpqVersionNotSupported"), version)) { }
internal SeedNotFoundException(long block)
    : base(string.Format(ErrorMessages.GetString("SeedNotFound"), block)) { }