/** HUF_getNbBits() :
 *  Reads the code length (nbBits) assigned to `symbolValue` from a CTable.
 *  Note 1 : not inlined in the C original, as HUF_CElt definition is private.
 *  Note 2 : takes void* so a statically allocated table (typed U32) can be passed. */
public static uint HUF_getNbBits(void *symbolTable, uint symbolValue)
{
    HUF_CElt_s *ctable = (HUF_CElt_s *)symbolTable;
    assert(symbolValue <= 255);
    return ctable[symbolValue].nbBits;
}
/*! HUF_writeCTable() :
 *  `CTable` : Huffman tree to save, using huf representation.
 *  Serializes the table as symbol *weights*, trying an FSE-compressed form first
 *  and falling back to raw 4-bit weights (two per byte) if compression doesn't pay.
 *  @return : size of saved CTable, or a ZSTD error code */
public static nuint HUF_writeCTable(void *dst, nuint maxDstSize, HUF_CElt_s *CTable, uint maxSymbolValue, uint huffLog)
{
    byte *bitsToWeight = stackalloc byte[13];
    byte *huffWeight = stackalloc byte[255];
    byte *op = (byte *)(dst);
    uint n;
    if (maxSymbolValue > 255)
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)));
    }
    // Map code length -> weight: weight = huffLog + 1 - nbBits;
    // nbBits == 0 (absent symbol) maps to weight 0.
    bitsToWeight[0] = 0;
    for (n = 1; n < huffLog + 1; n++)
    {
        bitsToWeight[n] = (byte)(huffLog + 1 - n);
    }
    for (n = 0; n < maxSymbolValue; n++)
    {
        huffWeight[n] = bitsToWeight[CTable[n].nbBits];
    }
    // Attempt 1: FSE-compress the weight sequence; a header byte < 128 stores its size.
    {
        nuint hSize = HUF_compressWeights((void *)(op + 1), maxDstSize - 1, (void *)huffWeight, maxSymbolValue);
        if ((ERR_isError(hSize)) != 0)
        {
            return(hSize);
        }
        // Only worthwhile if it actually saved space (< half a nibble per symbol).
        if (((hSize > 1) && (hSize < maxSymbolValue / 2)))
        {
            op[0] = (byte)(hSize);
            return(hSize + 1);
        }
    }
    // Attempt 2: raw nibble weights. Header byte is 128 + (maxSymbolValue - 1),
    // so only representable when maxSymbolValue <= 128.
    if (maxSymbolValue > (uint)((256 - 128)))
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
    }
    if (((maxSymbolValue + 1) / 2) + 1 > maxDstSize)
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
    }
    op[0] = (byte)(128 + (maxSymbolValue - 1));
    // Pad so the final nibble pair reads a defined value when maxSymbolValue is odd.
    huffWeight[maxSymbolValue] = 0;
    for (n = 0; n < maxSymbolValue; n += 2)
    {
        op[(n / 2) + 1] = (byte)((huffWeight[n] << 4) + huffWeight[n + 1]);
    }
    return(((maxSymbolValue + 1) / 2) + 1);
}
/** Estimates the compressed payload size (in bytes) that `CTable` would produce
 *  for the histogram `count[0..maxSymbolValue]`: sum of nbBits*count, >> 3. */
public static nuint HUF_estimateCompressedSize(HUF_CElt_s *CTable, uint *count, uint maxSymbolValue)
{
    nuint totalBits = 0;
    for (int sym = 0; sym <= (int)maxSymbolValue; ++sym)
    {
        totalBits += CTable[sym].nbBits * count[sym];
    }
    return totalBits >> 3;  // bits -> bytes
}
/** Returns 1 if `CTable` can encode every symbol that appears in `count`
 *  (i.e. no symbol with a non-zero count has a zero-length code), else 0. */
public static int HUF_validateCTable(HUF_CElt_s *CTable, uint *count, uint maxSymbolValue)
{
    for (int sym = 0; sym <= (int)maxSymbolValue; ++sym)
    {
        if (count[sym] != 0 && CTable[sym].nbBits == 0)
        {
            return 0;  // symbol present in data but has no code
        }
    }
    return 1;
}
/** Builds a Huffman CTable from the histogram `count`, using caller-provided
 *  workspace (must be 4-byte aligned and at least sizeof(HUF_buildCTable_wksp_tables)).
 *  @return : the actual max code length used (<= maxNbBits), or a ZSTD error code. */
public static nuint HUF_buildCTable_wksp(HUF_CElt_s *tree, uint *count, uint maxSymbolValue, uint maxNbBits, void *workSpace, nuint wkspSize)
{
    HUF_CElt_s_tables_comment_placeholder:
    HUF_buildCTable_wksp_tables *wksp_tables = (HUF_buildCTable_wksp_tables *)(workSpace);
    nodeElt_s *huffNode0 = (nodeElt_s *)wksp_tables->huffNodeTbl;
    // huffNode skips slot 0 of the table; sort/build operate on huffNode.
    nodeElt_s *huffNode = huffNode0 + 1;
    int nonNullRank;
    // Workspace alignment / size validation.
    if (((nuint)(workSpace) & 3) != 0)
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
    }
    if (wkspSize < (nuint)(sizeof(HUF_buildCTable_wksp_tables)))
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)));
    }
    if (maxNbBits == 0)
    {
        maxNbBits = 11;  // default table log
    }
    if (maxSymbolValue > 255)
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)));
    }
    // Clear the full node table (512 entries) before building.
    memset((void *)(huffNode0), (0), ((nuint)(sizeof(nodeElt_s) * 512)));
    // Sort symbols by count, build the tree, then clamp code lengths to maxNbBits.
    HUF_sort(huffNode, count, maxSymbolValue, (rankPos *)wksp_tables->rankPosition);
    nonNullRank = HUF_buildTree(huffNode, maxSymbolValue);
    maxNbBits = HUF_setMaxHeight(huffNode, (uint)(nonNullRank), maxNbBits);
    if (maxNbBits > 12)
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_GENERIC)));
    }
    HUF_buildCTableFromTree(tree, huffNode, nonNullRank, maxSymbolValue, maxNbBits);
    return(maxNbBits);
}
/**
 * HUF_buildCTableFromTree():
 * Build the CTable given the Huffman tree in huffNode.
 *
 * @param[out] CTable The output Huffman CTable.
 * @param huffNode The Huffman tree.
 * @param nonNullRank The last and smallest node in the Huffman tree.
 * @param maxSymbolValue The maximum symbol value.
 * @param maxNbBits The exact maximum number of bits used in the Huffman tree.
 */
private static void HUF_buildCTableFromTree(HUF_CElt_s *CTable, nodeElt_s *huffNode, int nonNullRank, uint maxSymbolValue, uint maxNbBits)
{
    int n;
    // nbPerRank[b] = number of symbols with code length b (b <= 12).
    ushort *nbPerRank = stackalloc ushort[13];
    memset(nbPerRank, 0, sizeof(ushort) * 13);
    // valPerRank[b] = next canonical code value to hand out at length b.
    ushort *valPerRank = stackalloc ushort[13];
    memset(valPerRank, 0, sizeof(ushort) * 13);
    int alphabetSize = (int)(maxSymbolValue + 1);
    // Count how many leaves use each code length.
    for (n = 0; n <= nonNullRank; n++)
    {
        nbPerRank[huffNode[n].nbBits]++;
    }
    // Determine the starting code value for each length, walking from the
    // longest codes (maxNbBits) to the shortest; halving `min` each step keeps
    // the assignment canonical (prefix-free).
    {
        ushort min = 0;
        for (n = (int)(maxNbBits); n > 0; n--)
        {
            valPerRank[n] = min;
            min += (ushort)(nbPerRank[n]);
            min >>= 1;
        }
    }
    // Push each leaf's code length into its symbol slot...
    for (n = 0; n < alphabetSize; n++)
    {
        CTable[huffNode[n].@byte].nbBits = huffNode[n].nbBits;
    }
    // ...then assign code values in symbol order within each length bucket.
    for (n = 0; n < alphabetSize; n++)
    {
        CTable[n].val = valPerRank[CTable[n].nbBits]++;
    }
}
/** Compresses `src` with the given CTable as one or four streams, and rejects
 *  the result (returns 0) when compression yields no size benefit.
 *  @return : compressed size written after `op`, 0 if incompressible, or an error code. */
private static nuint HUF_compressCTable_internal(byte *ostart, byte *op, byte *oend, void *src, nuint srcSize, HUF_nbStreams_e nbStreams, HUF_CElt_s *CTable, int bmi2)
{
    nuint cSize;
    if (nbStreams == HUF_nbStreams_e.HUF_singleStream)
    {
        cSize = HUF_compress1X_usingCTable_internal((void *)op, (nuint)(oend - op), src, srcSize, CTable, bmi2);
    }
    else
    {
        cSize = HUF_compress4X_usingCTable_internal((void *)op, (nuint)(oend - op), src, srcSize, CTable, bmi2);
    }
    if ((ERR_isError(cSize)) != 0)
    {
        return cSize;
    }
    if (cSize == 0)
    {
        return 0;  // stream-level "not compressible"
    }
    op += cSize;
    assert(op >= ostart);
    // Require at least one byte of saving over storing raw.
    if ((nuint)(op - ostart) >= srcSize - 1)
    {
        return 0;
    }
    return (nuint)(op - ostart);
}
/** Public 4-stream entry point: compresses `src` with a prebuilt CTable,
 *  always using the non-BMI2 code path. */
public static nuint HUF_compress4X_usingCTable(void *dst, nuint dstSize, void *src, nuint srcSize, HUF_CElt_s *CTable)
{
    const int noBmi2 = 0;
    return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, noBmi2);
}
/** Compresses `src` as four interleaved Huffman streams.
 *  Layout: 6-byte jump table (three LE16 sizes for streams 1-3; stream 4's size
 *  is implied), followed by the four compressed streams.
 *  The first three streams each cover ceil(srcSize/4) bytes; the last stream
 *  covers the remainder.
 *  @return : total compressed size, 0 if incompressible / dst too small, or an error code.
 *
 *  Note: the original body repeated the per-segment code three times verbatim;
 *  the first three segments are now driven by a loop over the jump-table slots. */
private static nuint HUF_compress4X_usingCTable_internal(void *dst, nuint dstSize, void *src, nuint srcSize, HUF_CElt_s *CTable, int bmi2)
{
    nuint segmentSize = (srcSize + 3) / 4;
    byte *ip = (byte *)(src);
    byte *iend = ip + srcSize;
    byte *ostart = (byte *)(dst);
    byte *oend = ostart + dstSize;
    byte *op = ostart;
    if (dstSize < (uint)(6 + 1 + 1 + 1 + 8))
    {
        return(0);  // minimum: jump table + at least 1 byte per stream + margin
    }
    if (srcSize < 12)
    {
        return(0);  // too small to split into 4 streams
    }
    op += 6;  // reserve the jump table
    assert(op <= oend);
    // Streams 1-3: fixed segmentSize, compressed size recorded in the jump table.
    for (uint seg = 0; seg < 3; seg++)
    {
        nuint cSize = HUF_compress1X_usingCTable_internal((void *)op, (nuint)(oend - op), (void *)ip, segmentSize, CTable, bmi2);
        if ((ERR_isError(cSize)) != 0)
        {
            return(cSize);
        }
        if (cSize == 0)
        {
            return(0);
        }
        assert(cSize <= 65535);  // must fit the LE16 jump-table slot
        MEM_writeLE16((void *)(ostart + seg * 2), (ushort)(cSize));
        op += cSize;
        ip += segmentSize;
        assert(op <= oend);
    }
    // Stream 4: whatever input remains; its size is not stored.
    assert(ip <= iend);
    {
        nuint cSize = HUF_compress1X_usingCTable_internal((void *)op, (nuint)(oend - op), (void *)ip, (nuint)(iend - ip), CTable, bmi2);
        if ((ERR_isError(cSize)) != 0)
        {
            return(cSize);
        }
        if (cSize == 0)
        {
            return(0);
        }
        op += cSize;
    }
    return((nuint)(op - ostart));
}
/** Dispatches single-stream compression to the BMI2 or default implementation. */
private static nuint HUF_compress1X_usingCTable_internal(void *dst, nuint dstSize, void *src, nuint srcSize, HUF_CElt_s *CTable, int bmi2)
{
    return bmi2 != 0
        ? HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable)
        : HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable);
}
/** Non-BMI2 path: delegates straight to the shared single-stream body. */
private static nuint HUF_compress1X_usingCTable_internal_default(void *dst, nuint dstSize, void *src, nuint srcSize, HUF_CElt_s *CTable)
{
    nuint compressedSize = HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable);
    return compressedSize;
}
/** Core single-stream Huffman encoder. Symbols are written back-to-front into a
 *  bitstream: the 0-3 trailing symbols are encoded first, then the bulk is
 *  consumed four at a time from the end toward the start.
 *  The `sizeof(nuint)*8 < ...` guards flush the bit container only when the
 *  accumulated worst-case bit count (max 12 bits/symbol, +7 rounding) could
 *  overflow the native word; on 64-bit they are compile-time false.
 *  @return : compressed size, or 0 if dst is too small / init failed. */
private static nuint HUF_compress1X_usingCTable_internal_body(void *dst, nuint dstSize, void *src, nuint srcSize, HUF_CElt_s *CTable)
{
    byte * ip = (byte *)(src);
    byte * ostart = (byte *)(dst);
    byte * oend = ostart + dstSize;
    byte * op = ostart;
    nuint n;
    BIT_CStream_t bitC;
    if (dstSize < 8)
    {
        return(0);  // not enough room for the bitstream container
    }
    {
        nuint initErr = BIT_initCStream(&bitC, (void *)op, (nuint)(oend - op));
        if ((ERR_isError(initErr)) != 0)
        {
            return(0);
        }
    }
    // n = srcSize rounded down to a multiple of 4.
    n = srcSize & unchecked ((nuint) unchecked (~3));
    // Encode the remainder (srcSize % 4 symbols) first; cases deliberately fall through.
    switch (srcSize & 3)
    {
        case 3:
        {
            HUF_encodeSymbol(&bitC, ip[n + 2], CTable);
        }
        if ((nuint)(sizeof(nuint)) * 8 < (uint)(12 * 4 + 7))
        {
            BIT_flushBits(&bitC);
        }
        goto case 2;  // fallthrough
        case 2:
        {
            HUF_encodeSymbol(&bitC, ip[n + 1], CTable);
        }
        if ((nuint)(sizeof(nuint)) * 8 < (uint)(12 * 2 + 7))
        {
            BIT_flushBits(&bitC);
        }
        goto case 1;  // fallthrough
        case 1:
        {
            HUF_encodeSymbol(&bitC, ip[n + 0], CTable);
        }
        BIT_flushBits(&bitC);
        goto case 0;  // fallthrough
        case 0:
        default:
        {
            break;
        }
    }
    // Main loop: 4 symbols per iteration, from the end of the input backwards.
    for (; n > 0; n -= 4)
    {
        HUF_encodeSymbol(&bitC, ip[n - 1], CTable);
        if ((nuint)(sizeof(nuint)) * 8 < (uint)(12 * 2 + 7))
        {
            BIT_flushBits(&bitC);
        }
        HUF_encodeSymbol(&bitC, ip[n - 2], CTable);
        if ((nuint)(sizeof(nuint)) * 8 < (uint)(12 * 4 + 7))
        {
            BIT_flushBits(&bitC);
        }
        HUF_encodeSymbol(&bitC, ip[n - 3], CTable);
        if ((nuint)(sizeof(nuint)) * 8 < (uint)(12 * 2 + 7))
        {
            BIT_flushBits(&bitC);
        }
        HUF_encodeSymbol(&bitC, ip[n - 4], CTable);
        BIT_flushBits(&bitC);
    }
    return(BIT_closeCStream(&bitC));
}
/** Appends the Huffman code of `symbol` (value + length taken from CTable)
 *  to the bitstream. Uses the "fast" add, which assumes nbBits > 0. */
private static void HUF_encodeSymbol(BIT_CStream_t *bitCPtr, uint symbol, HUF_CElt_s *CTable)
{
    HUF_CElt_s cell = CTable[symbol];
    BIT_addBitsFast(bitCPtr, cell.val, cell.nbBits);
}
/* HUF_compress4X_repeat():
 * Compresses input as 4 streams, optionally re-using an existing Huffman
 * compression table (`hufTable` / `repeat` / `preferRepeat`). */
public static nuint HUF_compress4X_repeat(void *dst, nuint dstSize, void *src, nuint srcSize, uint maxSymbolValue, uint huffLog, void *workSpace, nuint wkspSize, HUF_CElt_s *hufTable, HUF_repeat *repeat, int preferRepeat, int bmi2)
{
    // Thin wrapper: fixes the stream count at four and forwards everything else.
    return HUF_compress_internal(
        dst, dstSize, src, srcSize,
        maxSymbolValue, huffLog,
        HUF_nbStreams_e.HUF_fourStreams,
        workSpace, wkspSize,
        hufTable, repeat, preferRepeat, bmi2);
}
/* HUF_compress_internal() :
 * `workSpace_align4` must be aligned on 4-bytes boundaries,
 * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U32 unsigned.
 * Full Huffman compression driver: validates inputs, histograms the source,
 * decides between re-using `oldHufTable` and building a fresh table, writes the
 * table description, then compresses with 1 or 4 streams.
 * @return : compressed size (with 1 meaning RLE), 0 if not compressible, or an error code. */
private static nuint HUF_compress_internal(void *dst, nuint dstSize, void *src, nuint srcSize, uint maxSymbolValue, uint huffLog, HUF_nbStreams_e nbStreams, void *workSpace_align4, nuint wkspSize, HUF_CElt_s *oldHufTable, HUF_repeat *repeat, int preferRepeat, int bmi2)
{
    HUF_compress_tables_t *table = (HUF_compress_tables_t *)(workSpace_align4);
    byte *ostart = (byte *)(dst);
    byte *oend = ostart + dstSize;
    byte *op = ostart;
    assert(((nuint)(workSpace_align4) & 3) == 0);
    // Parameter / workspace validation and defaults.
    if (wkspSize < (uint)(((6 << 10) + 256)))
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_workSpace_tooSmall)));
    }
    if (srcSize == 0) { return(0); }
    if (dstSize == 0) { return(0); }
    if (srcSize > (uint)((128 * 1024)))
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_srcSize_wrong)));
    }
    if (huffLog > 12)
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
    }
    if (maxSymbolValue > 255)
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooLarge)));
    }
    if (maxSymbolValue == 0) { maxSymbolValue = 255; }
    if (huffLog == 0) { huffLog = 11; }
    // Fast path: caller prefers repeating a table known to be valid — skip even the histogram.
    if (preferRepeat != 0 && repeat != null && *repeat == HUF_repeat.HUF_repeat_valid)
    {
        return(HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2));
    }
    // Histogram the source; handle the trivial outcomes.
    {
        nuint largest = HIST_count_wksp((uint *)table->count, &maxSymbolValue, (void *)(byte *)(src), srcSize, workSpace_align4, wkspSize);
        if ((ERR_isError(largest)) != 0) { return(largest); }
        if (largest == srcSize)
        {
            // Single repeated byte: emit it alone (RLE), size 1.
            *ostart = ((byte *)(src))[0];
            return(1);
        }
        // Heuristic: distribution too flat to be worth compressing.
        if (largest <= (srcSize >> 7) + 4) { return(0); }
    }
    // Downgrade the repeat table if it can no longer encode this histogram.
    if (repeat != null && *repeat == HUF_repeat.HUF_repeat_check && (HUF_validateCTable(oldHufTable, (uint *)table->count, maxSymbolValue)) == 0)
    {
        *repeat = HUF_repeat.HUF_repeat_none;
    }
    if (preferRepeat != 0 && repeat != null && *repeat != HUF_repeat.HUF_repeat_none)
    {
        return(HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2));
    }
    // Build a fresh CTable sized to this input.
    huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue);
    {
        nuint maxBits = HUF_buildCTable_wksp((HUF_CElt_s *)table->CTable, (uint *)table->count, maxSymbolValue, huffLog, (void *)&table->buildCTable_wksp, (nuint)(4352));
        {
            nuint _var_err__ = maxBits;
            if ((ERR_isError(_var_err__)) != 0) { return(_var_err__); }
        }
        huffLog = (uint)(maxBits);
        // Zero the unused tail of the CTable so later byte-wise copies/compares are defined.
        memset((void *)((table->CTable + (maxSymbolValue + 1))), (0), ((nuint)(sizeof(HUF_CElt_s) * 256) - ((maxSymbolValue + 1) * (nuint)(sizeof(HUF_CElt_s)))));
    }
    // Serialize the table; decide new-vs-old table by estimated total size.
    {
        nuint hSize = HUF_writeCTable((void *)op, dstSize, (HUF_CElt_s *)table->CTable, maxSymbolValue, huffLog);
        if ((ERR_isError(hSize)) != 0) { return(hSize); }
        if (repeat != null && *repeat != HUF_repeat.HUF_repeat_none)
        {
            nuint oldSize = HUF_estimateCompressedSize(oldHufTable, (uint *)table->count, maxSymbolValue);
            nuint newSize = HUF_estimateCompressedSize((HUF_CElt_s *)table->CTable, (uint *)table->count, maxSymbolValue);
            // Old table wins if header+payload with the new one is no better.
            if (oldSize <= hSize + newSize || hSize + 12 >= srcSize)
            {
                return(HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, oldHufTable, bmi2));
            }
        }
        // Table header alone would eat any possible gain.
        if (hSize + 12U >= srcSize) { return(0); }
        op += hSize;
        if (repeat != null) { *repeat = HUF_repeat.HUF_repeat_none; }
        // Persist the new table so the caller can repeat it next block.
        if (oldHufTable != null)
        {
            memcpy((void *)(oldHufTable), (void *)(table->CTable), ((nuint)(sizeof(HUF_CElt_s) * 256)));
        }
    }
    return(HUF_compressCTable_internal(ostart, op, oend, src, srcSize, nbStreams, (HUF_CElt_s *)table->CTable, bmi2));
}
/** HUF_buildCTable() :
 *  Convenience wrapper over HUF_buildCTable_wksp using a stack workspace.
 *  @return : maxNbBits actually used, or an error code.
 *  Note : count is read before tree is written, so the two may safely overlap. */
public static nuint HUF_buildCTable(HUF_CElt_s *tree, uint *count, uint maxSymbolValue, uint maxNbBits)
{
    HUF_buildCTable_wksp_tables scratch;
    nuint scratchSize = (nuint)sizeof(HUF_buildCTable_wksp_tables);
    return HUF_buildCTable_wksp(tree, count, maxSymbolValue, maxNbBits, (void *)&scratch, scratchSize);
}
/** HUF_readCTable() :
 *  Loading a CTable saved with HUF_writeCTable().
 *  Decodes the serialized weights, then rebuilds canonical code lengths and
 *  code values. On success updates *maxSymbolValuePtr to the actual max symbol
 *  and sets *hasZeroWeights when any symbol is absent (weight 0).
 *  @return : number of bytes read from src, or an error code. */
public static nuint HUF_readCTable(HUF_CElt_s *CTable, uint *maxSymbolValuePtr, void *src, nuint srcSize, uint *hasZeroWeights)
{
    byte *huffWeight = stackalloc byte[256];
    uint *rankVal = stackalloc uint[16];
    uint tableLog = 0;
    uint nbSymbols = 0;
    // Parse the weight stream (also fills rankVal = count of symbols per weight).
    nuint readSize = HUF_readStats((byte *)huffWeight, (nuint)(255 + 1), (uint *)rankVal, &nbSymbols, &tableLog, src, srcSize);
    if ((ERR_isError(readSize)) != 0)
    {
        return(readSize);
    }
    // Weight 0 means "symbol absent".
    *hasZeroWeights = (((rankVal[0] > 0)) ? 1U : 0U);
    if (tableLog > 12)
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_tableLog_tooLarge)));
    }
    if (nbSymbols > *maxSymbolValuePtr + 1)
    {
        return(unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_maxSymbolValue_tooSmall)));
    }
    // Convert per-weight counts into per-weight starting offsets.
    {
        uint n, nextRankStart = 0;
        for (n = 1; n <= tableLog; n++)
        {
            uint curr = nextRankStart;
            nextRankStart += (rankVal[n] << (int)(n - 1));
            rankVal[n] = curr;
        }
    }
    // weight -> code length: nbBits = tableLog + 1 - w, with w == 0 kept at 0
    // via the branchless mask (-1 when w != 0, 0 otherwise).
    {
        uint n;
        for (n = 0; n < nbSymbols; n++)
        {
            uint w = huffWeight[n];
            CTable[n].nbBits = (byte)(unchecked ((byte)(tableLog + 1 - w) & -((w != 0) ? 1 : 0)));
        }
    }
    // Assign canonical code values per bit length.
    {
        ushort *nbPerRank = stackalloc ushort[14];
        memset(nbPerRank, 0, sizeof(ushort) * 14);
        ushort *valPerRank = stackalloc ushort[14];
        memset(valPerRank, 0, sizeof(ushort) * 14);
        {
            uint n;
            for (n = 0; n < nbSymbols; n++)
            {
                nbPerRank[CTable[n].nbBits]++;
            }
        }
        valPerRank[tableLog + 1] = 0;  // bucket for nbBits == 0 (unused symbols)
        // Starting code per length, walking from longest to shortest codes.
        {
            ushort min = 0;
            uint n;
            for (n = tableLog; n > 0; n--)
            {
                valPerRank[n] = min;
                min += (ushort)(nbPerRank[n]);
                min >>= 1;
            }
        }
        {
            uint n;
            for (n = 0; n < nbSymbols; n++)
            {
                CTable[n].val = valPerRank[CTable[n].nbBits]++;
            }
        }
    }
    *maxSymbolValuePtr = nbSymbols - 1;
    return(readSize);
}
/** ZSTD_compressSubBlock_literal() :
 *  Compresses literals section for a sub-block.
 *  When we have to write the Huffman table we will sometimes choose a header
 *  size larger than necessary. This is because we have to pick the header size
 *  before we know the table size + compressed size, so we have a bound on the
 *  table size. If we guessed incorrectly, we fall back to uncompressed literals.
 *
 *  We write the header when writeEntropy=1 and set entropyWritten=1 when we succeeded
 *  in writing the header, otherwise it is set to 0.
 *
 *  hufMetadata->hType has literals block type info.
 *  If it is set_basic, all sub-blocks literals section will be Raw_Literals_Block.
 *  If it is set_rle, all sub-blocks literals section will be RLE_Literals_Block.
 *  If it is set_compressed, first sub-block's literals section will be Compressed_Literals_Block
 *  and the following sub-blocks' literals sections will be Treeless_Literals_Block.
 *  @return : compressed size of literals section of a sub-block
 *            Or 0 if it unable to compress.
 *            Or error code */
private static nuint ZSTD_compressSubBlock_literal(HUF_CElt_s *hufTable, ZSTD_hufCTablesMetadata_t *hufMetadata, byte *literals, nuint litSize, void *dst, nuint dstSize, int bmi2, int writeEntropy, int *entropyWritten)
{
    // Upper bound (200 bytes) on the Huffman description when we must emit it.
    nuint header = (nuint)(writeEntropy != 0 ? 200 : 0);
    // Header is 3, 4 or 5 bytes depending on how large litSize could make the fields.
    nuint lhSize = (nuint)(3 + ((litSize >= ((uint)(1 * (1 << 10)) - header)) ? 1 : 0) + ((litSize >= ((uint)(16 * (1 << 10)) - header)) ? 1 : 0));
    byte *ostart = (byte *)(dst);
    byte *oend = ostart + dstSize;
    byte *op = ostart + lhSize;
    // Small blocks (3-byte header) use a single Huffman stream.
    uint singleStream = ((lhSize == 3) ? 1U : 0U);
    // Repeated-tree sub-blocks are marked set_repeat in the header.
    symbolEncodingType_e hType = writeEntropy != 0 ? hufMetadata->hType : symbolEncodingType_e.set_repeat;
    nuint cLitSize = 0;
    *entropyWritten = 0;
    if (litSize == 0 || hufMetadata->hType == symbolEncodingType_e.set_basic)
    {
        return(ZSTD_noCompressLiterals(dst, dstSize, (void *)literals, litSize));
    }
    else if (hufMetadata->hType == symbolEncodingType_e.set_rle)
    {
        return(ZSTD_compressRleLiteralsBlock(dst, dstSize, (void *)literals, litSize));
    }
    assert(litSize > 0);
    assert(hufMetadata->hType == symbolEncodingType_e.set_compressed || hufMetadata->hType == symbolEncodingType_e.set_repeat);
    // Emit the Huffman table description first when this sub-block owns the tree.
    if (writeEntropy != 0 && hufMetadata->hType == symbolEncodingType_e.set_compressed)
    {
        memcpy((void *)(op), (void *)(hufMetadata->hufDesBuffer), (hufMetadata->hufDesSize));
        op += hufMetadata->hufDesSize;
        cLitSize += hufMetadata->hufDesSize;
    }
    {
        nuint cSize = singleStream != 0 ? HUF_compress1X_usingCTable((void *)op, (nuint)(oend - op), (void *)literals, litSize, hufTable) : HUF_compress4X_usingCTable((void *)op, (nuint)(oend - op), (void *)literals, litSize, hufTable);
        op += cSize;
        cLitSize += cSize;
        if (cSize == 0 || (ERR_isError(cSize)) != 0)
        {
            return(0);  // caller will fall back to raw literals
        }
        // Without the table, compression must strictly beat raw to be worth it.
        if (writeEntropy == 0 && cLitSize >= litSize)
        {
            return(ZSTD_noCompressLiterals(dst, dstSize, (void *)literals, litSize));
        }
        // Header-size guess was too small for the actual compressed size: fall back.
        if (lhSize < (nuint)(3 + ((cLitSize >= (uint)(1 * (1 << 10))) ? 1 : 0) + ((cLitSize >= (uint)(16 * (1 << 10))) ? 1 : 0)))
        {
            assert(cLitSize > litSize);
            return(ZSTD_noCompressLiterals(dst, dstSize, (void *)literals, litSize));
        }
    }
    // Build the literals section header; field widths depend on lhSize.
    switch (lhSize)
    {
        case 3:
        {
            uint lhc = (uint)(hType + ((singleStream == 0 ? 1 : 0) << 2)) + ((uint)(litSize) << 4) + ((uint)(cLitSize) << 14);
            MEM_writeLE24((void *)ostart, lhc);
            break;
        }
        case 4:
        {
            uint lhc = (uint)(hType + (2 << 2)) + ((uint)(litSize) << 4) + ((uint)(cLitSize) << 18);
            MEM_writeLE32((void *)ostart, lhc);
            break;
        }
        case 5:
        {
            uint lhc = (uint)(hType + (3 << 2)) + ((uint)(litSize) << 4) + ((uint)(cLitSize) << 22);
            MEM_writeLE32((void *)ostart, lhc);
            ostart[4] = (byte)(cLitSize >> 10);  // overflow byte of cLitSize
            break;
        }
        default:
        {
            assert(0 != 0);  // impossible: lhSize is always 3, 4 or 5
        }
        break;
    }
    *entropyWritten = 1;
    return((nuint)(op - ostart));
}
/** Analyzes the training samples compressed against the candidate dictionary
 *  content, then serializes the resulting entropy tables (literal Huffman table,
 *  offset / match-length / literal-length FSE tables, and the 3 starting rep
 *  offsets) into dstBuffer. Uses goto-based cleanup for the allocated resources.
 *  @return : size written to dstBuffer, or an error code. */
private static nuint ZDICT_analyzeEntropy(void *dstBuffer, nuint maxDstSize, int compressionLevel, void *srcBuffer, nuint *fileSizes, uint nbFiles, void *dictBuffer, nuint dictBufferSize, uint notificationLevel)
{
    uint * countLit = stackalloc uint[256];
    HUF_CElt_s * hufTable = stackalloc HUF_CElt_s[256];
    uint * offcodeCount = stackalloc uint[31];
    short * offcodeNCount = stackalloc short[31];
    // Largest offset code reachable given dictionary + 128 KB window.
    uint offcodeMax = ZSTD_highbit32((uint)(dictBufferSize + (uint)(128 * (1 << 10))));
    uint * matchLengthCount = stackalloc uint[53];
    short * matchLengthNCount = stackalloc short[53];
    uint * litLengthCount = stackalloc uint[36];
    short * litLengthNCount = stackalloc short[36];
    uint * repOffset = stackalloc uint[1024];
    offsetCount_t *bestRepOffset = stackalloc offsetCount_t[4];
    EStats_ress_t esr = new EStats_ress_t { dict = null, zc = null, workPlace = null, };
    ZSTD_parameters @params;
    uint u, huffLog = 11, Offlog = 8, mlLog = 9, llLog = 9, total;
    nuint pos = 0, errorCode;
    nuint eSize = 0;
    nuint totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);
    // Guard against division by zero when there are no samples.
    nuint averageSampleSize = totalSrcSize / (nbFiles + (uint)(nbFiles == 0 ? 1 : 0));
    byte * dstPtr = (byte *)(dstBuffer);
    if (offcodeMax > 30)
    {
        eSize = (unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dictionaryCreation_failed)));
        goto _cleanup;
    }
    // Initialize all counts to 1 so no symbol ends up with zero probability.
    for (u = 0; u < 256; u++) { countLit[u] = 1; }
    for (u = 0; u <= offcodeMax; u++) { offcodeCount[u] = 1; }
    for (u = 0; u <= 52; u++) { matchLengthCount[u] = 1; }
    for (u = 0; u <= 35; u++) { litLengthCount[u] = 1; }
    memset((void *)repOffset, 0, (nuint)(sizeof(uint) * 1024));
    repOffset[1] = repOffset[4] = repOffset[8] = 1;  // seed plausible rep offsets
    memset((void *)bestRepOffset, 0, (nuint)(sizeof(offsetCount_t) * 4));
    if (compressionLevel == 0) { compressionLevel = 3; }  // default level
    @params = ZSTD_getParams(compressionLevel, (ulong)averageSampleSize, dictBufferSize);
    // Resources for the per-sample statistics pass (released at _cleanup).
    esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dictLoadMethod_e.ZSTD_dlm_byRef, ZSTD_dictContentType_e.ZSTD_dct_rawContent, @params.cParams, ZSTD_defaultCMem);
    esr.zc = ZSTD_createCCtx();
    esr.workPlace = malloc((nuint)((1 << 17)));
    if (esr.dict == null || esr.zc == null || esr.workPlace == null)
    {
        eSize = (unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_memory_allocation)));
        goto _cleanup;
    }
    // Accumulate entropy statistics over every sample.
    for (u = 0; u < nbFiles; u++)
    {
        ZDICT_countEStats(esr, &@params, (uint *)countLit, (uint *)offcodeCount, (uint *)matchLengthCount, (uint *)litLengthCount, (uint *)repOffset, (void *)((sbyte *)(srcBuffer) + pos), fileSizes[u], notificationLevel);
        pos += fileSizes[u];
    }
    // Build the literals Huffman table.
    {
        nuint maxNbBits = HUF_buildCTable((HUF_CElt_s *)hufTable, (uint *)countLit, 255, huffLog);
        if ((ERR_isError(maxNbBits)) != 0)
        {
            eSize = maxNbBits;
            goto _cleanup;
        }
        if (maxNbBits == 8)
        {
            // 8 bits for all symbols means "not compressible"; flatten the
            // distribution to force a valid 9-bit table instead.
            ZDICT_flatLit((uint *)countLit);
            maxNbBits = HUF_buildCTable((HUF_CElt_s *)hufTable, (uint *)countLit, 255, huffLog);
            assert(maxNbBits == 9);
        }
        huffLog = (uint)(maxNbBits);
    }
    // Rank observed repeat offsets (result in bestRepOffset, currently informational).
    {
        uint offset;
        for (offset = 1; offset < 1024; offset++)
        {
            ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);
        }
    }
    // Normalize the three FSE distributions (offsets, match lengths, literal lengths).
    total = 0;
    for (u = 0; u <= offcodeMax; u++) { total += offcodeCount[u]; }
    errorCode = FSE_normalizeCount((short *)offcodeNCount, Offlog, (uint *)offcodeCount, total, offcodeMax, 1);
    if ((ERR_isError(errorCode)) != 0) { eSize = errorCode; goto _cleanup; }
    Offlog = (uint)(errorCode);
    total = 0;
    for (u = 0; u <= 52; u++) { total += matchLengthCount[u]; }
    errorCode = FSE_normalizeCount((short *)matchLengthNCount, mlLog, (uint *)matchLengthCount, total, 52, 1);
    if ((ERR_isError(errorCode)) != 0) { eSize = errorCode; goto _cleanup; }
    mlLog = (uint)(errorCode);
    total = 0;
    for (u = 0; u <= 35; u++) { total += litLengthCount[u]; }
    errorCode = FSE_normalizeCount((short *)litLengthNCount, llLog, (uint *)litLengthCount, total, 35, 1);
    if ((ERR_isError(errorCode)) != 0) { eSize = errorCode; goto _cleanup; }
    llLog = (uint)(errorCode);
    // Serialize the four entropy tables into the dictionary buffer.
    {
        nuint hhSize = HUF_writeCTable((void *)dstPtr, maxDstSize, (HUF_CElt_s *)hufTable, 255, huffLog);
        if ((ERR_isError(hhSize)) != 0) { eSize = hhSize; goto _cleanup; }
        dstPtr += hhSize; maxDstSize -= hhSize; eSize += hhSize;
    }
    {
        nuint ohSize = FSE_writeNCount((void *)dstPtr, maxDstSize, (short *)offcodeNCount, 30, Offlog);
        if ((ERR_isError(ohSize)) != 0) { eSize = ohSize; goto _cleanup; }
        dstPtr += ohSize; maxDstSize -= ohSize; eSize += ohSize;
    }
    {
        nuint mhSize = FSE_writeNCount((void *)dstPtr, maxDstSize, (short *)matchLengthNCount, 52, mlLog);
        if ((ERR_isError(mhSize)) != 0) { eSize = mhSize; goto _cleanup; }
        dstPtr += mhSize; maxDstSize -= mhSize; eSize += mhSize;
    }
    {
        nuint lhSize = FSE_writeNCount((void *)dstPtr, maxDstSize, (short *)litLengthNCount, 35, llLog);
        if ((ERR_isError(lhSize)) != 0) { eSize = lhSize; goto _cleanup; }
        dstPtr += lhSize; maxDstSize -= lhSize; eSize += lhSize;
    }
    // Trailing 12 bytes: the three starting repeat offsets.
    if (maxDstSize < 12)
    {
        eSize = (unchecked ((nuint)(-(int)ZSTD_ErrorCode.ZSTD_error_dstSize_tooSmall)));
        goto _cleanup;
    }
    MEM_writeLE32((void *)(dstPtr + 0), repStartValue[0]);
    MEM_writeLE32((void *)(dstPtr + 4), repStartValue[1]);
    MEM_writeLE32((void *)(dstPtr + 8), repStartValue[2]);
    eSize += 12;
_cleanup:
    ZSTD_freeCDict(esr.dict);
    ZSTD_freeCCtx(esr.zc);
    free(esr.workPlace);
    return(eSize);
}