ext/zstdruby/libzstd/compress/zstd_fast.c in zstd-ruby-1.5.5.1 vs ext/zstdruby/libzstd/compress/zstd_fast.c in zstd-ruby-1.5.6.0

- old (zstd-ruby-1.5.5.1)
+ new (zstd-ruby-1.5.6.0)

@@ -9,11 +9,13 @@
  */
 
 #include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
 #include "zstd_fast.h"
 
-static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
                         const void* const end,
                         ZSTD_dictTableLoadMethod_e dtlm)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
@@ -44,11 +46,13 @@
             if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {  /* not yet filled */
                 ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
             }   }   }   }
 }
 
-static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
                         const void* const end,
                         ZSTD_dictTableLoadMethod_e dtlm)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
@@ -137,12 +141,13 @@
  * N   | H T
  * N+1 |   H
  *
  * This is also the work we do at the beginning to enter the loop initially.
  */
-FORCE_INLINE_TEMPLATE size_t
-ZSTD_compressBlock_fast_noDict_generic(
+FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_fast_noDict_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize,
         U32 const mls, U32 const hasStep)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -454,10 +459,11 @@
         }
     }
 }
 
 FORCE_INLINE_TEMPLATE
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
 size_t ZSTD_compressBlock_fast_dictMatchState_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
@@ -500,11 +506,11 @@
      * when translating a dict index into a local index */
     assert(prefixStartIndex >= (U32)(dictEnd - dictBase));
 
     if (ms->prefetchCDictTables) {
         size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
-        PREFETCH_AREA(dictHashTable, hashTableBytes)
+        PREFETCH_AREA(dictHashTable, hashTableBytes);
     }
 
     /* init */
     DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
     ip0 += (dictAndPrefixLength == 0);
@@ -679,10 +685,12 @@
         return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
     }
 }
 
-static size_t ZSTD_compressBlock_fast_extDict_generic(
+static
+ZSTD_ALLOW_POINTER_OVERFLOW_ATTR
+size_t ZSTD_compressBlock_fast_extDict_generic(
         ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
         void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
 {
     const ZSTD_compressionParameters* const cParams = &ms->cParams;
     U32* const hashTable = ms->hashTable;
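Note on the recurring change: every hunk except @@ -500,11 +506,11 @@ applies the same annotation. The fast-mode hash-fill and block-compression workhorses intentionally perform pointer arithmetic that can wrap, and the bundled zstd 1.5.6 sources mark those functions with ZSTD_ALLOW_POINTER_OVERFLOW_ATTR so that sanitizer (UBSan) builds can exempt that intentional overflow. The sketch below is a minimal illustration of how such an attribute macro is conventionally defined and placed; the macro and function names are hypothetical and zstd's real definition lives in lib/common/compiler.h, so treat this as an assumption rather than the library's exact code.

    /* Sketch only: a conventional "allow pointer overflow" attribute macro.
     * ALLOW_PTR_OVERFLOW and hypothetical_sum are illustrative names, not
     * zstd's. */
    #include <stddef.h>

    #if defined(__clang__) && defined(__has_attribute)
    #  if __has_attribute(no_sanitize)
         /* Tell UBSan to skip pointer-overflow checks inside the annotated
          * function, where wrapping arithmetic is intentional. */
    #    define ALLOW_PTR_OVERFLOW __attribute__((no_sanitize("pointer-overflow")))
    #  endif
    #endif
    #ifndef ALLOW_PTR_OVERFLOW
    #  define ALLOW_PTR_OVERFLOW  /* expands to nothing elsewhere */
    #endif

    /* Placement mirrors the diff: the attribute sits on its own line between
     * the storage class (or FORCE_INLINE_TEMPLATE) and the return type. */
    static
    ALLOW_PTR_OVERFLOW
    size_t hypothetical_sum(const unsigned char* ip, const unsigned char* iend)
    {
        size_t total = 0;
        /* Stand-in loop; zstd's real scan loops are where wrapping occurs. */
        while (ip < iend) { total += *ip++; }
        return total;
    }

Giving the attribute its own line keeps the annotation greppable and lets the macro expand to nothing cleanly on compilers without the attribute. The one functional outlier is the @@ -500,11 +506,11 @@ hunk, which only adds the statement-terminating semicolon that the PREFETCH_AREA(dictHashTable, hashTableBytes) call site had been missing.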