Mirror of https://github.com/opencv/opencv.git
0de26fd78e
Zlib-ng is a zlib replacement with optimizations for "next generation" systems. Its optimizations may benefit the decode and encode speed of image libraries such as libpng. In our tests, using the zlib-ng and libpng combination on an x86_64 machine with AVX2, the time of `imdecode` and `imencode` dropped by approximately 20%. This patch enables zlib-ng's optimizations if `CV_DISABLE_OPTIMIZATION` is OFF. Since zlib-ng can dispatch intrinsics on the fly, the porting work is much easier. Related discussion: https://github.com/opencv/opencv/issues/22573
364 lines
13 KiB
C

/*
 * Compute the CRC32 using a parallelized folding approach with the PCLMULQDQ
 * instruction.
 *
 * A white paper describing this algorithm can be found at:
 * doc/crc-pclmulqdq.pdf
 *
 * Copyright (C) 2013 Intel Corporation. All rights reserved.
 * Copyright (C) 2016 Marian Beermann (support for initial value)
 * Authors:
 *     Wajdi Feghali   <wajdi.k.feghali@intel.com>
 *     Jim Guilford    <james.guilford@intel.com>
 *     Vinodh Gopal    <vinodh.gopal@intel.com>
 *     Erdinc Ozturk   <erdinc.ozturk@intel.com>
 *     Jim Kukunas     <james.t.kukunas@linux.intel.com>
 *
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

#include "../../zbuild.h"

#include <immintrin.h>
#include <wmmintrin.h>
#include <smmintrin.h> // _mm_extract_epi32
#ifdef X86_VPCLMULQDQ
# include <immintrin.h>
#endif

#include "../../crc32_fold.h"
#include "../../crc32_braid_p.h"
#include "x86_intrins.h"
#include <assert.h>

#ifdef X86_VPCLMULQDQ
static size_t fold_16_vpclmulqdq(__m128i *xmm_crc0, __m128i *xmm_crc1,
    __m128i *xmm_crc2, __m128i *xmm_crc3, const uint8_t *src, size_t len, __m128i init_crc,
    int32_t first);
static size_t fold_16_vpclmulqdq_copy(__m128i *xmm_crc0, __m128i *xmm_crc1,
    __m128i *xmm_crc2, __m128i *xmm_crc3, uint8_t *dst, const uint8_t *src, size_t len);
#endif

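/*
 * fold_1..fold_4 advance the 4 x 128-bit folding state by 1..4 blocks of
 * 16 bytes. xmm_fold4 packs the two carry-less multiplication constants
 * that fold a lane forward by 512 bits (the fold-by-4 constants from the
 * Intel white paper). For each folded lane, _mm_clmulepi64_si128 with
 * immediate 0x01 multiplies the lane's high qword by the low constant,
 * and immediate 0x10 multiplies the lane's low qword by the high constant;
 * XORing the two products gives the folded lane. The casts to __m128 only
 * steer the XOR into the float domain (xorps instead of pxor); the
 * operation is a plain bitwise XOR either way.
 */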
static void fold_1(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
                                             0x00000001, 0xc6e41596);
    __m128i x_tmp3;
    __m128 ps_crc0, ps_crc3, ps_res;

    x_tmp3 = *xmm_crc3;

    *xmm_crc3 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_res = _mm_xor_ps(ps_crc0, ps_crc3);

    *xmm_crc0 = *xmm_crc1;
    *xmm_crc1 = *xmm_crc2;
    *xmm_crc2 = x_tmp3;
    *xmm_crc3 = _mm_castps_si128(ps_res);
}

static void fold_2(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
                                             0x00000001, 0xc6e41596);
    __m128i x_tmp3, x_tmp2;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res31, ps_res20;

    x_tmp3 = *xmm_crc3;
    x_tmp2 = *xmm_crc2;

    *xmm_crc3 = *xmm_crc1;
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_res31 = _mm_xor_ps(ps_crc3, ps_crc1);

    *xmm_crc2 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_res20 = _mm_xor_ps(ps_crc0, ps_crc2);

    *xmm_crc0 = x_tmp2;
    *xmm_crc1 = x_tmp3;
    *xmm_crc2 = _mm_castps_si128(ps_res20);
    *xmm_crc3 = _mm_castps_si128(ps_res31);
}

static void fold_3(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
                                             0x00000001, 0xc6e41596);
    __m128i x_tmp3;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3, ps_res32, ps_res21, ps_res10;

    x_tmp3 = *xmm_crc3;

    *xmm_crc3 = *xmm_crc2;
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x10);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_res32 = _mm_xor_ps(ps_crc2, ps_crc3);

    *xmm_crc2 = *xmm_crc1;
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x10);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_res21 = _mm_xor_ps(ps_crc1, ps_crc2);

    *xmm_crc1 = *xmm_crc0;
    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_res10 = _mm_xor_ps(ps_crc0, ps_crc1);

    *xmm_crc0 = x_tmp3;
    *xmm_crc1 = _mm_castps_si128(ps_res10);
    *xmm_crc2 = _mm_castps_si128(ps_res21);
    *xmm_crc3 = _mm_castps_si128(ps_res32);
}

static void fold_4(__m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2, __m128i *xmm_crc3) {
    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
                                             0x00000001, 0xc6e41596);
    __m128i x_tmp0, x_tmp1, x_tmp2, x_tmp3;
    __m128 ps_crc0, ps_crc1, ps_crc2, ps_crc3;
    __m128 ps_t0, ps_t1, ps_t2, ps_t3;
    __m128 ps_res0, ps_res1, ps_res2, ps_res3;

    x_tmp0 = *xmm_crc0;
    x_tmp1 = *xmm_crc1;
    x_tmp2 = *xmm_crc2;
    x_tmp3 = *xmm_crc3;

    *xmm_crc0 = _mm_clmulepi64_si128(*xmm_crc0, xmm_fold4, 0x01);
    x_tmp0 = _mm_clmulepi64_si128(x_tmp0, xmm_fold4, 0x10);
    ps_crc0 = _mm_castsi128_ps(*xmm_crc0);
    ps_t0 = _mm_castsi128_ps(x_tmp0);
    ps_res0 = _mm_xor_ps(ps_crc0, ps_t0);

    *xmm_crc1 = _mm_clmulepi64_si128(*xmm_crc1, xmm_fold4, 0x01);
    x_tmp1 = _mm_clmulepi64_si128(x_tmp1, xmm_fold4, 0x10);
    ps_crc1 = _mm_castsi128_ps(*xmm_crc1);
    ps_t1 = _mm_castsi128_ps(x_tmp1);
    ps_res1 = _mm_xor_ps(ps_crc1, ps_t1);

    *xmm_crc2 = _mm_clmulepi64_si128(*xmm_crc2, xmm_fold4, 0x01);
    x_tmp2 = _mm_clmulepi64_si128(x_tmp2, xmm_fold4, 0x10);
    ps_crc2 = _mm_castsi128_ps(*xmm_crc2);
    ps_t2 = _mm_castsi128_ps(x_tmp2);
    ps_res2 = _mm_xor_ps(ps_crc2, ps_t2);

    *xmm_crc3 = _mm_clmulepi64_si128(*xmm_crc3, xmm_fold4, 0x01);
    x_tmp3 = _mm_clmulepi64_si128(x_tmp3, xmm_fold4, 0x10);
    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    ps_t3 = _mm_castsi128_ps(x_tmp3);
    ps_res3 = _mm_xor_ps(ps_crc3, ps_t3);

    *xmm_crc0 = _mm_castps_si128(ps_res0);
    *xmm_crc1 = _mm_castps_si128(ps_res1);
    *xmm_crc2 = _mm_castps_si128(ps_res2);
    *xmm_crc3 = _mm_castps_si128(ps_res3);
}

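/*
 * PSHUFB byte-shift masks, indexed by 4 * (len - 1). The entry for len
 * shifts a 128-bit register left by 16 - len bytes; XORing the mask with
 * 0x80808080 turns it into a right shift by len bytes, because PSHUFB
 * zeroes every destination byte whose mask byte has its high bit set.
 */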
static const unsigned ALIGNED_(32) pshufb_shf_table[60] = {
    0x84838281, 0x88878685, 0x8c8b8a89, 0x008f8e8d, /* shl 15 (16 - 1)/shr1 */
    0x85848382, 0x89888786, 0x8d8c8b8a, 0x01008f8e, /* shl 14 (16 - 2)/shr2 */
    0x86858483, 0x8a898887, 0x8e8d8c8b, 0x0201008f, /* shl 13 (16 - 3)/shr3 */
    0x87868584, 0x8b8a8988, 0x8f8e8d8c, 0x03020100, /* shl 12 (16 - 4)/shr4 */
    0x88878685, 0x8c8b8a89, 0x008f8e8d, 0x04030201, /* shl 11 (16 - 5)/shr5 */
    0x89888786, 0x8d8c8b8a, 0x01008f8e, 0x05040302, /* shl 10 (16 - 6)/shr6 */
    0x8a898887, 0x8e8d8c8b, 0x0201008f, 0x06050403, /* shl  9 (16 - 7)/shr7 */
    0x8b8a8988, 0x8f8e8d8c, 0x03020100, 0x07060504, /* shl  8 (16 - 8)/shr8 */
    0x8c8b8a89, 0x008f8e8d, 0x04030201, 0x08070605, /* shl  7 (16 - 9)/shr9 */
    0x8d8c8b8a, 0x01008f8e, 0x05040302, 0x09080706, /* shl  6 (16 - 10)/shr10 */
    0x8e8d8c8b, 0x0201008f, 0x06050403, 0x0a090807, /* shl  5 (16 - 11)/shr11 */
    0x8f8e8d8c, 0x03020100, 0x07060504, 0x0b0a0908, /* shl  4 (16 - 12)/shr12 */
    0x008f8e8d, 0x04030201, 0x08070605, 0x0c0b0a09, /* shl  3 (16 - 13)/shr13 */
    0x01008f8e, 0x05040302, 0x09080706, 0x0d0c0b0a, /* shl  2 (16 - 14)/shr14 */
    0x0201008f, 0x06050403, 0x0a090807, 0x0e0d0c0b  /* shl  1 (16 - 15)/shr15 */
};

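/*
 * Fold a trailing partial block of len bytes (1 <= len < 16) into the
 * state. The shuffle masks shift the whole 64-byte state down by len
 * bytes so the partial input in *xmm_crc_part can be merged into lane 3,
 * while the len bytes shifted out of lane 0 are folded forward by 512
 * bits (xmm_fold4) and XORed back into lane 3.
 */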
static void partial_fold(const size_t len, __m128i *xmm_crc0, __m128i *xmm_crc1, __m128i *xmm_crc2,
                         __m128i *xmm_crc3, __m128i *xmm_crc_part) {
    const __m128i xmm_fold4 = _mm_set_epi32( 0x00000001, 0x54442bd4,
                                             0x00000001, 0xc6e41596);
    const __m128i xmm_mask3 = _mm_set1_epi32((int32_t)0x80808080);

    __m128i xmm_shl, xmm_shr, xmm_tmp1, xmm_tmp2, xmm_tmp3;
    __m128i xmm_a0_0, xmm_a0_1;
    __m128 ps_crc3, psa0_0, psa0_1, ps_res;

    xmm_shl = _mm_load_si128((__m128i *)(pshufb_shf_table + (4 * (len - 1))));
    xmm_shr = xmm_shl;
    xmm_shr = _mm_xor_si128(xmm_shr, xmm_mask3);

    xmm_a0_0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shl);

    *xmm_crc0 = _mm_shuffle_epi8(*xmm_crc0, xmm_shr);
    xmm_tmp1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shl);
    *xmm_crc0 = _mm_or_si128(*xmm_crc0, xmm_tmp1);

    *xmm_crc1 = _mm_shuffle_epi8(*xmm_crc1, xmm_shr);
    xmm_tmp2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shl);
    *xmm_crc1 = _mm_or_si128(*xmm_crc1, xmm_tmp2);

    *xmm_crc2 = _mm_shuffle_epi8(*xmm_crc2, xmm_shr);
    xmm_tmp3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shl);
    *xmm_crc2 = _mm_or_si128(*xmm_crc2, xmm_tmp3);

    *xmm_crc3 = _mm_shuffle_epi8(*xmm_crc3, xmm_shr);
    *xmm_crc_part = _mm_shuffle_epi8(*xmm_crc_part, xmm_shl);
    *xmm_crc3 = _mm_or_si128(*xmm_crc3, *xmm_crc_part);

    xmm_a0_1 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x10);
    xmm_a0_0 = _mm_clmulepi64_si128(xmm_a0_0, xmm_fold4, 0x01);

    ps_crc3 = _mm_castsi128_ps(*xmm_crc3);
    psa0_0 = _mm_castsi128_ps(xmm_a0_0);
    psa0_1 = _mm_castsi128_ps(xmm_a0_1);

    ps_res = _mm_xor_ps(ps_crc3, psa0_0);
    ps_res = _mm_xor_ps(ps_res, psa0_1);

    *xmm_crc3 = _mm_castps_si128(ps_res);
}

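/*
 * Helpers to move the 64-byte folding state between crc->fold and xmm
 * registers. The aligned loads rely on the crc32_fold state being 16-byte
 * aligned, as with the ALIGNED_(16) declaration in CRC32() below.
 */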
static inline void crc32_fold_load(__m128i *fold, __m128i *fold0, __m128i *fold1, __m128i *fold2, __m128i *fold3) {
    *fold0 = _mm_load_si128(fold + 0);
    *fold1 = _mm_load_si128(fold + 1);
    *fold2 = _mm_load_si128(fold + 2);
    *fold3 = _mm_load_si128(fold + 3);
}

static inline void crc32_fold_save(__m128i *fold, const __m128i *fold0, const __m128i *fold1,
                                   const __m128i *fold2, const __m128i *fold3) {
    _mm_storeu_si128(fold + 0, *fold0);
    _mm_storeu_si128(fold + 1, *fold1);
    _mm_storeu_si128(fold + 2, *fold2);
    _mm_storeu_si128(fold + 3, *fold3);
}

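/*
 * The constant 0x9db42487 is the conventional CRC-32 initial value
 * 0xffffffff pre-conditioned for the folded representation: seeding lane 0
 * with it and zeroing the other lanes makes the final reduction equivalent
 * to a byte-wise CRC started at 0xffffffff.
 */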
Z_INTERNAL uint32_t CRC32_FOLD_RESET(crc32_fold *crc) {
    __m128i xmm_crc0 = _mm_cvtsi32_si128(0x9db42487);
    __m128i xmm_zero = _mm_setzero_si128();
    crc32_fold_save((__m128i *)crc->fold, &xmm_crc0, &xmm_zero, &xmm_zero, &xmm_zero);
    return 0;
}

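/*
 * XOR_INITIAL128/XOR_INITIAL512 mix the caller's starting CRC into the
 * first vector of input exactly once: ONCE clears `first` on its first
 * use, so subsequent loop iterations leave the data untouched.
 */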
#define ONCE(op) if (first) { first = 0; op; }
#define XOR_INITIAL128(where) ONCE(where = _mm_xor_si128(where, xmm_initial))
#ifdef X86_VPCLMULQDQ
# define XOR_INITIAL512(where) ONCE(where = _mm512_xor_si512(where, zmm_initial))
#endif

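/*
 * The fold loops live in template headers that are included twice: the
 * first pass generates the plain checksum variants, and the second pass,
 * with COPY defined, generates the variants that copy the input to the
 * destination buffer while checksumming it.
 */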
#ifdef X86_VPCLMULQDQ
# include "crc32_fold_vpclmulqdq_tpl.h"
#endif
#include "crc32_fold_pclmulqdq_tpl.h"
#define COPY
#ifdef X86_VPCLMULQDQ
# include "crc32_fold_vpclmulqdq_tpl.h"
#endif
#include "crc32_fold_pclmulqdq_tpl.h"

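/*
 * Final-reduction constants from the Intel white paper: rk1/rk2 fold the
 * four lanes together 128 bits at a time, rk5/rk6 reduce 128 bits to 64,
 * and rk7/rk8 drive the Barrett reduction that yields the 32-bit remainder.
 */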
static const unsigned ALIGNED_(16) crc_k[] = {
    0xccaa009e, 0x00000000, /* rk1 */
    0x751997d0, 0x00000001, /* rk2 */
    0xccaa009e, 0x00000000, /* rk5 */
    0x63cd6124, 0x00000001, /* rk6 */
    0xf7011640, 0x00000001, /* rk7 */
    0xdb710640, 0x00000001  /* rk8 */
};

static const unsigned ALIGNED_(16) crc_mask[4] = {
    0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000
};

static const unsigned ALIGNED_(16) crc_mask2[4] = {
    0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF
};

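/*
 * Reduce the 4 x 128-bit state to the final 32-bit CRC in three steps,
 * matching the k1/k5/k7 markers below: fold the four lanes into one with
 * rk1/rk2, reduce 128 bits to 64 with rk5/rk6, then Barrett-reduce with
 * rk7/rk8, extract the 32-bit remainder and complement it.
 */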
Z_INTERNAL uint32_t CRC32_FOLD_FINAL(crc32_fold *crc) {
    const __m128i xmm_mask  = _mm_load_si128((__m128i *)crc_mask);
    const __m128i xmm_mask2 = _mm_load_si128((__m128i *)crc_mask2);
    __m128i xmm_crc0, xmm_crc1, xmm_crc2, xmm_crc3;
    __m128i x_tmp0, x_tmp1, x_tmp2, crc_fold;

    crc32_fold_load((__m128i *)crc->fold, &xmm_crc0, &xmm_crc1, &xmm_crc2, &xmm_crc3);

    /*
     * k1
     */
    crc_fold = _mm_load_si128((__m128i *)crc_k);

    x_tmp0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x10);
    xmm_crc0 = _mm_clmulepi64_si128(xmm_crc0, crc_fold, 0x01);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, x_tmp0);
    xmm_crc1 = _mm_xor_si128(xmm_crc1, xmm_crc0);

    x_tmp1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x10);
    xmm_crc1 = _mm_clmulepi64_si128(xmm_crc1, crc_fold, 0x01);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, x_tmp1);
    xmm_crc2 = _mm_xor_si128(xmm_crc2, xmm_crc1);

    x_tmp2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x10);
    xmm_crc2 = _mm_clmulepi64_si128(xmm_crc2, crc_fold, 0x01);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, x_tmp2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);

    /*
     * k5
     */
    crc_fold = _mm_load_si128((__m128i *)(crc_k + 4));

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc0 = _mm_srli_si128(xmm_crc0, 8);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);

    xmm_crc0 = xmm_crc3;
    xmm_crc3 = _mm_slli_si128(xmm_crc3, 4);
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc0);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask2);

    /*
     * k7
     */
    xmm_crc1 = xmm_crc3;
    xmm_crc2 = xmm_crc3;
    crc_fold = _mm_load_si128((__m128i *)(crc_k + 8));

    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_and_si128(xmm_crc3, xmm_mask);

    xmm_crc2 = xmm_crc3;
    xmm_crc3 = _mm_clmulepi64_si128(xmm_crc3, crc_fold, 0x10);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc2);
    xmm_crc3 = _mm_xor_si128(xmm_crc3, xmm_crc1);

    crc->value = ~((uint32_t)_mm_extract_epi32(xmm_crc3, 2));

    return crc->value;
}

Z_INTERNAL uint32_t CRC32(uint32_t crc32, const uint8_t *buf, size_t len) {
    /* For lengths < 64, the crc32_braid method is faster. The CRC32 instruction
     * might also prove effective for these short lengths. */
    if (len < 64)
        return PREFIX(crc32_braid)(crc32, buf, len);

    crc32_fold ALIGNED_(16) crc_state;
    CRC32_FOLD_RESET(&crc_state);
    CRC32_FOLD(&crc_state, buf, len, crc32);
    return CRC32_FOLD_FINAL(&crc_state);
}

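/*
 * For reference, a minimal scalar equivalent of the checksum computed above
 * (reflected CRC-32, polynomial 0xEDB88320, initial value and final XOR of
 * 0xffffffff) is sketched below; the function name is illustrative, and the
 * folded SIMD path must produce identical results:
 *
 *     static uint32_t crc32_bitwise(uint32_t crc, const uint8_t *buf, size_t len) {
 *         crc = ~crc;
 *         while (len--) {
 *             crc ^= *buf++;
 *             for (int k = 0; k < 8; k++)
 *                 crc = (crc >> 1) ^ (0xEDB88320 & (0 - (crc & 1)));
 *         }
 *         return ~crc;
 *     }
 */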