opencv/3rdparty/zlib-ng/compare256_rle.h
Letu Ren 0de26fd78e Add zlib-ng as an alternative zlib implementation
Zlib-ng is a zlib replacement with optimizations for "next generation" systems. Its optimizations may benefit the decode and encode speed of image libraries such as libpng. In our tests, using the zlib-ng and libpng combination on an x86_64 machine with AVX2, the time of `imdecode` and `imencode` drops by approximately 20%. This patch enables zlib-ng's optimizations when `CV_DISABLE_OPTIMIZATION` is OFF. Since zlib-ng can dispatch intrinsics on the fly, the porting work is much easier.

Related discussion: https://github.com/opencv/opencv/issues/22573
2024-01-14 14:58:47 +08:00

/* compare256_rle.h -- 256 byte run-length encoding comparison
* Copyright (C) 2022 Nathan Moinvaziri
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "zbuild.h"
#include "fallback_builtins.h"
typedef uint32_t (*compare256_rle_func)(const uint8_t* src0, const uint8_t* src1);
/* ALIGNED, byte comparison: *src0 holds the single run byte and never
 * advances; only src1 moves forward. The loop body is unrolled eight times. */
static inline uint32_t compare256_rle_c(const uint8_t *src0, const uint8_t *src1) {
    uint32_t len = 0;

    do {
        if (*src0 != *src1)
            return len;
        src1 += 1, len += 1;
        if (*src0 != *src1)
            return len;
        src1 += 1, len += 1;
        if (*src0 != *src1)
            return len;
        src1 += 1, len += 1;
        if (*src0 != *src1)
            return len;
        src1 += 1, len += 1;
        if (*src0 != *src1)
            return len;
        src1 += 1, len += 1;
        if (*src0 != *src1)
            return len;
        src1 += 1, len += 1;
        if (*src0 != *src1)
            return len;
        src1 += 1, len += 1;
        if (*src0 != *src1)
            return len;
        src1 += 1, len += 1;
    } while (len < 256);

    return 256;
}
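
/* Illustrative usage sketch (a hypothetical helper, not part of zlib-ng):
 * an RLE match at distance 1 measures how far the byte just before `pos`
 * keeps repeating. Assumes pos >= 1 and that at least 256 bytes are
 * readable at buf + pos, as the deflate window bookkeeping guarantees
 * in practice. */
static inline uint32_t example_rle_run_length(const uint8_t *buf, uint32_t pos) {
    /* src0 = the byte expected to repeat (fixed), src1 = where the run starts. */
    return compare256_rle_c(buf + pos - 1, buf + pos);
}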
#ifdef UNALIGNED_OK
/* 16-bit unaligned integer comparison: compares two bytes per step. When a
 * 16-bit pair differs, the run may still extend one more byte, so the
 * (*src0 == *src1) term adds 1 iff the first byte of the pair matched. */
static inline uint32_t compare256_rle_unaligned_16(const uint8_t *src0, const uint8_t *src1) {
    uint32_t len = 0;
    uint16_t src0_cmp, src1_cmp;

    memcpy(&src0_cmp, src0, sizeof(src0_cmp));

    do {
        memcpy(&src1_cmp, src1, sizeof(src1_cmp));
        if (src0_cmp != src1_cmp)
            return len + (*src0 == *src1);
        src1 += 2, len += 2;
        memcpy(&src1_cmp, src1, sizeof(src1_cmp));
        if (src0_cmp != src1_cmp)
            return len + (*src0 == *src1);
        src1 += 2, len += 2;
        memcpy(&src1_cmp, src1, sizeof(src1_cmp));
        if (src0_cmp != src1_cmp)
            return len + (*src0 == *src1);
        src1 += 2, len += 2;
        memcpy(&src1_cmp, src1, sizeof(src1_cmp));
        if (src0_cmp != src1_cmp)
            return len + (*src0 == *src1);
        src1 += 2, len += 2;
    } while (len < 256);

    return 256;
}
#ifdef HAVE_BUILTIN_CTZ
/* 32-bit unaligned integer comparison: the 16-bit source pattern is
 * broadcast to 32 bits so four bytes are compared per step. On a mismatch,
 * the trailing zero count of the XOR divided by 8 is the index of the first
 * differing byte (for a little-endian load). For example, sv = 0x61616161
 * against memory bytes 'a','a','b','a' (mv = 0x61626161) gives
 * diff = 0x00030000, ctz = 16, 16/8 = 2 matching bytes. */
static inline uint32_t compare256_rle_unaligned_32(const uint8_t *src0, const uint8_t *src1) {
    uint32_t sv, len = 0;
    uint16_t src0_cmp;

    memcpy(&src0_cmp, src0, sizeof(src0_cmp));
    sv = ((uint32_t)src0_cmp << 16) | src0_cmp;

    do {
        uint32_t mv, diff;

        memcpy(&mv, src1, sizeof(mv));

        diff = sv ^ mv;
        if (diff) {
            uint32_t match_byte = __builtin_ctz(diff) / 8;
            return len + match_byte;
        }

        src1 += 4, len += 4;
    } while (len < 256);

    return 256;
}
#endif
#if defined(UNALIGNED64_OK) && defined(HAVE_BUILTIN_CTZLL)
/* 64-bit unaligned integer comparison: the same idea widened to eight bytes
 * per step; __builtin_ctzll(diff) / 8 locates the first differing byte. */
static inline uint32_t compare256_rle_unaligned_64(const uint8_t *src0, const uint8_t *src1) {
    uint32_t src0_cmp32, len = 0;
    uint16_t src0_cmp;
    uint64_t sv;

    memcpy(&src0_cmp, src0, sizeof(src0_cmp));
    src0_cmp32 = ((uint32_t)src0_cmp << 16) | src0_cmp;
    sv = ((uint64_t)src0_cmp32 << 32) | src0_cmp32;

    do {
        uint64_t mv, diff;

        memcpy(&mv, src1, sizeof(mv));

        diff = sv ^ mv;
        if (diff) {
            uint64_t match_byte = __builtin_ctzll(diff) / 8;
            return len + (uint32_t)match_byte;
        }

        src1 += 8, len += 8;
    } while (len < 256);

    return 256;
}
#endif
#endif
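
/* Hypothetical selector sketch (an illustration, not zlib-ng's actual
 * initialization code): choose the widest comparison the build
 * configuration provides, falling back to the portable byte loop. The
 * guards mirror those around the definitions above. */
static inline compare256_rle_func example_pick_compare256_rle(void) {
#if defined(UNALIGNED_OK) && defined(UNALIGNED64_OK) && defined(HAVE_BUILTIN_CTZLL)
    return compare256_rle_unaligned_64;
#elif defined(UNALIGNED_OK) && defined(HAVE_BUILTIN_CTZ)
    return compare256_rle_unaligned_32;
#elif defined(UNALIGNED_OK)
    return compare256_rle_unaligned_16;
#else
    return compare256_rle_c;
#endif
}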