mirror of https://github.com/opencv/opencv.git
android: make optional "cpufeatures", build fixes for NDK r15
parent cf86f88c71
commit 3e3e2dd512
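Nearly every carotene hunk below makes the same one-line change: preprocessor tests that looked only at __GNUC_MINOR__ become defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7 (or < 6). The bare form is fragile: the minor version is also small on GCC 5.x/6.x, and on compilers that do not define the macro at all the token evaluates to 0 inside #if (and trips -Wundef where that warning is enabled), so the legacy GCC 4.x inline-assembly branches could be selected by toolchains they were never written for. The sketch below is not part of the commit; CAROTENE_LEGACY_GCC_ASM is a hypothetical name used only to illustrate the guard's effect.

    // Illustration only (assumes nothing beyond standard predefined macros).
    // An identifier that is not defined evaluates to 0 inside #if, so a bare
    // "#if __GNUC_MINOR__ < 7" is also true on non-GCC compilers and on GCC 5.x/6.x,
    // while the guarded form only matches GCC 4.x-style front ends.
    #include <cstdio>

    #if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
    #  define CAROTENE_LEGACY_GCC_ASM 1   // hypothetical macro, for illustration only
    #else
    #  define CAROTENE_LEGACY_GCC_ASM 0
    #endif

    int main()
    {
        std::printf("legacy GCC 4.x asm paths enabled: %d\n", CAROTENE_LEGACY_GCC_ASM);
        return 0;
    }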
3rdparty/carotene/hal/CMakeLists.txt (vendored, 2 changed lines)

@@ -67,7 +67,7 @@ function(compile_carotene)
       endif()
     check_cxx_compiler_flag("-mfpu=neon" CXX_HAS_MFPU_NEON)
     check_c_compiler_flag("-mfpu=neon" C_HAS_MFPU_NEON)
-    if(${CXX_HAS_MFPU_NEON} AND ${C_HAS_MFPU_NEON})
+    if(${CXX_HAS_MFPU_NEON} AND ${C_HAS_MFPU_NEON} AND NOT "${CMAKE_CXX_FLAGS} " MATCHES "-mfpu=neon[^ ]*")
       get_target_property(old_flags "carotene_objs" COMPILE_FLAGS)
       if(old_flags)
         set_target_properties("carotene_objs" PROPERTIES COMPILE_FLAGS "${old_flags} -mfpu=neon")
3rdparty/carotene/src/channel_extract.cpp (vendored, 8 changed lines)

@@ -231,7 +231,7 @@ void extract4(const Size2D &size,
     srcStride == dst2Stride && \
     srcStride == dst3Stride &&

-#if __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7

 #define SPLIT_ASM2(sgn, bits) __asm__ ( \
     "vld2." #bits " {d0, d2}, [%[in0]] \n\t" \

@@ -280,7 +280,7 @@ void extract4(const Size2D &size,
     FILL_LINES##n(VST1Q, sgn##bits) \
 }

-#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#endif

 #define SPLIT(sgn,bits,n) void split##n(const Size2D &_size, \
     const sgn##bits * srcBase, ptrdiff_t srcStride \

@@ -351,7 +351,7 @@ void extract4(const Size2D &size,
     } \
 }

-#if __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7

 #define ALPHA_QUAD(sgn, bits) { \
     internal::prefetch(src + sj); \

@@ -378,7 +378,7 @@ void extract4(const Size2D &size,
     vst1q_##sgn##bits(dst1 + d1j, vals.v4.val[3]); \
 }

-#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#endif

 #define SPLIT4ALPHA(sgn,bits) void split4(const Size2D &_size, \
     const sgn##bits * srcBase, ptrdiff_t srcStride, \
3rdparty/carotene/src/channels_combine.cpp (vendored, 4 changed lines)

@@ -77,7 +77,7 @@ namespace CAROTENE_NS {
     dstStride == src2Stride && \
     dstStride == src3Stride &&

-#if __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7

 #define MERGE_ASM2(sgn, bits) __asm__ ( \
     "vld1." #bits " {d0-d1}, [%[in0]] \n\t" \

@@ -128,7 +128,7 @@ namespace CAROTENE_NS {
     vst##n##q_##sgn##bits(dst + dj, v_dst); \
 }

-#endif // __GNUC__ == 4 && __GNUC_MINOR__ < 7
+#endif

 #define COMBINE(sgn,bits,n) void combine##n(const Size2D &_size \
     FILL_LINES##n(FARG, sgn##bits), \
3rdparty/carotene/src/colorconvert.cpp (vendored, 78 changed lines)

@@ -97,7 +97,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
     const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
     const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
     register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
     register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);

@@ -116,7 +116,7 @@ void rgb2gray(const Size2D &size, COLOR_SPACE color_space,
     u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
     size_t sj = 0u, dj = 0u;

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     for (; dj < roiw8; sj += 24, dj += 8)
     {
         internal::prefetch(src + sj);

@@ -198,7 +198,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
     const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
     const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
     register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
     register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);

@@ -217,7 +217,7 @@ void rgbx2gray(const Size2D &size, COLOR_SPACE color_space,
     u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
     size_t sj = 0u, dj = 0u;

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     for (; dj < roiw8; sj += 32, dj += 8)
     {
         internal::prefetch(src + sj);

@@ -300,7 +300,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
     const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
     const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
     register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
     register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);

@@ -319,7 +319,7 @@ void bgr2gray(const Size2D &size, COLOR_SPACE color_space,
     u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
     size_t sj = 0u, dj = 0u;

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     for (; dj < roiw8; sj += 24, dj += 8)
     {
         internal::prefetch(src + sj);

@@ -402,7 +402,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
     const u32 G2Y = color_space == COLOR_SPACE_BT601 ? G2Y_BT601 : G2Y_BT709;
     const u32 B2Y = color_space == COLOR_SPACE_BT601 ? B2Y_BT601 : B2Y_BT709;

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     register int16x4_t v_r2y asm ("d31") = vmov_n_s16(R2Y);
     register int16x4_t v_g2y asm ("d30") = vmov_n_s16(G2Y);
     register int16x4_t v_b2y asm ("d29") = vmov_n_s16(B2Y);

@@ -421,7 +421,7 @@ void bgrx2gray(const Size2D &size, COLOR_SPACE color_space,
     u8 * dst = internal::getRowPtr(dstBase, dstStride, i);
     size_t sj = 0u, dj = 0u;

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     for (; dj < roiw8; sj += 32, dj += 8)
     {
         internal::prefetch(src + sj);

@@ -512,7 +512,7 @@ void gray2rgb(const Size2D &size,
     for (; sj < roiw16; sj += 16, dj += 48)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         __asm__ (
             "vld1.8 {d0-d1}, [%[in0]] \n\t"
             "vmov.8 q1, q0 \n\t"

@@ -538,7 +538,7 @@ void gray2rgb(const Size2D &size,

     if (sj < roiw8)
     {
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         __asm__ (
             "vld1.8 {d0}, [%[in]] \n\t"
             "vmov.8 d1, d0 \n\t"

@@ -584,7 +584,7 @@ void gray2rgbx(const Size2D &size,
     size_t roiw16 = size.width >= 15 ? size.width - 15 : 0;
     size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     register uint8x16_t vc255 asm ("q4") = vmovq_n_u8(255);
 #else
     uint8x16x4_t vRgba;

@@ -602,7 +602,7 @@ void gray2rgbx(const Size2D &size,
     for (; sj < roiw16; sj += 16, dj += 64)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         __asm__ (
             "vld1.8 {d0-d1}, [%[in0]] \n\t"
             "vmov.8 q1, q0 \n\t"

@@ -628,7 +628,7 @@ void gray2rgbx(const Size2D &size,

     if (sj < roiw8)
     {
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         __asm__ (
             "vld1.8 {d5}, [%[in]] \n\t"
             "vmov.8 d6, d5 \n\t"

@@ -1409,7 +1409,7 @@ inline void convertToHSV(const s32 r, const s32 g, const s32 b,
     "d24","d25","d26","d27","d28","d29","d30","d31" \
     );

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7

 #define YCRCB_CONSTS \
     register int16x4_t vcYR asm ("d31") = vmov_n_s16(4899); \

@@ -1555,7 +1555,7 @@ inline uint8x8x3_t convertToYCrCb( const int16x8_t& vR, const int16x8_t& vG, con
 #define COEFF_G ( 8663)
 #define COEFF_B (-17705)

-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 #define YUV420ALPHA3_CONST
 #define YUV420ALPHA4_CONST register uint8x16_t c255 asm ("q13") = vmovq_n_u8(255);
 #define YUV420ALPHA3_CONVERT

@@ -1852,7 +1852,7 @@ void rgb2hsv(const Size2D &size,
 #ifdef CAROTENE_NEON
     size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
     const s32 hsv_shift = 12;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     register const f32 vsdiv_table = f32(255 << hsv_shift);
     register f32 vhdiv_table = f32(hrange << hsv_shift);
     register const s32 vhrange = hrange;

@@ -1871,7 +1871,7 @@ void rgb2hsv(const Size2D &size,
     for (; j < roiw8; sj += 24, dj += 24, j += 8)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d0, d2)
 #else
         uint8x8x3_t vRgb = vld3_u8(src + sj);

@@ -1904,7 +1904,7 @@ void rgbx2hsv(const Size2D &size,
 #ifdef CAROTENE_NEON
     size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
     const s32 hsv_shift = 12;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     register const f32 vsdiv_table = f32(255 << hsv_shift);
     register f32 vhdiv_table = f32(hrange << hsv_shift);
     register const s32 vhrange = hrange;

@@ -1923,7 +1923,7 @@ void rgbx2hsv(const Size2D &size,
     for (; j < roiw8; sj += 32, dj += 24, j += 8)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d0, d2)
 #else
         uint8x8x4_t vRgb = vld4_u8(src + sj);

@@ -1956,7 +1956,7 @@ void bgr2hsv(const Size2D &size,
 #ifdef CAROTENE_NEON
     size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
     const s32 hsv_shift = 12;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     register const f32 vsdiv_table = f32(255 << hsv_shift);
     register f32 vhdiv_table = f32(hrange << hsv_shift);
     register const s32 vhrange = hrange;

@@ -1975,7 +1975,7 @@ void bgr2hsv(const Size2D &size,
     for (; j < roiw8; sj += 24, dj += 24, j += 8)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERT_TO_HSV_ASM(vld3.8 {d0-d2}, d2, d0)
 #else
         uint8x8x3_t vRgb = vld3_u8(src + sj);

@@ -2008,7 +2008,7 @@ void bgrx2hsv(const Size2D &size,
 #ifdef CAROTENE_NEON
     size_t roiw8 = size.width >= 7 ? size.width - 7 : 0;
     const s32 hsv_shift = 12;
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     register const f32 vsdiv_table = f32(255 << hsv_shift);
     register f32 vhdiv_table = f32(hrange << hsv_shift);
     register const s32 vhrange = hrange;

@@ -2027,7 +2027,7 @@ void bgrx2hsv(const Size2D &size,
     for (; j < roiw8; sj += 32, dj += 24, j += 8)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERT_TO_HSV_ASM(vld4.8 {d0-d3}, d2, d0)
 #else
         uint8x8x4_t vRgb = vld4_u8(src + sj);

@@ -2068,7 +2068,7 @@ void rgbx2bgr565(const Size2D &size,
     for (; j < roiw16; sj += 64, dj += 32, j += 16)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         __asm__ (
             "vld4.8 {d2, d4, d6, d8}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
             "vld4.8 {d3, d5, d7, d9}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"

@@ -2122,7 +2122,7 @@ void rgb2bgr565(const Size2D &size,
     for (; j < roiw16; sj += 48, dj += 32, j += 16)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         __asm__ (
             "vld3.8 {d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 q4 \n\t"
             "vld3.8 {d3, d5, d7}, [%[in1]] @ xxxxxxxx rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"

@@ -2176,7 +2176,7 @@ void rgbx2rgb565(const Size2D &size,
     for (; j < roiw16; sj += 64, dj += 32, j += 16)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         __asm__ (
             "vld4.8 {d0, d2, d4, d6}, [%[in0]] @ q0 q1 q2 q3 \n\t"
             "vld4.8 {d1, d3, d5, d7}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB aaaaAAAA \n\t"

@@ -2230,7 +2230,7 @@ void rgb2rgb565(const Size2D &size,
     for (; j < roiw16; sj += 48, dj += 32, j += 16)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         __asm__ (
             "vld3.8 {d0, d2, d4}, [%[in0]] @ q0 q1 q2 q3 \n\t"
             "vld3.8 {d1, d3, d5}, [%[in1]] @ rrrrRRRR ggggGGGG bbbbBBBB xxxxxxxx \n\t"

@@ -2285,7 +2285,7 @@ void rgb2ycrcb(const Size2D &size,
     for (; j < roiw8; sj += 24, dj += 24, j += 8)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTTOYCRCB(vld3.8 {d0-d2}, d0, d1, d2)
 #else
         uint8x8x3_t vRgb = vld3_u8(src + sj);

@@ -2329,7 +2329,7 @@ void rgbx2ycrcb(const Size2D &size,
     for (; j < roiw8; sj += 32, dj += 24, j += 8)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTTOYCRCB(vld4.8 {d0-d3}, d0, d1, d2)
 #else
         uint8x8x4_t vRgba = vld4_u8(src + sj);

@@ -2373,7 +2373,7 @@ void bgr2ycrcb(const Size2D &size,
     for (; j < roiw8; sj += 24, dj += 24, j += 8)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTTOYCRCB(vld3.8 {d0-d2}, d2, d1, d0)
 #else
         uint8x8x3_t vBgr = vld3_u8(src + sj);

@@ -2417,7 +2417,7 @@ void bgrx2ycrcb(const Size2D &size,
     for (; j < roiw8; sj += 32, dj += 24, j += 8)
     {
         internal::prefetch(src + sj);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTTOYCRCB(vld4.8 {d0-d3}, d2, d1, d0)
 #else
         uint8x8x4_t vBgra = vld4_u8(src + sj);

@@ -2499,7 +2499,7 @@ void yuv420sp2rgb(const Size2D &size,
         internal::prefetch(uv + j);
         internal::prefetch(y1 + j);
         internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTYUV420TORGB(3, d1, d0, q5, q6)
 #else
         convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);

@@ -2545,7 +2545,7 @@ void yuv420sp2rgbx(const Size2D &size,
         internal::prefetch(uv + j);
         internal::prefetch(y1 + j);
         internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTYUV420TORGB(4, d1, d0, q5, q6)
 #else
         convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);

@@ -2591,7 +2591,7 @@ void yuv420i2rgb(const Size2D &size,
         internal::prefetch(uv + j);
         internal::prefetch(y1 + j);
         internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTYUV420TORGB(3, d0, d1, q5, q6)
 #else
         convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);

@@ -2637,7 +2637,7 @@ void yuv420i2rgbx(const Size2D &size,
         internal::prefetch(uv + j);
         internal::prefetch(y1 + j);
         internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTYUV420TORGB(4, d0, d1, q5, q6)
 #else
         convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);

@@ -2683,7 +2683,7 @@ void yuv420sp2bgr(const Size2D &size,
         internal::prefetch(uv + j);
         internal::prefetch(y1 + j);
         internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTYUV420TORGB(3, d1, d0, q6, q5)
 #else
         convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);

@@ -2729,7 +2729,7 @@ void yuv420sp2bgrx(const Size2D &size,
         internal::prefetch(uv + j);
         internal::prefetch(y1 + j);
         internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTYUV420TORGB(4, d1, d0, q6, q5)
 #else
         convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);

@@ -2775,7 +2775,7 @@ void yuv420i2bgr(const Size2D &size,
         internal::prefetch(uv + j);
         internal::prefetch(y1 + j);
         internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTYUV420TORGB(3, d0, d1, q6, q5)
 #else
         convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);

@@ -2821,7 +2821,7 @@ void yuv420i2bgrx(const Size2D &size,
         internal::prefetch(uv + j);
         internal::prefetch(y1 + j);
         internal::prefetch(y2 + j);
-#if defined(__GNUC__) && __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
         CONVERTYUV420TORGB(4, d0, d1, q6, q5)
 #else
         convertYUV420.ToRGB(y1 + j, y2 + j, uv + j, dst1 + dj, dst2 + dj);
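For context, the guarded blocks in colorconvert.cpp all follow one pattern: a GCC 4.x-only branch of hand-written NEON assembly whose inputs are pinned to named registers (register int16x4_t v_r2y asm ("d31") = ...), and an #else branch written with NEON intrinsics. The following is a minimal intrinsics-only sketch in the spirit of that fallback branch, not carotene's actual kernel: the 8-bit weights 77/150/29 are illustrative BT.601-like values, the function name is made up, and the width is assumed to be a multiple of 8.

    // Sketch only: mirrors the shape of carotene's intrinsics fallback, not its exact math.
    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>

    // Convert n pixels of interleaved RGB to gray, 8 pixels per step (n % 8 == 0 assumed).
    void rgb2gray_sketch(const uint8_t * src, uint8_t * dst, size_t n)
    {
        const uint8x8_t wr = vdup_n_u8(77);   // ~0.299 * 256
        const uint8x8_t wg = vdup_n_u8(150);  // ~0.587 * 256
        const uint8x8_t wb = vdup_n_u8(29);   // ~0.114 * 256
        for (size_t i = 0; i < n; i += 8, src += 8 * 3, dst += 8)
        {
            uint8x8x3_t rgb = vld3_u8(src);             // de-interleave 8 RGB triplets
            uint16x8_t acc = vmull_u8(rgb.val[0], wr);  // r * wr
            acc = vmlal_u8(acc, rgb.val[1], wg);        // + g * wg
            acc = vmlal_u8(acc, rgb.val[2], wb);        // + b * wb
            vst1_u8(dst, vshrn_n_u16(acc, 8));          // >> 8 and narrow back to u8
        }
    }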
3rdparty/carotene/src/convert.cpp (vendored, 54 changed lines)

@@ -101,7 +101,7 @@ CVT_FUNC(u8, s8, 16,
     }
 })

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVT_FUNC(u8, u16, 16,
     register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
 {

@@ -135,7 +135,7 @@ CVT_FUNC(u8, u16, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVT_FUNC(u8, s32, 16,
     register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);
     register uint8x16_t zero1 asm ("q2") = vmovq_n_u8(0);

@@ -173,7 +173,7 @@ CVT_FUNC(u8, s32, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(u8, f32, 16,
     ,
 {

@@ -248,7 +248,7 @@ CVT_FUNC(s8, u8, 16,
     }
 })

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVT_FUNC(s8, u16, 16,
     register uint8x16_t zero0 asm ("q1") = vmovq_n_u8(0);,
 {

@@ -284,7 +284,7 @@ CVT_FUNC(s8, u16, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s8, s16, 16,
     ,
 {

@@ -323,7 +323,7 @@ CVT_FUNC(s8, s16, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVT_FUNC(s8, s32, 16,
     ,
 {

@@ -377,7 +377,7 @@ CVT_FUNC(s8, s32, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s8, f32, 16,
     ,
 {

@@ -440,7 +440,7 @@ CVT_FUNC(s8, f32, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(u16, u8, 16,
     ,
 {

@@ -479,7 +479,7 @@ CVT_FUNC(u16, u8, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(u16, s8, 16,
     register uint8x16_t v127 asm ("q4") = vmovq_n_u8(127);,
 {

@@ -522,7 +522,7 @@ CVT_FUNC(u16, s8, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVT_FUNC(u16, s16, 8,
     register uint16x8_t v32767 asm ("q4") = vmovq_n_u16(0x7FFF);,
 {

@@ -555,7 +555,7 @@ CVT_FUNC(u16, s16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVT_FUNC(u16, s32, 8,
     register uint16x8_t zero0 asm ("q1") = vmovq_n_u16(0);,
 {

@@ -589,7 +589,7 @@ CVT_FUNC(u16, s32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(u16, f32, 8,
     ,
 {

@@ -633,7 +633,7 @@ CVT_FUNC(u16, f32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s16, u8, 16,
     ,
 {

@@ -672,7 +672,7 @@ CVT_FUNC(s16, u8, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s16, s8, 16,
     ,
 {

@@ -711,7 +711,7 @@ CVT_FUNC(s16, s8, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVT_FUNC(s16, u16, 8,
     register int16x8_t vZero asm ("q4") = vmovq_n_s16(0);,
 {

@@ -747,7 +747,7 @@ CVT_FUNC(s16, u16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s16, s32, 8,
     ,
 {

@@ -786,7 +786,7 @@ CVT_FUNC(s16, s32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s16, f32, 8,
     ,
 {

@@ -829,7 +829,7 @@ CVT_FUNC(s16, f32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s32, u8, 8,
     ,
 {

@@ -870,7 +870,7 @@ CVT_FUNC(s32, u8, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s32, s8, 8,
     ,
 {

@@ -911,7 +911,7 @@ CVT_FUNC(s32, s8, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s32, u16, 8,
     ,
 {

@@ -950,7 +950,7 @@ CVT_FUNC(s32, u16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s32, s16, 8,
     ,
 {

@@ -989,7 +989,7 @@ CVT_FUNC(s32, s16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(s32, f32, 8,
     ,
 {

@@ -1034,7 +1034,7 @@ CVT_FUNC(s32, f32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(f32, u8, 8,
     register float32x4_t vmult asm ("q0") = vdupq_n_f32((float)(1 << 16));
     register uint32x4_t vmask asm ("q1") = vdupq_n_u32(1<<16);,

@@ -1101,7 +1101,7 @@ CVT_FUNC(f32, u8, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(f32, s8, 8,
     register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
 {

@@ -1153,7 +1153,7 @@ CVT_FUNC(f32, s8, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(f32, u16, 8,
     register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
 {

@@ -1212,7 +1212,7 @@ CVT_FUNC(f32, u16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(f32, s16, 8,
     register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
 {

@@ -1271,7 +1271,7 @@ CVT_FUNC(f32, s16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 CVT_FUNC(f32, s32, 8,
     register float32x4_t vhalf asm ("q0") = vdupq_n_f32(0.5f);,
 {
3rdparty/carotene/src/convert_scale.cpp (vendored, 56 changed lines)

@@ -473,7 +473,7 @@ CVTS_FUNC(u8, s16, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(u8, s32, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -562,7 +562,7 @@ CVTS_FUNC(u8, s32, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(u8, f32, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,

@@ -985,7 +985,7 @@ CVTS_FUNC(s8, s16, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s8, s32, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1074,7 +1074,7 @@ CVTS_FUNC(s8, s32, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s8, f32, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,

@@ -1155,7 +1155,7 @@ CVTS_FUNC(s8, f32, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(u16, u8, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1214,7 +1214,7 @@ CVTS_FUNC(u16, u8, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(u16, s8, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1273,7 +1273,7 @@ CVTS_FUNC(u16, s8, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC1(u16, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1330,7 +1330,7 @@ CVTS_FUNC1(u16, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(u16, s16, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1387,7 +1387,7 @@ CVTS_FUNC(u16, s16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(u16, s32, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1443,7 +1443,7 @@ CVTS_FUNC(u16, s32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(u16, f32, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,

@@ -1495,7 +1495,7 @@ CVTS_FUNC(u16, f32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s16, u8, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1554,7 +1554,7 @@ CVTS_FUNC(s16, u8, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s16, s8, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1613,7 +1613,7 @@ CVTS_FUNC(s16, s8, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s16, u16, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1670,7 +1670,7 @@ CVTS_FUNC(s16, u16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC1(s16, 16,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1727,7 +1727,7 @@ CVTS_FUNC1(s16, 16,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s16, s32, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1783,7 +1783,7 @@ CVTS_FUNC(s16, s32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s16, f32, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,

@@ -1835,7 +1835,7 @@ CVTS_FUNC(s16, f32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s32, u8, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1893,7 +1893,7 @@ CVTS_FUNC(s32, u8, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s32, s8, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -1951,7 +1951,7 @@ CVTS_FUNC(s32, s8, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s32, u16, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -2007,7 +2007,7 @@ CVTS_FUNC(s32, u16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s32, s16, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -2063,7 +2063,7 @@ CVTS_FUNC(s32, s16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC1(s32, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -2118,7 +2118,7 @@ CVTS_FUNC1(s32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(s32, f32, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,

@@ -2169,7 +2169,7 @@ CVTS_FUNC(s32, f32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(f32, u8, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)((1 << 16)*alpha));
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)((1 << 16)*beta));

@@ -2239,7 +2239,7 @@ CVTS_FUNC(f32, u8, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(f32, s8, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -2293,7 +2293,7 @@ CVTS_FUNC(f32, s8, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(f32, u16, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -2345,7 +2345,7 @@ CVTS_FUNC(f32, u16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(f32, s16, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -2397,7 +2397,7 @@ CVTS_FUNC(f32, s16, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC(f32, s32, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta + 0.5f);,

@@ -2448,7 +2448,7 @@ CVTS_FUNC(f32, s32, 8,
 })
 #endif

-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 CVTS_FUNC1(f32, 8,
     register float32x4_t vscale asm ("q0") = vdupq_n_f32((f32)alpha);
     register float32x4_t vshift asm ("q1") = vdupq_n_f32((f32)beta);,
3rdparty/carotene/src/gaussian_blur.cpp (vendored, 2 changed lines)

@@ -327,7 +327,7 @@ void gaussianBlur5x5(const Size2D &size, s32 cn,
     u16* lidx1 = lane + x - 1*2;
     u16* lidx3 = lane + x + 1*2;
     u16* lidx4 = lane + x + 2*2;
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
     __asm__ __volatile__ (
         "vld2.16 {d0, d2}, [%[in0]]! \n\t"
         "vld2.16 {d1, d3}, [%[in0]] \n\t"
3rdparty/carotene/src/opticalflow.cpp (vendored, 2 changed lines)

@@ -210,7 +210,7 @@ void pyrLKOptFlowLevel(const Size2D &size, s32 cn,
     internal::prefetch(dsrc + dstep * 2, 0);
     for(x = 0; x <= wwcn - 4; x += 4, dsrc += 4*2, dIptr += 4*2 )
     {
-#if __GNUC_MINOR__ < 0
+#if 0
         __asm__ (
             "vld2.16 {d0-d1}, [%[dsrc00]] \n\t"
             "vld2.16 {d2-d3}, [%[dsrc10]] \n\t"
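One hunk above differs from the rest: in opticalflow.cpp the old guard was #if __GNUC_MINOR__ < 0, a condition that can never hold (the macro is a non-negative integer when defined, and an undefined identifier evaluates to 0 in #if), so the commit spells the permanently disabled assembly path as an explicit #if 0. A tiny illustration of why the old form was dead code; the #error is hypothetical and only there to make the point:

    // Never reached: defined => non-negative minor version, undefined => treated as 0.
    #if __GNUC_MINOR__ < 0
    #  error "unreachable on any conforming preprocessor"
    #endif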
8
3rdparty/carotene/src/pyramid.cpp
vendored
8
3rdparty/carotene/src/pyramid.cpp
vendored
@ -331,7 +331,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
|
|||||||
for (; x < roiw8; x += 8)
|
for (; x < roiw8; x += 8)
|
||||||
{
|
{
|
||||||
internal::prefetch(lane + 2 * x);
|
internal::prefetch(lane + 2 * x);
|
||||||
#if __GNUC_MINOR__ < 7
|
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
|
||||||
__asm__ (
|
__asm__ (
|
||||||
"vld2.16 {d0-d3}, [%[in0]] \n\t"
|
"vld2.16 {d0-d3}, [%[in0]] \n\t"
|
||||||
"vld2.16 {d4-d7}, [%[in4]] \n\t"
|
"vld2.16 {d4-d7}, [%[in4]] \n\t"
|
||||||
@ -538,7 +538,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
|
|||||||
for (; x < roiw4; x += 4)
|
for (; x < roiw4; x += 4)
|
||||||
{
|
{
|
||||||
internal::prefetch(lane + 2 * x);
|
internal::prefetch(lane + 2 * x);
|
||||||
#if __GNUC_MINOR__ < 7
|
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
|
||||||
__asm__ (
|
__asm__ (
|
||||||
"vld2.32 {d0-d3}, [%[in0]] \n\t"
|
"vld2.32 {d0-d3}, [%[in0]] \n\t"
|
||||||
"vld2.32 {d4-d7}, [%[in4]] \n\t"
|
"vld2.32 {d4-d7}, [%[in4]] \n\t"
|
||||||
@ -672,7 +672,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
|
|||||||
std::vector<f32> _buf(cn*(srcSize.width + 4) + 32/sizeof(f32));
|
std::vector<f32> _buf(cn*(srcSize.width + 4) + 32/sizeof(f32));
|
||||||
f32* lane = internal::alignPtr(&_buf[2*cn], 32);
|
f32* lane = internal::alignPtr(&_buf[2*cn], 32);
|
||||||
|
|
||||||
#if __GNUC_MINOR__ < 7
|
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
|
||||||
register float32x4_t vc6d4f32 asm ("q11") = vmovq_n_f32(1.5f); // 6/4
|
register float32x4_t vc6d4f32 asm ("q11") = vmovq_n_f32(1.5f); // 6/4
|
||||||
register float32x4_t vc1d4f32 asm ("q12") = vmovq_n_f32(0.25f); // 1/4
|
register float32x4_t vc1d4f32 asm ("q12") = vmovq_n_f32(0.25f); // 1/4
|
||||||
|
|
||||||
@ -739,7 +739,7 @@ void gaussianPyramidDown(const Size2D &srcSize,
|
|||||||
for (; x < roiw4; x += 4)
|
for (; x < roiw4; x += 4)
|
||||||
{
|
{
|
||||||
internal::prefetch(lane + 2 * x);
|
internal::prefetch(lane + 2 * x);
|
||||||
#if __GNUC_MINOR__ < 7
|
#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
|
||||||
__asm__ __volatile__ (
|
__asm__ __volatile__ (
|
||||||
"vld2.32 {d0-d3}, [%[in0]] \n\t"
|
"vld2.32 {d0-d3}, [%[in0]] \n\t"
|
||||||
"vld2.32 {d8-d11}, [%[in4]] \n\t"
|
"vld2.32 {d8-d11}, [%[in4]] \n\t"
|
||||||
3rdparty/carotene/src/scharr.cpp (vendored, 6 changed lines)
@@ -109,7 +109,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
 internal::prefetch(srow0 + x);
 internal::prefetch(srow1 + x);
 internal::prefetch(srow2 + x);
-#if __GNUC_MINOR__ < 7
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 7
 __asm__ (
 "vld1.8 {d0}, [%[src0]] \n\t"
 "vld1.8 {d2}, [%[src2]] \n\t"
@@ -161,7 +161,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
 x = 0;
 for( ; x < roiw8; x += 8 )
 {
-#if __GNUC_MINOR__ < 6
+#if defined(__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ < 6
 __asm__ (
 "vld1.16 {d4-d5}, [%[s2ptr]] \n\t"
 "vld1.16 {d8-d9}, [%[s4ptr]] \n\t"
@@ -198,7 +198,7 @@ void ScharrDeriv(const Size2D &size, s32 cn,
 vr.val[1] = vmlaq_s16(s3x10, s24, vc3);

 vst2q_s16(drow + x*2, vr);
-#endif //__GNUC_MINOR__ < 6
+#endif
 }
 for( ; x < colsn; x++ )
 {
3rdparty/libpng/CMakeLists.txt (vendored, 12 changed lines)
@@ -28,11 +28,13 @@ file(GLOB lib_srcs *.c)
 file(GLOB lib_hdrs *.h)


-if(ENABLE_NEON)
-list(APPEND lib_srcs arm/arm_init.c arm/filter_neon.S arm/filter_neon_intrinsics.c)
-add_definitions(-DPNG_ARM_NEON_OPT=2)
-elseif(AARCH64)
-add_definitions(-DPNG_ARM_NEON_OPT=0) # NEON assembler is not supported
+if(ARM OR AARCH64)
+if(ENABLE_NEON AND NOT AARCH64)
+list(APPEND lib_srcs arm/arm_init.c arm/filter_neon.S arm/filter_neon_intrinsics.c)
+add_definitions(-DPNG_ARM_NEON_OPT=2)
+else()
+add_definitions(-DPNG_ARM_NEON_OPT=0) # NEON assembler is not supported
+endif()
 endif()

 if(ENABLE_SSE
3rdparty/libwebp/CMakeLists.txt (vendored, 8 changed lines)
@@ -5,7 +5,9 @@
 project(${WEBP_LIBRARY})

 ocv_include_directories(${CMAKE_CURRENT_SOURCE_DIR})
-ocv_include_directories(${CPUFEATURES_INCLUDE_DIRS})
+if(ANDROID)
+ocv_include_directories(${CPUFEATURES_INCLUDE_DIRS})
+endif()

 file(GLOB lib_srcs dec/*.c demux/*.c dsp/*.c enc/*.c mux/*.c utils/*.c webp/*.c)
 file(GLOB lib_hdrs dec/*.h demux/*.h dsp/*.h enc/*.h mux/*.h utils/*.h webp/*.h)
@@ -28,7 +30,9 @@ endif()
 add_definitions(-DWEBP_USE_THREAD)

 add_library(${WEBP_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs})
-target_link_libraries(${WEBP_LIBRARY} ${CPUFEATURES_LIBRARIES})
+if(ANDROID)
+target_link_libraries(${WEBP_LIBRARY} ${CPUFEATURES_LIBRARIES})
+endif()

 if(UNIX)
 if(CMAKE_COMPILER_IS_GNUCXX OR CV_ICC)
@@ -199,6 +199,7 @@ OCV_OPTION(WITH_1394 "Include IEEE1394 support" ON
 OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O (iOS/Mac)" ON IF APPLE)
 OCV_OPTION(WITH_CARBON "Use Carbon for UI instead of Cocoa" OFF IF APPLE )
 OCV_OPTION(WITH_CAROTENE "Use NVidia carotene acceleration library for ARM platform" ON IF (ARM OR AARCH64) AND NOT IOS AND NOT (CMAKE_VERSION VERSION_LESS "2.8.11"))
+OCV_OPTION(WITH_CPUFEATURES "Use cpufeatures Android library" ON IF ANDROID)
 OCV_OPTION(WITH_VTK "Include VTK library support (and build opencv_viz module eiher)" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT AND NOT CMAKE_CROSSCOMPILING) )
 OCV_OPTION(WITH_CUDA "Include NVidia Cuda Runtime support" ON IF (NOT IOS AND NOT WINRT) )
 OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) library support" ON IF (NOT IOS AND NOT WINRT) )
@@ -302,7 +303,7 @@ OCV_OPTION(ENABLE_COVERAGE "Enable coverage collection with GCov"
 OCV_OPTION(ENABLE_OMIT_FRAME_POINTER "Enable -fomit-frame-pointer for GCC" ON IF CMAKE_COMPILER_IS_GNUCXX AND NOT (APPLE AND CMAKE_COMPILER_IS_CLANGCXX) )
 OCV_OPTION(ENABLE_POWERPC "Enable PowerPC for GCC" ON IF (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES powerpc.*) )
 OCV_OPTION(ENABLE_FAST_MATH "Enable -ffast-math (not recommended for GCC 4.6.x)" OFF IF (CMAKE_COMPILER_IS_GNUCXX AND (X86 OR X86_64)) )
-OCV_OPTION(ENABLE_NEON "Enable NEON instructions" "${NEON}" IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
+OCV_OPTION(ENABLE_NEON "Enable NEON instructions" (NEON OR ANDROID_ARM_NEON OR AARCH64) IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
 OCV_OPTION(ENABLE_VFPV3 "Enable VFPv3-D32 instructions" OFF IF CMAKE_COMPILER_IS_GNUCXX AND (ARM OR AARCH64 OR IOS) )
 OCV_OPTION(ENABLE_NOISY_WARNINGS "Show all warnings even if they are too noisy" OFF )
 OCV_OPTION(OPENCV_WARNINGS_ARE_ERRORS "Treat warnings as errors" OFF )
@@ -578,8 +579,9 @@ endif()
 # Detect 3rd-party libraries
 # ----------------------------------------------------------------------------

-if(ANDROID)
+if(ANDROID AND WITH_CPUFEATURES)
 add_subdirectory(3rdparty/cpufeatures)
+set(HAVE_CPUFEATURES 1)
 endif()

 include(cmake/OpenCVFindLibsGrfmt.cmake)
@@ -122,7 +122,7 @@ endmacro()

 macro(ocv_append_optimization_flag var OPT)
 if(CPU_${OPT}_FLAGS_CONFLICT)
-string(REGEX REPLACE " ${CPU_${OPT}_FLAGS_CONFLICT}" "" ${var} " ${${var}}")
+string(REGEX REPLACE " ${CPU_${OPT}_FLAGS_CONFLICT}" "" ${var} " ${${var}} ")
 string(REGEX REPLACE "^ +" "" ${var} "${${var}}")
 endif()
 set(${var} "${${var}} ${CPU_${OPT}_FLAGS_ON}")
@@ -250,17 +250,21 @@ if(X86 OR X86_64)
 endif()

 elseif(ARM OR AARCH64)
+ocv_update(CPU_NEON_TEST_FILE "${OpenCV_SOURCE_DIR}/cmake/checks/cpu_neon.cpp")
 ocv_update(CPU_FP16_TEST_FILE "${OpenCV_SOURCE_DIR}/cmake/checks/cpu_fp16.cpp")
 if(NOT AARCH64)
 ocv_update(CPU_KNOWN_OPTIMIZATIONS "VFPV3;NEON;FP16")
-ocv_update(CPU_NEON_FLAGS_ON "-mfpu=neon")
 ocv_update(CPU_VFPV3_FLAGS_ON "-mfpu=vfpv3")
+ocv_update(CPU_NEON_FLAGS_ON "-mfpu=neon")
+ocv_update(CPU_NEON_FLAGS_CONFLICT "-mfpu=[^ ]*")
 ocv_update(CPU_FP16_FLAGS_ON "-mfpu=neon-fp16")
-set(CPU_BASELINE "DETECT" CACHE STRING "${HELP_CPU_BASELINE}")
+ocv_update(CPU_FP16_IMPLIES "NEON")
+ocv_update(CPU_FP16_FLAGS_CONFLICT "-mfpu=[^ ]*")
 else()
 ocv_update(CPU_KNOWN_OPTIMIZATIONS "NEON;FP16")
 ocv_update(CPU_NEON_FLAGS_ON "")
-set(CPU_BASELINE "NEON" CACHE STRING "${HELP_CPU_BASELINE}")
+ocv_update(CPU_FP16_IMPLIES "NEON")
+set(CPU_BASELINE "NEON;FP16" CACHE STRING "${HELP_CPU_BASELINE}")
 endif()
 endif()

@@ -270,6 +274,11 @@ set(CPU_DISPATCH "" CACHE STRING "${HELP_CPU_DISPATCH}")
 set_property(CACHE CPU_BASELINE PROPERTY STRINGS "" ${CPU_KNOWN_OPTIMIZATIONS})
 set_property(CACHE CPU_DISPATCH PROPERTY STRINGS "" ${CPU_KNOWN_OPTIMIZATIONS})

+# Update CPU_BASELINE_DETECT flag
+if(";${CPU_BASELINE};" MATCHES ";DETECT;")
+set(CPU_BASELINE_DETECT ON)
+endif()
+
 set(CPU_BASELINE_FLAGS "")

 set(CPU_BASELINE_FINAL "")
@@ -378,7 +387,7 @@ foreach(OPT ${CPU_KNOWN_OPTIMIZATIONS})
 if(CPU_${OPT}_SUPPORTED)
 if(";${CPU_DISPATCH};" MATCHES ";${OPT};" AND NOT __is_from_baseline)
 list(APPEND CPU_DISPATCH_FINAL ${OPT})
-elseif(__is_from_baseline AND NOT CPU_BASELINE_DETECT)
+elseif(__is_from_baseline)
 list(APPEND CPU_BASELINE_FINAL ${OPT})
 ocv_append_optimization_flag(CPU_BASELINE_FLAGS ${OPT})
 endif()
@@ -483,12 +492,6 @@ macro(ocv_compiler_optimization_options)
 if(ARM)
 add_extra_compiler_option("-mfp16-format=ieee")
 endif(ARM)
-if(ENABLE_NEON)
-add_extra_compiler_option("-mfpu=neon")
-endif()
-if(ENABLE_VFPV3 AND NOT ENABLE_NEON)
-add_extra_compiler_option("-mfpu=vfpv3")
-endif()
 endmacro()

 macro(ocv_compiler_optimization_options_finalize)
@@ -96,17 +96,23 @@ if(WITH_WEBP)
 ocv_clear_vars(WEBP_FOUND WEBP_LIBRARY WEBP_LIBRARIES WEBP_INCLUDE_DIR)
 else()
 include(cmake/OpenCVFindWebP.cmake)
+if(WEBP_FOUND)
+set(HAVE_WEBP 1)
+endif()
 endif()
 endif()

 # --- Add libwebp to 3rdparty/libwebp and compile it if not available ---
-if(WITH_WEBP AND NOT WEBP_FOUND)
+if(WITH_WEBP AND NOT WEBP_FOUND
+AND (NOT ANDROID OR HAVE_CPUFEATURES)
+)

 set(WEBP_LIBRARY libwebp)
 set(WEBP_LIBRARIES ${WEBP_LIBRARY})

 add_subdirectory("${OpenCV_SOURCE_DIR}/3rdparty/libwebp")
 set(WEBP_INCLUDE_DIR "${${WEBP_LIBRARY}_SOURCE_DIR}")
+set(HAVE_WEBP 1)
 endif()

 if(NOT WEBP_VERSION AND WEBP_INCLUDE_DIR)
@@ -386,6 +386,16 @@ macro(ocv_warnings_disable)
 endif(NOT ENABLE_NOISY_WARNINGS)
 endmacro()

+macro(ocv_append_sourge_file_compile_definitions source)
+get_source_file_property(_value "${source}" COMPILE_DEFINITIONS)
+if(_value)
+set(_value ${_value} ${ARGN})
+else()
+set(_value ${ARGN})
+endif()
+set_source_files_properties("${source}" PROPERTIES COMPILE_DEFINITIONS "${_value}")
+endmacro()
+
 macro(add_apple_compiler_options the_module)
 ocv_check_flag_support(OBJCXX "-fobjc-exceptions" HAVE_OBJC_EXCEPTIONS "")
 if(HAVE_OBJC_EXCEPTIONS)
cmake/checks/cpu_neon.cpp (new file, 27 added lines)
@@ -0,0 +1,27 @@
+#include <stdio.h>
+
+#if (defined WIN32 || defined _WIN32) && defined(_M_ARM)
+# include <Intrin.h>
+# include <arm_neon.h>
+# define CV_NEON 1
+#elif defined(__ARM_NEON__) || (defined (__ARM_NEON) && defined(__aarch64__))
+# include <arm_neon.h>
+# define CV_NEON 1
+#endif
+
+#if defined CV_NEON
+int test()
+{
+const float src[] = { 0.0f, 0.0f, 0.0f, 0.0f };
+float32x4_t val = vld1q_f32((const float32_t*)(src));
+return (int)vgetq_lane_f32(val, 0);
+}
+#else
+#error "NEON is not supported"
+#endif
+
+int main()
+{
+printf("%d\n", test());
+return 0;
+}
@@ -32,7 +32,11 @@ source_group("Src" FILES "${OPENCV_MODULE_opencv_core_BINARY_DIR}/version_string
 ocv_glob_module_sources(SOURCES "${OPENCV_MODULE_opencv_core_BINARY_DIR}/version_string.inc"
 HEADERS ${lib_cuda_hdrs} ${lib_cuda_hdrs_detail})

-ocv_module_include_directories(${the_module} ${ZLIB_INCLUDE_DIRS} ${OPENCL_INCLUDE_DIRS} ${CPUFEATURES_INCLUDE_DIRS})
+ocv_module_include_directories(${the_module} ${ZLIB_INCLUDE_DIRS} ${OPENCL_INCLUDE_DIRS})
+if(ANDROID AND HAVE_CPUFEATURES)
+ocv_append_sourge_file_compile_definitions(${CMAKE_CURRENT_SOURCE_DIR}/src/system.cpp "HAVE_CPUFEATURES=1")
+ocv_module_include_directories(${CPUFEATURES_INCLUDE_DIRS})
+endif()
 ocv_create_module(${extra_libs})

 ocv_target_link_libraries(${the_module} ${ZLIB_LIBRARIES} "${OPENCL_LIBRARIES}" "${VA_LIBRARIES}" "${LAPACK_LIBRARIES}" "${CPUFEATURES_LIBRARIES}")
@@ -71,7 +71,11 @@
 # define CV_AVX 1
 #endif
 #ifdef CV_CPU_COMPILE_FP16
-# include <immintrin.h>
+# if defined(__arm__) || defined(__aarch64__) || defined(_M_ARM)
+# include <arm_neon.h>
+# else
+# include <immintrin.h>
+# endif
 # define CV_FP16 1
 #endif
 #ifdef CV_CPU_COMPILE_AVX2
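The hunk above picks the intrinsics header to match the architecture: when FP16 support is compiled in, ARM builds need <arm_neon.h> (the half-float conversions come from NEON there), while <immintrin.h> only exists for x86 targets, where the conversions come from F16C. A self-contained sketch of the same split — not OpenCV code — assuming -mfpu=neon-fp16 on 32-bit ARM or -mf16c on x86:

    #include <cstdio>
    #if defined(__arm__) || defined(__aarch64__) || defined(_M_ARM)
    # include <arm_neon.h>      // ARM: vcvt_f16_f32 / vcvt_f32_f16
    static void f32_to_f16(const float* src, short* dst)
    {
        float32x4_t v = vld1q_f32(src);
        float16x4_t h = vcvt_f16_f32(v);                 // 4 x f32 -> 4 x f16
        vst1_s16(dst, vreinterpret_s16_f16(h));          // store the raw 16-bit lanes
    }
    #else
    # include <immintrin.h>     // x86: F16C _mm_cvtps_ph / _mm_cvtph_ps
    static void f32_to_f16(const float* src, short* dst)
    {
        __m128  v = _mm_loadu_ps(src);
        __m128i h = _mm_cvtps_ph(v, _MM_FROUND_TO_NEAREST_INT);
        _mm_storel_epi64(reinterpret_cast<__m128i*>(dst), h);
    }
    #endif

    int main()
    {
        const float src[4] = { 1.0f, 0.5f, -2.0f, 0.0f };
        short dst[4];
        f32_to_f16(src, dst);
        std::printf("%04x %04x %04x %04x\n",
                    dst[0] & 0xffff, dst[1] & 0xffff, dst[2] & 0xffff, dst[3] & 0xffff);
        return 0;
    }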
@@ -279,15 +279,27 @@ struct v_float64x2
 #endif

 #if CV_FP16
-// Workaround for old comiplers
+// Workaround for old compilers
 template <typename T> static inline int16x4_t vreinterpret_s16_f16(T a)
 { return (int16x4_t)a; }
 template <typename T> static inline float16x4_t vreinterpret_f16_s16(T a)
 { return (float16x4_t)a; }
-template <typename T> static inline float16x4_t vld1_f16(const T* ptr)
-{ return vreinterpret_f16_s16(vld1_s16((const short*)ptr)); }
-template <typename T> static inline void vst1_f16(T* ptr, float16x4_t a)
-{ vst1_s16((short*)ptr, vreinterpret_s16_f16(a)); }
+template <typename T> static inline float16x4_t cv_vld1_f16(const T* ptr)
+{
+#ifndef vld1_f16 // APPLE compiler defines vld1_f16 as macro
+return vreinterpret_f16_s16(vld1_s16((const short*)ptr));
+#else
+return vld1_f16((const __fp16*)ptr);
+#endif
+}
+template <typename T> static inline void cv_vst1_f16(T* ptr, float16x4_t a)
+{
+#ifndef vst1_f16 // APPLE compiler defines vst1_f16 as macro
+vst1_s16((short*)ptr, vreinterpret_s16_f16(a));
+#else
+vst1_f16((__fp16*)ptr, a);
+#endif
+}

 struct v_float16x4
 {
@@ -299,7 +311,7 @@ struct v_float16x4
 v_float16x4(short v0, short v1, short v2, short v3)
 {
 short v[] = {v0, v1, v2, v3};
-val = vld1_f16(v);
+val = cv_vld1_f16(v);
 }
 short get0() const
 {
@@ -778,9 +790,9 @@ OPENCV_HAL_IMPL_NEON_LOADSTORE_OP(v_float64x2, double, f64)
 #if CV_FP16
 // Workaround for old comiplers
 inline v_float16x4 v_load_f16(const short* ptr)
-{ return v_float16x4(vld1_f16(ptr)); }
+{ return v_float16x4(cv_vld1_f16(ptr)); }
 inline void v_store_f16(short* ptr, v_float16x4& a)
-{ vst1_f16(ptr, a.val); }
+{ cv_vst1_f16(ptr, a.val); }
 #endif

 #define OPENCV_HAL_IMPL_NEON_REDUCE_OP_8(_Tpvec, _Tpnvec, scalartype, func, vectorfunc, suffix) \
@@ -108,11 +108,6 @@ void cvtScaleHalf_SIMD16f32f( const short* src, size_t sstep, float* dst, size_t
 #elif CV_NEON
 const static int cVectorWidth = 4;

-template <typename T> static inline float16x4_t vld1_f16(const T* ptr)
-{ return (float16x4_t)vld1_s16((const short*)ptr); }
-template <typename T> static inline void vst1_f16(T* ptr, float16x4_t a)
-{ vst1_s16((short*)ptr, a); }
-
 void cvtScaleHalf_SIMD32f16f( const float* src, size_t sstep, short* dst, size_t dstep, cv::Size size )
 {
 CV_INSTRUMENT_REGION()
@@ -129,7 +124,7 @@ void cvtScaleHalf_SIMD32f16f( const float* src, size_t sstep, short* dst, size_t

 float16x4_t v_dst = vcvt_f16_f32(v_src);

-vst1_f16((__fp16*)dst + x, v_dst);
+cv_vst1_f16((__fp16*)dst + x, v_dst);
 }

 for ( ; x < size.width; x++ )
@@ -151,7 +146,7 @@ void cvtScaleHalf_SIMD16f32f( const short* src, size_t sstep, float* dst, size_t
 int x = 0;
 for ( ; x <= size.width - cVectorWidth ; x += cVectorWidth )
 {
-float16x4_t v_src = vld1_f16((__fp16*)src + x);
+float16x4_t v_src = cv_vld1_f16((__fp16*)src + x);

 float32x4_t v_dst = vcvt_f32_f16(v_src);

@@ -13,7 +13,7 @@ const unsigned int kBiasFp32Exponent = 127;
 #endif

 #if CV_FP16_TYPE
-float convertFp16SW(short fp16)
+inline float convertFp16SW(short fp16)
 {
 // Fp16 -> Fp32
 Cv16suf a;
@@ -21,7 +21,7 @@ float convertFp16SW(short fp16)
 return (float)a.h;
 }
 #else
-float convertFp16SW(short fp16)
+inline float convertFp16SW(short fp16)
 {
 // Fp16 -> Fp32
 Cv16suf b;
@@ -75,7 +75,7 @@ float convertFp16SW(short fp16)
 #endif

 #if CV_FP16_TYPE
-short convertFp16SW(float fp32)
+inline short convertFp16SW(float fp32)
 {
 // Fp32 -> Fp16
 Cv16suf a;
@@ -83,7 +83,7 @@ short convertFp16SW(float fp32)
 return a.i;
 }
 #else
-short convertFp16SW(float fp32)
+inline short convertFp16SW(float fp32)
 {
 // Fp32 -> Fp16
 Cv32suf a;
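Marking the software convertFp16SW overloads inline gives them vague linkage, so identical definitions may be compiled into more than one translation unit without a multiple-definition error at link time (presumably the reason for the change; the patch itself only shows the qualifier being added). A tiny illustration with hypothetical file names, not taken from the patch:

    // fp16sw_demo.hpp (hypothetical)
    #pragma once

    inline float convertFp16SW_demo(short fp16)   // 'inline': identical definitions in
    {                                             // several .cpp files are permitted
        return static_cast<float>(fp16);          // placeholder body, not the real Fp16->Fp32 math
    }

    // a.cpp and b.cpp can both #include "fp16sw_demo.hpp" and the program still links;
    // without 'inline', the duplicate definition is a multiple-definition error.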
@@ -73,7 +73,7 @@ Mutex* __initialization_mutex_initializer = &getInitializationMutex();
 #endif
 #endif

-#if defined ANDROID
+#if defined ANDROID && defined HAVE_CPUFEATURES
 # include <cpu-features.h>
 #endif

@@ -450,11 +450,27 @@ struct HWFeatures
 have[CV_CPU_NEON] = true;
 have[CV_CPU_FP16] = true;
 #elif defined __arm__ && defined __ANDROID__
+#if defined HAVE_CPUFEATURES
 __android_log_print(ANDROID_LOG_INFO, "OpenCV", "calling android_getCpuFeatures() ...");
 uint64_t features = android_getCpuFeatures();
 __android_log_print(ANDROID_LOG_INFO, "OpenCV", "calling android_getCpuFeatures() ... Done (%llx)", features);
 have[CV_CPU_NEON] = (features & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
 have[CV_CPU_FP16] = (features & ANDROID_CPU_ARM_FEATURE_VFP_FP16) != 0;
+#else
+__android_log_print(ANDROID_LOG_INFO, "OpenCV", "cpufeatures library is not avaialble for CPU detection");
+#if CV_NEON
+__android_log_print(ANDROID_LOG_INFO, "OpenCV", "- NEON instructions is enabled via build flags");
+have[CV_CPU_NEON] = true;
+#else
+__android_log_print(ANDROID_LOG_INFO, "OpenCV", "- NEON instructions is NOT enabled via build flags");
+#endif
+#if CV_FP16
+__android_log_print(ANDROID_LOG_INFO, "OpenCV", "- FP16 instructions is enabled via build flags");
+have[CV_CPU_FP16] = true;
+#else
+__android_log_print(ANDROID_LOG_INFO, "OpenCV", "- FP16 instructions is NOT enabled via build flags");
+#endif
+#endif
 #elif defined __arm__
 int cpufile = open("/proc/self/auxv", O_RDONLY);

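With cpufeatures optional, HWFeatures gains a compile-time fallback: when HAVE_CPUFEATURES is defined, NEON/FP16 support is still queried at run time through android_getCpuFeatures(); otherwise the code can only report what the build flags already guarantee. A self-contained sketch of that control flow, using stand-in names rather than the OpenCV ones:

    #include <cstdint>
    #include <cstdio>
    #if defined(__ANDROID__) && defined(HAVE_CPUFEATURES)
    # include <cpu-features.h>
    #endif

    enum { FEAT_NEON, FEAT_FP16, FEAT_MAX };   // stand-ins for CV_CPU_NEON / CV_CPU_FP16

    static void detectArmFeatures(bool have[FEAT_MAX])
    {
    #if defined(__ANDROID__) && defined(HAVE_CPUFEATURES)
        // runtime query through the NDK cpufeatures library
        uint64_t features = android_getCpuFeatures();
        have[FEAT_NEON] = (features & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
        have[FEAT_FP16] = (features & ANDROID_CPU_ARM_FEATURE_VFP_FP16) != 0;
    #else
        // no cpufeatures: trust only what the compiler was told to target
    # if defined(__ARM_NEON__) || defined(__ARM_NEON)   // stand-in for CV_NEON
        have[FEAT_NEON] = true;
    # else
        have[FEAT_NEON] = false;
    # endif
        have[FEAT_FP16] = false;   // conservative default for this sketch
    #endif
    }

    int main()
    {
        bool have[FEAT_MAX] = { false, false };
        detectArmFeatures(have);
        std::printf("NEON: %d, FP16: %d\n", (int)have[FEAT_NEON], (int)have[FEAT_FP16]);
        return 0;
    }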
@@ -23,7 +23,7 @@ if(HAVE_JPEG)
 list(APPEND GRFMT_LIBS ${JPEG_LIBRARIES})
 endif()

-if(WITH_WEBP)
+if(HAVE_WEBP)
 add_definitions(-DHAVE_WEBP)
 ocv_include_directories(${WEBP_INCLUDE_DIR})
 list(APPEND GRFMT_LIBS ${WEBP_LIBRARIES})