fix for cv::Mat::convertTo with scale
commit bbc161e1cb
parent f50f0ba63e
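This change touches the NEON specializations of cvtScale_SIMD that back cv::Mat::convertTo when a non-trivial scale/shift (alpha/beta) is applied. The vector code previously converted the scaled floats to integers with vcvtq_s32_f32 / vcvtq_u32_f32, which truncate toward zero, whereas the scalar fallback rounds to the nearest integer via saturate_cast (cvRound). Substituting the rounding helpers cv_vrndq_s32_f32 / cv_vrndq_u32_f32 makes the vectorized lanes agree with the scalar tail. The snippet below is a minimal sketch, not part of the commit, showing the user-visible behaviour this guarantees:

// Minimal sketch (not from this commit): convertTo with a scale factor is
// expected to round to the nearest integer on integer destinations, matching
// the scalar saturate_cast/cvRound path.
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Mat src(1, 4, CV_32F);
    src.at<float>(0, 0) = 0.998f;
    src.at<float>(0, 1) = 1.002f;
    src.at<float>(0, 2) = 252.4f;
    src.at<float>(0, 3) = 253.4f;

    cv::Mat dst;
    src.convertTo(dst, CV_8U, 0.5, 0.0);  // alpha = 0.5 exercises the scaled path

    // Round-to-nearest gives 0 1 126 127; a truncating conversion would give
    // 0 0 126 126 for the same inputs.
    for (int i = 0; i < dst.cols; ++i)
        std::printf("%d ", (int)dst.at<uchar>(0, i));
    std::printf("\n");
    return 0;
}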
@@ -1507,8 +1507,8 @@ struct cvtScale_SIMD<uchar, uchar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1_u8(dst + x, vqmovn_u16(v_dst));
         }
 
@@ -1530,8 +1530,8 @@ struct cvtScale_SIMD<uchar, schar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1_s8(dst + x, vqmovn_s16(v_dst));
         }
 
@@ -1553,8 +1553,8 @@ struct cvtScale_SIMD<uchar, ushort, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1q_u16(dst + x, v_dst);
         }
 
@@ -1576,8 +1576,8 @@ struct cvtScale_SIMD<uchar, short, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1q_s16(dst + x, v_dst);
         }
 
@@ -1599,8 +1599,8 @@ struct cvtScale_SIMD<uchar, int, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            vst1q_s32(dst + x, vcvtq_s32_f32(v_dst1));
-            vst1q_s32(dst + x + 4, vcvtq_s32_f32(v_dst2));
+            vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
+            vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
         }
 
         return x;
@@ -1642,8 +1642,8 @@ struct cvtScale_SIMD<schar, uchar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1_u8(dst + x, vqmovn_u16(v_dst));
         }
 
@@ -1665,8 +1665,8 @@ struct cvtScale_SIMD<schar, schar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1_s8(dst + x, vqmovn_s16(v_dst));
         }
 
@@ -1688,8 +1688,8 @@ struct cvtScale_SIMD<schar, ushort, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1q_u16(dst + x, v_dst);
         }
 
@@ -1711,8 +1711,8 @@ struct cvtScale_SIMD<schar, short, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1q_s16(dst + x, v_dst);
         }
 
@@ -1734,8 +1734,8 @@ struct cvtScale_SIMD<schar, int, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
 
-            vst1q_s32(dst + x, vcvtq_s32_f32(v_dst1));
-            vst1q_s32(dst + x + 4, vcvtq_s32_f32(v_dst2));
+            vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
+            vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
         }
 
         return x;
@@ -1777,8 +1777,8 @@ struct cvtScale_SIMD<ushort, uchar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1_u8(dst + x, vqmovn_u16(v_dst));
         }
 
@@ -1800,8 +1800,8 @@ struct cvtScale_SIMD<ushort, schar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1_s8(dst + x, vqmovn_s16(v_dst));
         }
 
@@ -1823,8 +1823,8 @@ struct cvtScale_SIMD<ushort, ushort, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1q_u16(dst + x, v_dst);
         }
 
@@ -1846,8 +1846,8 @@ struct cvtScale_SIMD<ushort, short, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1q_s16(dst + x, v_dst);
         }
 
@@ -1869,8 +1869,8 @@ struct cvtScale_SIMD<ushort, int, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_low_u16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_u32(vmovl_u16(vget_high_u16(v_src))), v_scale), v_shift);
 
-            vst1q_s32(dst + x, vcvtq_s32_f32(v_dst1));
-            vst1q_s32(dst + x + 4, vcvtq_s32_f32(v_dst2));
+            vst1q_s32(dst + x, cv_vrndq_s32_f32(v_dst1));
+            vst1q_s32(dst + x + 4, cv_vrndq_s32_f32(v_dst2));
         }
 
         return x;
@@ -1912,8 +1912,8 @@ struct cvtScale_SIMD<short, uchar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1_u8(dst + x, vqmovn_u16(v_dst));
         }
 
@@ -1935,8 +1935,8 @@ struct cvtScale_SIMD<short, schar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1_s8(dst + x, vqmovn_s16(v_dst));
         }
 
@@ -1958,8 +1958,8 @@ struct cvtScale_SIMD<short, ushort, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(v_src))), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(v_src))), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1q_u16(dst + x, v_dst);
         }
 
@@ -2001,8 +2001,8 @@ struct cvtScale_SIMD<int, uchar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1_u8(dst + x, vqmovn_u16(v_dst));
         }
 
@@ -2023,8 +2023,8 @@ struct cvtScale_SIMD<int, schar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1_s8(dst + x, vqmovn_s16(v_dst));
         }
 
@@ -2045,8 +2045,8 @@ struct cvtScale_SIMD<int, ushort, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1q_u16(dst + x, v_dst);
         }
 
@@ -2067,8 +2067,8 @@ struct cvtScale_SIMD<int, short, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x)), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vcvtq_f32_s32(vld1q_s32(src + x + 4)), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1q_s16(dst + x, v_dst);
         }
 
@@ -2091,8 +2091,8 @@ struct cvtScale_SIMD<float, uchar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1_u8(dst + x, vqmovn_u16(v_dst));
         }
 
@@ -2113,8 +2113,8 @@ struct cvtScale_SIMD<float, schar, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1_s8(dst + x, vqmovn_s16(v_dst));
         }
 
@@ -2135,8 +2135,8 @@ struct cvtScale_SIMD<float, ushort, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
 
-            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(vcvtq_u32_f32(v_dst1)),
-                                            vqmovn_u32(vcvtq_u32_f32(v_dst2)));
+            uint16x8_t v_dst = vcombine_u16(vqmovn_u32(cv_vrndq_u32_f32(v_dst1)),
+                                            vqmovn_u32(cv_vrndq_u32_f32(v_dst2)));
             vst1q_u16(dst + x, v_dst);
         }
 
@@ -2157,8 +2157,8 @@ struct cvtScale_SIMD<float, short, float>
             float32x4_t v_dst1 = vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift);
             float32x4_t v_dst2 = vaddq_f32(vmulq_f32(vld1q_f32(src + x + 4), v_scale), v_shift);
 
-            int16x8_t v_dst = vcombine_s16(vqmovn_s32(vcvtq_s32_f32(v_dst1)),
-                                           vqmovn_s32(vcvtq_s32_f32(v_dst2)));
+            int16x8_t v_dst = vcombine_s16(vqmovn_s32(cv_vrndq_s32_f32(v_dst1)),
+                                           vqmovn_s32(cv_vrndq_s32_f32(v_dst2)));
             vst1q_s16(dst + x, v_dst);
         }
 
@@ -2175,7 +2175,7 @@ struct cvtScale_SIMD<float, int, float>
         float32x4_t v_shift = vdupq_n_f32(shift), v_scale = vdupq_n_f32(scale);
 
         for ( ; x <= width - 4; x += 4)
-            vst1q_s32(dst + x, vcvtq_s32_f32(vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift)));
+            vst1q_s32(dst + x, cv_vrndq_s32_f32(vaddq_f32(vmulq_f32(vld1q_f32(src + x), v_scale), v_shift)));
 
         return x;
     }
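The cv_vrndq_u32_f32 / cv_vrndq_s32_f32 helpers referenced by the added lines are defined elsewhere in OpenCV's source and are not shown in these hunks. As a rough illustration only, and an assumption about the general technique rather than the helpers' actual bodies, a round-to-nearest conversion can be layered on top of the truncating vcvtq_* intrinsics by adding 0.5 with the sign of the input before converting; on AArch64 the native vcvtnq_s32_f32 intrinsic rounds to nearest directly.

// Illustrative only: one way to build a rounding float->int NEON conversion on
// top of the truncating vcvtq_* intrinsics. The real cv_vrndq_* helpers may
// differ in detail (e.g. tie-breaking: this rounds halves away from zero,
// while cvRound rounds halves to even).
#include <arm_neon.h>

static inline int32x4_t round_f32_to_s32(float32x4_t v)
{
    // Copy each lane's sign bit onto 0.5, add it, then truncate toward zero.
    uint32x4_t sign = vandq_u32(vreinterpretq_u32_f32(v), vdupq_n_u32(0x80000000u));
    uint32x4_t half = vorrq_u32(sign, vreinterpretq_u32_f32(vdupq_n_f32(0.5f)));
    return vcvtq_s32_f32(vaddq_f32(v, vreinterpretq_f32_u32(half)));
}

static inline uint32x4_t round_f32_to_u32(float32x4_t v)
{
    // Destination is unsigned, so the inputs of interest are non-negative and a
    // plain +0.5 before the truncating convert is enough.
    return vcvtq_u32_f32(vaddq_f32(v, vdupq_n_f32(0.5f)));
}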