Merge pull request #781 from bitwangyaoyao:2.4_fixerr
Commit a770d04e78
@@ -92,9 +92,6 @@ namespace cv
         extern const char *arithm_bitwise_xor_scalar_mask;
         extern const char *arithm_compare_eq;
         extern const char *arithm_compare_ne;
-        extern const char *arithm_sub;
-        extern const char *arithm_sub_scalar;
-        extern const char *arithm_sub_scalar_mask;
         extern const char *arithm_mul;
         extern const char *arithm_div;
         extern const char *arithm_absdiff;
@@ -130,7 +127,8 @@ inline int divUp(int total, int grain)
 /////////////////////// add subtract multiply divide /////////////////////////
 //////////////////////////////////////////////////////////////////////////////
 template<typename T>
-void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, string kernelName, const char **kernelString, void *_scalar)
+void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst,
+                    string kernelName, const char **kernelString, void *_scalar, int op_type = 0)
 {
     if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
     {
@@ -186,14 +184,25 @@ void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, string
         scalar = (T)scalar1;
         args.push_back( make_pair( sizeof(T), (void *)&scalar ));
     }
 
-    openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, depth);
+    switch(op_type)
+    {
+    case MAT_ADD:
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, depth, "-D ARITHM_ADD");
+        break;
+    case MAT_SUB:
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, depth, "-D ARITHM_SUB");
+        break;
+    default:
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, -1, depth);
+    }
 }
-static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, string kernelName, const char **kernelString)
+static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst,
+                           string kernelName, const char **kernelString, int op_type = 0)
 {
-    arithmetic_run<char>(src1, src2, dst, kernelName, kernelString, (void *)NULL);
+    arithmetic_run<char>(src1, src2, dst, kernelName, kernelString, (void *)NULL, op_type);
 }
-static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask, string kernelName, const char **kernelString)
+static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask,
+                           string kernelName, const char **kernelString, int op_type = 0)
 {
     if(!src1.clCxt->supportsFeature(Context::CL_DOUBLE) && src1.type() == CV_64F)
     {
@@ -248,24 +257,34 @@ static void arithmetic_run(const oclMat &src1, const oclMat &src2, oclMat &dst,
     args.push_back( make_pair( sizeof(cl_int), (void *)&cols ));
     args.push_back( make_pair( sizeof(cl_int), (void *)&dst_step1 ));
 
-    openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth);
+    switch (op_type)
+    {
+    case MAT_ADD:
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth, "-D ARITHM_ADD");
+        break;
+    case MAT_SUB:
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth, "-D ARITHM_SUB");
+        break;
+    default:
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth);
+    }
 }
 void cv::ocl::add(const oclMat &src1, const oclMat &src2, oclMat &dst)
 {
-    arithmetic_run(src1, src2, dst, "arithm_add", &arithm_add);
+    arithmetic_run(src1, src2, dst, "arithm_add", &arithm_add, MAT_ADD);
 }
 void cv::ocl::add(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask)
 {
-    arithmetic_run(src1, src2, dst, mask, "arithm_add_with_mask", &arithm_add);
+    arithmetic_run(src1, src2, dst, mask, "arithm_add_with_mask", &arithm_add, MAT_ADD);
 }
 
 void cv::ocl::subtract(const oclMat &src1, const oclMat &src2, oclMat &dst)
 {
-    arithmetic_run(src1, src2, dst, "arithm_sub", &arithm_sub);
+    arithmetic_run(src1, src2, dst, "arithm_add", &arithm_add, MAT_SUB);
 }
 void cv::ocl::subtract(const oclMat &src1, const oclMat &src2, oclMat &dst, const oclMat &mask)
 {
-    arithmetic_run(src1, src2, dst, mask, "arithm_sub_with_mask", &arithm_sub);
+    arithmetic_run(src1, src2, dst, mask, "arithm_add_with_mask", &arithm_add, MAT_SUB);
 }
 typedef void (*MulDivFunc)(const oclMat &src1, const oclMat &src2, oclMat &dst, string kernelName,
                            const char **kernelString, void *scalar);
@@ -351,12 +370,9 @@ void arithmetic_scalar_run(const oclMat &src1, const Scalar &src2, oclMat &dst,
     args.push_back( make_pair( sizeof(cl_int) , (void *)&cols ));
     args.push_back( make_pair( sizeof(cl_int) , (void *)&dst_step1 ));
     if(isMatSubScalar != 0)
-    {
-        isMatSubScalar = isMatSubScalar > 0 ? 1 : 0;
-        args.push_back( make_pair( sizeof(cl_int) , (void *)&isMatSubScalar));
-    }
-
-    openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth);
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth, "-D ARITHM_SUB");
+    else
+        openCLExecuteKernel(clCxt, kernelString, kernelName, globalThreads, localThreads, args, channels, depth, "-D ARITHM_ADD");
 }
 
 static void arithmetic_scalar_run(const oclMat &src, oclMat &dst, string kernelName, const char **kernelString, double scalar)
@@ -452,14 +468,14 @@ void cv::ocl::add(const oclMat &src1, const Scalar &src2, oclMat &dst, const ocl
 
 void cv::ocl::subtract(const oclMat &src1, const Scalar &src2, oclMat &dst, const oclMat &mask)
 {
-    string kernelName = mask.data ? "arithm_s_sub_with_mask" : "arithm_s_sub";
-    const char **kernelString = mask.data ? &arithm_sub_scalar_mask : &arithm_sub_scalar;
+    string kernelName = mask.data ? "arithm_s_add_with_mask" : "arithm_s_add";
+    const char **kernelString = mask.data ? &arithm_add_scalar_mask : &arithm_add_scalar;
     arithmetic_scalar( src1, src2, dst, mask, kernelName, kernelString, 1);
 }
 void cv::ocl::subtract(const Scalar &src2, const oclMat &src1, oclMat &dst, const oclMat &mask)
 {
-    string kernelName = mask.data ? "arithm_s_sub_with_mask" : "arithm_s_sub";
-    const char **kernelString = mask.data ? &arithm_sub_scalar_mask : &arithm_sub_scalar;
+    string kernelName = mask.data ? "arithm_s_add_with_mask" : "arithm_s_add";
+    const char **kernelString = mask.data ? &arithm_add_scalar_mask : &arithm_add_scalar;
     arithmetic_scalar( src1, src2, dst, mask, kernelName, kernelString, -1);
 }
 void cv::ocl::divide(double scalar, const oclMat &src, oclMat &dst)
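The host-side changes above route cv::ocl::subtract through the same "arithm_add*" kernels as cv::ocl::add: arithmetic_run() now takes an op_type and appends "-D ARITHM_ADD" or "-D ARITHM_SUB" to the kernel build options, so one kernel source is compiled per operation instead of keeping separate arithm_sub sources. A minimal standalone sketch of that dispatch follows; buildOptionFor() and the enum values here are illustrative stand-ins, not part of the OpenCV ocl API.

// Sketch only (not OpenCV code): choose a build define per operation and
// reuse one kernel source, mirroring the op_type switch in arithmetic_run().
#include <iostream>
#include <string>

enum OpType { MAT_ADD, MAT_SUB };   // assumed to match the enum used by arithm.cpp

static std::string buildOptionFor(OpType op)
{
    switch (op)
    {
    case MAT_ADD: return "-D ARITHM_ADD";
    case MAT_SUB: return "-D ARITHM_SUB";
    default:      return "";        // fall back to the kernel's default path
    }
}

int main()
{
    // Same kernel name and source either way; only the define changes,
    // which is what lets cv::ocl::subtract call the arithm_add kernels.
    std::cout << "add -> " << buildOptionFor(MAT_ADD) << "\n"
              << "sub -> " << buildOptionFor(MAT_SUB) << "\n";
    return 0;
}

The kernel-side half of the change follows: each .cl source gains an ARITHM_OP macro that folds to an addition or a subtraction depending on which define was passed at build time.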
@@ -52,6 +52,11 @@
 #endif
 #endif
 
+#ifdef ARITHM_ADD
+#define ARITHM_OP(A,B) ((A)+(B))
+#elif defined ARITHM_SUB
+#define ARITHM_OP(A,B) ((A)-(B))
+#endif
 //////////////////////////////////////////////////////////////////////////////////////////////////////
 /////////////////////////////////////////////ADD////////////////////////////////////////////////////
 ///////////////////////////////////////////////////////////////////////////////////////////////////////
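These five added preprocessor lines are what let one kernel file serve both operations: built with -D ARITHM_ADD, every ARITHM_OP(A,B) expands to (A)+(B); built with -D ARITHM_SUB it expands to (A)-(B). A self-contained C++ demo of the same compile-time selection is sketched below; the file name and build commands are made up for illustration and are not part of this commit.

// arithm_op_demo.cpp -- hypothetical demo of the ARITHM_OP pattern.
// Build:  g++ -D ARITHM_ADD arithm_op_demo.cpp && ./a.out   -> prints 7
//         g++ -D ARITHM_SUB arithm_op_demo.cpp && ./a.out   -> prints 3
#include <iostream>

#ifdef ARITHM_ADD
#define ARITHM_OP(A,B) ((A)+(B))      // same macro shape the kernels now use
#elif defined ARITHM_SUB
#define ARITHM_OP(A,B) ((A)-(B))
#else
#error "compile with -D ARITHM_ADD or -D ARITHM_SUB"
#endif

int main()
{
    std::cout << ARITHM_OP(5, 2) << std::endl;   // operation chosen at compile time
    return 0;
}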
@@ -95,7 +100,7 @@ __kernel void arithm_add_D0 (__global uchar *src1, int src1_step, int src1_offse
         src2_data.xyzw = (src2_index == -1) ? src2_data.wxyz:tmp.xyzw;
     }
     uchar4 dst_data = *((__global uchar4 *)(dst + dst_index));
-    short4 tmp = convert_short4_sat(src1_data) + convert_short4_sat(src2_data);
+    short4 tmp = ARITHM_OP(convert_short4_sat(src1_data), convert_short4_sat(src2_data));
     uchar4 tmp_data = convert_uchar4_sat(tmp);
 
     dst_data.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : dst_data.x;
@ -134,7 +139,7 @@ __kernel void arithm_add_D2 (__global ushort *src1, int src1_step, int src1_offs
|
||||
ushort4 src2_data = vload4(0, (__global ushort *)((__global char *)src2 + src2_index));
|
||||
|
||||
ushort4 dst_data = *((__global ushort4 *)((__global char *)dst + dst_index));
|
||||
int4 tmp = convert_int4_sat(src1_data) + convert_int4_sat(src2_data);
|
||||
int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), convert_int4_sat(src2_data));
|
||||
ushort4 tmp_data = convert_ushort4_sat(tmp);
|
||||
|
||||
dst_data.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : dst_data.x;
|
||||
@ -172,7 +177,7 @@ __kernel void arithm_add_D3 (__global short *src1, int src1_step, int src1_offse
|
||||
short4 src2_data = vload4(0, (__global short *)((__global char *)src2 + src2_index));
|
||||
|
||||
short4 dst_data = *((__global short4 *)((__global char *)dst + dst_index));
|
||||
int4 tmp = convert_int4_sat(src1_data) + convert_int4_sat(src2_data);
|
||||
int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), convert_int4_sat(src2_data));
|
||||
short4 tmp_data = convert_short4_sat(tmp);
|
||||
|
||||
dst_data.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : dst_data.x;
|
||||
@ -200,7 +205,7 @@ __kernel void arithm_add_D4 (__global int *src1, int src1_step, int src1_offset,
|
||||
|
||||
int data1 = *((__global int *)((__global char *)src1 + src1_index));
|
||||
int data2 = *((__global int *)((__global char *)src2 + src2_index));
|
||||
long tmp = (long)(data1) + (long)(data2);
|
||||
long tmp = ARITHM_OP((long)(data1), (long)(data2));
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index)) = convert_int_sat(tmp);
|
||||
}
|
||||
@ -221,7 +226,7 @@ __kernel void arithm_add_D5 (__global float *src1, int src1_step, int src1_offse
|
||||
|
||||
float data1 = *((__global float *)((__global char *)src1 + src1_index));
|
||||
float data2 = *((__global float *)((__global char *)src2 + src2_index));
|
||||
float tmp = data1 + data2;
|
||||
float tmp = ARITHM_OP(data1, data2);
|
||||
|
||||
*((__global float *)((__global char *)dst + dst_index)) = tmp;
|
||||
}
|
||||
@ -245,7 +250,7 @@ __kernel void arithm_add_D6 (__global double *src1, int src1_step, int src1_offs
|
||||
double data1 = *((__global double *)((__global char *)src1 + src1_index));
|
||||
double data2 = *((__global double *)((__global char *)src2 + src2_index));
|
||||
|
||||
*((__global double *)((__global char *)dst + dst_index)) = data1 + data2;
|
||||
*((__global double *)((__global char *)dst + dst_index)) = ARITHM_OP(data1, data2);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
@ -302,7 +307,7 @@ __kernel void arithm_add_with_mask_C1_D0 (__global uchar *src1, int src1_step, i
|
||||
}
|
||||
|
||||
uchar4 data = *((__global uchar4 *)(dst + dst_index));
|
||||
short4 tmp = convert_short4_sat(src1_data) + convert_short4_sat(src2_data);
|
||||
short4 tmp = ARITHM_OP(convert_short4_sat(src1_data), convert_short4_sat(src2_data));
|
||||
uchar4 tmp_data = convert_uchar4_sat(tmp);
|
||||
|
||||
data.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : data.x;
|
||||
@ -344,7 +349,7 @@ __kernel void arithm_add_with_mask_C1_D2 (__global ushort *src1, int src1_step,
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
ushort2 data = *((__global ushort2 *)((__global uchar *)dst + dst_index));
|
||||
int2 tmp = convert_int2_sat(src1_data) + convert_int2_sat(src2_data);
|
||||
int2 tmp = ARITHM_OP(convert_int2_sat(src1_data), convert_int2_sat(src2_data));
|
||||
ushort2 tmp_data = convert_ushort2_sat(tmp);
|
||||
|
||||
data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
|
||||
@ -384,7 +389,7 @@ __kernel void arithm_add_with_mask_C1_D3 (__global short *src1, int src1_step, i
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
short2 data = *((__global short2 *)((__global uchar *)dst + dst_index));
|
||||
int2 tmp = convert_int2_sat(src1_data) + convert_int2_sat(src2_data);
|
||||
int2 tmp = ARITHM_OP(convert_int2_sat(src1_data), convert_int2_sat(src2_data));
|
||||
short2 tmp_data = convert_short2_sat(tmp);
|
||||
|
||||
data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
|
||||
@ -416,7 +421,7 @@ __kernel void arithm_add_with_mask_C1_D4 (__global int *src1, int src1_step, i
|
||||
int src_data2 = *((__global int *)((__global char *)src2 + src2_index));
|
||||
int dst_data = *((__global int *)((__global char *)dst + dst_index));
|
||||
|
||||
int data = convert_int_sat((long)src_data1 + (long)src_data2);
|
||||
int data = convert_int_sat(ARITHM_OP((long)src_data1, (long)src_data2));
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index)) = data;
|
||||
@ -446,7 +451,7 @@ __kernel void arithm_add_with_mask_C1_D5 (__global float *src1, int src1_step, i
|
||||
float src_data2 = *((__global float *)((__global char *)src2 + src2_index));
|
||||
float dst_data = *((__global float *)((__global char *)dst + dst_index));
|
||||
|
||||
float data = src_data1 + src_data2;
|
||||
float data = ARITHM_OP(src_data1, src_data2);
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global float *)((__global char *)dst + dst_index)) = data;
|
||||
@ -477,7 +482,7 @@ __kernel void arithm_add_with_mask_C1_D6 (__global double *src1, int src1_step,
|
||||
double src_data2 = *((__global double *)((__global char *)src2 + src2_index));
|
||||
double dst_data = *((__global double *)((__global char *)dst + dst_index));
|
||||
|
||||
double data = src_data1 + src_data2;
|
||||
double data = ARITHM_OP(src_data1, src_data2);
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global double *)((__global char *)dst + dst_index)) = data;
|
||||
@ -516,7 +521,7 @@ __kernel void arithm_add_with_mask_C2_D0 (__global uchar *src1, int src1_step, i
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
uchar4 data = *((__global uchar4 *)(dst + dst_index));
|
||||
short4 tmp = convert_short4_sat(src1_data) + convert_short4_sat(src2_data);
|
||||
short4 tmp = ARITHM_OP(convert_short4_sat(src1_data), convert_short4_sat(src2_data));
|
||||
uchar4 tmp_data = convert_uchar4_sat(tmp);
|
||||
|
||||
data.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.xy : data.xy;
|
||||
@ -548,7 +553,7 @@ __kernel void arithm_add_with_mask_C2_D2 (__global ushort *src1, int src1_step,
|
||||
ushort2 src_data2 = *((__global ushort2 *)((__global char *)src2 + src2_index));
|
||||
ushort2 dst_data = *((__global ushort2 *)((__global char *)dst + dst_index));
|
||||
|
||||
int2 tmp = convert_int2_sat(src_data1) + convert_int2_sat(src_data2);
|
||||
int2 tmp = ARITHM_OP(convert_int2_sat(src_data1), convert_int2_sat(src_data2));
|
||||
ushort2 data = convert_ushort2_sat(tmp);
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
@ -578,7 +583,7 @@ __kernel void arithm_add_with_mask_C2_D3 (__global short *src1, int src1_step, i
|
||||
short2 src_data2 = *((__global short2 *)((__global char *)src2 + src2_index));
|
||||
short2 dst_data = *((__global short2 *)((__global char *)dst + dst_index));
|
||||
|
||||
int2 tmp = convert_int2_sat(src_data1) + convert_int2_sat(src_data2);
|
||||
int2 tmp = ARITHM_OP(convert_int2_sat(src_data1), convert_int2_sat(src_data2));
|
||||
short2 data = convert_short2_sat(tmp);
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
@ -608,7 +613,7 @@ __kernel void arithm_add_with_mask_C2_D4 (__global int *src1, int src1_step, i
|
||||
int2 src_data2 = *((__global int2 *)((__global char *)src2 + src2_index));
|
||||
int2 dst_data = *((__global int2 *)((__global char *)dst + dst_index));
|
||||
|
||||
int2 data = convert_int2_sat(convert_long2_sat(src_data1) + convert_long2_sat(src_data2));
|
||||
int2 data = convert_int2_sat(ARITHM_OP(convert_long2_sat(src_data1), convert_long2_sat(src_data2)));
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global int2 *)((__global char *)dst + dst_index)) = data;
|
||||
@ -637,7 +642,7 @@ __kernel void arithm_add_with_mask_C2_D5 (__global float *src1, int src1_step, i
|
||||
float2 src_data2 = *((__global float2 *)((__global char *)src2 + src2_index));
|
||||
float2 dst_data = *((__global float2 *)((__global char *)dst + dst_index));
|
||||
|
||||
float2 data = src_data1 + src_data2;
|
||||
float2 data = ARITHM_OP(src_data1, src_data2);
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global float2 *)((__global char *)dst + dst_index)) = data;
|
||||
@ -668,329 +673,14 @@ __kernel void arithm_add_with_mask_C2_D6 (__global double *src1, int src1_step,
|
||||
double2 src_data2 = *((__global double2 *)((__global char *)src2 + src2_index));
|
||||
double2 dst_data = *((__global double2 *)((__global char *)dst + dst_index));
|
||||
|
||||
double2 data = src_data1 + src_data2;
|
||||
double2 data = ARITHM_OP(src_data1, src_data2);
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global double2 *)((__global char *)dst + dst_index)) = data;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_add_with_mask_C3_D0 (__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int src2_index = mad24(y, src2_step, (x * 3) + src2_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
uchar4 src2_data_0 = vload4(0, src2 + src2_index + 0);
|
||||
uchar4 src2_data_1 = vload4(0, src2 + src2_index + 4);
|
||||
uchar4 src2_data_2 = vload4(0, src2 + src2_index + 8);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = convert_uchar4_sat(convert_short4_sat(src1_data_0) + convert_short4_sat(src2_data_0));
|
||||
uchar4 tmp_data_1 = convert_uchar4_sat(convert_short4_sat(src1_data_1) + convert_short4_sat(src2_data_1));
|
||||
uchar4 tmp_data_2 = convert_uchar4_sat(convert_short4_sat(src1_data_2) + convert_short4_sat(src2_data_2));
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_add_with_mask_C3_D2 (__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int src2_index = mad24(y, src2_step, (x * 6) + src2_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
ushort2 src2_data_0 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 0));
|
||||
ushort2 src2_data_1 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 4));
|
||||
ushort2 src2_data_2 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = convert_ushort2_sat(convert_int2_sat(src1_data_0) + convert_int2_sat(src2_data_0));
|
||||
ushort2 tmp_data_1 = convert_ushort2_sat(convert_int2_sat(src1_data_1) + convert_int2_sat(src2_data_1));
|
||||
ushort2 tmp_data_2 = convert_ushort2_sat(convert_int2_sat(src1_data_2) + convert_int2_sat(src2_data_2));
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_add_with_mask_C3_D3 (__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int src2_index = mad24(y, src2_step, (x * 6) + src2_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
short2 src2_data_0 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 0));
|
||||
short2 src2_data_1 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 4));
|
||||
short2 src2_data_2 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = convert_short2_sat(convert_int2_sat(src1_data_0) + convert_int2_sat(src2_data_0));
|
||||
short2 tmp_data_1 = convert_short2_sat(convert_int2_sat(src1_data_1) + convert_int2_sat(src2_data_1));
|
||||
short2 tmp_data_2 = convert_short2_sat(convert_int2_sat(src1_data_2) + convert_int2_sat(src2_data_2));
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_add_with_mask_C3_D4 (__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 12) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = *((__global int *)((__global char *)src2 + src2_index + 0));
|
||||
int src2_data_1 = *((__global int *)((__global char *)src2 + src2_index + 4));
|
||||
int src2_data_2 = *((__global int *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = convert_int_sat((long)src1_data_0 + (long)src2_data_0);
|
||||
int tmp_data_1 = convert_int_sat((long)src1_data_1 + (long)src2_data_1);
|
||||
int tmp_data_2 = convert_int_sat((long)src1_data_2 + (long)src2_data_2);
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_add_with_mask_C3_D5 (__global float *src1, int src1_step, int src1_offset,
|
||||
__global float *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global float *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 12) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
float src1_data_0 = *((__global float *)((__global char *)src1 + src1_index + 0));
|
||||
float src1_data_1 = *((__global float *)((__global char *)src1 + src1_index + 4));
|
||||
float src1_data_2 = *((__global float *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
float src2_data_0 = *((__global float *)((__global char *)src2 + src2_index + 0));
|
||||
float src2_data_1 = *((__global float *)((__global char *)src2 + src2_index + 4));
|
||||
float src2_data_2 = *((__global float *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
float data_0 = *((__global float *)((__global char *)dst + dst_index + 0));
|
||||
float data_1 = *((__global float *)((__global char *)dst + dst_index + 4));
|
||||
float data_2 = *((__global float *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
float tmp_data_0 = src1_data_0 + src2_data_0;
|
||||
float tmp_data_1 = src1_data_1 + src2_data_1;
|
||||
float tmp_data_2 = src1_data_2 + src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global float *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global float *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global float *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_add_with_mask_C3_D6 (__global double *src1, int src1_step, int src1_offset,
|
||||
__global double *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global double *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 24) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
double src1_data_0 = *((__global double *)((__global char *)src1 + src1_index + 0 ));
|
||||
double src1_data_1 = *((__global double *)((__global char *)src1 + src1_index + 8 ));
|
||||
double src1_data_2 = *((__global double *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
double src2_data_0 = *((__global double *)((__global char *)src2 + src2_index + 0 ));
|
||||
double src2_data_1 = *((__global double *)((__global char *)src2 + src2_index + 8 ));
|
||||
double src2_data_2 = *((__global double *)((__global char *)src2 + src2_index + 16));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
double data_0 = *((__global double *)((__global char *)dst + dst_index + 0 ));
|
||||
double data_1 = *((__global double *)((__global char *)dst + dst_index + 8 ));
|
||||
double data_2 = *((__global double *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
double tmp_data_0 = src1_data_0 + src2_data_0;
|
||||
double tmp_data_1 = src1_data_1 + src2_data_1;
|
||||
double tmp_data_2 = src1_data_2 + src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global double *)((__global char *)dst + dst_index + 0 ))= data_0;
|
||||
*((__global double *)((__global char *)dst + dst_index + 8 ))= data_1;
|
||||
*((__global double *)((__global char *)dst + dst_index + 16))= data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_add_with_mask_C4_D0 (__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
@ -1014,7 +704,7 @@ __kernel void arithm_add_with_mask_C4_D0 (__global uchar *src1, int src1_step, i
|
||||
uchar4 src_data2 = *((__global uchar4 *)(src2 + src2_index));
|
||||
uchar4 dst_data = *((__global uchar4 *)(dst + dst_index));
|
||||
|
||||
uchar4 data = convert_uchar4_sat(convert_ushort4_sat(src_data1) + convert_ushort4_sat(src_data2));
|
||||
uchar4 data = convert_uchar4_sat(ARITHM_OP(convert_short4_sat(src_data1), convert_short4_sat(src_data2)));
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index)) = data;
|
||||
@ -1043,7 +733,7 @@ __kernel void arithm_add_with_mask_C4_D2 (__global ushort *src1, int src1_step,
|
||||
ushort4 src_data2 = *((__global ushort4 *)((__global char *)src2 + src2_index));
|
||||
ushort4 dst_data = *((__global ushort4 *)((__global char *)dst + dst_index));
|
||||
|
||||
ushort4 data = convert_ushort4_sat(convert_int4_sat(src_data1) + convert_int4_sat(src_data2));
|
||||
ushort4 data = convert_ushort4_sat(ARITHM_OP(convert_int4_sat(src_data1), convert_int4_sat(src_data2)));
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global ushort4 *)((__global char *)dst + dst_index)) = data;
|
||||
@ -1072,7 +762,7 @@ __kernel void arithm_add_with_mask_C4_D3 (__global short *src1, int src1_step, i
|
||||
short4 src_data2 = *((__global short4 *)((__global char *)src2 + src2_index));
|
||||
short4 dst_data = *((__global short4 *)((__global char *)dst + dst_index));
|
||||
|
||||
short4 data = convert_short4_sat(convert_int4_sat(src_data1) + convert_int4_sat(src_data2));
|
||||
short4 data = convert_short4_sat(ARITHM_OP(convert_int4_sat(src_data1), convert_int4_sat(src_data2)));
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global short4 *)((__global char *)dst + dst_index)) = data;
|
||||
@ -1101,7 +791,7 @@ __kernel void arithm_add_with_mask_C4_D4 (__global int *src1, int src1_step, i
|
||||
int4 src_data2 = *((__global int4 *)((__global char *)src2 + src2_index));
|
||||
int4 dst_data = *((__global int4 *)((__global char *)dst + dst_index));
|
||||
|
||||
int4 data = convert_int4_sat(convert_long4_sat(src_data1) + convert_long4_sat(src_data2));
|
||||
int4 data = convert_int4_sat(ARITHM_OP(convert_long4_sat(src_data1), convert_long4_sat(src_data2)));
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global int4 *)((__global char *)dst + dst_index)) = data;
|
||||
@ -1130,7 +820,7 @@ __kernel void arithm_add_with_mask_C4_D5 (__global float *src1, int src1_step, i
|
||||
float4 src_data2 = *((__global float4 *)((__global char *)src2 + src2_index));
|
||||
float4 dst_data = *((__global float4 *)((__global char *)dst + dst_index));
|
||||
|
||||
float4 data = src_data1 + src_data2;
|
||||
float4 data = ARITHM_OP(src_data1, src_data2);
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global float4 *)((__global char *)dst + dst_index)) = data;
|
||||
@ -1161,7 +851,7 @@ __kernel void arithm_add_with_mask_C4_D6 (__global double *src1, int src1_step,
|
||||
double4 src_data2 = *((__global double4 *)((__global char *)src2 + src2_index));
|
||||
double4 dst_data = *((__global double4 *)((__global char *)dst + dst_index));
|
||||
|
||||
double4 data = src_data1 + src_data2;
|
||||
double4 data = ARITHM_OP(src_data1, src_data2);
|
||||
data = mask_data ? data : dst_data;
|
||||
|
||||
*((__global double4 *)((__global char *)dst + dst_index)) = data;
|
||||
|
@@ -49,7 +49,12 @@
 #elif defined (cl_amd_fp64)
 #pragma OPENCL EXTENSION cl_amd_fp64:enable
 #endif
 #endif
 
+#ifdef ARITHM_ADD
+#define ARITHM_OP(A,B) ((A)+(B))
+#elif defined ARITHM_SUB
+#define ARITHM_OP(A,B) ((A)-(B))
+#endif
 /**************************************add with scalar without mask**************************************/
 __kernel void arithm_s_add_C1_D0 (__global uchar *src1, int src1_step, int src1_offset,
@ -83,7 +88,7 @@ __kernel void arithm_s_add_C1_D0 (__global uchar *src1, int src1_step, int src
|
||||
}
|
||||
|
||||
uchar4 data = *((__global uchar4 *)(dst + dst_index));
|
||||
int4 tmp = convert_int4_sat(src1_data) + src2_data;
|
||||
int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
|
||||
uchar4 tmp_data = convert_uchar4_sat(tmp);
|
||||
|
||||
data.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : data.x;
|
||||
@ -120,7 +125,7 @@ __kernel void arithm_s_add_C1_D2 (__global ushort *src1, int src1_step, int sr
|
||||
int2 src2_data = (int2)(src2.x, src2.x);
|
||||
|
||||
ushort2 data = *((__global ushort2 *)((__global uchar *)dst + dst_index));
|
||||
int2 tmp = convert_int2_sat(src1_data) + src2_data;
|
||||
int2 tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
|
||||
ushort2 tmp_data = convert_ushort2_sat(tmp);
|
||||
|
||||
data.x = (dst_index + 0 >= dst_start) ? tmp_data.x : data.x;
|
||||
@ -155,7 +160,7 @@ __kernel void arithm_s_add_C1_D3 (__global short *src1, int src1_step, int src
|
||||
int2 src2_data = (int2)(src2.x, src2.x);
|
||||
short2 data = *((__global short2 *)((__global uchar *)dst + dst_index));
|
||||
|
||||
int2 tmp = convert_int2_sat(src1_data) + src2_data;
|
||||
int2 tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
|
||||
short2 tmp_data = convert_short2_sat(tmp);
|
||||
|
||||
data.x = (dst_index + 0 >= dst_start) ? tmp_data.x : data.x;
|
||||
@ -181,7 +186,7 @@ __kernel void arithm_s_add_C1_D4 (__global int *src1, int src1_step, int src1_
|
||||
int src_data2 = src2.x;
|
||||
int dst_data = *((__global int *)((__global char *)dst + dst_index));
|
||||
|
||||
int data = convert_int_sat((long)src_data1 + (long)src_data2);
|
||||
int data = convert_int_sat(ARITHM_OP((long)src_data1, (long)src_data2));
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index)) = data;
|
||||
}
|
||||
@ -203,7 +208,7 @@ __kernel void arithm_s_add_C1_D5 (__global float *src1, int src1_step, int src
|
||||
float src_data2 = src2.x;
|
||||
float dst_data = *((__global float *)((__global char *)dst + dst_index));
|
||||
|
||||
float data = src_data1 + src_data2;
|
||||
float data = ARITHM_OP(src_data1, src_data2);
|
||||
|
||||
*((__global float *)((__global char *)dst + dst_index)) = data;
|
||||
}
|
||||
@ -227,7 +232,7 @@ __kernel void arithm_s_add_C1_D6 (__global double *src1, int src1_step, int sr
|
||||
double src2_data = src2.x;
|
||||
double dst_data = *((__global double *)((__global char *)dst + dst_index));
|
||||
|
||||
double data = src_data1 + src2_data;
|
||||
double data = ARITHM_OP(src_data1, src2_data);
|
||||
|
||||
*((__global double *)((__global char *)dst + dst_index)) = data;
|
||||
}
|
||||
@ -260,7 +265,7 @@ __kernel void arithm_s_add_C2_D0 (__global uchar *src1, int src1_step, int src
|
||||
int4 src2_data = (int4)(src2.x, src2.y, src2.x, src2.y);
|
||||
|
||||
uchar4 data = *((__global uchar4 *)(dst + dst_index));
|
||||
int4 tmp = convert_int4_sat(src1_data) + src2_data;
|
||||
int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
|
||||
uchar4 tmp_data = convert_uchar4_sat(tmp);
|
||||
|
||||
data.xy = (dst_index + 0 >= dst_start) ? tmp_data.xy : data.xy;
|
||||
@ -286,7 +291,7 @@ __kernel void arithm_s_add_C2_D2 (__global ushort *src1, int src1_step, int sr
|
||||
int2 src_data2 = (int2)(src2.x, src2.y);
|
||||
ushort2 dst_data = *((__global ushort2 *)((__global char *)dst + dst_index));
|
||||
|
||||
int2 tmp = convert_int2_sat(src_data1) + src_data2;
|
||||
int2 tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
|
||||
ushort2 data = convert_ushort2_sat(tmp);
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index)) = data;
|
||||
@ -309,7 +314,7 @@ __kernel void arithm_s_add_C2_D3 (__global short *src1, int src1_step, int src
|
||||
int2 src_data2 = (int2)(src2.x, src2.y);
|
||||
short2 dst_data = *((__global short2 *)((__global char *)dst + dst_index));
|
||||
|
||||
int2 tmp = convert_int2_sat(src_data1) + src_data2;
|
||||
int2 tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
|
||||
short2 data = convert_short2_sat(tmp);
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index)) = data;
|
||||
@ -332,7 +337,7 @@ __kernel void arithm_s_add_C2_D4 (__global int *src1, int src1_step, int src1_
|
||||
int2 src_data2 = (int2)(src2.x, src2.y);
|
||||
int2 dst_data = *((__global int2 *)((__global char *)dst + dst_index));
|
||||
|
||||
int2 data = convert_int2_sat(convert_long2_sat(src_data1) + convert_long2_sat(src_data2));
|
||||
int2 data = convert_int2_sat(ARITHM_OP(convert_long2_sat(src_data1), convert_long2_sat(src_data2)));
|
||||
*((__global int2 *)((__global char *)dst + dst_index)) = data;
|
||||
}
|
||||
}
|
||||
@ -353,7 +358,7 @@ __kernel void arithm_s_add_C2_D5 (__global float *src1, int src1_step, int src
|
||||
float2 src_data2 = (float2)(src2.x, src2.y);
|
||||
float2 dst_data = *((__global float2 *)((__global char *)dst + dst_index));
|
||||
|
||||
float2 data = src_data1 + src_data2;
|
||||
float2 data = ARITHM_OP(src_data1, src_data2);
|
||||
*((__global float2 *)((__global char *)dst + dst_index)) = data;
|
||||
}
|
||||
}
|
||||
@ -376,280 +381,13 @@ __kernel void arithm_s_add_C2_D6 (__global double *src1, int src1_step, int sr
|
||||
double2 src_data2 = (double2)(src2.x, src2.y);
|
||||
double2 dst_data = *((__global double2 *)((__global char *)dst + dst_index));
|
||||
|
||||
double2 data = src_data1 + src_data2;
|
||||
double2 data = ARITHM_OP(src_data1, src_data2);
|
||||
|
||||
*((__global double2 *)((__global char *)dst + dst_index)) = data;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_add_C3_D0 (__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
int4 src2_data_0 = (int4)(src2.x, src2.y, src2.z, src2.x);
|
||||
int4 src2_data_1 = (int4)(src2.y, src2.z, src2.x, src2.y);
|
||||
int4 src2_data_2 = (int4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = convert_uchar4_sat(convert_int4_sat(src1_data_0) + src2_data_0);
|
||||
uchar4 tmp_data_1 = convert_uchar4_sat(convert_int4_sat(src1_data_1) + src2_data_1);
|
||||
uchar4 tmp_data_2 = convert_uchar4_sat(convert_int4_sat(src1_data_2) + src2_data_2);
|
||||
|
||||
data_0.xyz = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_add_C3_D2 (__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int2 src2_data_0 = (int2)(src2.x, src2.y);
|
||||
int2 src2_data_1 = (int2)(src2.z, src2.x);
|
||||
int2 src2_data_2 = (int2)(src2.y, src2.z);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = convert_ushort2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
|
||||
ushort2 tmp_data_1 = convert_ushort2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
|
||||
ushort2 tmp_data_2 = convert_ushort2_sat(convert_int2_sat(src1_data_2) + src2_data_2);
|
||||
|
||||
data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_add_C3_D3 (__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int2 src2_data_0 = (int2)(src2.x, src2.y);
|
||||
int2 src2_data_1 = (int2)(src2.z, src2.x);
|
||||
int2 src2_data_2 = (int2)(src2.y, src2.z);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = convert_short2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
|
||||
short2 tmp_data_1 = convert_short2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
|
||||
short2 tmp_data_2 = convert_short2_sat(convert_int2_sat(src1_data_2) + src2_data_2);
|
||||
|
||||
data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_add_C3_D4 (__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = src2.x;
|
||||
int src2_data_1 = src2.y;
|
||||
int src2_data_2 = src2.z;
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = convert_int_sat((long)src1_data_0 + (long)src2_data_0);
|
||||
int tmp_data_1 = convert_int_sat((long)src1_data_1 + (long)src2_data_1);
|
||||
int tmp_data_2 = convert_int_sat((long)src1_data_2 + (long)src2_data_2);
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= tmp_data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= tmp_data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_add_C3_D5 (__global float *src1, int src1_step, int src1_offset,
|
||||
__global float *dst, int dst_step, int dst_offset,
|
||||
float4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
float src1_data_0 = *((__global float *)((__global char *)src1 + src1_index + 0));
|
||||
float src1_data_1 = *((__global float *)((__global char *)src1 + src1_index + 4));
|
||||
float src1_data_2 = *((__global float *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
float src2_data_0 = src2.x;
|
||||
float src2_data_1 = src2.y;
|
||||
float src2_data_2 = src2.z;
|
||||
|
||||
float data_0 = *((__global float *)((__global char *)dst + dst_index + 0));
|
||||
float data_1 = *((__global float *)((__global char *)dst + dst_index + 4));
|
||||
float data_2 = *((__global float *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
float tmp_data_0 = src1_data_0 + src2_data_0;
|
||||
float tmp_data_1 = src1_data_1 + src2_data_1;
|
||||
float tmp_data_2 = src1_data_2 + src2_data_2;
|
||||
|
||||
*((__global float *)((__global char *)dst + dst_index + 0))= tmp_data_0;
|
||||
*((__global float *)((__global char *)dst + dst_index + 4))= tmp_data_1;
|
||||
*((__global float *)((__global char *)dst + dst_index + 8))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_s_add_C3_D6 (__global double *src1, int src1_step, int src1_offset,
|
||||
__global double *dst, int dst_step, int dst_offset,
|
||||
double4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
double src1_data_0 = *((__global double *)((__global char *)src1 + src1_index + 0 ));
|
||||
double src1_data_1 = *((__global double *)((__global char *)src1 + src1_index + 8 ));
|
||||
double src1_data_2 = *((__global double *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
double src2_data_0 = src2.x;
|
||||
double src2_data_1 = src2.y;
|
||||
double src2_data_2 = src2.z;
|
||||
|
||||
double data_0 = *((__global double *)((__global char *)dst + dst_index + 0 ));
|
||||
double data_1 = *((__global double *)((__global char *)dst + dst_index + 8 ));
|
||||
double data_2 = *((__global double *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
double tmp_data_0 = src1_data_0 + src2_data_0;
|
||||
double tmp_data_1 = src1_data_1 + src2_data_1;
|
||||
double tmp_data_2 = src1_data_2 + src2_data_2;
|
||||
|
||||
*((__global double *)((__global char *)dst + dst_index + 0 ))= tmp_data_0;
|
||||
*((__global double *)((__global char *)dst + dst_index + 8 ))= tmp_data_1;
|
||||
*((__global double *)((__global char *)dst + dst_index + 16))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_add_C4_D0 (__global uchar *src1, int src1_step, int src1_offset,
                                  __global uchar *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1)
@ -665,7 +403,7 @@ __kernel void arithm_s_add_C4_D0 (__global uchar *src1, int src1_step, int src

        uchar4 src_data1 = *((__global uchar4 *)(src1 + src1_index));

        uchar4 data = convert_uchar4_sat(convert_int4_sat(src_data1) + src2);
        uchar4 data = convert_uchar4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));

        *((__global uchar4 *)(dst + dst_index)) = data;
    }
@ -685,7 +423,7 @@ __kernel void arithm_s_add_C4_D2 (__global ushort *src1, int src1_step, int sr

        ushort4 src_data1 = *((__global ushort4 *)((__global char *)src1 + src1_index));

        ushort4 data = convert_ushort4_sat(convert_int4_sat(src_data1) + src2);
        ushort4 data = convert_ushort4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));

        *((__global ushort4 *)((__global char *)dst + dst_index)) = data;
    }
@ -705,7 +443,7 @@ __kernel void arithm_s_add_C4_D3 (__global short *src1, int src1_step, int src

        short4 src_data1 = *((__global short4 *)((__global char *)src1 + src1_index));

        short4 data = convert_short4_sat(convert_int4_sat(src_data1) + src2);
        short4 data = convert_short4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));

        *((__global short4 *)((__global char *)dst + dst_index)) = data;
    }
@ -725,7 +463,7 @@ __kernel void arithm_s_add_C4_D4 (__global int *src1, int src1_step, int src1_

        int4 src_data1 = *((__global int4 *)((__global char *)src1 + src1_index));

        int4 data = convert_int4_sat(convert_long4_sat(src_data1) + convert_long4_sat(src2));
        int4 data = convert_int4_sat(ARITHM_OP(convert_long4_sat(src_data1), convert_long4_sat(src2)));

        *((__global int4 *)((__global char *)dst + dst_index)) = data;
    }
@ -745,7 +483,7 @@ __kernel void arithm_s_add_C4_D5 (__global float *src1, int src1_step, int src

        float4 src_data1 = *((__global float4 *)((__global char *)src1 + src1_index));

        float4 data = src_data1 + src2;
        float4 data = ARITHM_OP(src_data1, src2);

        *((__global float4 *)((__global char *)dst + dst_index)) = data;
    }
@ -767,7 +505,7 @@ __kernel void arithm_s_add_C4_D6 (__global double *src1, int src1_step, int sr

        double4 src_data1 = *((__global double4 *)((__global char *)src1 + src1_index));

        double4 data = src_data1 + src2;
        double4 data = ARITHM_OP(src_data1, src2);

        *((__global double4 *)((__global char *)dst + dst_index)) = data;
    }

@ -51,6 +51,11 @@
#endif
#endif

#ifdef ARITHM_ADD
#define ARITHM_OP(A,B) ((A)+(B))
#elif defined ARITHM_SUB
#define ARITHM_OP(A,B) ((A)-(B))
#endif
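The ARITHM_OP macro above is what lets one kernel source cover both addition and subtraction: the operation is fixed when the program is built, instead of keeping duplicated arithm_sub kernels. The following is a minimal sketch of how a kernel written against the macro behaves, assuming the standard OpenCL "-D ARITHM_ADD" / "-D ARITHM_SUB" build options; the kernel name and signature are illustrative only, not part of the patch.

// Illustrative sketch, not part of the patch. Building the program with
// "-D ARITHM_ADD" makes ARITHM_OP(a, b) expand to (a)+(b); building it with
// "-D ARITHM_SUB" makes it expand to (a)-(b), so the same source yields
// either an add or a subtract kernel.
__kernel void example_op_C1_D0(__global const uchar *src1,
                               __global const uchar *src2,
                               __global uchar *dst, int count)
{
    int gid = get_global_id(0);
    if (gid < count)
    {
        // Widen to int first so convert_uchar_sat can clamp the result to [0, 255].
        int tmp = ARITHM_OP((int)src1[gid], (int)src2[gid]);
        dst[gid] = convert_uchar_sat(tmp);
    }
}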
/**************************************add with scalar with mask**************************************/
__kernel void arithm_s_add_with_mask_C1_D0 (__global uchar *src1, int src1_step, int src1_offset,
                                            __global uchar *dst, int dst_step, int dst_offset,
@ -94,7 +99,7 @@ __kernel void arithm_s_add_with_mask_C1_D0 (__global uchar *src1, int src1_ste
        }

        uchar4 data = *((__global uchar4 *)(dst + dst_index));
        int4 tmp = convert_int4_sat(src1_data) + src2_data;
        int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
        uchar4 tmp_data = convert_uchar4_sat(tmp);

        data.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : data.x;
@ -134,7 +139,7 @@ __kernel void arithm_s_add_with_mask_C1_D2 (__global ushort *src1, int src1_st
        uchar2 mask_data = vload2(0, mask + mask_index);

        ushort2 data = *((__global ushort2 *)((__global uchar *)dst + dst_index));
        int2 tmp = convert_int2_sat(src1_data) + src2_data;
        int2 tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
        ushort2 tmp_data = convert_ushort2_sat(tmp);

        data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
@ -172,7 +177,7 @@ __kernel void arithm_s_add_with_mask_C1_D3 (__global short *src1, int src1_ste
        uchar2 mask_data = vload2(0, mask + mask_index);

        short2 data = *((__global short2 *)((__global uchar *)dst + dst_index));
        int2 tmp = convert_int2_sat(src1_data) + src2_data;
        int2 tmp = ARITHM_OP(convert_int2_sat(src1_data), src2_data);
        short2 tmp_data = convert_short2_sat(tmp);

        data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
@ -202,7 +207,7 @@ __kernel void arithm_s_add_with_mask_C1_D4 (__global int *src1, int src1_ste
        int src_data2 = src2.x;
        int dst_data = *((__global int *)((__global char *)dst + dst_index));

        int data = convert_int_sat((long)src_data1 + (long)src_data2);
        int data = convert_int_sat(ARITHM_OP((long)src_data1, (long)src_data2));
        data = mask_data ? data : dst_data;

        *((__global int *)((__global char *)dst + dst_index)) = data;
@ -230,7 +235,7 @@ __kernel void arithm_s_add_with_mask_C1_D5 (__global float *src1, int src1_s
        float src_data2 = src2.x;
        float dst_data = *((__global float *)((__global char *)dst + dst_index));

        float data = src_data1 + src_data2;
        float data = ARITHM_OP(src_data1, src_data2);
        data = mask_data ? data : dst_data;

        *((__global float *)((__global char *)dst + dst_index)) = data;
@ -260,7 +265,7 @@ __kernel void arithm_s_add_with_mask_C1_D6 (__global double *src1, int src1_
        double src_data2 = src2.x;
        double dst_data = *((__global double *)((__global char *)dst + dst_index));

        double data = src_data1 + src_data2;
        double data = ARITHM_OP(src_data1, src_data2);
        data = mask_data ? data : dst_data;

        *((__global double *)((__global char *)dst + dst_index)) = data;
@ -296,7 +301,7 @@ __kernel void arithm_s_add_with_mask_C2_D0 (__global uchar *src1, int src1_ste
        uchar2 mask_data = vload2(0, mask + mask_index);

        uchar4 data = *((__global uchar4 *)(dst + dst_index));
        int4 tmp = convert_int4_sat(src1_data) + src2_data;
        int4 tmp = ARITHM_OP(convert_int4_sat(src1_data), src2_data);
        uchar4 tmp_data = convert_uchar4_sat(tmp);

        data.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.xy : data.xy;
@ -326,7 +331,7 @@ __kernel void arithm_s_add_with_mask_C2_D2 (__global ushort *src1, int src1_st
        int2 src_data2 = (int2)(src2.x, src2.y);
        ushort2 dst_data = *((__global ushort2 *)((__global char *)dst + dst_index));

        int2 tmp = convert_int2_sat(src_data1) + src_data2;
        int2 tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
        ushort2 data = convert_ushort2_sat(tmp);
        data = mask_data ? data : dst_data;

@ -354,7 +359,7 @@ __kernel void arithm_s_add_with_mask_C2_D3 (__global short *src1, int src1_ste
        int2 src_data2 = (int2)(src2.x, src2.y);
        short2 dst_data = *((__global short2 *)((__global char *)dst + dst_index));

        int2 tmp = convert_int2_sat(src_data1) + src_data2;
        int2 tmp = ARITHM_OP(convert_int2_sat(src_data1), src_data2);
        short2 data = convert_short2_sat(tmp);
        data = mask_data ? data : dst_data;

@ -382,7 +387,7 @@ __kernel void arithm_s_add_with_mask_C2_D4 (__global int *src1, int src1_step,
        int2 src_data2 = (int2)(src2.x, src2.y);
        int2 dst_data = *((__global int2 *)((__global char *)dst + dst_index));

        int2 data = convert_int2_sat(convert_long2_sat(src_data1) + convert_long2_sat(src_data2));
        int2 data = convert_int2_sat(ARITHM_OP(convert_long2_sat(src_data1), convert_long2_sat(src_data2)));
        data = mask_data ? data : dst_data;

        *((__global int2 *)((__global char *)dst + dst_index)) = data;
@ -409,7 +414,7 @@ __kernel void arithm_s_add_with_mask_C2_D5 (__global float *src1, int src1_ste
        float2 src_data2 = (float2)(src2.x, src2.y);
        float2 dst_data = *((__global float2 *)((__global char *)dst + dst_index));

        float2 data = src_data1 + src_data2;
        float2 data = ARITHM_OP(src_data1, src_data2);
        data = mask_data ? data : dst_data;

        *((__global float2 *)((__global char *)dst + dst_index)) = data;
@ -438,7 +443,7 @@ __kernel void arithm_s_add_with_mask_C2_D6 (__global double *src1, int src1_st
        double2 src_data2 = (double2)(src2.x, src2.y);
        double2 dst_data = *((__global double2 *)((__global char *)dst + dst_index));

        double2 data = src_data1 + src_data2;
        double2 data = ARITHM_OP(src_data1, src_data2);
        data = mask_data ? data : dst_data;

        *((__global double2 *)((__global char *)dst + dst_index)) = data;
@ -446,317 +451,11 @@ __kernel void arithm_s_add_with_mask_C2_D6 (__global double *src1, int src1_st
    }
}
#endif

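All of the with-mask hunks above share the same read-modify-write pattern: widen, apply ARITHM_OP, saturate, then keep the old destination value wherever the mask byte is zero. Below is a minimal per-element sketch of that pattern, with an illustrative kernel name and without the dst_start/dst_end boundary guards and vectorization the real kernels use.

// Illustrative sketch of the masked scalar-add pattern used above.
__kernel void example_add_scalar_mask(__global const uchar *src1,
                                      __global const uchar *mask,
                                      __global uchar *dst,
                                      int scalar, int count)
{
    int gid = get_global_id(0);
    if (gid < count)
    {
        uchar dst_data = dst[gid];                            // previous destination value
        uchar tmp = convert_uchar_sat((int)src1[gid] + scalar);
        // Per-element select: a non-zero mask byte takes the new value,
        // a zero mask byte leaves the destination untouched.
        dst[gid] = mask[gid] ? tmp : dst_data;
    }
}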
__kernel void arithm_s_add_with_mask_C3_D0 (__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
int4 src2_data_0 = (int4)(src2.x, src2.y, src2.z, src2.x);
|
||||
int4 src2_data_1 = (int4)(src2.y, src2.z, src2.x, src2.y);
|
||||
int4 src2_data_2 = (int4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = convert_uchar4_sat(convert_int4_sat(src1_data_0) + src2_data_0);
|
||||
uchar4 tmp_data_1 = convert_uchar4_sat(convert_int4_sat(src1_data_1) + src2_data_1);
|
||||
uchar4 tmp_data_2 = convert_uchar4_sat(convert_int4_sat(src1_data_2) + src2_data_2);
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_add_with_mask_C3_D2 (__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int2 src2_data_0 = (int2)(src2.x, src2.y);
|
||||
int2 src2_data_1 = (int2)(src2.z, src2.x);
|
||||
int2 src2_data_2 = (int2)(src2.y, src2.z);
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = convert_ushort2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
|
||||
ushort2 tmp_data_1 = convert_ushort2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
|
||||
ushort2 tmp_data_2 = convert_ushort2_sat(convert_int2_sat(src1_data_2) + src2_data_2);
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_add_with_mask_C3_D3 (__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int2 src2_data_0 = (int2)(src2.x, src2.y);
|
||||
int2 src2_data_1 = (int2)(src2.z, src2.x);
|
||||
int2 src2_data_2 = (int2)(src2.y, src2.z);
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = convert_short2_sat(convert_int2_sat(src1_data_0) + src2_data_0);
|
||||
short2 tmp_data_1 = convert_short2_sat(convert_int2_sat(src1_data_1) + src2_data_1);
|
||||
short2 tmp_data_2 = convert_short2_sat(convert_int2_sat(src1_data_2) + src2_data_2);
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_add_with_mask_C3_D4 (__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = src2.x;
|
||||
int src2_data_1 = src2.y;
|
||||
int src2_data_2 = src2.z;
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = convert_int_sat((long)src1_data_0 + (long)src2_data_0);
|
||||
int tmp_data_1 = convert_int_sat((long)src1_data_1 + (long)src2_data_1);
|
||||
int tmp_data_2 = convert_int_sat((long)src1_data_2 + (long)src2_data_2);
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_add_with_mask_C3_D5 (__global float *src1, int src1_step, int src1_offset,
|
||||
__global float *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
float4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
float src1_data_0 = *((__global float *)((__global char *)src1 + src1_index + 0));
|
||||
float src1_data_1 = *((__global float *)((__global char *)src1 + src1_index + 4));
|
||||
float src1_data_2 = *((__global float *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
float src2_data_0 = src2.x;
|
||||
float src2_data_1 = src2.y;
|
||||
float src2_data_2 = src2.z;
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
float data_0 = *((__global float *)((__global char *)dst + dst_index + 0));
|
||||
float data_1 = *((__global float *)((__global char *)dst + dst_index + 4));
|
||||
float data_2 = *((__global float *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
float tmp_data_0 = src1_data_0 + src2_data_0;
|
||||
float tmp_data_1 = src1_data_1 + src2_data_1;
|
||||
float tmp_data_2 = src1_data_2 + src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global float *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global float *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global float *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_s_add_with_mask_C3_D6 (__global double *src1, int src1_step, int src1_offset,
|
||||
__global double *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
double4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
double src1_data_0 = *((__global double *)((__global char *)src1 + src1_index + 0 ));
|
||||
double src1_data_1 = *((__global double *)((__global char *)src1 + src1_index + 8 ));
|
||||
double src1_data_2 = *((__global double *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
double src2_data_0 = src2.x;
|
||||
double src2_data_1 = src2.y;
|
||||
double src2_data_2 = src2.z;
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
double data_0 = *((__global double *)((__global char *)dst + dst_index + 0 ));
|
||||
double data_1 = *((__global double *)((__global char *)dst + dst_index + 8 ));
|
||||
double data_2 = *((__global double *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
double tmp_data_0 = src1_data_0 + src2_data_0;
|
||||
double tmp_data_1 = src1_data_1 + src2_data_1;
|
||||
double tmp_data_2 = src1_data_2 + src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global double *)((__global char *)dst + dst_index + 0 ))= data_0;
|
||||
*((__global double *)((__global char *)dst + dst_index + 8 ))= data_1;
|
||||
*((__global double *)((__global char *)dst + dst_index + 16))= data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
__kernel void arithm_s_add_with_mask_C4_D0 (__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
@ -771,7 +470,7 @@ __kernel void arithm_s_add_with_mask_C4_D0 (__global uchar *src1, int src1_ste
        uchar4 src_data1 = *((__global uchar4 *)(src1 + src1_index));
        uchar4 dst_data = *((__global uchar4 *)(dst + dst_index));

        uchar4 data = convert_uchar4_sat(convert_int4_sat(src_data1) + src2);
        uchar4 data = convert_uchar4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
        data = mask_data ? data : dst_data;

        *((__global uchar4 *)(dst + dst_index)) = data;
@ -797,7 +496,7 @@ __kernel void arithm_s_add_with_mask_C4_D2 (__global ushort *src1, int src1_st
        ushort4 src_data1 = *((__global ushort4 *)((__global char *)src1 + src1_index));
        ushort4 dst_data = *((__global ushort4 *)((__global char *)dst + dst_index));

        ushort4 data = convert_ushort4_sat(convert_int4_sat(src_data1) + src2);
        ushort4 data = convert_ushort4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
        data = mask_data ? data : dst_data;

        *((__global ushort4 *)((__global char *)dst + dst_index)) = data;
@ -823,7 +522,7 @@ __kernel void arithm_s_add_with_mask_C4_D3 (__global short *src1, int src1_ste
        short4 src_data1 = *((__global short4 *)((__global char *)src1 + src1_index));
        short4 dst_data = *((__global short4 *)((__global char *)dst + dst_index));

        short4 data = convert_short4_sat(convert_int4_sat(src_data1) + src2);
        short4 data = convert_short4_sat(ARITHM_OP(convert_int4_sat(src_data1), src2));
        data = mask_data ? data : dst_data;

        *((__global short4 *)((__global char *)dst + dst_index)) = data;
@ -849,7 +548,7 @@ __kernel void arithm_s_add_with_mask_C4_D4 (__global int *src1, int src1_step,
        int4 src_data1 = *((__global int4 *)((__global char *)src1 + src1_index));
        int4 dst_data = *((__global int4 *)((__global char *)dst + dst_index));

        int4 data = convert_int4_sat(convert_long4_sat(src_data1) + convert_long4_sat(src2));
        int4 data = convert_int4_sat(ARITHM_OP(convert_long4_sat(src_data1), convert_long4_sat(src2)));
        data = mask_data ? data : dst_data;

        *((__global int4 *)((__global char *)dst + dst_index)) = data;
@ -875,7 +574,7 @@ __kernel void arithm_s_add_with_mask_C4_D5 (__global float *src1, int src1_ste
        float4 src_data1 = *((__global float4 *)((__global char *)src1 + src1_index));
        float4 dst_data = *((__global float4 *)((__global char *)dst + dst_index));

        float4 data = src_data1 + src2;
        float4 data = ARITHM_OP(src_data1, src2);
        data = mask_data ? data : dst_data;

        *((__global float4 *)((__global char *)dst + dst_index)) = data;
@ -903,7 +602,7 @@ __kernel void arithm_s_add_with_mask_C4_D6 (__global double *src1, int src1_st
        double4 src_data1 = *((__global double4 *)((__global char *)src1 + src1_index));
        double4 dst_data = *((__global double4 *)((__global char *)dst + dst_index));

        double4 data = src_data1 + src2;
        double4 data = ARITHM_OP(src_data1, src2);
        data = mask_data ? data : dst_data;

        *((__global double4 *)((__global char *)dst + dst_index)) = data;

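One detail worth noting in the scalar kernels above: the operands are widened before the saturating convert, so the intermediate result cannot wrap. 8- and 16-bit channels go through int, int channels go through long, and float/double need no conversion at all. A rough scalar illustration follows; the helper names are illustrative and not part of the patch.

// Why the widening matters: adding two uchars directly would wrap modulo 256,
// whereas widening to int and then clamping gives the saturated result.
uchar saturating_add_u8(uchar a, uchar b)
{
    int wide = (int)a + (int)b;          // e.g. 200 + 100 = 300, no wrap-around
    return convert_uchar_sat(wide);      // clamped to 255
}

int saturating_add_s32(int a, int b)
{
    long wide = (long)a + (long)b;       // 32-bit sums can exceed INT_MAX
    return convert_int_sat(wide);        // clamped to [INT_MIN, INT_MAX]
}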
@ -565,397 +565,6 @@ __kernel void arithm_bitwise_and_with_mask_C2_D6 (
|
||||
}
|
||||
|
||||
|
||||
|
||||
__kernel void arithm_bitwise_and_with_mask_C3_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int src2_index = mad24(y, src2_step, (x * 3) + src2_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
uchar4 src2_data_0 = vload4(0, src2 + src2_index + 0);
|
||||
uchar4 src2_data_1 = vload4(0, src2 + src2_index + 4);
|
||||
uchar4 src2_data_2 = vload4(0, src2 + src2_index + 8);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
uchar4 tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
uchar4 tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__kernel void arithm_bitwise_and_with_mask_C3_D1 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int src2_index = mad24(y, src2_step, (x * 3) + src2_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
char4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
char4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
char4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
char4 src2_data_0 = vload4(0, src2 + src2_index + 0);
|
||||
char4 src2_data_1 = vload4(0, src2 + src2_index + 4);
|
||||
char4 src2_data_2 = vload4(0, src2 + src2_index + 8);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)(dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)(dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)(dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global char4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global char4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global char4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void arithm_bitwise_and_with_mask_C3_D2 (
|
||||
__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int src2_index = mad24(y, src2_step, (x * 6) + src2_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
ushort2 src2_data_0 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 0));
|
||||
ushort2 src2_data_1 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 4));
|
||||
ushort2 src2_data_2 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = src1_data_0 & src2_data_0 ;
|
||||
ushort2 tmp_data_1 = src1_data_1 & src2_data_1 ;
|
||||
ushort2 tmp_data_2 = src1_data_2 & src2_data_2 ;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_bitwise_and_with_mask_C3_D3 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int src2_index = mad24(y, src2_step, (x * 6) + src2_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
short2 src2_data_0 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 0));
|
||||
short2 src2_data_1 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 4));
|
||||
short2 src2_data_2 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = src1_data_0 & src2_data_0 ;
|
||||
short2 tmp_data_1 = src1_data_1 & src2_data_1 ;
|
||||
short2 tmp_data_2 = src1_data_2 & src2_data_2 ;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_bitwise_and_with_mask_C3_D4 (
|
||||
__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 12) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = *((__global int *)((__global char *)src2 + src2_index + 0));
|
||||
int src2_data_1 = *((__global int *)((__global char *)src2 + src2_index + 4));
|
||||
int src2_data_2 = *((__global int *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = src1_data_0 & src2_data_0 ;
|
||||
int tmp_data_1 = src1_data_1 & src2_data_1 ;
|
||||
int tmp_data_2 = src1_data_2 & src2_data_2 ;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_bitwise_and_with_mask_C3_D5 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 12) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
char4 src1_data_0 = *((__global char4 *)((__global char *)src1 + src1_index + 0));
|
||||
char4 src1_data_1 = *((__global char4 *)((__global char *)src1 + src1_index + 4));
|
||||
char4 src1_data_2 = *((__global char4 *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
char4 src2_data_0 = *((__global char4 *)((__global char *)src2 + src2_index + 0));
|
||||
char4 src2_data_1 = *((__global char4 *)((__global char *)src2 + src2_index + 4));
|
||||
char4 src2_data_2 = *((__global char4 *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)((__global char *)dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)((__global char *)dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_bitwise_and_with_mask_C3_D6 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 24) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
char8 src1_data_0 = *((__global char8 *)((__global char *)src1 + src1_index + 0 ));
|
||||
char8 src1_data_1 = *((__global char8 *)((__global char *)src1 + src1_index + 8 ));
|
||||
char8 src1_data_2 = *((__global char8 *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
char8 src2_data_0 = *((__global char8 *)((__global char *)src2 + src2_index + 0 ));
|
||||
char8 src2_data_1 = *((__global char8 *)((__global char *)src2 + src2_index + 8 ));
|
||||
char8 src2_data_2 = *((__global char8 *)((__global char *)src2 + src2_index + 16));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
char8 data_0 = *((__global char8 *)((__global char *)dst + dst_index + 0 ));
|
||||
char8 data_1 = *((__global char8 *)((__global char *)dst + dst_index + 8 ));
|
||||
char8 data_2 = *((__global char8 *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
char8 tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
char8 tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
char8 tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global char8 *)((__global char *)dst + dst_index + 0 ))= data_0;
|
||||
*((__global char8 *)((__global char *)dst + dst_index + 8 ))= data_1;
|
||||
*((__global char8 *)((__global char *)dst + dst_index + 16))= data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
__kernel void arithm_bitwise_and_with_mask_C4_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *src2, int src2_step, int src2_offset,
|
||||
|
@ -461,340 +461,7 @@ __kernel void arithm_s_bitwise_and_C2_D6 (
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_and_C3_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
uchar4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
uchar4 src2_data_0 = (uchar4)(src2.x, src2.y, src2.z, src2.x);
|
||||
uchar4 src2_data_1 = (uchar4)(src2.y, src2.z, src2.x, src2.y);
|
||||
uchar4 src2_data_2 = (uchar4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
uchar4 tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
uchar4 tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
data_0.xyz = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__kernel void arithm_s_bitwise_and_C3_D1 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
char4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
char4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
char4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
char4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.x, src2.y, src2.z, src2.x);
|
||||
char4 src2_data_1 = (char4)(src2.y, src2.z, src2.x, src2.y);
|
||||
char4 src2_data_2 = (char4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
char4 data_0 = *((__global char4 *)(dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)(dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)(dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = convert_char4_sat(convert_uchar4_sat(src1_data_0) & convert_uchar4_sat(src2_data_0));
|
||||
char4 tmp_data_1 = convert_char4_sat(convert_uchar4_sat(src1_data_1) & convert_uchar4_sat(src2_data_1));
|
||||
char4 tmp_data_2 = convert_char4_sat(convert_uchar4_sat(src1_data_2) & convert_uchar4_sat(src2_data_2));
|
||||
|
||||
data_0.xyz = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global char4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global char4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global char4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void arithm_s_bitwise_and_C3_D2 (
|
||||
__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
ushort4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
ushort2 src2_data_0 = (ushort2)(src2.x, src2.y);
|
||||
ushort2 src2_data_1 = (ushort2)(src2.z, src2.x);
|
||||
ushort2 src2_data_2 = (ushort2)(src2.y, src2.z);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
ushort2 tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
ushort2 tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_and_C3_D3 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
short4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
short2 src2_data_0 = (short2)(src2.x, src2.y);
|
||||
short2 src2_data_1 = (short2)(src2.z, src2.x);
|
||||
short2 src2_data_2 = (short2)(src2.y, src2.z);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
short2 tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
short2 tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_and_C3_D4 (
|
||||
__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = src2.x;
|
||||
int src2_data_1 = src2.y;
|
||||
int src2_data_2 = src2.z;
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
int tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
int tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= tmp_data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= tmp_data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_and_C3_D5 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
char16 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
char4 src1_data_0 = *((__global char4 *)((__global char *)src1 + src1_index + 0));
|
||||
char4 src1_data_1 = *((__global char4 *)((__global char *)src1 + src1_index + 4));
|
||||
char4 src1_data_2 = *((__global char4 *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
char4 src2_data_1 = (char4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
char4 src2_data_2 = (char4)(src2.s8, src2.s9, src2.sA, src2.sB);
|
||||
|
||||
char4 data_0 = *((__global char4 *)((__global char *)dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)((__global char *)dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 0))= tmp_data_0;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 4))= tmp_data_1;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 8))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_s_bitwise_and_C3_D6 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
short16 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
short4 src1_data_0 = *((__global short4 *)((__global char *)src1 + src1_index + 0 ));
|
||||
short4 src1_data_1 = *((__global short4 *)((__global char *)src1 + src1_index + 8 ));
|
||||
short4 src1_data_2 = *((__global short4 *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
short4 src2_data_0 = (short4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
short4 src2_data_1 = (short4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
short4 src2_data_2 = (short4)(src2.s8, src2.s9, src2.sa, src2.sb);
|
||||
|
||||
short4 data_0 = *((__global short4 *)((__global char *)dst + dst_index + 0 ));
|
||||
short4 data_1 = *((__global short4 *)((__global char *)dst + dst_index + 8 ));
|
||||
short4 data_2 = *((__global short4 *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
short4 tmp_data_0 = src1_data_0 & src2_data_0;
|
||||
short4 tmp_data_1 = src1_data_1 & src2_data_1;
|
||||
short4 tmp_data_2 = src1_data_2 & src2_data_2;
|
||||
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 0 ))= tmp_data_0;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 8 ))= tmp_data_1;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 16))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_and_C4_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
|
@ -566,396 +566,6 @@ __kernel void arithm_bitwise_or_with_mask_C2_D6 (
|
||||
#endif
|
||||
|
||||
|
||||
__kernel void arithm_bitwise_or_with_mask_C3_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int src2_index = mad24(y, src2_step, (x * 3) + src2_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
uchar4 src2_data_0 = vload4(0, src2 + src2_index + 0);
|
||||
uchar4 src2_data_1 = vload4(0, src2 + src2_index + 4);
|
||||
uchar4 src2_data_2 = vload4(0, src2 + src2_index + 8);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
uchar4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
uchar4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__kernel void arithm_bitwise_or_with_mask_C3_D1 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int src2_index = mad24(y, src2_step, (x * 3) + src2_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
char4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
char4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
char4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
char4 src2_data_0 = vload4(0, src2 + src2_index + 0);
|
||||
char4 src2_data_1 = vload4(0, src2 + src2_index + 4);
|
||||
char4 src2_data_2 = vload4(0, src2 + src2_index + 8);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)(dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)(dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)(dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global char4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global char4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global char4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void arithm_bitwise_or_with_mask_C3_D2 (
|
||||
__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int src2_index = mad24(y, src2_step, (x * 6) + src2_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
ushort2 src2_data_0 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 0));
|
||||
ushort2 src2_data_1 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 4));
|
||||
ushort2 src2_data_2 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = src1_data_0 | src2_data_0 ;
|
||||
ushort2 tmp_data_1 = src1_data_1 | src2_data_1 ;
|
||||
ushort2 tmp_data_2 = src1_data_2 | src2_data_2 ;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_bitwise_or_with_mask_C3_D3 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int src2_index = mad24(y, src2_step, (x * 6) + src2_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
short2 src2_data_0 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 0));
|
||||
short2 src2_data_1 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 4));
|
||||
short2 src2_data_2 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = src1_data_0 | src2_data_0 ;
|
||||
short2 tmp_data_1 = src1_data_1 | src2_data_1 ;
|
||||
short2 tmp_data_2 = src1_data_2 | src2_data_2 ;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_bitwise_or_with_mask_C3_D4 (
|
||||
__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 12) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = *((__global int *)((__global char *)src2 + src2_index + 0));
|
||||
int src2_data_1 = *((__global int *)((__global char *)src2 + src2_index + 4));
|
||||
int src2_data_2 = *((__global int *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = src1_data_0 | src2_data_0 ;
|
||||
int tmp_data_1 = src1_data_1 | src2_data_1 ;
|
||||
int tmp_data_2 = src1_data_2 | src2_data_2 ;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_bitwise_or_with_mask_C3_D5 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 12) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
char4 src1_data_0 = *((__global char4 *)((__global char *)src1 + src1_index + 0));
|
||||
char4 src1_data_1 = *((__global char4 *)((__global char *)src1 + src1_index + 4));
|
||||
char4 src1_data_2 = *((__global char4 *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
char4 src2_data_0 = *((__global char4 *)((__global char *)src2 + src2_index + 0));
|
||||
char4 src2_data_1 = *((__global char4 *)((__global char *)src2 + src2_index + 4));
|
||||
char4 src2_data_2 = *((__global char4 *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)((__global char *)dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)((__global char *)dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_bitwise_or_with_mask_C3_D6 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 24) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
char8 src1_data_0 = *((__global char8 *)((__global char *)src1 + src1_index + 0 ));
|
||||
char8 src1_data_1 = *((__global char8 *)((__global char *)src1 + src1_index + 8 ));
|
||||
char8 src1_data_2 = *((__global char8 *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
char8 src2_data_0 = *((__global char8 *)((__global char *)src2 + src2_index + 0 ));
|
||||
char8 src2_data_1 = *((__global char8 *)((__global char *)src2 + src2_index + 8 ));
|
||||
char8 src2_data_2 = *((__global char8 *)((__global char *)src2 + src2_index + 16));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
char8 data_0 = *((__global char8 *)((__global char *)dst + dst_index + 0 ));
|
||||
char8 data_1 = *((__global char8 *)((__global char *)dst + dst_index + 8 ));
|
||||
char8 data_2 = *((__global char8 *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
char8 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
char8 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
char8 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global char8 *)((__global char *)dst + dst_index + 0 ))= data_0;
|
||||
*((__global char8 *)((__global char *)dst + dst_index + 8 ))= data_1;
|
||||
*((__global char8 *)((__global char *)dst + dst_index + 16))= data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
__kernel void arithm_bitwise_or_with_mask_C4_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *src2, int src2_step, int src2_offset,
|
||||
|
@ -457,344 +457,7 @@ __kernel void arithm_s_bitwise_or_C2_D6 (
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_or_C3_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
uchar4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
uchar4 src2_data_0 = (uchar4)(src2.x, src2.y, src2.z, src2.x);
|
||||
uchar4 src2_data_1 = (uchar4)(src2.y, src2.z, src2.x, src2.y);
|
||||
uchar4 src2_data_2 = (uchar4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = src1_data_0 | src2_data_0 ;
|
||||
uchar4 tmp_data_1 = src1_data_1 | src2_data_1 ;
|
||||
uchar4 tmp_data_2 = src1_data_2 | src2_data_2 ;
|
||||
|
||||
data_0.xyz = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__kernel void arithm_s_bitwise_or_C3_D1 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
char4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
char4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
char4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
char4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.x, src2.y, src2.z, src2.x);
|
||||
char4 src2_data_1 = (char4)(src2.y, src2.z, src2.x, src2.y);
|
||||
char4 src2_data_2 = (char4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
char4 data_0 = *((__global char4 *)(dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)(dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)(dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0.xyz = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global char4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global char4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global char4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void arithm_s_bitwise_or_C3_D2 (
|
||||
__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
ushort4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
ushort2 src2_data_0 = (ushort2)(src2.x, src2.y);
|
||||
ushort2 src2_data_1 = (ushort2)(src2.z, src2.x);
|
||||
ushort2 src2_data_2 = (ushort2)(src2.y, src2.z);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = src1_data_0 | src2_data_0 ;
|
||||
ushort2 tmp_data_1 = src1_data_1 | src2_data_1 ;
|
||||
ushort2 tmp_data_2 = src1_data_2 | src2_data_2 ;
|
||||
|
||||
data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_or_C3_D3 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
short4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
short2 src2_data_0 = (short2)(src2.x, src2.y);
|
||||
short2 src2_data_1 = (short2)(src2.z, src2.x);
|
||||
short2 src2_data_2 = (short2)(src2.y, src2.z);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = src1_data_0 | src2_data_0 ;
|
||||
short2 tmp_data_1 = src1_data_1 | src2_data_1 ;
|
||||
short2 tmp_data_2 = src1_data_2 | src2_data_2 ;
|
||||
|
||||
data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_or_C3_D4 (
|
||||
__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = src2.x;
|
||||
int src2_data_1 = src2.y;
|
||||
int src2_data_2 = src2.z;
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
int tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
int tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= tmp_data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= tmp_data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_or_C3_D5 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
char16 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
char4 src1_data_0 = *((__global char4 *)((__global char *)src1 + src1_index + 0));
|
||||
char4 src1_data_1 = *((__global char4 *)((__global char *)src1 + src1_index + 4));
|
||||
char4 src1_data_2 = *((__global char4 *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
char4 src2_data_1 = (char4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
char4 src2_data_2 = (char4)(src2.s8, src2.s9, src2.sA, src2.sB);
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 0))= tmp_data_0;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 4))= tmp_data_1;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 8))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_s_bitwise_or_C3_D6 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
short16 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
short4 src1_data_0 = *((__global short4 *)((__global char *)src1 + src1_index + 0 ));
|
||||
short4 src1_data_1 = *((__global short4 *)((__global char *)src1 + src1_index + 8 ));
|
||||
short4 src1_data_2 = *((__global short4 *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
short4 src2_data_0 = (short4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
short4 src2_data_1 = (short4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
short4 src2_data_2 = (short4)(src2.s8, src2.s9, src2.sa, src2.sb);
|
||||
|
||||
short4 data_0 = *((__global short4 *)((__global char *)dst + dst_index + 0 ));
|
||||
short4 data_1 = *((__global short4 *)((__global char *)dst + dst_index + 8 ));
|
||||
short4 data_2 = *((__global short4 *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
short4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
short4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
short4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 0 ))= tmp_data_0;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 8 ))= tmp_data_1;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 16))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_or_C4_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
|
@ -533,387 +533,7 @@ __kernel void arithm_s_bitwise_or_with_mask_C2_D6 (
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_or_with_mask_C3_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
uchar4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
uchar4 src2_data_0 = (uchar4)(src2.x, src2.y, src2.z, src2.x);
|
||||
uchar4 src2_data_1 = (uchar4)(src2.y, src2.z, src2.x, src2.y);
|
||||
uchar4 src2_data_2 = (uchar4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
uchar4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
uchar4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__kernel void arithm_s_bitwise_or_with_mask_C3_D1 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
char4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
char4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
char4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
char4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.x, src2.y, src2.z, src2.x);
|
||||
char4 src2_data_1 = (char4)(src2.y, src2.z, src2.x, src2.y);
|
||||
char4 src2_data_2 = (char4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)(dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)(dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)(dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global char4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global char4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global char4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void arithm_s_bitwise_or_with_mask_C3_D2 (
|
||||
__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
ushort4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
ushort2 src2_data_0 = (ushort2)(src2.x, src2.y);
|
||||
ushort2 src2_data_1 = (ushort2)(src2.z, src2.x);
|
||||
ushort2 src2_data_2 = (ushort2)(src2.y, src2.z);
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
ushort2 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
ushort2 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_or_with_mask_C3_D3 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
short4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
short2 src2_data_0 = (short2)(src2.x, src2.y);
|
||||
short2 src2_data_1 = (short2)(src2.z, src2.x);
|
||||
short2 src2_data_2 = (short2)(src2.y, src2.z);
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
short2 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
short2 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_or_with_mask_C3_D4 (
|
||||
__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = src2.x;
|
||||
int src2_data_1 = src2.y;
|
||||
int src2_data_2 = src2.z;
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
int tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
int tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_or_with_mask_C3_D5 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
char16 src2, int rows, int cols, int dst_step1)
|
||||
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
char4 src1_data_0 = *((__global char4 *)((__global char *)src1 + src1_index + 0));
|
||||
char4 src1_data_1 = *((__global char4 *)((__global char *)src1 + src1_index + 4));
|
||||
char4 src1_data_2 = *((__global char4 *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
char4 src2_data_1 = (char4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
char4 src2_data_2 = (char4)(src2.s8, src2.s9, src2.sA, src2.sB);
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)((__global char *)dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)((__global char *)dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
|
||||
}
|
||||
}
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_s_bitwise_or_with_mask_C3_D6 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
short16 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
short4 src1_data_0 = *((__global short4 *)((__global char *)src1 + src1_index + 0 ));
|
||||
short4 src1_data_1 = *((__global short4 *)((__global char *)src1 + src1_index + 8 ));
|
||||
short4 src1_data_2 = *((__global short4 *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
short4 src2_data_0 = (short4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
short4 src2_data_1 = (short4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
short4 src2_data_2 = (short4)(src2.s8, src2.s9, src2.sa, src2.sb);
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
short4 data_0 = *((__global short4 *)((__global char *)dst + dst_index + 0 ));
|
||||
short4 data_1 = *((__global short4 *)((__global char *)dst + dst_index + 8 ));
|
||||
short4 data_2 = *((__global short4 *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
short4 tmp_data_0 = src1_data_0 | src2_data_0;
|
||||
short4 tmp_data_1 = src1_data_1 | src2_data_1;
|
||||
short4 tmp_data_2 = src1_data_2 | src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 0 ))= data_0;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 8 ))= data_1;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 16))= data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_or_with_mask_C4_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
|
@ -565,397 +565,6 @@ __kernel void arithm_bitwise_xor_with_mask_C2_D6 (
|
||||
}
|
||||
#endif
|
||||
|
||||
|
||||
__kernel void arithm_bitwise_xor_with_mask_C3_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int src2_index = mad24(y, src2_step, (x * 3) + src2_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
uchar4 src2_data_0 = vload4(0, src2 + src2_index + 0);
|
||||
uchar4 src2_data_1 = vload4(0, src2 + src2_index + 4);
|
||||
uchar4 src2_data_2 = vload4(0, src2 + src2_index + 8);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
uchar4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
uchar4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__kernel void arithm_bitwise_xor_with_mask_C3_D1 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int src2_index = mad24(y, src2_step, (x * 3) + src2_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
char4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
char4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
char4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
char4 src2_data_0 = vload4(0, src2 + src2_index + 0);
|
||||
char4 src2_data_1 = vload4(0, src2 + src2_index + 4);
|
||||
char4 src2_data_2 = vload4(0, src2 + src2_index + 8);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)(dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)(dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)(dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global char4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global char4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global char4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void arithm_bitwise_xor_with_mask_C3_D2 (
|
||||
__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int src2_index = mad24(y, src2_step, (x * 6) + src2_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
ushort2 src2_data_0 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 0));
|
||||
ushort2 src2_data_1 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 4));
|
||||
ushort2 src2_data_2 = vload2(0, (__global ushort *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = src1_data_0 ^ src2_data_0 ;
|
||||
ushort2 tmp_data_1 = src1_data_1 ^ src2_data_1 ;
|
||||
ushort2 tmp_data_2 = src1_data_2 ^ src2_data_2 ;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_bitwise_xor_with_mask_C3_D3 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int src2_index = mad24(y, src2_step, (x * 6) + src2_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
short2 src2_data_0 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 0));
|
||||
short2 src2_data_1 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 4));
|
||||
short2 src2_data_2 = vload2(0, (__global short *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = src1_data_0 ^ src2_data_0 ;
|
||||
short2 tmp_data_1 = src1_data_1 ^ src2_data_1 ;
|
||||
short2 tmp_data_2 = src1_data_2 ^ src2_data_2 ;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_bitwise_xor_with_mask_C3_D4 (
        __global int *src1, int src1_step, int src1_offset,
        __global int *src2, int src2_step, int src2_offset,
        __global uchar *mask, int mask_step, int mask_offset,
        __global int *dst,  int dst_step,  int dst_offset,
        int rows, int cols, int dst_step1)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
        int src2_index = mad24(y, src2_step, (x * 12) + src2_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step, dst_offset + (x * 12));

        int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
        int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
        int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));

        int src2_data_0 = *((__global int *)((__global char *)src2 + src2_index + 0));
        int src2_data_1 = *((__global int *)((__global char *)src2 + src2_index + 4));
        int src2_data_2 = *((__global int *)((__global char *)src2 + src2_index + 8));

        uchar mask_data = * (mask + mask_index);

        int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
        int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
        int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));

        int tmp_data_0 = src1_data_0 ^ src2_data_0;
        int tmp_data_1 = src1_data_1 ^ src2_data_1;
        int tmp_data_2 = src1_data_2 ^ src2_data_2;

        data_0 = mask_data ? tmp_data_0 : data_0;
        data_1 = mask_data ? tmp_data_1 : data_1;
        data_2 = mask_data ? tmp_data_2 : data_2;

        *((__global int *)((__global char *)dst + dst_index + 0))= data_0;
        *((__global int *)((__global char *)dst + dst_index + 4))= data_1;
        *((__global int *)((__global char *)dst + dst_index + 8))= data_2;
    }
}
|
||||
__kernel void arithm_bitwise_xor_with_mask_C3_D5 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *src2, int src2_step, int src2_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int src2_index = mad24(y, src2_step, (x * 12) + src2_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
char4 src1_data_0 = *((__global char4 *)((__global char *)src1 + src1_index + 0));
|
||||
char4 src1_data_1 = *((__global char4 *)((__global char *)src1 + src1_index + 4));
|
||||
char4 src1_data_2 = *((__global char4 *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
char4 src2_data_0 = *((__global char4 *)((__global char *)src2 + src2_index + 0));
|
||||
char4 src2_data_1 = *((__global char4 *)((__global char *)src2 + src2_index + 4));
|
||||
char4 src2_data_2 = *((__global char4 *)((__global char *)src2 + src2_index + 8));
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)((__global char *)dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)((__global char *)dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
#if defined (DOUBLE_SUPPORT)
__kernel void arithm_bitwise_xor_with_mask_C3_D6 (
        __global char *src1, int src1_step, int src1_offset,
        __global char *src2, int src2_step, int src2_offset,
        __global uchar *mask, int mask_step, int mask_offset,
        __global char *dst,  int dst_step,  int dst_offset,
        int rows, int cols, int dst_step1)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
        int src2_index = mad24(y, src2_step, (x * 24) + src2_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step, dst_offset + (x * 24));

        char8 src1_data_0 = *((__global char8 *)((__global char *)src1 + src1_index + 0 ));
        char8 src1_data_1 = *((__global char8 *)((__global char *)src1 + src1_index + 8 ));
        char8 src1_data_2 = *((__global char8 *)((__global char *)src1 + src1_index + 16));

        char8 src2_data_0 = *((__global char8 *)((__global char *)src2 + src2_index + 0 ));
        char8 src2_data_1 = *((__global char8 *)((__global char *)src2 + src2_index + 8 ));
        char8 src2_data_2 = *((__global char8 *)((__global char *)src2 + src2_index + 16));

        uchar mask_data = * (mask + mask_index);

        char8 data_0 = *((__global char8 *)((__global char *)dst + dst_index + 0 ));
        char8 data_1 = *((__global char8 *)((__global char *)dst + dst_index + 8 ));
        char8 data_2 = *((__global char8 *)((__global char *)dst + dst_index + 16));

        char8 tmp_data_0 = src1_data_0 ^ src2_data_0;
        char8 tmp_data_1 = src1_data_1 ^ src2_data_1;
        char8 tmp_data_2 = src1_data_2 ^ src2_data_2;

        data_0 = mask_data ? tmp_data_0 : data_0;
        data_1 = mask_data ? tmp_data_1 : data_1;
        data_2 = mask_data ? tmp_data_2 : data_2;

        *((__global char8 *)((__global char *)dst + dst_index + 0 ))= data_0;
        *((__global char8 *)((__global char *)dst + dst_index + 8 ))= data_1;
        *((__global char8 *)((__global char *)dst + dst_index + 16))= data_2;
    }
}
#endif
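
/* Why the D6 (double) variants above work on char8: OpenCL defines the ^ operator only
   for integer types, so each 8-byte element is reinterpreted as bytes before the XOR,
   and the whole kernel is compiled only when the device exposes cl_khr_fp64. The helper
   below is an illustrative sketch (not OpenCV code) of the same reinterpretation using
   the as_* built-ins on a single double. */
double xor_double(double a, double b)
{
    return as_double(as_ulong(a) ^ as_ulong(b));   /* bitwise XOR of the two bit patterns */
}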
|
||||
|
||||
|
||||
__kernel void arithm_bitwise_xor_with_mask_C4_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *src2, int src2_step, int src2_offset,
|
||||
|
@@ -461,340 +461,7 @@ __kernel void arithm_s_bitwise_xor_C2_D6 (
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_xor_C3_D0 (
        __global uchar *src1, int src1_step, int src1_offset,
        __global uchar *dst,  int dst_step,  int dst_offset,
        uchar4 src2, int rows, int cols, int dst_step1)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 2;

#ifdef dst_align
#undef dst_align
#endif
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
        int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));

        int dst_start  = mad24(y, dst_step, dst_offset);
        int dst_end    = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index  = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));

        uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
        uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
        uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);

        uchar4 src2_data_0 = (uchar4)(src2.x, src2.y, src2.z, src2.x);
        uchar4 src2_data_1 = (uchar4)(src2.y, src2.z, src2.x, src2.y);
        uchar4 src2_data_2 = (uchar4)(src2.z, src2.x, src2.y, src2.z);

        uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
        uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
        uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));

        uchar4 tmp_data_0 = src1_data_0 ^ src2_data_0;
        uchar4 tmp_data_1 = src1_data_1 ^ src2_data_1;
        uchar4 tmp_data_2 = src1_data_2 ^ src2_data_2;

        data_0.xyz = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
        data_0.w   = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
                     ? tmp_data_0.w : data_0.w;

        data_1.xy  = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
                     ? tmp_data_1.xy : data_1.xy;
        data_1.zw  = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                     ? tmp_data_1.zw : data_1.zw;

        data_2.x   = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                     ? tmp_data_2.x : data_2.x;
        data_2.yzw = ((dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
                     ? tmp_data_2.yzw : data_2.yzw;

        *((__global uchar4 *)(dst + dst_index + 0)) = data_0;
        *((__global uchar4 *)(dst + dst_index + 4)) = data_1;
        *((__global uchar4 *)(dst + dst_index + 8)) = data_2;
    }
}
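
/* Illustrative sketch, not OpenCV code: the scalar kernels above broadcast the 3-channel
   scalar across four packed pixels by rotating its channels through three uchar4 constants,
   so byte i of each 12-byte group is combined with scalar channel (i % 3). */
uchar scalar_channel_for_byte(uchar4 src2, int i)
{
    uchar s[3] = { src2.x, src2.y, src2.z };
    return s[i % 3];   /* b g r b | g r b g | r b g r for i = 0..11 */
}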
|
||||
|
||||
|
||||
__kernel void arithm_s_bitwise_xor_C3_D1 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
char4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
char4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
char4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
char4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.x, src2.y, src2.z, src2.x);
|
||||
char4 src2_data_1 = (char4)(src2.y, src2.z, src2.x, src2.y);
|
||||
char4 src2_data_2 = (char4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
char4 data_0 = *((__global char4 *)(dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)(dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)(dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0.xyz = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global char4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global char4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global char4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void arithm_s_bitwise_xor_C3_D2 (
|
||||
__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
ushort4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
ushort2 src2_data_0 = (ushort2)(src2.x, src2.y);
|
||||
ushort2 src2_data_1 = (ushort2)(src2.z, src2.x);
|
||||
ushort2 src2_data_2 = (ushort2)(src2.y, src2.z);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
ushort2 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
ushort2 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_xor_C3_D3 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
short4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
short2 src2_data_0 = (short2)(src2.x, src2.y);
|
||||
short2 src2_data_1 = (short2)(src2.z, src2.x);
|
||||
short2 src2_data_2 = (short2)(src2.y, src2.z);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
short2 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
short2 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_xor_C3_D4 (
|
||||
__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = src2.x;
|
||||
int src2_data_1 = src2.y;
|
||||
int src2_data_2 = src2.z;
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
int tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
int tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= tmp_data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= tmp_data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_xor_C3_D5 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
char16 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
char4 src1_data_0 = *((__global char4 *)((__global char *)src1 + src1_index + 0));
|
||||
char4 src1_data_1 = *((__global char4 *)((__global char *)src1 + src1_index + 4));
|
||||
char4 src1_data_2 = *((__global char4 *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
char4 src2_data_1 = (char4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
char4 src2_data_2 = (char4)(src2.s8, src2.s9, src2.sA, src2.sB);
|
||||
|
||||
char4 data_0 = *((__global char4 *)((__global char *)dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)((__global char *)dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 0))= tmp_data_0;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 4))= tmp_data_1;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 8))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_s_bitwise_xor_C3_D6 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
short16 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
short4 src1_data_0 = *((__global short4 *)((__global char *)src1 + src1_index + 0 ));
|
||||
short4 src1_data_1 = *((__global short4 *)((__global char *)src1 + src1_index + 8 ));
|
||||
short4 src1_data_2 = *((__global short4 *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
short4 src2_data_0 = (short4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
short4 src2_data_1 = (short4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
short4 src2_data_2 = (short4)(src2.s8, src2.s9, src2.sa, src2.sb);
|
||||
|
||||
short4 data_0 = *((__global short4 *)((__global char *)dst + dst_index + 0 ));
|
||||
short4 data_1 = *((__global short4 *)((__global char *)dst + dst_index + 8 ));
|
||||
short4 data_2 = *((__global short4 *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
short4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
short4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
short4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 0 ))= tmp_data_0;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 8 ))= tmp_data_1;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 16))= tmp_data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_xor_C4_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
|
@@ -523,380 +523,7 @@ __kernel void arithm_s_bitwise_xor_with_mask_C2_D6 (
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_xor_with_mask_C3_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
uchar4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
uchar4 src2_data_0 = (uchar4)(src2.x, src2.y, src2.z, src2.x);
|
||||
uchar4 src2_data_1 = (uchar4)(src2.y, src2.z, src2.x, src2.y);
|
||||
uchar4 src2_data_2 = (uchar4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
|
||||
uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
|
||||
uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));
|
||||
|
||||
uchar4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
uchar4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
uchar4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global uchar4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global uchar4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
__kernel void arithm_s_bitwise_xor_with_mask_C3_D1 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
char4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 2;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 3 ) & 3)
|
||||
int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));
|
||||
|
||||
char4 src1_data_0 = vload4(0, src1 + src1_index + 0);
|
||||
char4 src1_data_1 = vload4(0, src1 + src1_index + 4);
|
||||
char4 src1_data_2 = vload4(0, src1 + src1_index + 8);
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.x, src2.y, src2.z, src2.x);
|
||||
char4 src2_data_1 = (char4)(src2.y, src2.z, src2.x, src2.y);
|
||||
char4 src2_data_2 = (char4)(src2.z, src2.x, src2.y, src2.z);
|
||||
|
||||
uchar4 mask_data = vload4(0, mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)(dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)(dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)(dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
|
||||
data_0.w = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_0.w : data_0.w;
|
||||
|
||||
data_1.xy = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
|
||||
? tmp_data_1.xy : data_1.xy;
|
||||
data_1.zw = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.zw : data_1.zw;
|
||||
|
||||
data_2.x = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.x : data_2.x;
|
||||
data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
|
||||
? tmp_data_2.yzw : data_2.yzw;
|
||||
|
||||
*((__global char4 *)(dst + dst_index + 0)) = data_0;
|
||||
*((__global char4 *)(dst + dst_index + 4)) = data_1;
|
||||
*((__global char4 *)(dst + dst_index + 8)) = data_2;
|
||||
}
|
||||
}
|
||||
|
||||
__kernel void arithm_s_bitwise_xor_with_mask_C3_D2 (
|
||||
__global ushort *src1, int src1_step, int src1_offset,
|
||||
__global ushort *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
ushort4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
|
||||
ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
|
||||
ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
ushort2 src2_data_0 = (ushort2)(src2.x, src2.y);
|
||||
ushort2 src2_data_1 = (ushort2)(src2.z, src2.x);
|
||||
ushort2 src2_data_2 = (ushort2)(src2.y, src2.z);
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
|
||||
ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
|
||||
ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
ushort2 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
ushort2 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
ushort2 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global ushort2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_xor_with_mask_C3_D3 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
short4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#ifdef dst_align
|
||||
#undef dst_align
|
||||
#endif
|
||||
#define dst_align (((dst_offset % dst_step) / 6 ) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));
|
||||
|
||||
short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
|
||||
short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
|
||||
short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
short2 src2_data_0 = (short2)(src2.x, src2.y);
|
||||
short2 src2_data_1 = (short2)(src2.z, src2.x);
|
||||
short2 src2_data_2 = (short2)(src2.y, src2.z);
|
||||
|
||||
uchar2 mask_data = vload2(0, mask + mask_index);
|
||||
|
||||
short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
|
||||
short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
|
||||
short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
short2 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
short2 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
short2 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;
|
||||
|
||||
data_1.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
|
||||
? tmp_data_1.x : data_1.x;
|
||||
data_1.y = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_1.y : data_1.y;
|
||||
|
||||
data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
|
||||
? tmp_data_2.xy : data_2.xy;
|
||||
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global short2 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_xor_with_mask_C3_D4 (
|
||||
__global int *src1, int src1_step, int src1_offset,
|
||||
__global int *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
|
||||
int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
|
||||
int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
int src2_data_0 = src2.x;
|
||||
int src2_data_1 = src2.y;
|
||||
int src2_data_2 = src2.z;
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
|
||||
int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
|
||||
int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
int tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
int tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
int tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global int *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global int *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global int *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_bitwise_xor_with_mask_C3_D5 (
|
||||
__global char *src1, int src1_step, int src1_offset,
|
||||
__global char *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
char16 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 12));
|
||||
|
||||
char4 src1_data_0 = *((__global char4 *)((__global char *)src1 + src1_index + 0));
|
||||
char4 src1_data_1 = *((__global char4 *)((__global char *)src1 + src1_index + 4));
|
||||
char4 src1_data_2 = *((__global char4 *)((__global char *)src1 + src1_index + 8));
|
||||
|
||||
char4 src2_data_0 = (char4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
char4 src2_data_1 = (char4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
char4 src2_data_2 = (char4)(src2.s8, src2.s9, src2.sA, src2.sB);
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
char4 data_0 = *((__global char4 *)((__global char *)dst + dst_index + 0));
|
||||
char4 data_1 = *((__global char4 *)((__global char *)dst + dst_index + 4));
|
||||
char4 data_2 = *((__global char4 *)((__global char *)dst + dst_index + 8));
|
||||
|
||||
char4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
char4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
char4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 0))= data_0;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 4))= data_1;
|
||||
*((__global char4 *)((__global char *)dst + dst_index + 8))= data_2;
|
||||
}
|
||||
}
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_s_bitwise_xor_with_mask_C3_D6 (
|
||||
__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
__global uchar *mask, int mask_step, int mask_offset,
|
||||
short16 src2, int rows, int cols, int dst_step1)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
|
||||
int mask_index = mad24(y, mask_step, x + mask_offset);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x * 24));
|
||||
|
||||
short4 src1_data_0 = *((__global short4 *)((__global char *)src1 + src1_index + 0 ));
|
||||
short4 src1_data_1 = *((__global short4 *)((__global char *)src1 + src1_index + 8 ));
|
||||
short4 src1_data_2 = *((__global short4 *)((__global char *)src1 + src1_index + 16));
|
||||
|
||||
short4 src2_data_0 = (short4)(src2.s0, src2.s1, src2.s2, src2.s3);
|
||||
short4 src2_data_1 = (short4)(src2.s4, src2.s5, src2.s6, src2.s7);
|
||||
short4 src2_data_2 = (short4)(src2.s8, src2.s9, src2.sa, src2.sb);
|
||||
|
||||
uchar mask_data = * (mask + mask_index);
|
||||
|
||||
short4 data_0 = *((__global short4 *)((__global char *)dst + dst_index + 0 ));
|
||||
short4 data_1 = *((__global short4 *)((__global char *)dst + dst_index + 8 ));
|
||||
short4 data_2 = *((__global short4 *)((__global char *)dst + dst_index + 16));
|
||||
|
||||
short4 tmp_data_0 = src1_data_0 ^ src2_data_0;
|
||||
short4 tmp_data_1 = src1_data_1 ^ src2_data_1;
|
||||
short4 tmp_data_2 = src1_data_2 ^ src2_data_2;
|
||||
|
||||
data_0 = mask_data ? tmp_data_0 : data_0;
|
||||
data_1 = mask_data ? tmp_data_1 : data_1;
|
||||
data_2 = mask_data ? tmp_data_2 : data_2;
|
||||
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 0 ))= data_0;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 8 ))= data_1;
|
||||
*((__global short4 *)((__global char *)dst + dst_index + 16))= data_2;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
__kernel void arithm_s_bitwise_xor_with_mask_C4_D0 (
|
||||
__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
|
File diff suppressed because it is too large
@@ -1,806 +0,0 @@
|
||||
/*M///////////////////////////////////////////////////////////////////////////////////////
|
||||
//
|
||||
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
|
||||
//
|
||||
// By downloading, copying, installing or using the software you agree to this license.
|
||||
// If you do not agree to this license, do not download, install,
|
||||
// copy or use the software.
|
||||
//
|
||||
//
|
||||
// License Agreement
|
||||
// For Open Source Computer Vision Library
|
||||
//
|
||||
// Copyright (C) 2010-2012, Institute Of Software Chinese Academy Of Science, all rights reserved.
|
||||
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
|
||||
// Third party copyrights are property of their respective owners.
|
||||
//
|
||||
// @Authors
|
||||
// Jia Haipeng, jiahaipeng95@gmail.com
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without modification,
|
||||
// are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistribution's of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
//
|
||||
// * Redistribution's in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other oclMaterials provided with the distribution.
|
||||
//
|
||||
// * The name of the copyright holders may not be used to endorse or promote products
|
||||
// derived from this software without specific prior written permission.
|
||||
//
|
||||
// This software is provided by the copyright holders and contributors as is and
|
||||
// any express or implied warranties, including, but not limited to, the implied
|
||||
// warranties of merchantability and fitness for a particular purpose are disclaimed.
|
||||
// In no event shall the Intel Corporation or contributors be liable for any direct,
|
||||
// indirect, incidental, special, exemplary, or consequential damages
|
||||
// (including, but not limited to, procurement of substitute goods or services;
|
||||
// loss of use, data, or profits; or business interruption) however caused
|
||||
// and on any theory of liability, whether in contract, strict liability,
|
||||
// or tort (including negligence or otherwise) arising in any way out of
|
||||
// the use of this software, even if advised of the possibility of such damage.
|
||||
//
|
||||
//M*/
|
||||
|
||||
#if defined (DOUBLE_SUPPORT)
#pragma OPENCL EXTENSION cl_khr_fp64:enable
#endif
/**************************************sub with scalar without mask**************************************/
__kernel void arithm_s_sub_C1_D0 (__global uchar *src1, int src1_step, int src1_offset,
                                  __global uchar *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 2;

#define dst_align (dst_offset & 3)
        int src1_index = mad24(y, src1_step, x + src1_offset - dst_align);

        int dst_start  = mad24(y, dst_step, dst_offset);
        int dst_end    = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index  = mad24(y, dst_step, dst_offset + x & (int)0xfffffffc);

        uchar4 src1_data = vload4(0, src1 + src1_index);
        int4   src2_data = (int4)(src2.x, src2.x, src2.x, src2.x);

        uchar4 data = *((__global uchar4 *)(dst + dst_index));
        int4   tmp  = convert_int4_sat(src1_data) - src2_data;
        tmp = isMatSubScalar ? tmp : -tmp;
        uchar4 tmp_data = convert_uchar4_sat(tmp);

        data.x = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : data.x;
        data.y = ((dst_index + 1 >= dst_start) && (dst_index + 1 < dst_end)) ? tmp_data.y : data.y;
        data.z = ((dst_index + 2 >= dst_start) && (dst_index + 2 < dst_end)) ? tmp_data.z : data.z;
        data.w = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end)) ? tmp_data.w : data.w;

        *((__global uchar4 *)(dst + dst_index)) = data;
    }
}
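
/* Illustrative sketch, not OpenCV code: the single isMatSubScalar flag lets one kernel
   compute both mat - scalar and scalar - mat, since -(src - s) == s - src; the saturating
   conversion back to the element type happens only after the sign is settled. */
int sub_or_rsub(int src, int s, int isMatSubScalar)
{
    int tmp = src - s;
    return isMatSubScalar ? tmp : -tmp;
}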
|
||||
__kernel void arithm_s_sub_C1_D2 (__global ushort *src1, int src1_step, int src1_offset,
                                  __global ushort *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 1;

#define dst_align ((dst_offset >> 1) & 1)
        int src1_index = mad24(y, src1_step, (x << 1) + src1_offset - (dst_align << 1));

        int dst_start  = mad24(y, dst_step, dst_offset);
        int dst_end    = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index  = mad24(y, dst_step, dst_offset + (x << 1) & (int)0xfffffffc);

        ushort2 src1_data = vload2(0, (__global ushort *)((__global char *)src1 + src1_index));
        int2    src2_data = (int2)(src2.x, src2.x);

        ushort2 data = *((__global ushort2 *)((__global uchar *)dst + dst_index));
        int2    tmp  = convert_int2_sat(src1_data) - src2_data;
        tmp = isMatSubScalar ? tmp : -tmp;
        ushort2 tmp_data = convert_ushort2_sat(tmp);

        data.x = (dst_index + 0 >= dst_start) ? tmp_data.x : data.x;
        data.y = (dst_index + 2 <  dst_end  ) ? tmp_data.y : data.y;

        *((__global ushort2 *)((__global uchar *)dst + dst_index)) = data;
    }
}
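
/* Illustrative sketch, not OpenCV code: the 16-bit kernels widen to int, subtract,
   optionally negate, then clamp back to the element range; this is the same per-element
   job convert_ushort2_sat performs above. */
ushort sat_u16(int v)
{
    return (ushort)clamp(v, 0, 65535);   /* saturate to [0, 65535] before narrowing */
}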
|
||||
__kernel void arithm_s_sub_C1_D3 (__global short *src1, int src1_step, int src1_offset,
|
||||
__global short *dst, int dst_step, int dst_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#define dst_align ((dst_offset >> 1) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x << 1) + src1_offset - (dst_align << 1));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x << 1) & (int)0xfffffffc);
|
||||
|
||||
short2 src1_data = vload2(0, (__global short *)((__global char *)src1 + src1_index));
|
||||
int2 src2_data = (int2)(src2.x, src2.x);
|
||||
short2 data = *((__global short2 *)((__global uchar *)dst + dst_index));
|
||||
|
||||
int2 tmp = convert_int2_sat(src1_data) - src2_data;
|
||||
tmp = isMatSubScalar ? tmp : -tmp;
|
||||
short2 tmp_data = convert_short2_sat(tmp);
|
||||
|
||||
data.x = (dst_index + 0 >= dst_start) ? tmp_data.x : data.x;
|
||||
data.y = (dst_index + 2 < dst_end ) ? tmp_data.y : data.y;
|
||||
|
||||
*((__global short2 *)((__global uchar *)dst + dst_index)) = data;
|
||||
}
|
||||
}
|
||||
__kernel void arithm_s_sub_C1_D4 (__global int *src1, int src1_step, int src1_offset,
                                  __global int *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 2) + dst_offset);

        int src_data1 = *((__global int *)((__global char *)src1 + src1_index));
        int src_data2 = src2.x;

        long tmp = (long)src_data1 - (long)src_data2;
        tmp = isMatSubScalar ? tmp : -tmp;
        int data = convert_int_sat(tmp);

        *((__global int *)((__global char *)dst + dst_index)) = data;
    }
}
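
/* Illustrative sketch, not OpenCV code: for 32-bit elements the kernel above widens to
   long before subtracting, so intermediate results such as INT_MIN - 1 stay representable
   and convert_int_sat can clamp them instead of letting them wrap. */
int sub_sat_i32(int a, int b)
{
    long t = (long)a - (long)b;        /* exact 64-bit difference */
    return convert_int_sat(t);         /* clamp to [INT_MIN, INT_MAX] */
}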
|
||||
__kernel void arithm_s_sub_C1_D5 (__global float *src1, int src1_step, int src1_offset,
|
||||
__global float *dst, int dst_step, int dst_offset,
|
||||
float4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, (x << 2) + dst_offset);
|
||||
|
||||
float src_data1 = *((__global float *)((__global char *)src1 + src1_index));
|
||||
float src_data2 = src2.x;
|
||||
|
||||
float tmp = src_data1 - src_data2;
|
||||
tmp = isMatSubScalar ? tmp : -tmp;
|
||||
|
||||
*((__global float *)((__global char *)dst + dst_index)) = tmp;
|
||||
}
|
||||
}
|
||||
|
||||
#if defined (DOUBLE_SUPPORT)
|
||||
__kernel void arithm_s_sub_C1_D6 (__global double *src1, int src1_step, int src1_offset,
|
||||
__global double *dst, int dst_step, int dst_offset,
|
||||
double4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
|
||||
int dst_index = mad24(y, dst_step, (x << 3) + dst_offset);
|
||||
|
||||
double src_data1 = *((__global double *)((__global char *)src1 + src1_index));
|
||||
double src2_data = src2.x;
|
||||
|
||||
double data = src_data1 - src2_data;
|
||||
data = isMatSubScalar ? data : -data;
|
||||
|
||||
*((__global double *)((__global char *)dst + dst_index)) = data;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
__kernel void arithm_s_sub_C2_D0 (__global uchar *src1, int src1_step, int src1_offset,
|
||||
__global uchar *dst, int dst_step, int dst_offset,
|
||||
int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
|
||||
{
|
||||
|
||||
int x = get_global_id(0);
|
||||
int y = get_global_id(1);
|
||||
|
||||
if (x < cols && y < rows)
|
||||
{
|
||||
x = x << 1;
|
||||
|
||||
#define dst_align ((dst_offset >> 1) & 1)
|
||||
int src1_index = mad24(y, src1_step, (x << 1) + src1_offset - (dst_align << 1));
|
||||
|
||||
int dst_start = mad24(y, dst_step, dst_offset);
|
||||
int dst_end = mad24(y, dst_step, dst_offset + dst_step1);
|
||||
int dst_index = mad24(y, dst_step, dst_offset + (x << 1) & (int)0xfffffffc);
|
||||
|
||||
uchar4 src1_data = vload4(0, src1 + src1_index);
|
||||
int4 src2_data = (int4)(src2.x, src2.y, src2.x, src2.y);
|
||||
|
||||
uchar4 data = *((__global uchar4 *)(dst + dst_index));
|
||||
int4 tmp = convert_int4_sat(src1_data) - src2_data;
|
||||
tmp = isMatSubScalar ? tmp : -tmp;
|
||||
uchar4 tmp_data = convert_uchar4_sat(tmp);
|
||||
|
||||
data.xy = (dst_index + 0 >= dst_start) ? tmp_data.xy : data.xy;
|
||||
data.zw = (dst_index + 2 < dst_end ) ? tmp_data.zw : data.zw;
|
||||
|
||||
*((__global uchar4 *)(dst + dst_index)) = data;
|
||||
}
|
||||
}
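
/* Note: the dst_align macro together with dst_start/dst_end keeps the vectorised stores
   4-byte aligned; elements that fall outside [dst_start, dst_end) keep the value already
   present in dst. */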
__kernel void arithm_s_sub_C2_D2 (__global ushort *src1, int src1_step, int src1_offset,
                                  __global ushort *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 2) + dst_offset);

        ushort2 src_data1 = *((__global ushort2 *)((__global char *)src1 + src1_index));
        int2    src_data2 = (int2)(src2.x, src2.y);
        ushort2 dst_data  = *((__global ushort2 *)((__global char *)dst + dst_index));

        int2 tmp = convert_int2_sat(src_data1) - src_data2;
        tmp = isMatSubScalar ? tmp : -tmp;
        ushort2 data = convert_ushort2_sat(tmp);

        *((__global ushort2 *)((__global char *)dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_C2_D3 (__global short *src1, int src1_step, int src1_offset,
                                  __global short *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 2) + dst_offset);

        short2 src_data1 = *((__global short2 *)((__global char *)src1 + src1_index));
        int2   src_data2 = (int2)(src2.x, src2.y);
        short2 dst_data  = *((__global short2 *)((__global char *)dst + dst_index));

        int2 tmp = convert_int2_sat(src_data1) - src_data2;
        tmp = isMatSubScalar ? tmp : -tmp;
        short2 data = convert_short2_sat(tmp);

        *((__global short2 *)((__global char *)dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_C2_D4 (__global int *src1, int src1_step, int src1_offset,
                                  __global int *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 3) + dst_offset);

        int2 src_data1 = *((__global int2 *)((__global char *)src1 + src1_index));
        int2 src_data2 = (int2)(src2.x, src2.y);
        int2 dst_data  = *((__global int2 *)((__global char *)dst + dst_index));

        long2 tmp = convert_long2_sat(src_data1) - convert_long2_sat(src_data2);
        tmp = isMatSubScalar ? tmp : -tmp;
        int2 data = convert_int2_sat(tmp);

        *((__global int2 *)((__global char *)dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_C2_D5 (__global float *src1, int src1_step, int src1_offset,
                                  __global float *dst,  int dst_step,  int dst_offset,
                                  float4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 3) + dst_offset);

        float2 src_data1 = *((__global float2 *)((__global char *)src1 + src1_index));
        float2 src_data2 = (float2)(src2.x, src2.y);
        float2 dst_data  = *((__global float2 *)((__global char *)dst + dst_index));

        float2 tmp = src_data1 - src_data2;
        tmp = isMatSubScalar ? tmp : -tmp;

        *((__global float2 *)((__global char *)dst + dst_index)) = tmp;
    }
}

#if defined (DOUBLE_SUPPORT)
__kernel void arithm_s_sub_C2_D6 (__global double *src1, int src1_step, int src1_offset,
                                  __global double *dst,  int dst_step,  int dst_offset,
                                  double4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 4) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 4) + dst_offset);

        double2 src_data1 = *((__global double2 *)((__global char *)src1 + src1_index));
        double2 src_data2 = (double2)(src2.x, src2.y);
        double2 dst_data  = *((__global double2 *)((__global char *)dst + dst_index));

        double2 data = src_data1 - src_data2;
        data = isMatSubScalar ? data : -data;

        *((__global double2 *)((__global char *)dst + dst_index)) = data;
    }
}
#endif
__kernel void arithm_s_sub_C3_D0 (__global uchar *src1, int src1_step, int src1_offset,
                                  __global uchar *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 2;

#define dst_align (((dst_offset % dst_step) / 3) & 3)
        int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));

        uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
        uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
        uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);

        int4 src2_data_0 = (int4)(src2.x, src2.y, src2.z, src2.x);
        int4 src2_data_1 = (int4)(src2.y, src2.z, src2.x, src2.y);
        int4 src2_data_2 = (int4)(src2.z, src2.x, src2.y, src2.z);

        uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
        uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
        uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));

        int4 tmp_0 = convert_int4_sat(src1_data_0) - src2_data_0;
        int4 tmp_1 = convert_int4_sat(src1_data_1) - src2_data_1;
        int4 tmp_2 = convert_int4_sat(src1_data_2) - src2_data_2;

        tmp_0 = isMatSubScalar ? tmp_0 : -tmp_0;
        tmp_1 = isMatSubScalar ? tmp_1 : -tmp_1;
        tmp_2 = isMatSubScalar ? tmp_2 : -tmp_2;

        uchar4 tmp_data_0 = convert_uchar4_sat(tmp_0);
        uchar4 tmp_data_1 = convert_uchar4_sat(tmp_1);
        uchar4 tmp_data_2 = convert_uchar4_sat(tmp_2);

        data_0.xyz = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
        data_0.w   = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
                     ? tmp_data_0.w : data_0.w;

        data_1.xy  = ((dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
                     ? tmp_data_1.xy : data_1.xy;
        data_1.zw  = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                     ? tmp_data_1.zw : data_1.zw;

        data_2.x   = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                     ? tmp_data_2.x : data_2.x;
        data_2.yzw = ((dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
                     ? tmp_data_2.yzw : data_2.yzw;

        *((__global uchar4 *)(dst + dst_index + 0)) = data_0;
        *((__global uchar4 *)(dst + dst_index + 4)) = data_1;
        *((__global uchar4 *)(dst + dst_index + 8)) = data_2;
    }
}

__kernel void arithm_s_sub_C3_D2 (__global ushort *src1, int src1_step, int src1_offset,
                                  __global ushort *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 1;

#define dst_align (((dst_offset % dst_step) / 6) & 1)
        int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));

        ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
        ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
        ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));

        int2 src2_data_0 = (int2)(src2.x, src2.y);
        int2 src2_data_1 = (int2)(src2.z, src2.x);
        int2 src2_data_2 = (int2)(src2.y, src2.z);

        ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
        ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
        ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));

        int2 tmp_0 = convert_int2_sat(src1_data_0) - src2_data_0;
        int2 tmp_1 = convert_int2_sat(src1_data_1) - src2_data_1;
        int2 tmp_2 = convert_int2_sat(src1_data_2) - src2_data_2;

        tmp_0 = isMatSubScalar ? tmp_0 : -tmp_0;
        tmp_1 = isMatSubScalar ? tmp_1 : -tmp_1;
        tmp_2 = isMatSubScalar ? tmp_2 : -tmp_2;

        ushort2 tmp_data_0 = convert_ushort2_sat(tmp_0);
        ushort2 tmp_data_1 = convert_ushort2_sat(tmp_1);
        ushort2 tmp_data_2 = convert_ushort2_sat(tmp_2);

        data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;

        data_1.x  = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
                    ? tmp_data_1.x : data_1.x;
        data_1.y  = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                    ? tmp_data_1.y : data_1.y;

        data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                    ? tmp_data_2.xy : data_2.xy;

        *((__global ushort2 *)((__global char *)dst + dst_index + 0)) = data_0;
        *((__global ushort2 *)((__global char *)dst + dst_index + 4)) = data_1;
        *((__global ushort2 *)((__global char *)dst + dst_index + 8)) = data_2;
    }
}
__kernel void arithm_s_sub_C3_D3 (__global short *src1, int src1_step, int src1_offset,
                                  __global short *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 1;

#define dst_align (((dst_offset % dst_step) / 6) & 1)
        int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));

        short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
        short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
        short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));

        int2 src2_data_0 = (int2)(src2.x, src2.y);
        int2 src2_data_1 = (int2)(src2.z, src2.x);
        int2 src2_data_2 = (int2)(src2.y, src2.z);

        short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
        short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
        short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));

        int2 tmp_0 = convert_int2_sat(src1_data_0) - src2_data_0;
        int2 tmp_1 = convert_int2_sat(src1_data_1) - src2_data_1;
        int2 tmp_2 = convert_int2_sat(src1_data_2) - src2_data_2;

        tmp_0 = isMatSubScalar ? tmp_0 : -tmp_0;
        tmp_1 = isMatSubScalar ? tmp_1 : -tmp_1;
        tmp_2 = isMatSubScalar ? tmp_2 : -tmp_2;

        short2 tmp_data_0 = convert_short2_sat(tmp_0);
        short2 tmp_data_1 = convert_short2_sat(tmp_1);
        short2 tmp_data_2 = convert_short2_sat(tmp_2);

        data_0.xy = ((dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;

        data_1.x  = ((dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
                    ? tmp_data_1.x : data_1.x;
        data_1.y  = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                    ? tmp_data_1.y : data_1.y;

        data_2.xy = ((dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                    ? tmp_data_2.xy : data_2.xy;

        *((__global short2 *)((__global char *)dst + dst_index + 0)) = data_0;
        *((__global short2 *)((__global char *)dst + dst_index + 4)) = data_1;
        *((__global short2 *)((__global char *)dst + dst_index + 8)) = data_2;
    }
}

__kernel void arithm_s_sub_C3_D4 (__global int *src1, int src1_step, int src1_offset,
                                  __global int *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
        int dst_index  = mad24(y, dst_step,  dst_offset + (x * 12));

        int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
        int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
        int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));

        int src2_data_0 = src2.x;
        int src2_data_1 = src2.y;
        int src2_data_2 = src2.z;

        int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
        int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
        int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));

        long tmp_0 = (long)src1_data_0 - (long)src2_data_0;
        long tmp_1 = (long)src1_data_1 - (long)src2_data_1;
        long tmp_2 = (long)src1_data_2 - (long)src2_data_2;

        tmp_0 = isMatSubScalar ? tmp_0 : -tmp_0;
        tmp_1 = isMatSubScalar ? tmp_1 : -tmp_1;
        tmp_2 = isMatSubScalar ? tmp_2 : -tmp_2;

        int tmp_data_0 = convert_int_sat(tmp_0);
        int tmp_data_1 = convert_int_sat(tmp_1);
        int tmp_data_2 = convert_int_sat(tmp_2);

        *((__global int *)((__global char *)dst + dst_index + 0)) = tmp_data_0;
        *((__global int *)((__global char *)dst + dst_index + 4)) = tmp_data_1;
        *((__global int *)((__global char *)dst + dst_index + 8)) = tmp_data_2;
    }
}
__kernel void arithm_s_sub_C3_D5 (__global float *src1, int src1_step, int src1_offset,
                                  __global float *dst,  int dst_step,  int dst_offset,
                                  float4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
        int dst_index  = mad24(y, dst_step,  dst_offset + (x * 12));

        float src1_data_0 = *((__global float *)((__global char *)src1 + src1_index + 0));
        float src1_data_1 = *((__global float *)((__global char *)src1 + src1_index + 4));
        float src1_data_2 = *((__global float *)((__global char *)src1 + src1_index + 8));

        float src2_data_0 = src2.x;
        float src2_data_1 = src2.y;
        float src2_data_2 = src2.z;

        float data_0 = *((__global float *)((__global char *)dst + dst_index + 0));
        float data_1 = *((__global float *)((__global char *)dst + dst_index + 4));
        float data_2 = *((__global float *)((__global char *)dst + dst_index + 8));

        float tmp_0 = src1_data_0 - src2_data_0;
        float tmp_1 = src1_data_1 - src2_data_1;
        float tmp_2 = src1_data_2 - src2_data_2;

        tmp_0 = isMatSubScalar ? tmp_0 : -tmp_0;
        tmp_1 = isMatSubScalar ? tmp_1 : -tmp_1;
        tmp_2 = isMatSubScalar ? tmp_2 : -tmp_2;

        *((__global float *)((__global char *)dst + dst_index + 0)) = tmp_0;
        *((__global float *)((__global char *)dst + dst_index + 4)) = tmp_1;
        *((__global float *)((__global char *)dst + dst_index + 8)) = tmp_2;
    }
}

#if defined (DOUBLE_SUPPORT)
__kernel void arithm_s_sub_C3_D6 (__global double *src1, int src1_step, int src1_offset,
                                  __global double *dst,  int dst_step,  int dst_offset,
                                  double4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
        int dst_index  = mad24(y, dst_step,  dst_offset + (x * 24));

        double src1_data_0 = *((__global double *)((__global char *)src1 + src1_index + 0 ));
        double src1_data_1 = *((__global double *)((__global char *)src1 + src1_index + 8 ));
        double src1_data_2 = *((__global double *)((__global char *)src1 + src1_index + 16));

        double src2_data_0 = src2.x;
        double src2_data_1 = src2.y;
        double src2_data_2 = src2.z;

        double data_0 = *((__global double *)((__global char *)dst + dst_index + 0 ));
        double data_1 = *((__global double *)((__global char *)dst + dst_index + 8 ));
        double data_2 = *((__global double *)((__global char *)dst + dst_index + 16));

        double tmp_data_0 = src1_data_0 - src2_data_0;
        double tmp_data_1 = src1_data_1 - src2_data_1;
        double tmp_data_2 = src1_data_2 - src2_data_2;

        tmp_data_0 = isMatSubScalar ? tmp_data_0 : -tmp_data_0;
        tmp_data_1 = isMatSubScalar ? tmp_data_1 : -tmp_data_1;
        tmp_data_2 = isMatSubScalar ? tmp_data_2 : -tmp_data_2;

        *((__global double *)((__global char *)dst + dst_index + 0 )) = tmp_data_0;
        *((__global double *)((__global char *)dst + dst_index + 8 )) = tmp_data_1;
        *((__global double *)((__global char *)dst + dst_index + 16)) = tmp_data_2;
    }
}
#endif
__kernel void arithm_s_sub_C4_D0 (__global uchar *src1, int src1_step, int src1_offset,
                                  __global uchar *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 2) + dst_offset);

        uchar4 src_data1 = *((__global uchar4 *)(src1 + src1_index));

        int4 tmp = convert_int4_sat(src_data1) - src2;
        tmp = isMatSubScalar ? tmp : -tmp;
        uchar4 data = convert_uchar4_sat(tmp);

        *((__global uchar4 *)(dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_C4_D2 (__global ushort *src1, int src1_step, int src1_offset,
                                  __global ushort *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 3) + dst_offset);

        ushort4 src_data1 = *((__global ushort4 *)((__global char *)src1 + src1_index));

        int4 tmp = convert_int4_sat(src_data1) - src2;
        tmp = isMatSubScalar ? tmp : -tmp;
        ushort4 data = convert_ushort4_sat(tmp);

        *((__global ushort4 *)((__global char *)dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_C4_D3 (__global short *src1, int src1_step, int src1_offset,
                                  __global short *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 3) + dst_offset);

        short4 src_data1 = *((__global short4 *)((__global char *)src1 + src1_index));

        int4 tmp = convert_int4_sat(src_data1) - src2;
        tmp = isMatSubScalar ? tmp : -tmp;
        short4 data = convert_short4_sat(tmp);

        *((__global short4 *)((__global char *)dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_C4_D4 (__global int *src1, int src1_step, int src1_offset,
                                  __global int *dst,  int dst_step,  int dst_offset,
                                  int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 4) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 4) + dst_offset);

        int4 src_data1 = *((__global int4 *)((__global char *)src1 + src1_index));

        long4 tmp = convert_long4_sat(src_data1) - convert_long4_sat(src2);
        tmp = isMatSubScalar ? tmp : -tmp;
        int4 data = convert_int4_sat(tmp);

        *((__global int4 *)((__global char *)dst + dst_index)) = data;
    }
}
__kernel void arithm_s_sub_C4_D5 (__global float *src1, int src1_step, int src1_offset,
                                  __global float *dst,  int dst_step,  int dst_offset,
                                  float4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 4) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 4) + dst_offset);

        float4 src_data1 = *((__global float4 *)((__global char *)src1 + src1_index));

        float4 tmp = src_data1 - src2;
        tmp = isMatSubScalar ? tmp : -tmp;

        *((__global float4 *)((__global char *)dst + dst_index)) = tmp;
    }
}

#if defined (DOUBLE_SUPPORT)
__kernel void arithm_s_sub_C4_D6 (__global double *src1, int src1_step, int src1_offset,
                                  __global double *dst,  int dst_step,  int dst_offset,
                                  double4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 5) + src1_offset);
        int dst_index  = mad24(y, dst_step,  (x << 5) + dst_offset);

        double4 src_data1 = *((__global double4 *)((__global char *)src1 + src1_index));

        double4 data = src_data1 - src2;
        data = isMatSubScalar ? data : -data;

        *((__global double4 *)((__global char *)dst + dst_index)) = data;
    }
}
#endif
@ -1,941 +0,0 @@
#if defined (DOUBLE_SUPPORT)
#pragma OPENCL EXTENSION cl_khr_fp64:enable
#endif

/**************************************sub with scalar with mask**************************************/
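/* Note: the _with_mask kernels below repeat the mat/scalar subtraction, but a destination
   element is only overwritten when the corresponding 8-bit mask byte is non-zero; otherwise
   the existing dst value is preserved. */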
__kernel void arithm_s_sub_with_mask_C1_D0 (__global uchar *src1, int src1_step, int src1_offset,
                                            __global uchar *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 2;

#define dst_align (dst_offset & 3)
        int src1_index = mad24(y, src1_step, x + src1_offset - dst_align);
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + x & (int)0xfffffffc);

        uchar4 src1_data = vload4(0, src1 + src1_index);
        int4   src2_data = (int4)(src2.x, src2.x, src2.x, src2.x);
        uchar4 mask_data = vload4(0, mask + mask_index);

        uchar4 data = *((__global uchar4 *)(dst + dst_index));
        int4   tmp  = convert_int4_sat(src1_data) - src2_data;
        tmp = isMatSubScalar ? tmp : -tmp;
        uchar4 tmp_data = convert_uchar4_sat(tmp);

        data.x = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end)) ? tmp_data.x : data.x;
        data.y = ((mask_data.y) && (dst_index + 1 >= dst_start) && (dst_index + 1 < dst_end)) ? tmp_data.y : data.y;
        data.z = ((mask_data.z) && (dst_index + 2 >= dst_start) && (dst_index + 2 < dst_end)) ? tmp_data.z : data.z;
        data.w = ((mask_data.w) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end)) ? tmp_data.w : data.w;

        *((__global uchar4 *)(dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_with_mask_C1_D2 (__global ushort *src1, int src1_step, int src1_offset,
                                            __global ushort *dst,  int dst_step,  int dst_offset,
                                            __global uchar  *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 1;

#define dst_align ((dst_offset >> 1) & 1)
        int src1_index = mad24(y, src1_step, (x << 1) + src1_offset - (dst_align << 1));
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + (x << 1) & (int)0xfffffffc);

        ushort2 src1_data = vload2(0, (__global ushort *)((__global char *)src1 + src1_index));
        int2    src2_data = (int2)(src2.x, src2.x);
        uchar2  mask_data = vload2(0, mask + mask_index);

        ushort2 data = *((__global ushort2 *)((__global uchar *)dst + dst_index));
        int2    tmp  = convert_int2_sat(src1_data) - src2_data;
        tmp = isMatSubScalar ? tmp : -tmp;
        ushort2 tmp_data = convert_ushort2_sat(tmp);

        data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
        data.y = ((mask_data.y) && (dst_index + 2 <  dst_end  )) ? tmp_data.y : data.y;

        *((__global ushort2 *)((__global uchar *)dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_with_mask_C1_D3 (__global short *src1, int src1_step, int src1_offset,
                                            __global short *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 1;

#define dst_align ((dst_offset >> 1) & 1)
        int src1_index = mad24(y, src1_step, (x << 1) + src1_offset - (dst_align << 1));
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + (x << 1) & (int)0xfffffffc);

        short2 src1_data = vload2(0, (__global short *)((__global char *)src1 + src1_index));
        int2   src2_data = (int2)(src2.x, src2.x);
        uchar2 mask_data = vload2(0, mask + mask_index);

        short2 data = *((__global short2 *)((__global uchar *)dst + dst_index));
        int2   tmp  = convert_int2_sat(src1_data) - src2_data;
        tmp = isMatSubScalar ? tmp : -tmp;
        short2 tmp_data = convert_short2_sat(tmp);

        data.x = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.x : data.x;
        data.y = ((mask_data.y) && (dst_index + 2 <  dst_end  )) ? tmp_data.y : data.y;

        *((__global short2 *)((__global uchar *)dst + dst_index)) = data;
    }
}
__kernel void arithm_s_sub_with_mask_C1_D4 (__global int *src1, int src1_step, int src1_offset,
                                            __global int *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 2) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        int src_data1 = *((__global int *)((__global char *)src1 + src1_index));
        int src_data2 = src2.x;
        int dst_data  = *((__global int *)((__global char *)dst + dst_index));

        long tmp = (long)src_data1 - (long)src_data2;
        tmp = isMatSubScalar ? tmp : -tmp;
        int data = convert_int_sat(tmp);
        data = mask_data ? data : dst_data;

        *((__global int *)((__global char *)dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_with_mask_C1_D5 (__global float *src1, int src1_step, int src1_offset,
                                            __global float *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            float4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 2) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        float src_data1 = *((__global float *)((__global char *)src1 + src1_index));
        float src_data2 = src2.x;
        float dst_data  = *((__global float *)((__global char *)dst + dst_index));

        float data = src_data1 - src_data2;
        data = isMatSubScalar ? data : -data;
        data = mask_data ? data : dst_data;

        *((__global float *)((__global char *)dst + dst_index)) = data;
    }
}

#if defined (DOUBLE_SUPPORT)
__kernel void arithm_s_sub_with_mask_C1_D6 (__global double *src1, int src1_step, int src1_offset,
                                            __global double *dst,  int dst_step,  int dst_offset,
                                            __global uchar  *mask, int mask_step, int mask_offset,
                                            double4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 3) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        double src_data1 = *((__global double *)((__global char *)src1 + src1_index));
        double src_data2 = src2.x;
        double dst_data  = *((__global double *)((__global char *)dst + dst_index));

        double data = src_data1 - src_data2;
        data = isMatSubScalar ? data : -data;
        data = mask_data ? data : dst_data;

        *((__global double *)((__global char *)dst + dst_index)) = data;
    }
}
#endif
__kernel void arithm_s_sub_with_mask_C2_D0 (__global uchar *src1, int src1_step, int src1_offset,
                                            __global uchar *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 1;

#define dst_align ((dst_offset >> 1) & 1)
        int src1_index = mad24(y, src1_step, (x << 1) + src1_offset - (dst_align << 1));
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + (x << 1) & (int)0xfffffffc);

        uchar4 src1_data = vload4(0, src1 + src1_index);
        int4   src2_data = (int4)(src2.x, src2.y, src2.x, src2.y);
        uchar2 mask_data = vload2(0, mask + mask_index);

        uchar4 data = *((__global uchar4 *)(dst + dst_index));
        int4   tmp  = convert_int4_sat(src1_data) - src2_data;
        tmp = isMatSubScalar ? tmp : -tmp;
        uchar4 tmp_data = convert_uchar4_sat(tmp);

        data.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data.xy : data.xy;
        data.zw = ((mask_data.y) && (dst_index + 2 <  dst_end  )) ? tmp_data.zw : data.zw;

        *((__global uchar4 *)(dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_with_mask_C2_D2 (__global ushort *src1, int src1_step, int src1_offset,
                                            __global ushort *dst,  int dst_step,  int dst_offset,
                                            __global uchar  *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 2) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        ushort2 src_data1 = *((__global ushort2 *)((__global char *)src1 + src1_index));
        int2    src_data2 = (int2)(src2.x, src2.y);
        ushort2 dst_data  = *((__global ushort2 *)((__global char *)dst + dst_index));

        int2 tmp = convert_int2_sat(src_data1) - src_data2;
        tmp = isMatSubScalar ? tmp : -tmp;
        ushort2 data = convert_ushort2_sat(tmp);
        data = mask_data ? data : dst_data;

        *((__global ushort2 *)((__global char *)dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_with_mask_C2_D3 (__global short *src1, int src1_step, int src1_offset,
                                            __global short *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 2) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        short2 src_data1 = *((__global short2 *)((__global char *)src1 + src1_index));
        int2   src_data2 = (int2)(src2.x, src2.y);
        short2 dst_data  = *((__global short2 *)((__global char *)dst + dst_index));

        int2 tmp = convert_int2_sat(src_data1) - src_data2;
        tmp = isMatSubScalar ? tmp : -tmp;
        short2 data = convert_short2_sat(tmp);
        data = mask_data ? data : dst_data;

        *((__global short2 *)((__global char *)dst + dst_index)) = data;
    }
}

__kernel void arithm_s_sub_with_mask_C2_D4 (__global int *src1, int src1_step, int src1_offset,
                                            __global int *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 3) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        int2 src_data1 = *((__global int2 *)((__global char *)src1 + src1_index));
        int2 src_data2 = (int2)(src2.x, src2.y);
        int2 dst_data  = *((__global int2 *)((__global char *)dst + dst_index));

        long2 tmp = convert_long2_sat(src_data1) - convert_long2_sat(src_data2);
        tmp = isMatSubScalar ? tmp : -tmp;
        int2 data = convert_int2_sat(tmp);
        data = mask_data ? data : dst_data;

        *((__global int2 *)((__global char *)dst + dst_index)) = data;
    }
}
__kernel void arithm_s_sub_with_mask_C2_D5 (__global float *src1, int src1_step, int src1_offset,
                                            __global float *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            float4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 3) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        float2 src_data1 = *((__global float2 *)((__global char *)src1 + src1_index));
        float2 src_data2 = (float2)(src2.x, src2.y);
        float2 dst_data  = *((__global float2 *)((__global char *)dst + dst_index));

        float2 data = src_data1 - src_data2;
        data = isMatSubScalar ? data : -data;
        data = mask_data ? data : dst_data;

        *((__global float2 *)((__global char *)dst + dst_index)) = data;
    }
}

#if defined (DOUBLE_SUPPORT)
__kernel void arithm_s_sub_with_mask_C2_D6 (__global double *src1, int src1_step, int src1_offset,
                                            __global double *dst,  int dst_step,  int dst_offset,
                                            __global uchar  *mask, int mask_step, int mask_offset,
                                            double4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 4) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 4) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        double2 src_data1 = *((__global double2 *)((__global char *)src1 + src1_index));
        double2 src_data2 = (double2)(src2.x, src2.y);
        double2 dst_data  = *((__global double2 *)((__global char *)dst + dst_index));

        double2 data = src_data1 - src_data2;
        data = isMatSubScalar ? data : -data;
        data = mask_data ? data : dst_data;

        *((__global double2 *)((__global char *)dst + dst_index)) = data;
    }
}
#endif
__kernel void arithm_s_sub_with_mask_C3_D0 (__global uchar *src1, int src1_step, int src1_offset,
                                            __global uchar *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 2;

#define dst_align (((dst_offset % dst_step) / 3) & 3)
        int src1_index = mad24(y, src1_step, (x * 3) + src1_offset - (dst_align * 3));
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + (x * 3) - (dst_align * 3));

        uchar4 src1_data_0 = vload4(0, src1 + src1_index + 0);
        uchar4 src1_data_1 = vload4(0, src1 + src1_index + 4);
        uchar4 src1_data_2 = vload4(0, src1 + src1_index + 8);

        int4 src2_data_0 = (int4)(src2.x, src2.y, src2.z, src2.x);
        int4 src2_data_1 = (int4)(src2.y, src2.z, src2.x, src2.y);
        int4 src2_data_2 = (int4)(src2.z, src2.x, src2.y, src2.z);

        uchar4 mask_data = vload4(0, mask + mask_index);

        uchar4 data_0 = *((__global uchar4 *)(dst + dst_index + 0));
        uchar4 data_1 = *((__global uchar4 *)(dst + dst_index + 4));
        uchar4 data_2 = *((__global uchar4 *)(dst + dst_index + 8));

        int4 tmp_0 = convert_int4_sat(src1_data_0) - src2_data_0;
        int4 tmp_1 = convert_int4_sat(src1_data_1) - src2_data_1;
        int4 tmp_2 = convert_int4_sat(src1_data_2) - src2_data_2;

        tmp_0 = isMatSubScalar ? tmp_0 : -tmp_0;
        tmp_1 = isMatSubScalar ? tmp_1 : -tmp_1;
        tmp_2 = isMatSubScalar ? tmp_2 : -tmp_2;

        uchar4 tmp_data_0 = convert_uchar4_sat(tmp_0);
        uchar4 tmp_data_1 = convert_uchar4_sat(tmp_1);
        uchar4 tmp_data_2 = convert_uchar4_sat(tmp_2);

        data_0.xyz = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xyz : data_0.xyz;
        data_0.w   = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
                     ? tmp_data_0.w : data_0.w;

        data_1.xy  = ((mask_data.y) && (dst_index + 3 >= dst_start) && (dst_index + 3 < dst_end))
                     ? tmp_data_1.xy : data_1.xy;
        data_1.zw  = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                     ? tmp_data_1.zw : data_1.zw;

        data_2.x   = ((mask_data.z) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                     ? tmp_data_2.x : data_2.x;
        data_2.yzw = ((mask_data.w) && (dst_index + 9 >= dst_start) && (dst_index + 9 < dst_end))
                     ? tmp_data_2.yzw : data_2.yzw;

        *((__global uchar4 *)(dst + dst_index + 0)) = data_0;
        *((__global uchar4 *)(dst + dst_index + 4)) = data_1;
        *((__global uchar4 *)(dst + dst_index + 8)) = data_2;
    }
}

__kernel void arithm_s_sub_with_mask_C3_D2 (__global ushort *src1, int src1_step, int src1_offset,
                                            __global ushort *dst,  int dst_step,  int dst_offset,
                                            __global uchar  *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 1;

#define dst_align (((dst_offset % dst_step) / 6) & 1)
        int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));

        ushort2 src1_data_0 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 0));
        ushort2 src1_data_1 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 4));
        ushort2 src1_data_2 = vload2(0, (__global ushort *)((__global char *)src1 + src1_index + 8));

        int2 src2_data_0 = (int2)(src2.x, src2.y);
        int2 src2_data_1 = (int2)(src2.z, src2.x);
        int2 src2_data_2 = (int2)(src2.y, src2.z);

        uchar2 mask_data = vload2(0, mask + mask_index);

        ushort2 data_0 = *((__global ushort2 *)((__global char *)dst + dst_index + 0));
        ushort2 data_1 = *((__global ushort2 *)((__global char *)dst + dst_index + 4));
        ushort2 data_2 = *((__global ushort2 *)((__global char *)dst + dst_index + 8));

        int2 tmp_0 = convert_int2_sat(src1_data_0) - src2_data_0;
        int2 tmp_1 = convert_int2_sat(src1_data_1) - src2_data_1;
        int2 tmp_2 = convert_int2_sat(src1_data_2) - src2_data_2;

        tmp_0 = isMatSubScalar ? tmp_0 : -tmp_0;
        tmp_1 = isMatSubScalar ? tmp_1 : -tmp_1;
        tmp_2 = isMatSubScalar ? tmp_2 : -tmp_2;

        ushort2 tmp_data_0 = convert_ushort2_sat(tmp_0);
        ushort2 tmp_data_1 = convert_ushort2_sat(tmp_1);
        ushort2 tmp_data_2 = convert_ushort2_sat(tmp_2);

        data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;

        data_1.x  = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
                    ? tmp_data_1.x : data_1.x;
        data_1.y  = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                    ? tmp_data_1.y : data_1.y;

        data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                    ? tmp_data_2.xy : data_2.xy;

        *((__global ushort2 *)((__global char *)dst + dst_index + 0)) = data_0;
        *((__global ushort2 *)((__global char *)dst + dst_index + 4)) = data_1;
        *((__global ushort2 *)((__global char *)dst + dst_index + 8)) = data_2;
    }
}
__kernel void arithm_s_sub_with_mask_C3_D3 (__global short *src1, int src1_step, int src1_offset,
                                            __global short *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        x = x << 1;

#define dst_align (((dst_offset % dst_step) / 6) & 1)
        int src1_index = mad24(y, src1_step, (x * 6) + src1_offset - (dst_align * 6));
        int mask_index = mad24(y, mask_step, x + mask_offset - dst_align);

        int dst_start = mad24(y, dst_step, dst_offset);
        int dst_end   = mad24(y, dst_step, dst_offset + dst_step1);
        int dst_index = mad24(y, dst_step, dst_offset + (x * 6) - (dst_align * 6));

        short2 src1_data_0 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 0));
        short2 src1_data_1 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 4));
        short2 src1_data_2 = vload2(0, (__global short *)((__global char *)src1 + src1_index + 8));

        int2 src2_data_0 = (int2)(src2.x, src2.y);
        int2 src2_data_1 = (int2)(src2.z, src2.x);
        int2 src2_data_2 = (int2)(src2.y, src2.z);

        uchar2 mask_data = vload2(0, mask + mask_index);

        short2 data_0 = *((__global short2 *)((__global char *)dst + dst_index + 0));
        short2 data_1 = *((__global short2 *)((__global char *)dst + dst_index + 4));
        short2 data_2 = *((__global short2 *)((__global char *)dst + dst_index + 8));

        int2 tmp_0 = convert_int2_sat(src1_data_0) - src2_data_0;
        int2 tmp_1 = convert_int2_sat(src1_data_1) - src2_data_1;
        int2 tmp_2 = convert_int2_sat(src1_data_2) - src2_data_2;

        tmp_0 = isMatSubScalar ? tmp_0 : -tmp_0;
        tmp_1 = isMatSubScalar ? tmp_1 : -tmp_1;
        tmp_2 = isMatSubScalar ? tmp_2 : -tmp_2;

        short2 tmp_data_0 = convert_short2_sat(tmp_0);
        short2 tmp_data_1 = convert_short2_sat(tmp_1);
        short2 tmp_data_2 = convert_short2_sat(tmp_2);

        data_0.xy = ((mask_data.x) && (dst_index + 0 >= dst_start)) ? tmp_data_0.xy : data_0.xy;

        data_1.x  = ((mask_data.x) && (dst_index + 0 >= dst_start) && (dst_index + 0 < dst_end))
                    ? tmp_data_1.x : data_1.x;
        data_1.y  = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                    ? tmp_data_1.y : data_1.y;

        data_2.xy = ((mask_data.y) && (dst_index + 6 >= dst_start) && (dst_index + 6 < dst_end))
                    ? tmp_data_2.xy : data_2.xy;

        *((__global short2 *)((__global char *)dst + dst_index + 0)) = data_0;
        *((__global short2 *)((__global char *)dst + dst_index + 4)) = data_1;
        *((__global short2 *)((__global char *)dst + dst_index + 8)) = data_2;
    }
}

__kernel void arithm_s_sub_with_mask_C3_D4 (__global int *src1, int src1_step, int src1_offset,
                                            __global int *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  dst_offset + (x * 12));

        int src1_data_0 = *((__global int *)((__global char *)src1 + src1_index + 0));
        int src1_data_1 = *((__global int *)((__global char *)src1 + src1_index + 4));
        int src1_data_2 = *((__global int *)((__global char *)src1 + src1_index + 8));

        int src2_data_0 = src2.x;
        int src2_data_1 = src2.y;
        int src2_data_2 = src2.z;

        uchar mask_data = *(mask + mask_index);

        int data_0 = *((__global int *)((__global char *)dst + dst_index + 0));
        int data_1 = *((__global int *)((__global char *)dst + dst_index + 4));
        int data_2 = *((__global int *)((__global char *)dst + dst_index + 8));

        long tmp_0 = (long)src1_data_0 - (long)src2_data_0;
        long tmp_1 = (long)src1_data_1 - (long)src2_data_1;
        long tmp_2 = (long)src1_data_2 - (long)src2_data_2;

        tmp_0 = isMatSubScalar ? tmp_0 : -tmp_0;
        tmp_1 = isMatSubScalar ? tmp_1 : -tmp_1;
        tmp_2 = isMatSubScalar ? tmp_2 : -tmp_2;

        int tmp_data_0 = convert_int_sat(tmp_0);
        int tmp_data_1 = convert_int_sat(tmp_1);
        int tmp_data_2 = convert_int_sat(tmp_2);

        data_0 = mask_data ? tmp_data_0 : data_0;
        data_1 = mask_data ? tmp_data_1 : data_1;
        data_2 = mask_data ? tmp_data_2 : data_2;

        *((__global int *)((__global char *)dst + dst_index + 0)) = data_0;
        *((__global int *)((__global char *)dst + dst_index + 4)) = data_1;
        *((__global int *)((__global char *)dst + dst_index + 8)) = data_2;
    }
}
__kernel void arithm_s_sub_with_mask_C3_D5 (__global float *src1, int src1_step, int src1_offset,
                                            __global float *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            float4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 12) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  dst_offset + (x * 12));

        float src1_data_0 = *((__global float *)((__global char *)src1 + src1_index + 0));
        float src1_data_1 = *((__global float *)((__global char *)src1 + src1_index + 4));
        float src1_data_2 = *((__global float *)((__global char *)src1 + src1_index + 8));

        float src2_data_0 = src2.x;
        float src2_data_1 = src2.y;
        float src2_data_2 = src2.z;

        uchar mask_data = *(mask + mask_index);

        float data_0 = *((__global float *)((__global char *)dst + dst_index + 0));
        float data_1 = *((__global float *)((__global char *)dst + dst_index + 4));
        float data_2 = *((__global float *)((__global char *)dst + dst_index + 8));

        float tmp_data_0 = src1_data_0 - src2_data_0;
        float tmp_data_1 = src1_data_1 - src2_data_1;
        float tmp_data_2 = src1_data_2 - src2_data_2;

        tmp_data_0 = isMatSubScalar ? tmp_data_0 : -tmp_data_0;
        tmp_data_1 = isMatSubScalar ? tmp_data_1 : -tmp_data_1;
        tmp_data_2 = isMatSubScalar ? tmp_data_2 : -tmp_data_2;

        data_0 = mask_data ? tmp_data_0 : data_0;
        data_1 = mask_data ? tmp_data_1 : data_1;
        data_2 = mask_data ? tmp_data_2 : data_2;

        *((__global float *)((__global char *)dst + dst_index + 0)) = data_0;
        *((__global float *)((__global char *)dst + dst_index + 4)) = data_1;
        *((__global float *)((__global char *)dst + dst_index + 8)) = data_2;
    }
}

#if defined (DOUBLE_SUPPORT)
__kernel void arithm_s_sub_with_mask_C3_D6 (__global double *src1, int src1_step, int src1_offset,
                                            __global double *dst,  int dst_step,  int dst_offset,
                                            __global uchar  *mask, int mask_step, int mask_offset,
                                            double4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x * 24) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  dst_offset + (x * 24));

        double src1_data_0 = *((__global double *)((__global char *)src1 + src1_index + 0 ));
        double src1_data_1 = *((__global double *)((__global char *)src1 + src1_index + 8 ));
        double src1_data_2 = *((__global double *)((__global char *)src1 + src1_index + 16));

        double src2_data_0 = src2.x;
        double src2_data_1 = src2.y;
        double src2_data_2 = src2.z;

        uchar mask_data = *(mask + mask_index);

        double data_0 = *((__global double *)((__global char *)dst + dst_index + 0 ));
        double data_1 = *((__global double *)((__global char *)dst + dst_index + 8 ));
        double data_2 = *((__global double *)((__global char *)dst + dst_index + 16));

        double tmp_data_0 = src1_data_0 - src2_data_0;
        double tmp_data_1 = src1_data_1 - src2_data_1;
        double tmp_data_2 = src1_data_2 - src2_data_2;

        tmp_data_0 = isMatSubScalar ? tmp_data_0 : -tmp_data_0;
        tmp_data_1 = isMatSubScalar ? tmp_data_1 : -tmp_data_1;
        tmp_data_2 = isMatSubScalar ? tmp_data_2 : -tmp_data_2;

        data_0 = mask_data ? tmp_data_0 : data_0;
        data_1 = mask_data ? tmp_data_1 : data_1;
        data_2 = mask_data ? tmp_data_2 : data_2;

        *((__global double *)((__global char *)dst + dst_index + 0 )) = data_0;
        *((__global double *)((__global char *)dst + dst_index + 8 )) = data_1;
        *((__global double *)((__global char *)dst + dst_index + 16)) = data_2;
    }
}
#endif
|
||||
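The 3-channel kernels address pixels by raw byte offsets: a float 3-channel pixel occupies 3 * 4 = 12 bytes and a double 3-channel pixel 3 * 8 = 24 bytes, which is where the x * 12 and x * 24 terms inside mad24 come from (src1_step, dst_step and the offsets are byte counts as well, and each channel is then read at +0/+4/+8 or +0/+8/+16). The same index arithmetic on the host, as a small sketch (pixel_offset is a hypothetical helper, not module code):

// Byte offset of channel c of pixel (x, y) in a 3-channel image whose
// elements are elemBytes wide (4 for float, 8 for double); step and
// offset are in bytes, as in the kernels above.
static int pixel_offset(int x, int y, int step, int offset, int elemBytes, int c)
{
    return y * step + x * 3 * elemBytes + offset + c * elemBytes;
}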
__kernel void arithm_s_sub_with_mask_C4_D0 (__global uchar *src1, int src1_step, int src1_offset,
                                            __global uchar *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 2) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 2) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        uchar4 src_data1 = *((__global uchar4 *)(src1 + src1_index));
        uchar4 dst_data  = *((__global uchar4 *)(dst  + dst_index));

        int4 tmp = convert_int4_sat(src_data1) - src2;
        tmp = isMatSubScalar ? tmp : -tmp;
        uchar4 data = convert_uchar4_sat(tmp);

        data = mask_data ? data : dst_data;

        *((__global uchar4 *)(dst + dst_index)) = data;
    }
}
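For 4-channel 8-bit data the kernel can instead load a whole pixel as one uchar4 (4 bytes, hence x << 2), widen it with convert_int4_sat, subtract, optionally negate, and narrow back with convert_uchar4_sat. The widening step is what turns wrap-around into proper saturation. The same idea per channel in plain C++ (sub_u8_sat is a hypothetical helper used only for illustration):

#include <algorithm>

// Saturating uchar subtraction matching the C4_D0 kernel's
// convert_int4_sat / convert_uchar4_sat round trip.
static unsigned char sub_u8_sat(unsigned char src, int scalar, int isMatSubScalar)
{
    int tmp = (int)src - scalar;        // widen first: no wrap-around
    tmp = isMatSubScalar ? tmp : -tmp;  // mat - scalar vs scalar - mat
    return (unsigned char)std::max(0, std::min(255, tmp));  // clamp to [0, 255]
}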
__kernel void arithm_s_sub_with_mask_C4_D2 (__global ushort *src1, int src1_step, int src1_offset,
                                            __global ushort *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 3) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        ushort4 src_data1 = *((__global ushort4 *)((__global char *)src1 + src1_index));
        ushort4 dst_data  = *((__global ushort4 *)((__global char *)dst  + dst_index));

        int4 tmp = convert_int4_sat(src_data1) - src2;
        tmp = isMatSubScalar ? tmp : -tmp;
        ushort4 data = convert_ushort4_sat(tmp);

        data = mask_data ? data : dst_data;

        *((__global ushort4 *)((__global char *)dst + dst_index)) = data;
    }
}
__kernel void arithm_s_sub_with_mask_C4_D3 (__global short *src1, int src1_step, int src1_offset,
                                            __global short *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 3) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 3) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        short4 src_data1 = *((__global short4 *)((__global char *)src1 + src1_index));
        short4 dst_data  = *((__global short4 *)((__global char *)dst  + dst_index));

        int4 tmp = convert_int4_sat(src_data1) - src2;
        tmp = isMatSubScalar ? tmp : -tmp;
        short4 data = convert_short4_sat(tmp);

        data = mask_data ? data : dst_data;

        *((__global short4 *)((__global char *)dst + dst_index)) = data;
    }
}
__kernel void arithm_s_sub_with_mask_C4_D4 (__global int *src1, int src1_step, int src1_offset,
                                            __global int *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            int4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 4) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 4) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        int4 src_data1 = *((__global int4 *)((__global char *)src1 + src1_index));
        int4 dst_data  = *((__global int4 *)((__global char *)dst  + dst_index));

        long4 tmp = convert_long4_sat(src_data1) - convert_long4_sat(src2);
        tmp = isMatSubScalar ? tmp : -tmp;
        int4 data = convert_int4_sat(tmp);

        data = mask_data ? data : dst_data;

        *((__global int4 *)((__global char *)dst + dst_index)) = data;
    }
}
__kernel void arithm_s_sub_with_mask_C4_D5 (__global float *src1, int src1_step, int src1_offset,
                                            __global float *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            float4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 4) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 4) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        float4 src_data1 = *((__global float4 *)((__global char *)src1 + src1_index));
        float4 dst_data  = *((__global float4 *)((__global char *)dst  + dst_index));

        float4 data = src_data1 - src2;
        data = isMatSubScalar ? data : -data;

        data = mask_data ? data : dst_data;

        *((__global float4 *)((__global char *)dst + dst_index)) = data;
    }
}
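The double-precision kernels below are only compiled when the build options define DOUBLE_SUPPORT, so they never reach devices without fp64. One plausible way a host could decide whether to pass that define, sketched with the raw OpenCL API (the macro name comes from this file; the device query is standard OpenCL, but how the module actually wires this up is an assumption):

#include <CL/cl.h>
#include <string>

// Returns build options enabling the double kernels only if the device
// reports some double-precision capability (sketch, not the module's code).
static std::string doubleBuildOptions(cl_device_id dev)
{
    cl_device_fp_config fp64 = 0;
    clGetDeviceInfo(dev, CL_DEVICE_DOUBLE_FP_CONFIG, sizeof(fp64), &fp64, 0);
    return fp64 ? "-D DOUBLE_SUPPORT" : "";
}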
#if defined (DOUBLE_SUPPORT)
__kernel void arithm_s_sub_with_mask_C4_D6 (__global double *src1, int src1_step, int src1_offset,
                                            __global double *dst,  int dst_step,  int dst_offset,
                                            __global uchar *mask, int mask_step, int mask_offset,
                                            double4 src2, int rows, int cols, int dst_step1, int isMatSubScalar)
{
    int x = get_global_id(0);
    int y = get_global_id(1);

    if (x < cols && y < rows)
    {
        int src1_index = mad24(y, src1_step, (x << 5) + src1_offset);
        int mask_index = mad24(y, mask_step, x + mask_offset);
        int dst_index  = mad24(y, dst_step,  (x << 5) + dst_offset);

        uchar mask_data = *(mask + mask_index);

        double4 src_data1 = *((__global double4 *)((__global char *)src1 + src1_index));
        double4 dst_data  = *((__global double4 *)((__global char *)dst  + dst_index));

        double4 data = src_data1 - src2;
        data = isMatSubScalar ? data : -data;
        data = mask_data ? data : dst_data;

        *((__global double4 *)((__global char *)dst + dst_index)) = data;
    }
}
#endif
@ -1531,6 +1531,10 @@ INSTANTIATE_TEST_CASE_P(Arithm, Add, Combine(
                        Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32SC1, CV_32SC3, CV_32SC4, CV_32FC1, CV_32FC3, CV_32FC4),
                        Values(false)));

INSTANTIATE_TEST_CASE_P(Arithm, Sub, Combine(
                        Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32SC1, CV_32SC3, CV_32SC4, CV_32FC1, CV_32FC3, CV_32FC4),
                        Values(false)));

INSTANTIATE_TEST_CASE_P(Arithm, Mul, Combine(
                        Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32SC1, CV_32SC3, CV_32SC4, CV_32FC1, CV_32FC3, CV_32FC4),
                        Values(false))); // Values(false) is the reserved parameter

@ -1586,19 +1590,19 @@ INSTANTIATE_TEST_CASE_P(Arithm, Phase, Combine(Values(CV_32FC1, CV_32FC3, CV_32F

INSTANTIATE_TEST_CASE_P(Arithm, Bitwise_and, Combine(
                        Values(CV_8UC1, CV_32SC1, CV_32SC3, CV_32SC4, CV_32FC1, CV_32FC3, CV_32FC4), Values(false)));
                        Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32SC1, CV_32SC3, CV_32SC4, CV_32FC1, CV_32FC3, CV_32FC4), Values(false)));
                        //Values(false) is the reserved parameter

INSTANTIATE_TEST_CASE_P(Arithm, Bitwise_or, Combine(
                        Values(CV_8UC1, CV_8UC3, CV_32SC1, CV_32FC1, CV_32FC3, CV_32FC4), Values(false)));
                        Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32SC1, CV_32FC1, CV_32FC3, CV_32FC4), Values(false)));
                        //Values(false) is the reserved parameter

INSTANTIATE_TEST_CASE_P(Arithm, Bitwise_xor, Combine(
                        Values(CV_8UC1, CV_8UC3, CV_32SC1, CV_32FC1, CV_32FC3, CV_32FC4), Values(false)));
                        Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32SC1, CV_32FC1, CV_32FC3, CV_32FC4), Values(false)));
                        //Values(false) is the reserved parameter

INSTANTIATE_TEST_CASE_P(Arithm, Bitwise_not, Combine(
                        Values(CV_8UC1, CV_8UC3, CV_32SC1, CV_32FC1, CV_32FC3, CV_32FC4), Values(false)));
                        Values(CV_8UC1, CV_8UC3, CV_8UC4, CV_32SC1, CV_32FC1, CV_32FC3, CV_32FC4), Values(false)));
                        //Values(false) is the reserved parameter

INSTANTIATE_TEST_CASE_P(Arithm, Compare, Combine(Values(CV_8UC1, CV_32SC1, CV_32FC1), Values(false)));
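Each INSTANTIATE_TEST_CASE_P call above feeds every listed matrix type, combined with the reserved false flag, into the matching value-parameterized Google Test fixture, so one TEST_P body runs once per type. A stripped-down sketch of that pattern with a hypothetical ArithmOp fixture (the real fixtures are defined earlier in the test file):

#include <gtest/gtest.h>
#include <tuple>

// Hypothetical fixture: parameterized on (matrix type id, reserved flag).
class ArithmOp : public ::testing::TestWithParam<std::tuple<int, bool> > {};

TEST_P(ArithmOp, RunsOncePerType)
{
    int  matType  = std::get<0>(GetParam());
    bool reserved = std::get<1>(GetParam());
    (void)reserved;                // reserved parameter, unused by design
    ASSERT_GE(matType, 0);         // placeholder for the real per-type check
}

INSTANTIATE_TEST_CASE_P(Arithm, ArithmOp, ::testing::Combine(
                        ::testing::Values(0, 1, 2),   // stand-ins for CV_8UC1, CV_8UC3, ...
                        ::testing::Values(false)));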