Commit d32d576d authored by Alexander Alekhin

core: dispatch convert_scale

parent 39b90ae9
@@ -4,6 +4,7 @@ ocv_add_dispatched_file(mathfuncs_core SSE2 AVX AVX2)
ocv_add_dispatched_file(stat SSE4_2 AVX2)
ocv_add_dispatched_file(arithm SSE2 SSE4_1 AVX2 VSX3)
ocv_add_dispatched_file(convert SSE2 AVX2)
ocv_add_dispatched_file(convert_scale SSE2 AVX2)
# dispatching for accuracy tests
ocv_add_dispatched_file_force_all(test_intrin128 TEST SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 AVX FP16 AVX2)
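For context: ocv_add_dispatched_file(convert_scale SSE2 AVX2) tells the build to compile convert_scale.simd.hpp once per listed ISA (plus the baseline), while a small convert_scale.dispatch.cpp wrapper selects the best variant at runtime. Below is a minimal sketch of such a wrapper, assuming the usual CV_CPU_DISPATCH pattern used by the other dispatched files; the function names mirror the declarations in the hunk that follows, not the exact contents of the commit.

// convert_scale.dispatch.cpp (sketch, not part of the diff)
#include "precomp.hpp"
#include "convert_scale.simd.hpp"
#include "convert_scale.simd_declarations.hpp" // generated for the dispatched file

namespace cv {

BinaryFunc getCvtScaleAbsFunc(int depth)
{
    CV_INSTRUMENT_REGION();
    CV_CPU_DISPATCH(getCvtScaleAbsFunc, (depth),
        CV_CPU_DISPATCH_MODES_ALL);
}

BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
{
    CV_INSTRUMENT_REGION();
    CV_CPU_DISPATCH(getConvertScaleFunc, (sdepth, ddepth),
        CV_CPU_DISPATCH_MODES_ALL);
}

} // namespace cv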
@@ -4,16 +4,20 @@
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "convert.hpp"
namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
BinaryFunc getCvtScaleAbsFunc(int depth);
BinaryFunc getConvertScaleFunc(int sdepth, int ddepth);
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
/****************************************************************************************\
* convertScale[Abs] *
\****************************************************************************************/
namespace cv
{
template<typename _Ts, typename _Td> inline void
cvtabs_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep,
Size size, float a, float b )
@@ -287,7 +291,7 @@ DEF_CVT_SCALE_FUNC(32f16f, cvt1_32f, float, float16_t, float)
DEF_CVT_SCALE_FUNC(64f16f, cvt_64f, double, float16_t, double)
DEF_CVT_SCALE_FUNC(16f, cvt1_32f, float16_t, float16_t, float)*/
static BinaryFunc getCvtScaleAbsFunc(int depth)
BinaryFunc getCvtScaleAbsFunc(int depth)
{
static BinaryFunc cvtScaleAbsTab[] =
{
@@ -349,238 +353,7 @@ BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
}
#ifdef HAVE_OPENCL
static bool ocl_convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
const ocl::Device & d = ocl::Device::getDefault();
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
bool doubleSupport = d.doubleFPConfig() > 0;
if (!doubleSupport && depth == CV_64F)
return false;
_dst.create(_src.size(), CV_8UC(cn));
int kercn = 1;
if (d.isIntel())
{
static const int vectorWidths[] = {4, 4, 4, 4, 4, 4, 4, -1};
kercn = ocl::checkOptimalVectorWidth( vectorWidths, _src, _dst,
noArray(), noArray(), noArray(),
noArray(), noArray(), noArray(),
noArray(), ocl::OCL_VECTOR_MAX);
}
else
kercn = ocl::predictOptimalVectorWidthMax(_src, _dst);
int rowsPerWI = d.isIntel() ? 4 : 1;
char cvt[2][50];
int wdepth = std::max(depth, CV_32F);
String build_opt = format("-D OP_CONVERT_SCALE_ABS -D UNARY_OP -D dstT=%s -D DEPTH_dst=%d -D srcT1=%s"
" -D workT=%s -D wdepth=%d -D convertToWT1=%s -D convertToDT=%s"
" -D workT1=%s -D rowsPerWI=%d%s",
ocl::typeToStr(CV_8UC(kercn)), CV_8U,
ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)), wdepth,
ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
ocl::convertTypeStr(wdepth, CV_8U, kercn, cvt[1]),
ocl::typeToStr(wdepth), rowsPerWI,
doubleSupport ? " -D DOUBLE_SUPPORT" : "");
ocl::Kernel k("KF", ocl::core::arithm_oclsrc, build_opt);
if (k.empty())
return false;
UMat src = _src.getUMat();
UMat dst = _dst.getUMat();
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);
if (wdepth == CV_32F)
k.args(srcarg, dstarg, (float)alpha, (float)beta);
else if (wdepth == CV_64F)
k.args(srcarg, dstarg, alpha, beta);
size_t globalsize[2] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
return k.run(2, globalsize, NULL, false);
}
#endif
} //cv::
void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
CV_INSTRUMENT_REGION();
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_convertScaleAbs(_src, _dst, alpha, beta))
Mat src = _src.getMat();
int cn = src.channels();
double scale[] = {alpha, beta};
_dst.create( src.dims, src.size, CV_8UC(cn) );
Mat dst = _dst.getMat();
BinaryFunc func = getCvtScaleAbsFunc(src.depth());
CV_Assert( func != 0 );
if( src.dims <= 2 )
{
Size sz = getContinuousSize2D(src, dst, cn);
func( src.ptr(), src.step, 0, 0, dst.ptr(), dst.step, sz, scale );
}
else
{
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2] = {};
NAryMatIterator it(arrays, ptrs);
Size sz((int)it.size*cn, 1);
for( size_t i = 0; i < it.nplanes; i++, ++it )
func( ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale );
}
}
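As a quick usage illustration (not part of the diff): convertScaleAbs computes saturate_cast<uchar>(|alpha*src + beta|), and with this commit the per-element loop comes from the dispatched getCvtScaleAbsFunc table. A minimal, self-contained example:

#include <opencv2/core.hpp>

int main()
{
    cv::Mat src = (cv::Mat_<float>(1, 3) << -1.5f, 0.f, 2.f);
    cv::Mat dst;
    cv::convertScaleAbs(src, dst, /*alpha=*/2.0, /*beta=*/1.0);
    // dst is CV_8U and holds |2*src + 1| = {2, 1, 5}
    return 0;
}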
//==================================================================================================
namespace cv {
#ifdef HAVE_OPENCL
static bool ocl_normalize( InputArray _src, InputOutputArray _dst, InputArray _mask, int dtype,
double scale, double delta )
{
UMat src = _src.getUMat();
if( _mask.empty() )
src.convertTo( _dst, dtype, scale, delta );
else if (src.channels() <= 4)
{
const ocl::Device & dev = ocl::Device::getDefault();
int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype),
ddepth = CV_MAT_DEPTH(dtype), wdepth = std::max(CV_32F, std::max(sdepth, ddepth)),
rowsPerWI = dev.isIntel() ? 4 : 1;
float fscale = static_cast<float>(scale), fdelta = static_cast<float>(delta);
bool haveScale = std::fabs(scale - 1) > DBL_EPSILON,
haveZeroScale = !(std::fabs(scale) > DBL_EPSILON),
haveDelta = std::fabs(delta) > DBL_EPSILON,
doubleSupport = dev.doubleFPConfig() > 0;
if (!haveScale && !haveDelta && stype == dtype)
{
_src.copyTo(_dst, _mask);
return true;
}
if (haveZeroScale)
{
_dst.setTo(Scalar(delta), _mask);
return true;
}
if ((sdepth == CV_64F || ddepth == CV_64F) && !doubleSupport)
return false;
char cvt[2][40];
String opts = format("-D srcT=%s -D dstT=%s -D convertToWT=%s -D cn=%d -D rowsPerWI=%d"
" -D convertToDT=%s -D workT=%s%s%s%s -D srcT1=%s -D dstT1=%s",
ocl::typeToStr(stype), ocl::typeToStr(dtype),
ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]), cn,
rowsPerWI, ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
doubleSupport ? " -D DOUBLE_SUPPORT" : "",
haveScale ? " -D HAVE_SCALE" : "",
haveDelta ? " -D HAVE_DELTA" : "",
ocl::typeToStr(sdepth), ocl::typeToStr(ddepth));
ocl::Kernel k("normalizek", ocl::core::normalize_oclsrc, opts);
if (k.empty())
return false;
UMat mask = _mask.getUMat(), dst = _dst.getUMat();
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
maskarg = ocl::KernelArg::ReadOnlyNoSize(mask),
dstarg = ocl::KernelArg::ReadWrite(dst);
if (haveScale)
{
if (haveDelta)
k.args(srcarg, maskarg, dstarg, fscale, fdelta);
else
k.args(srcarg, maskarg, dstarg, fscale);
}
else
{
if (haveDelta)
k.args(srcarg, maskarg, dstarg, fdelta);
else
k.args(srcarg, maskarg, dstarg);
}
size_t globalsize[2] = { (size_t)src.cols, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
return k.run(2, globalsize, NULL, false);
}
else
{
UMat temp;
src.convertTo( temp, dtype, scale, delta );
temp.copyTo( _dst, _mask );
}
return true;
}
#endif
} // cv::
void cv::normalize( InputArray _src, InputOutputArray _dst, double a, double b,
int norm_type, int rtype, InputArray _mask )
{
CV_INSTRUMENT_REGION();
double scale = 1, shift = 0;
int type = _src.type(), depth = CV_MAT_DEPTH(type);
if( rtype < 0 )
rtype = _dst.fixedType() ? _dst.depth() : depth;
if( norm_type == CV_MINMAX )
{
double smin = 0, smax = 0;
double dmin = MIN( a, b ), dmax = MAX( a, b );
minMaxIdx( _src, &smin, &smax, 0, 0, _mask );
scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
if( rtype == CV_32F )
{
scale = (float)scale;
shift = (float)dmin - (float)(smin*scale);
}
else
shift = dmin - smin*scale;
}
else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
{
scale = norm( _src, norm_type, _mask );
scale = scale > DBL_EPSILON ? a/scale : 0.;
shift = 0;
}
else
CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
CV_OCL_RUN(_dst.isUMat(),
ocl_normalize(_src, _dst, _mask, rtype, scale, shift))
Mat src = _src.getMat();
if( _mask.empty() )
src.convertTo( _dst, rtype, scale, shift );
else
{
Mat temp;
src.convertTo( temp, rtype, scale, shift );
temp.copyTo( _dst, _mask );
}
}
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace
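A similarly hedged example for normalize, which in the mask-free case reduces to convertTo with the scale/shift computed above and therefore also exercises the dispatched conversion kernels:

#include <opencv2/core.hpp>

int main()
{
    cv::Mat src = (cv::Mat_<float>(1, 4) << 2.f, 4.f, 6.f, 8.f);
    cv::Mat dst;
    // NORM_MINMAX: map the source range [2, 8] linearly onto [0, 1]
    cv::normalize(src, dst, 0.0, 1.0, cv::NORM_MINMAX, CV_32F);
    // dst == {0, 1/3, 2/3, 1}
    return 0;
}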