Commit dfef04b3 authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents f8ac46ba b1caa4f4
......@@ -77,7 +77,7 @@ by the @ref cv::VideoCapture::read or the overloaded \>\> operator:
@code{.cpp}
Mat frameReference, frameUnderTest;
captRefrnc >> frameReference;
captUndTst.open(frameUnderTest);
captUndTst.read(frameUnderTest);
@endcode
These read operations will leave the *Mat* objects empty if no frame could be acquired (either
because the video stream was closed or because the end of the video file was reached). We can check this with a
......
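The tutorial hunk above replaces a mistaken `open()` call on the second capture with `read()`. A minimal sketch of the empty-frame check the text goes on to describe (assuming `captRefrnc` is an already-opened `cv::VideoCapture`):

```cpp
cv::Mat frameReference;
captRefrnc >> frameReference;  // equivalent to captRefrnc.read(frameReference)
if (frameReference.empty())
{
    // the stream was closed or the end of the video file was reached
    return;
}
```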
......@@ -3348,10 +3348,17 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints,
for( i = 0; i < nimages; i++ )
{
ni = objectPoints.getMat(i).checkVector(3, CV_32F);
Mat objectPoint = objectPoints.getMat(i);
if (objectPoint.empty())
CV_Error(CV_StsBadSize, "objectPoints should not contain empty vector of vectors of points");
ni = objectPoint.checkVector(3, CV_32F);
if( ni <= 0 )
CV_Error(CV_StsUnsupportedFormat, "objectPoints should contain vector of vectors of points of type Point3f");
int ni1 = imagePoints1.getMat(i).checkVector(2, CV_32F);
Mat imagePoint1 = imagePoints1.getMat(i);
if (imagePoint1.empty())
CV_Error(CV_StsBadSize, "imagePoints1 should not contain empty vector of vectors of points");
int ni1 = imagePoint1.checkVector(2, CV_32F);
if( ni1 <= 0 )
CV_Error(CV_StsUnsupportedFormat, "imagePoints1 should contain vector of vectors of points of type Point2f");
CV_Assert( ni == ni1 );
......
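The checks added above reject empty per-view point sets before `checkVector` runs. A hedged illustration of the resulting behavior (the setup values are illustrative, not from the patch): passing an empty view to `cv::calibrateCamera` now fails with `CV_StsBadSize` rather than a misleading `CV_StsUnsupportedFormat`:

```cpp
#include <opencv2/calib3d.hpp>

std::vector<std::vector<cv::Point3f>> objectPoints(1);  // one view, left empty
std::vector<std::vector<cv::Point2f>> imagePoints(1);
cv::Mat K, distCoeffs;
std::vector<cv::Mat> rvecs, tvecs;
try
{
    cv::calibrateCamera(objectPoints, imagePoints, cv::Size(640, 480),
                        K, distCoeffs, rvecs, tvecs);
}
catch (const cv::Exception& e)
{
    // e.code is now cv::Error::StsBadSize for the empty vector of points
}
```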
......@@ -173,12 +173,12 @@ void CV_CameraCalibrationBadArgTest::run( int /* start_from */ )
caller.initArgs();
caller.imgPts[0].clear();
errors += run_test_case( CV_StsUnsupportedFormat, "Bad imgpts[0]", caller );
errors += run_test_case( CV_StsBadSize, "Bad imgpts[0]", caller );
caller.imgPts[0] = caller.imgPts[1];
caller.initArgs();
caller.objPts[1].clear();
errors += run_test_case( CV_StsUnsupportedFormat, "Bad objpts[1]", caller );
errors += run_test_case( CV_StsBadSize, "Bad objpts[1]", caller );
caller.objPts[1] = caller.objPts[0];
caller.initArgs();
......
......@@ -3,6 +3,8 @@ set(the_description "The Core Functionality")
ocv_add_dispatched_file(mathfuncs_core SSE2 AVX AVX2)
ocv_add_dispatched_file(stat SSE4_2 AVX2)
ocv_add_dispatched_file(arithm SSE2 SSE4_1 AVX2 VSX3)
ocv_add_dispatched_file(convert SSE2 AVX2)
ocv_add_dispatched_file(convert_scale SSE2 AVX2)
# dispatching for accuracy tests
ocv_add_dispatched_file_force_all(test_intrin128 TEST SSE2 SSE3 SSSE3 SSE4_1 SSE4_2 AVX FP16 AVX2)
......
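The two new `ocv_add_dispatched_file` entries make the build compile `convert.simd.hpp` and `convert_scale.simd.hpp` once per listed ISA and generate the `*_declarations.hpp` headers included below. Conceptually, the runtime dispatch in the new `convert.cpp` reduces to something like this sketch (not the literal macro expansion):

```cpp
// Rough shape of CV_CPU_DISPATCH(getConvertFunc, (sdepth, ddepth), CV_CPU_DISPATCH_MODES_ALL):
#ifdef CV_CPU_DISPATCH_COMPILE_AVX2
if (cv::checkHardwareSupport(CV_CPU_AVX2))
    return opt_AVX2::getConvertFunc(sdepth, ddepth);  // AVX2 build of convert.simd.hpp
#endif
return cpu_baseline::getConvertFunc(sdepth, ddepth);  // baseline build (e.g. SSE2)
```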
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "convert.simd.hpp"
#include "convert.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
namespace cv {
namespace hal {
void cvt16f32f(const float16_t* src, float* dst, int len)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(cvt16f32f, (src, dst, len),
CV_CPU_DISPATCH_MODES_ALL);
}
void cvt32f16f(const float* src, float16_t* dst, int len)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(cvt32f16f, (src, dst, len),
CV_CPU_DISPATCH_MODES_ALL);
}
void addRNGBias32f(float* arr, const float* scaleBiasPairs, int len)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(addRNGBias32f, (arr, scaleBiasPairs, len),
CV_CPU_DISPATCH_MODES_ALL);
}
void addRNGBias64f(double* arr, const double* scaleBiasPairs, int len)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(addRNGBias64f, (arr, scaleBiasPairs, len),
CV_CPU_DISPATCH_MODES_ALL);
}
} // namespace
/* [TODO] Recover IPP calls
#if defined(HAVE_IPP)
#define DEF_CVT_FUNC_F(suffix, stype, dtype, ippFavor) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
dtype* dst, size_t dstep, Size size, double*) \
{ \
CV_IPP_RUN(src && dst, CV_INSTRUMENT_FUN_IPP(ippiConvert_##ippFavor, src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height)) >= 0) \
cvt_(src, sstep, dst, dstep, size); \
}
#define DEF_CVT_FUNC_F2(suffix, stype, dtype, ippFavor) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
dtype* dst, size_t dstep, Size size, double*) \
{ \
CV_IPP_RUN(src && dst, CV_INSTRUMENT_FUN_IPP(ippiConvert_##ippFavor, src, (int)sstep, dst, (int)dstep, ippiSize(size.width, size.height), ippRndFinancial, 0) >= 0) \
cvt_(src, sstep, dst, dstep, size); \
}
#else
#define DEF_CVT_FUNC_F(suffix, stype, dtype, ippFavor) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
dtype* dst, size_t dstep, Size size, double*) \
{ \
cvt_(src, sstep, dst, dstep, size); \
}
#define DEF_CVT_FUNC_F2 DEF_CVT_FUNC_F
#endif
#define DEF_CVT_FUNC(suffix, stype, dtype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
dtype* dst, size_t dstep, Size size, double*) \
{ \
cvt_(src, sstep, dst, dstep, size); \
}
#define DEF_CPY_FUNC(suffix, stype) \
static void cvt##suffix( const stype* src, size_t sstep, const uchar*, size_t, \
stype* dst, size_t dstep, Size size, double*) \
{ \
cpy_(src, sstep, dst, dstep, size); \
}
DEF_CPY_FUNC(8u, uchar)
DEF_CVT_FUNC_F(8s8u, schar, uchar, 8s8u_C1Rs)
DEF_CVT_FUNC_F(16u8u, ushort, uchar, 16u8u_C1R)
DEF_CVT_FUNC_F(16s8u, short, uchar, 16s8u_C1R)
DEF_CVT_FUNC_F(32s8u, int, uchar, 32s8u_C1R)
DEF_CVT_FUNC_F2(32f8u, float, uchar, 32f8u_C1RSfs)
DEF_CVT_FUNC(64f8u, double, uchar)
DEF_CVT_FUNC_F2(8u8s, uchar, schar, 8u8s_C1RSfs)
DEF_CVT_FUNC_F2(16u8s, ushort, schar, 16u8s_C1RSfs)
DEF_CVT_FUNC_F2(16s8s, short, schar, 16s8s_C1RSfs)
DEF_CVT_FUNC_F(32s8s, int, schar, 32s8s_C1R)
DEF_CVT_FUNC_F2(32f8s, float, schar, 32f8s_C1RSfs)
DEF_CVT_FUNC(64f8s, double, schar)
DEF_CVT_FUNC_F(8u16u, uchar, ushort, 8u16u_C1R)
DEF_CVT_FUNC_F(8s16u, schar, ushort, 8s16u_C1Rs)
DEF_CPY_FUNC(16u, ushort)
DEF_CVT_FUNC_F(16s16u, short, ushort, 16s16u_C1Rs)
DEF_CVT_FUNC_F2(32s16u, int, ushort, 32s16u_C1RSfs)
DEF_CVT_FUNC_F2(32f16u, float, ushort, 32f16u_C1RSfs)
DEF_CVT_FUNC(64f16u, double, ushort)
DEF_CVT_FUNC_F(8u16s, uchar, short, 8u16s_C1R)
DEF_CVT_FUNC_F(8s16s, schar, short, 8s16s_C1R)
DEF_CVT_FUNC_F2(16u16s, ushort, short, 16u16s_C1RSfs)
DEF_CVT_FUNC_F2(32s16s, int, short, 32s16s_C1RSfs)
DEF_CVT_FUNC(32f16s, float, short)
DEF_CVT_FUNC(64f16s, double, short)
DEF_CVT_FUNC_F(8u32s, uchar, int, 8u32s_C1R)
DEF_CVT_FUNC_F(8s32s, schar, int, 8s32s_C1R)
DEF_CVT_FUNC_F(16u32s, ushort, int, 16u32s_C1R)
DEF_CVT_FUNC_F(16s32s, short, int, 16s32s_C1R)
DEF_CPY_FUNC(32s, int)
DEF_CVT_FUNC_F2(32f32s, float, int, 32f32s_C1RSfs)
DEF_CVT_FUNC(64f32s, double, int)
DEF_CVT_FUNC_F(8u32f, uchar, float, 8u32f_C1R)
DEF_CVT_FUNC_F(8s32f, schar, float, 8s32f_C1R)
DEF_CVT_FUNC_F(16u32f, ushort, float, 16u32f_C1R)
DEF_CVT_FUNC_F(16s32f, short, float, 16s32f_C1R)
DEF_CVT_FUNC_F(32s32f, int, float, 32s32f_C1R)
DEF_CVT_FUNC(64f32f, double, float)
DEF_CVT_FUNC(8u64f, uchar, double)
DEF_CVT_FUNC(8s64f, schar, double)
DEF_CVT_FUNC(16u64f, ushort, double)
DEF_CVT_FUNC(16s64f, short, double)
DEF_CVT_FUNC(32s64f, int, double)
DEF_CVT_FUNC(32f64f, float, double)
DEF_CPY_FUNC(64s, int64)
*/
BinaryFunc getConvertFunc(int sdepth, int ddepth)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(getConvertFunc, (sdepth, ddepth),
CV_CPU_DISPATCH_MODES_ALL);
}
#ifdef HAVE_OPENCL
static bool ocl_convertFp16( InputArray _src, OutputArray _dst, int sdepth, int ddepth )
{
int type = _src.type(), cn = CV_MAT_CN(type);
_dst.createSameSize( _src, CV_MAKETYPE(ddepth, cn) );
int kercn = 1;
int rowsPerWI = 1;
String build_opt = format("-D HALF_SUPPORT -D srcT=%s -D dstT=%s -D rowsPerWI=%d%s",
sdepth == CV_32F ? "float" : "half",
sdepth == CV_32F ? "half" : "float",
rowsPerWI,
sdepth == CV_32F ? " -D FLOAT_TO_HALF " : "");
ocl::Kernel k("convertFp16", ocl::core::halfconvert_oclsrc, build_opt);
if (k.empty())
return false;
UMat src = _src.getUMat();
UMat dst = _dst.getUMat();
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);
k.args(srcarg, dstarg);
size_t globalsize[2] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
return k.run(2, globalsize, NULL, false);
}
#endif
void Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
{
CV_INSTRUMENT_REGION();
if( empty() )
{
_dst.release();
return;
}
bool noScale = fabs(alpha-1) < DBL_EPSILON && fabs(beta) < DBL_EPSILON;
if( _type < 0 )
_type = _dst.fixedType() ? _dst.type() : type();
else
_type = CV_MAKETYPE(CV_MAT_DEPTH(_type), channels());
int sdepth = depth(), ddepth = CV_MAT_DEPTH(_type);
if( sdepth == ddepth && noScale )
{
copyTo(_dst);
return;
}
Mat src = *this;
if( dims <= 2 )
_dst.create( size(), _type );
else
_dst.create( dims, size, _type );
Mat dst = _dst.getMat();
BinaryFunc func = noScale ? getConvertFunc(sdepth, ddepth) : getConvertScaleFunc(sdepth, ddepth);
double scale[] = {alpha, beta};
int cn = channels();
CV_Assert( func != 0 );
if( dims <= 2 )
{
Size sz = getContinuousSize2D(src, dst, cn);
func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
}
else
{
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2] = {};
NAryMatIterator it(arrays, ptrs);
Size sz((int)(it.size*cn), 1);
for( size_t i = 0; i < it.nplanes; i++, ++it )
func(ptrs[0], 1, 0, 0, ptrs[1], 1, sz, scale);
}
}
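As a quick reminder of the semantics implemented above, `convertTo` computes `saturate_cast<DstT>(src*alpha + beta)` element-wise; a small worked example:

```cpp
cv::Mat m = (cv::Mat_<float>(1, 3) << -1.f, 0.5f, 300.f);
cv::Mat out;
m.convertTo(out, CV_8U, 2.0, 1.0);  // saturate_cast<uchar>(2*x + 1)
// out == [0, 2, 255]: -1 and 601 saturate to the CV_8U range
```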
//==================================================================================================
void convertFp16(InputArray _src, OutputArray _dst)
{
CV_INSTRUMENT_REGION();
int sdepth = _src.depth(), ddepth = 0;
BinaryFunc func = 0;
switch( sdepth )
{
case CV_32F:
if(_dst.fixedType())
{
ddepth = _dst.depth();
CV_Assert(ddepth == CV_16S || ddepth == CV_16F);
CV_Assert(_dst.channels() == _src.channels());
}
else
ddepth = CV_16S;
func = (BinaryFunc)getConvertFunc(CV_32F, CV_16F);
break;
case CV_16S:
case CV_16F:
ddepth = CV_32F;
func = (BinaryFunc)getConvertFunc(CV_16F, CV_32F);
break;
default:
CV_Error(Error::StsUnsupportedFormat, "Unsupported input depth");
return;
}
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_convertFp16(_src, _dst, sdepth, ddepth))
Mat src = _src.getMat();
int type = CV_MAKETYPE(ddepth, src.channels());
_dst.create( src.dims, src.size, type );
Mat dst = _dst.getMat();
int cn = src.channels();
CV_Assert( func != 0 );
if( src.dims <= 2 )
{
Size sz = getContinuousSize2D(src, dst, cn);
func( src.data, src.step, 0, 0, dst.data, dst.step, sz, 0);
}
else
{
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2] = {};
NAryMatIterator it(arrays, ptrs);
Size sz((int)(it.size*cn), 1);
for( size_t i = 0; i < it.nplanes; i++, ++it )
func(ptrs[0], 0, 0, 0, ptrs[1], 0, sz, 0);
}
}
} // namespace cv
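A short usage sketch for the `convertFp16` entry point above; when the destination type is not fixed, the half-precision values are stored in a `CV_16S` matrix:

```cpp
cv::Mat src = cv::Mat::ones(4, 4, CV_32F);
cv::Mat half, restored;
cv::convertFp16(src, half);       // CV_32F -> FP16, stored as CV_16S
cv::convertFp16(half, restored);  // FP16 -> CV_32F
CV_Assert(half.type() == CV_16S && restored.type() == CV_32F);
```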
......@@ -3,15 +3,36 @@
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "convert.hpp"
namespace cv {
namespace hal {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
void cvt16f32f(const float16_t* src, float* dst, int len);
void cvt32f16f(const float* src, float16_t* dst, int len);
void addRNGBias32f(float* arr, const float* scaleBiasPairs, int len);
void addRNGBias64f(double* arr, const double* scaleBiasPairs, int len);
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace cv::hal
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
BinaryFunc getConvertFunc(int sdepth, int ddepth);
CV_CPU_OPTIMIZATION_NAMESPACE_END
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
namespace hal {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
BinaryFunc getConvertFunc(int sdepth, int ddepth);
void cvt16f32f( const float16_t* src, float* dst, int len )
{
CV_INSTRUMENT_REGION();
int j = 0;
#if CV_SIMD
const int VECSZ = v_float32::nlanes;
......@@ -32,6 +53,7 @@ void cvt16f32f( const float16_t* src, float* dst, int len )
void cvt32f16f( const float* src, float16_t* dst, int len )
{
CV_INSTRUMENT_REGION();
int j = 0;
#if CV_SIMD
const int VECSZ = v_float32::nlanes;
......@@ -52,6 +74,7 @@ void cvt32f16f( const float* src, float16_t* dst, int len )
void addRNGBias32f( float* arr, const float* scaleBiasPairs, int len )
{
CV_INSTRUMENT_REGION();
// the loop is simple enough, so we let the compiler vectorize it
for( int i = 0; i < len; i++ )
arr[i] += scaleBiasPairs[i*2 + 1];
......@@ -59,14 +82,19 @@ void addRNGBias32f( float* arr, const float* scaleBiasPairs, int len )
void addRNGBias64f( double* arr, const double* scaleBiasPairs, int len )
{
CV_INSTRUMENT_REGION();
// the loop is simple enough, so we let the compiler vectorize it
for( int i = 0; i < len; i++ )
arr[i] += scaleBiasPairs[i*2 + 1];
}
}
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace cv::hal
// cv::
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
template<typename _Ts, typename _Td, typename _Twvec> inline void
template<typename _Ts, typename _Td, typename _Twvec> static inline void
cvt_( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size )
{
sstep /= sizeof(src[0]);
......@@ -97,7 +125,7 @@ cvt_( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size )
// in order to reduce the code size, for (16f <-> ...) conversions
// we add a conversion function without loop unrolling
template<typename _Ts, typename _Td, typename _Twvec> inline void
template<typename _Ts, typename _Td, typename _Twvec> static inline void
cvt1_( const _Ts* src, size_t sstep, _Td* dst, size_t dstep, Size size )
{
sstep /= sizeof(src[0]);
......@@ -140,7 +168,10 @@ static void cvtCopy( const uchar* src, size_t sstep,
#define DEF_CVT_FUNC(suffix, cvtfunc, _Ts, _Td, _Twvec) \
static void cvt##suffix(const _Ts* src, size_t sstep, uchar*, size_t, \
_Td* dst, size_t dstep, Size size, void*) \
{ cvtfunc<_Ts, _Td, _Twvec>(src, sstep, dst, dstep, size); }
{ \
CV_INSTRUMENT_REGION(); \
cvtfunc<_Ts, _Td, _Twvec>(src, sstep, dst, dstep, size); \
}
////////////////////// 8u -> ... ////////////////////////
......@@ -225,16 +256,16 @@ DEF_CVT_FUNC(16f64f, cvt1_, float16_t, double, v_float32)
///////////// "conversion" w/o conversion ///////////////
static void cvt8u(const uchar* src, size_t sstep, uchar*, size_t, uchar* dst, size_t dstep, Size size, void*)
{ cvtCopy(src, sstep, dst, dstep, size, 1); }
{ CV_INSTRUMENT_REGION(); cvtCopy(src, sstep, dst, dstep, size, 1); }
static void cvt16u(const ushort* src, size_t sstep, uchar*, size_t, ushort* dst, size_t dstep, Size size, void*)
{ cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 2); }
{ CV_INSTRUMENT_REGION(); cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 2); }
static void cvt32s(const int* src, size_t sstep, uchar*, size_t, int* dst, size_t dstep, Size size, void*)
{ cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 4); }
{ CV_INSTRUMENT_REGION(); cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 4); }
static void cvt64s(const int64* src, size_t sstep, uchar*, size_t, int64* dst, size_t dstep, Size size, void*)
{ cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 8); }
{ CV_INSTRUMENT_REGION(); cvtCopy((const uchar*)src, sstep, (uchar*)dst, dstep, size, 8); }
/* [TODO] Recover IPP calls
......@@ -379,148 +410,6 @@ BinaryFunc getConvertFunc(int sdepth, int ddepth)
return cvtTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
}
#ifdef HAVE_OPENCL
static bool ocl_convertFp16( InputArray _src, OutputArray _dst, int sdepth, int ddepth )
{
int type = _src.type(), cn = CV_MAT_CN(type);
_dst.createSameSize( _src, CV_MAKETYPE(ddepth, cn) );
int kercn = 1;
int rowsPerWI = 1;
String build_opt = format("-D HALF_SUPPORT -D srcT=%s -D dstT=%s -D rowsPerWI=%d%s",
sdepth == CV_32F ? "float" : "half",
sdepth == CV_32F ? "half" : "float",
rowsPerWI,
sdepth == CV_32F ? " -D FLOAT_TO_HALF " : "");
ocl::Kernel k("convertFp16", ocl::core::halfconvert_oclsrc, build_opt);
if (k.empty())
return false;
UMat src = _src.getUMat();
UMat dst = _dst.getUMat();
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);
k.args(srcarg, dstarg);
size_t globalsize[2] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
return k.run(2, globalsize, NULL, false);
}
CV_CPU_OPTIMIZATION_NAMESPACE_END
#endif
} // cv::
void cv::Mat::convertTo(OutputArray _dst, int _type, double alpha, double beta) const
{
CV_INSTRUMENT_REGION();
if( empty() )
{
_dst.release();
return;
}
bool noScale = fabs(alpha-1) < DBL_EPSILON && fabs(beta) < DBL_EPSILON;
if( _type < 0 )
_type = _dst.fixedType() ? _dst.type() : type();
else
_type = CV_MAKETYPE(CV_MAT_DEPTH(_type), channels());
int sdepth = depth(), ddepth = CV_MAT_DEPTH(_type);
if( sdepth == ddepth && noScale )
{
copyTo(_dst);
return;
}
Mat src = *this;
if( dims <= 2 )
_dst.create( size(), _type );
else
_dst.create( dims, size, _type );
Mat dst = _dst.getMat();
BinaryFunc func = noScale ? getConvertFunc(sdepth, ddepth) : getConvertScaleFunc(sdepth, ddepth);
double scale[] = {alpha, beta};
int cn = channels();
CV_Assert( func != 0 );
if( dims <= 2 )
{
Size sz = getContinuousSize2D(src, dst, cn);
func( src.data, src.step, 0, 0, dst.data, dst.step, sz, scale );
}
else
{
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2] = {};
NAryMatIterator it(arrays, ptrs);
Size sz((int)(it.size*cn), 1);
for( size_t i = 0; i < it.nplanes; i++, ++it )
func(ptrs[0], 1, 0, 0, ptrs[1], 1, sz, scale);
}
}
//==================================================================================================
void cv::convertFp16( InputArray _src, OutputArray _dst )
{
CV_INSTRUMENT_REGION();
int sdepth = _src.depth(), ddepth = 0;
BinaryFunc func = 0;
switch( sdepth )
{
case CV_32F:
if(_dst.fixedType())
{
ddepth = _dst.depth();
CV_Assert(ddepth == CV_16S || ddepth == CV_16F);
CV_Assert(_dst.channels() == _src.channels());
}
else
ddepth = CV_16S;
func = (BinaryFunc)cvt32f16f;
break;
case CV_16S:
case CV_16F:
ddepth = CV_32F;
func = (BinaryFunc)cvt16f32f;
break;
default:
CV_Error(Error::StsUnsupportedFormat, "Unsupported input depth");
return;
}
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_convertFp16(_src, _dst, sdepth, ddepth))
Mat src = _src.getMat();
int type = CV_MAKETYPE(ddepth, src.channels());
_dst.create( src.dims, src.size, type );
Mat dst = _dst.getMat();
int cn = src.channels();
CV_Assert( func != 0 );
if( src.dims <= 2 )
{
Size sz = getContinuousSize2D(src, dst, cn);
func( src.data, src.step, 0, 0, dst.data, dst.step, sz, 0);
}
else
{
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2] = {};
NAryMatIterator it(arrays, ptrs);
Size sz((int)(it.size*cn), 1);
for( size_t i = 0; i < it.nplanes; i++, ++it )
func(ptrs[0], 0, 0, 0, ptrs[1], 0, sz, 0);
}
}
} // namespace
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "convert_scale.simd.hpp"
#include "convert_scale.simd_declarations.hpp" // defines CV_CPU_DISPATCH_MODES_ALL=AVX2,...,BASELINE based on CMakeLists.txt content
namespace cv
{
static BinaryFunc getCvtScaleAbsFunc(int depth)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(getCvtScaleAbsFunc, (depth),
CV_CPU_DISPATCH_MODES_ALL);
}
BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
{
CV_INSTRUMENT_REGION();
CV_CPU_DISPATCH(getConvertScaleFunc, (sdepth, ddepth),
CV_CPU_DISPATCH_MODES_ALL);
}
#ifdef HAVE_OPENCL
static bool ocl_convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
const ocl::Device & d = ocl::Device::getDefault();
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
bool doubleSupport = d.doubleFPConfig() > 0;
if (!doubleSupport && depth == CV_64F)
return false;
_dst.create(_src.size(), CV_8UC(cn));
int kercn = 1;
if (d.isIntel())
{
static const int vectorWidths[] = {4, 4, 4, 4, 4, 4, 4, -1};
kercn = ocl::checkOptimalVectorWidth( vectorWidths, _src, _dst,
noArray(), noArray(), noArray(),
noArray(), noArray(), noArray(),
noArray(), ocl::OCL_VECTOR_MAX);
}
else
kercn = ocl::predictOptimalVectorWidthMax(_src, _dst);
int rowsPerWI = d.isIntel() ? 4 : 1;
char cvt[2][50];
int wdepth = std::max(depth, CV_32F);
String build_opt = format("-D OP_CONVERT_SCALE_ABS -D UNARY_OP -D dstT=%s -D DEPTH_dst=%d -D srcT1=%s"
" -D workT=%s -D wdepth=%d -D convertToWT1=%s -D convertToDT=%s"
" -D workT1=%s -D rowsPerWI=%d%s",
ocl::typeToStr(CV_8UC(kercn)), CV_8U,
ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)), wdepth,
ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
ocl::convertTypeStr(wdepth, CV_8U, kercn, cvt[1]),
ocl::typeToStr(wdepth), rowsPerWI,
doubleSupport ? " -D DOUBLE_SUPPORT" : "");
ocl::Kernel k("KF", ocl::core::arithm_oclsrc, build_opt);
if (k.empty())
return false;
UMat src = _src.getUMat();
UMat dst = _dst.getUMat();
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);
if (wdepth == CV_32F)
k.args(srcarg, dstarg, (float)alpha, (float)beta);
else if (wdepth == CV_64F)
k.args(srcarg, dstarg, alpha, beta);
size_t globalsize[2] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
return k.run(2, globalsize, NULL, false);
}
#endif
void convertScaleAbs(InputArray _src, OutputArray _dst, double alpha, double beta)
{
CV_INSTRUMENT_REGION();
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_convertScaleAbs(_src, _dst, alpha, beta))
Mat src = _src.getMat();
int cn = src.channels();
double scale[] = {alpha, beta};
_dst.create( src.dims, src.size, CV_8UC(cn) );
Mat dst = _dst.getMat();
BinaryFunc func = getCvtScaleAbsFunc(src.depth());
CV_Assert( func != 0 );
if( src.dims <= 2 )
{
Size sz = getContinuousSize2D(src, dst, cn);
func( src.ptr(), src.step, 0, 0, dst.ptr(), dst.step, sz, scale );
}
else
{
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2] = {};
NAryMatIterator it(arrays, ptrs);
Size sz((int)it.size*cn, 1);
for( size_t i = 0; i < it.nplanes; i++, ++it )
func( ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale );
}
}
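`convertScaleAbs` computes `saturate_cast<uchar>(|src*alpha + beta|)` per element; a typical use is making a signed gradient displayable (a sketch, assuming the imgproc module is available):

```cpp
#include <opencv2/imgproc.hpp>

cv::Mat img(100, 100, CV_8U);
cv::randu(img, 0, 255);
cv::Mat grad, grad8u;
cv::Sobel(img, grad, CV_32F, 1, 0);  // signed, floating-point gradient
cv::convertScaleAbs(grad, grad8u);   // |grad| saturated to CV_8U for display
```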
//==================================================================================================
#ifdef HAVE_OPENCL
static bool ocl_normalize( InputArray _src, InputOutputArray _dst, InputArray _mask, int dtype,
double scale, double delta )
{
UMat src = _src.getUMat();
if( _mask.empty() )
src.convertTo( _dst, dtype, scale, delta );
else if (src.channels() <= 4)
{
const ocl::Device & dev = ocl::Device::getDefault();
int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype),
ddepth = CV_MAT_DEPTH(dtype), wdepth = std::max(CV_32F, std::max(sdepth, ddepth)),
rowsPerWI = dev.isIntel() ? 4 : 1;
float fscale = static_cast<float>(scale), fdelta = static_cast<float>(delta);
bool haveScale = std::fabs(scale - 1) > DBL_EPSILON,
haveZeroScale = !(std::fabs(scale) > DBL_EPSILON),
haveDelta = std::fabs(delta) > DBL_EPSILON,
doubleSupport = dev.doubleFPConfig() > 0;
if (!haveScale && !haveDelta && stype == dtype)
{
_src.copyTo(_dst, _mask);
return true;
}
if (haveZeroScale)
{
_dst.setTo(Scalar(delta), _mask);
return true;
}
if ((sdepth == CV_64F || ddepth == CV_64F) && !doubleSupport)
return false;
char cvt[2][40];
String opts = format("-D srcT=%s -D dstT=%s -D convertToWT=%s -D cn=%d -D rowsPerWI=%d"
" -D convertToDT=%s -D workT=%s%s%s%s -D srcT1=%s -D dstT1=%s",
ocl::typeToStr(stype), ocl::typeToStr(dtype),
ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]), cn,
rowsPerWI, ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
doubleSupport ? " -D DOUBLE_SUPPORT" : "",
haveScale ? " -D HAVE_SCALE" : "",
haveDelta ? " -D HAVE_DELTA" : "",
ocl::typeToStr(sdepth), ocl::typeToStr(ddepth));
ocl::Kernel k("normalizek", ocl::core::normalize_oclsrc, opts);
if (k.empty())
return false;
UMat mask = _mask.getUMat(), dst = _dst.getUMat();
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
maskarg = ocl::KernelArg::ReadOnlyNoSize(mask),
dstarg = ocl::KernelArg::ReadWrite(dst);
if (haveScale)
{
if (haveDelta)
k.args(srcarg, maskarg, dstarg, fscale, fdelta);
else
k.args(srcarg, maskarg, dstarg, fscale);
}
else
{
if (haveDelta)
k.args(srcarg, maskarg, dstarg, fdelta);
else
k.args(srcarg, maskarg, dstarg);
}
size_t globalsize[2] = { (size_t)src.cols, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
return k.run(2, globalsize, NULL, false);
}
else
{
UMat temp;
src.convertTo( temp, dtype, scale, delta );
temp.copyTo( _dst, _mask );
}
return true;
}
#endif
void normalize(InputArray _src, InputOutputArray _dst, double a, double b,
int norm_type, int rtype, InputArray _mask)
{
CV_INSTRUMENT_REGION();
double scale = 1, shift = 0;
int type = _src.type(), depth = CV_MAT_DEPTH(type);
if( rtype < 0 )
rtype = _dst.fixedType() ? _dst.depth() : depth;
if( norm_type == CV_MINMAX )
{
double smin = 0, smax = 0;
double dmin = MIN( a, b ), dmax = MAX( a, b );
minMaxIdx( _src, &smin, &smax, 0, 0, _mask );
scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
if( rtype == CV_32F )
{
scale = (float)scale;
shift = (float)dmin - (float)(smin*scale);
}
else
shift = dmin - smin*scale;
}
else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
{
scale = norm( _src, norm_type, _mask );
scale = scale > DBL_EPSILON ? a/scale : 0.;
shift = 0;
}
else
CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
CV_OCL_RUN(_dst.isUMat(),
ocl_normalize(_src, _dst, _mask, rtype, scale, shift))
Mat src = _src.getMat();
if( _mask.empty() )
src.convertTo( _dst, rtype, scale, shift );
else
{
Mat temp;
src.convertTo( temp, rtype, scale, shift );
temp.copyTo( _dst, _mask );
}
}
} // namespace
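For reference, a minimal call exercising the `CV_MINMAX` branch of `normalize` above, mapping the source range onto [0, 255]:

```cpp
cv::Mat src = (cv::Mat_<float>(1, 4) << 2.f, 4.f, 6.f, 8.f);
cv::Mat dst;
cv::normalize(src, dst, 0, 255, cv::NORM_MINMAX, CV_8U);
// scale = 255/(8-2), shift = -2*scale; dst == [0, 85, 170, 255]
```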
......@@ -4,16 +4,20 @@
#include "precomp.hpp"
#include "opencl_kernels_core.hpp"
#include "convert.hpp"
namespace cv {
CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
BinaryFunc getCvtScaleAbsFunc(int depth);
BinaryFunc getConvertScaleFunc(int sdepth, int ddepth);
#ifndef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
/****************************************************************************************\
* convertScale[Abs] *
\****************************************************************************************/
namespace cv
{
template<typename _Ts, typename _Td> inline void
cvtabs_32f( const _Ts* src, size_t sstep, _Td* dst, size_t dstep,
Size size, float a, float b )
......@@ -287,7 +291,7 @@ DEF_CVT_SCALE_FUNC(32f16f, cvt1_32f, float, float16_t, float)
DEF_CVT_SCALE_FUNC(64f16f, cvt_64f, double, float16_t, double)
DEF_CVT_SCALE_FUNC(16f, cvt1_32f, float16_t, float16_t, float)
static BinaryFunc getCvtScaleAbsFunc(int depth)
BinaryFunc getCvtScaleAbsFunc(int depth)
{
static BinaryFunc cvtScaleAbsTab[] =
{
......@@ -348,238 +352,7 @@ BinaryFunc getConvertScaleFunc(int sdepth, int ddepth)
return cvtScaleTab[CV_MAT_DEPTH(ddepth)][CV_MAT_DEPTH(sdepth)];
}
#ifdef HAVE_OPENCL
static bool ocl_convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
const ocl::Device & d = ocl::Device::getDefault();
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
bool doubleSupport = d.doubleFPConfig() > 0;
if (!doubleSupport && depth == CV_64F)
return false;
_dst.create(_src.size(), CV_8UC(cn));
int kercn = 1;
if (d.isIntel())
{
static const int vectorWidths[] = {4, 4, 4, 4, 4, 4, 4, -1};
kercn = ocl::checkOptimalVectorWidth( vectorWidths, _src, _dst,
noArray(), noArray(), noArray(),
noArray(), noArray(), noArray(),
noArray(), ocl::OCL_VECTOR_MAX);
}
else
kercn = ocl::predictOptimalVectorWidthMax(_src, _dst);
int rowsPerWI = d.isIntel() ? 4 : 1;
char cvt[2][50];
int wdepth = std::max(depth, CV_32F);
String build_opt = format("-D OP_CONVERT_SCALE_ABS -D UNARY_OP -D dstT=%s -D DEPTH_dst=%d -D srcT1=%s"
" -D workT=%s -D wdepth=%d -D convertToWT1=%s -D convertToDT=%s"
" -D workT1=%s -D rowsPerWI=%d%s",
ocl::typeToStr(CV_8UC(kercn)), CV_8U,
ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)), wdepth,
ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
ocl::convertTypeStr(wdepth, CV_8U, kercn, cvt[1]),
ocl::typeToStr(wdepth), rowsPerWI,
doubleSupport ? " -D DOUBLE_SUPPORT" : "");
ocl::Kernel k("KF", ocl::core::arithm_oclsrc, build_opt);
if (k.empty())
return false;
UMat src = _src.getUMat();
UMat dst = _dst.getUMat();
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);
if (wdepth == CV_32F)
k.args(srcarg, dstarg, (float)alpha, (float)beta);
else if (wdepth == CV_64F)
k.args(srcarg, dstarg, alpha, beta);
size_t globalsize[2] = { (size_t)src.cols * cn / kercn, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
return k.run(2, globalsize, NULL, false);
}
#endif
} //cv::
void cv::convertScaleAbs( InputArray _src, OutputArray _dst, double alpha, double beta )
{
CV_INSTRUMENT_REGION();
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_convertScaleAbs(_src, _dst, alpha, beta))
Mat src = _src.getMat();
int cn = src.channels();
double scale[] = {alpha, beta};
_dst.create( src.dims, src.size, CV_8UC(cn) );
Mat dst = _dst.getMat();
BinaryFunc func = getCvtScaleAbsFunc(src.depth());
CV_Assert( func != 0 );
if( src.dims <= 2 )
{
Size sz = getContinuousSize2D(src, dst, cn);
func( src.ptr(), src.step, 0, 0, dst.ptr(), dst.step, sz, scale );
}
else
{
const Mat* arrays[] = {&src, &dst, 0};
uchar* ptrs[2] = {};
NAryMatIterator it(arrays, ptrs);
Size sz((int)it.size*cn, 1);
for( size_t i = 0; i < it.nplanes; i++, ++it )
func( ptrs[0], 0, 0, 0, ptrs[1], 0, sz, scale );
}
}
//==================================================================================================
namespace cv {
#ifdef HAVE_OPENCL
static bool ocl_normalize( InputArray _src, InputOutputArray _dst, InputArray _mask, int dtype,
double scale, double delta )
{
UMat src = _src.getUMat();
if( _mask.empty() )
src.convertTo( _dst, dtype, scale, delta );
else if (src.channels() <= 4)
{
const ocl::Device & dev = ocl::Device::getDefault();
int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype),
ddepth = CV_MAT_DEPTH(dtype), wdepth = std::max(CV_32F, std::max(sdepth, ddepth)),
rowsPerWI = dev.isIntel() ? 4 : 1;
float fscale = static_cast<float>(scale), fdelta = static_cast<float>(delta);
bool haveScale = std::fabs(scale - 1) > DBL_EPSILON,
haveZeroScale = !(std::fabs(scale) > DBL_EPSILON),
haveDelta = std::fabs(delta) > DBL_EPSILON,
doubleSupport = dev.doubleFPConfig() > 0;
if (!haveScale && !haveDelta && stype == dtype)
{
_src.copyTo(_dst, _mask);
return true;
}
if (haveZeroScale)
{
_dst.setTo(Scalar(delta), _mask);
return true;
}
if ((sdepth == CV_64F || ddepth == CV_64F) && !doubleSupport)
return false;
char cvt[2][40];
String opts = format("-D srcT=%s -D dstT=%s -D convertToWT=%s -D cn=%d -D rowsPerWI=%d"
" -D convertToDT=%s -D workT=%s%s%s%s -D srcT1=%s -D dstT1=%s",
ocl::typeToStr(stype), ocl::typeToStr(dtype),
ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]), cn,
rowsPerWI, ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
doubleSupport ? " -D DOUBLE_SUPPORT" : "",
haveScale ? " -D HAVE_SCALE" : "",
haveDelta ? " -D HAVE_DELTA" : "",
ocl::typeToStr(sdepth), ocl::typeToStr(ddepth));
ocl::Kernel k("normalizek", ocl::core::normalize_oclsrc, opts);
if (k.empty())
return false;
UMat mask = _mask.getUMat(), dst = _dst.getUMat();
ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
maskarg = ocl::KernelArg::ReadOnlyNoSize(mask),
dstarg = ocl::KernelArg::ReadWrite(dst);
if (haveScale)
{
if (haveDelta)
k.args(srcarg, maskarg, dstarg, fscale, fdelta);
else
k.args(srcarg, maskarg, dstarg, fscale);
}
else
{
if (haveDelta)
k.args(srcarg, maskarg, dstarg, fdelta);
else
k.args(srcarg, maskarg, dstarg);
}
size_t globalsize[2] = { (size_t)src.cols, ((size_t)src.rows + rowsPerWI - 1) / rowsPerWI };
return k.run(2, globalsize, NULL, false);
}
else
{
UMat temp;
src.convertTo( temp, dtype, scale, delta );
temp.copyTo( _dst, _mask );
}
return true;
}
#endif
} // cv::
void cv::normalize( InputArray _src, InputOutputArray _dst, double a, double b,
int norm_type, int rtype, InputArray _mask )
{
CV_INSTRUMENT_REGION();
double scale = 1, shift = 0;
int type = _src.type(), depth = CV_MAT_DEPTH(type);
if( rtype < 0 )
rtype = _dst.fixedType() ? _dst.depth() : depth;
if( norm_type == CV_MINMAX )
{
double smin = 0, smax = 0;
double dmin = MIN( a, b ), dmax = MAX( a, b );
minMaxIdx( _src, &smin, &smax, 0, 0, _mask );
scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
if( rtype == CV_32F )
{
scale = (float)scale;
shift = (float)dmin - (float)(smin*scale);
}
else
shift = dmin - smin*scale;
}
else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
{
scale = norm( _src, norm_type, _mask );
scale = scale > DBL_EPSILON ? a/scale : 0.;
shift = 0;
}
else
CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
CV_OCL_RUN(_dst.isUMat(),
ocl_normalize(_src, _dst, _mask, rtype, scale, shift))
Mat src = _src.getMat();
if( _mask.empty() )
src.convertTo( _dst, rtype, scale, shift );
else
{
Mat temp;
src.convertTo( temp, rtype, scale, shift );
temp.copyTo( _dst, _mask );
}
}
CV_CPU_OPTIMIZATION_NAMESPACE_END
} // namespace
......@@ -260,14 +260,23 @@ public:
}
else
{
// Half precision floats.
CV_Assert(pbBlob.raw_data_type() == caffe::FLOAT16);
std::string raw_data = pbBlob.raw_data();
CV_Assert(raw_data.size() / 2 == (int)dstBlob.total());
CV_Assert(pbBlob.has_raw_data());
const std::string& raw_data = pbBlob.raw_data();
if (pbBlob.raw_data_type() == caffe::FLOAT16)
{
// Half precision floats.
CV_Assert(raw_data.size() / 2 == (int)dstBlob.total());
Mat halfs((int)shape.size(), &shape[0], CV_16SC1, (void*)raw_data.c_str());
convertFp16(halfs, dstBlob);
Mat halfs((int)shape.size(), &shape[0], CV_16SC1, (void*)raw_data.c_str());
convertFp16(halfs, dstBlob);
}
else if (pbBlob.raw_data_type() == caffe::FLOAT)
{
CV_Assert(raw_data.size() / 4 == (int)dstBlob.total());
Mat((int)shape.size(), &shape[0], CV_32FC1, (void*)raw_data.c_str()).copyTo(dstBlob);
}
else
CV_Error(Error::StsNotImplemented, "Unexpected blob data type");
}
}
......
......@@ -1700,6 +1700,27 @@ struct Net::Impl
preferableTarget == DNN_TARGET_MYRIAD ||
preferableTarget == DNN_TARGET_FPGA) && !fused)
{
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
bool hasWeights = false;
for (const std::string& name : {"weights", "biases"})
{
auto it = ieNode->layer.getParameters().find(name);
if (it != ieNode->layer.getParameters().end())
{
InferenceEngine::Blob::CPtr bp = it->second.as<InferenceEngine::Blob::CPtr>();
it->second = (InferenceEngine::Blob::CPtr)convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
hasWeights = true;
}
}
if (!hasWeights)
{
InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<int16_t>(
InferenceEngine::Precision::FP16,
InferenceEngine::Layout::C, {1});
blob->allocate();
ieNode->layer.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)blob;
}
#else
auto& blobs = ieNode->layer.getConstantData();
if (blobs.empty())
{
......@@ -1716,6 +1737,7 @@ struct Net::Impl
for (auto& it : blobs)
it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
}
#endif
}
if (!fused)
......@@ -1787,7 +1809,7 @@ struct Net::Impl
if (!ieNode->net->isInitialized())
{
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
#if INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R4)
// For networks which are built at runtime we need to specify a
// version of their hyperparameters.
std::string versionTrigger = "<net name=\"TestInput\" version=\"3\" batch=\"1\">"
......
......@@ -276,23 +276,29 @@ public:
InferenceEngine::Builder::Layer l = ieLayer;
const int numChannels = input->dims[2]; // NOTE: input->dims are reversed (whcn)
InferenceEngine::Blob::Ptr weights;
if (blobs.empty())
{
auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
InferenceEngine::Layout::C,
{(size_t)numChannels});
weights->allocate();
auto onesBlob = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
InferenceEngine::Layout::C,
{(size_t)numChannels});
onesBlob->allocate();
std::vector<float> ones(numChannels, 1);
weights->set(ones);
l.addConstantData("weights", weights);
onesBlob->set(ones);
weights = onesBlob;
l.getParameters()["channel_shared"] = false;
}
else
{
CV_Assert(numChannels == blobs[0].total());
l.addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C));
weights = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
l.getParameters()["channel_shared"] = blobs[0].total() == 1;
}
#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
l.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)weights;
#else
l.addConstantData("weights", weights);
#endif
l.getParameters()["across_spatial"] = acrossSpatial;
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
......
......@@ -173,7 +173,7 @@ public:
ieLayer.getParameters()["antialias"] = false;
if (scaleWidth != scaleHeight)
CV_Error(Error::StsNotImplemented, "resample with sw != sh");
ieLayer.getParameters()["factor"] = 1.0 / scaleWidth;
ieLayer.getParameters()["factor"] = 1.0f / scaleWidth;
}
else if (interpolation == "bilinear")
{
......
......@@ -766,7 +766,7 @@ void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArra
CV_Error(Error::StsInternal, "Choose Inference Engine as a preferable backend.");
}
InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
{
auto halfs = InferenceEngine::make_shared_blob<int16_t>(InferenceEngine::Precision::FP16, blob->layout(), blob->dims());
halfs->allocate();
......
......@@ -36,6 +36,7 @@
#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
#include <ie_builders.hpp>
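A worked example of the new comparison macro, assuming the usual release encoding (`INF_ENGINE_RELEASE_2018R4 == 2018040000`, `INF_ENGINE_RELEASE_2018R5 == 2018050000`):

```cpp
// With INF_ENGINE_RELEASE == 2018040000 (2018 R4):
// INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2018R4)
//   -> (2018040000 / 10000) == (2018040000 / 10000) -> 201804 == 201804 -> true
// INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
//   -> 201804 > 201805 -> false
```

This is why the weights/biases FP16 conversion block above is guarded for releases strictly newer than 2018 R5, while the `versionTrigger` workaround is now limited to exactly 2018 R4.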
......@@ -252,7 +253,7 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::TBlob<int16_t>::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
// This is a fake class to run networks from Model Optimizer. Objects of that
// class simulate responses of layers that are imported by OpenCV and supported by
......
......@@ -694,6 +694,11 @@ TEST_P(Eltwise, Accuracy)
Backend backendId = get<0>(get<4>(GetParam()));
Target targetId = get<1>(get<4>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018050000
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_OPENCL)
throw SkipTestException("");
#endif
Net net;
std::vector<int> convLayerIds(numConv);
......
......@@ -65,7 +65,7 @@ def bootstrap():
if DEBUG: print('OpenCV loader: BINARIES_PATHS={}'.format(str(l_vars['BINARIES_PATHS'])))
for p in reversed(l_vars['PYTHON_EXTENSIONS_PATHS']):
sys.path.insert(0, p)
sys.path.insert(1, p)
if os.name == 'nt':
os.environ['PATH'] = ';'.join(l_vars['BINARIES_PATHS']) + ';' + os.environ.get('PATH', '')
......