Commit a7983866 authored by Vadim Pisarevsky

Merge pull request #3326 from ilya-lavrenov:neon_canny

parents 1edefac4 af04a853
@@ -2804,7 +2804,8 @@ dotProd_(const T* src1, const T* src2, int len)
{
int i = 0;
double result = 0;
#if CV_ENABLE_UNROLLED
#if CV_ENABLE_UNROLLED
for( ; i <= len - 4; i += 4 )
result += (double)src1[i]*src2[i] + (double)src1[i+1]*src2[i+1] +
(double)src1[i+2]*src2[i+2] + (double)src1[i+3]*src2[i+3];
@@ -2833,10 +2834,12 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
{
int j, len0 = len & -4, blockSize0 = (1 << 13), blockSize;
__m128i z = _mm_setzero_si128();
CV_DECL_ALIGNED(16) int buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
__m128i s = _mm_setzero_si128();
__m128i s = z;
j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
@@ -2860,7 +2863,7 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
s0 = _mm_madd_epi16(s0, s1);
s = _mm_add_epi32(s, s0);
}
CV_DECL_ALIGNED(16) int buf[4];
_mm_store_si128((__m128i*)buf, s);
r += buf[0] + buf[1] + buf[2] + buf[3];
@@ -2869,6 +2872,45 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
i += blockSize;
}
}
#elif CV_NEON
int len0 = len & -8, blockSize0 = (1 << 15), blockSize;
uint32x4_t v_zero = vdupq_n_u32(0u);
CV_DECL_ALIGNED(16) uint buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
uint32x4_t v_sum = v_zero;
int j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
uint8x16_t v_src1 = vld1q_u8(src1 + j), v_src2 = vld1q_u8(src2 + j);
uint16x8_t v_src10 = vmovl_u8(vget_low_u8(v_src1)), v_src20 = vmovl_u8(vget_low_u8(v_src2));
v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));
v_src10 = vmovl_u8(vget_high_u8(v_src1));
v_src20 = vmovl_u8(vget_high_u8(v_src2));
v_sum = vmlal_u16(v_sum, vget_low_u16(v_src10), vget_low_u16(v_src20));
v_sum = vmlal_u16(v_sum, vget_high_u16(v_src10), vget_high_u16(v_src20));
}
for( ; j <= blockSize - 8; j += 8 )
{
uint16x8_t v_src1 = vmovl_u8(vld1_u8(src1 + j)), v_src2 = vmovl_u8(vld1_u8(src2 + j));
v_sum = vmlal_u16(v_sum, vget_low_u16(v_src1), vget_low_u16(v_src2));
v_sum = vmlal_u16(v_sum, vget_high_u16(v_src1), vget_high_u16(v_src2));
}
vst1q_u32(buf, v_sum);
r += buf[0] + buf[1] + buf[2] + buf[3];
src1 += blockSize;
src2 += blockSize;
i += blockSize;
}
#endif
return r + dotProd_(src1, src2, len - i);
}
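// Illustrative aside, not part of this patch: a scalar sketch of the blocked
// accumulation used by dotProd_8u above. The function name is hypothetical.
// Products of 8-bit values are summed in a 32-bit integer for at most 2^15
// elements per block (32768 * 255 * 255 < 2^32), then flushed into the double.
static double dotProd_8u_reference(const unsigned char* a, const unsigned char* b, int len)
{
    double r = 0.0;
    const int blockSize0 = 1 << 15;              // same block bound as the NEON path above
    for (int i = 0; i < len; )
    {
        int blockSize = len - i < blockSize0 ? len - i : blockSize0;
        unsigned int s = 0;                      // cannot overflow within one block
        for (int j = 0; j < blockSize; ++j)
            s += (unsigned int)a[i + j] * b[i + j];
        r += s;                                  // flush the block sum into the double accumulator
        i += blockSize;
    }
    return r;
}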
@@ -2876,7 +2918,51 @@ static double dotProd_8u(const uchar* src1, const uchar* src2, int len)
static double dotProd_8s(const schar* src1, const schar* src2, int len)
{
return dotProd_(src1, src2, len);
int i = 0;
double r = 0.0;
#if CV_NEON
int len0 = len & -8, blockSize0 = (1 << 14), blockSize;
int32x4_t v_zero = vdupq_n_s32(0);
CV_DECL_ALIGNED(16) int buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
int32x4_t v_sum = v_zero;
int j = 0;
for( ; j <= blockSize - 16; j += 16 )
{
int8x16_t v_src1 = vld1q_s8(src1 + j), v_src2 = vld1q_s8(src2 + j);
int16x8_t v_src10 = vmovl_s8(vget_low_s8(v_src1)), v_src20 = vmovl_s8(vget_low_s8(v_src2));
v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));
v_src10 = vmovl_s8(vget_high_s8(v_src1));
v_src20 = vmovl_s8(vget_high_s8(v_src2));
v_sum = vmlal_s16(v_sum, vget_low_s16(v_src10), vget_low_s16(v_src20));
v_sum = vmlal_s16(v_sum, vget_high_s16(v_src10), vget_high_s16(v_src20));
}
for( ; j <= blockSize - 8; j += 8 )
{
int16x8_t v_src1 = vmovl_s8(vld1_s8(src1 + j)), v_src2 = vmovl_s8(vld1_s8(src2 + j));
v_sum = vmlal_s16(v_sum, vget_low_s16(v_src1), vget_low_s16(v_src2));
v_sum = vmlal_s16(v_sum, vget_high_s16(v_src1), vget_high_s16(v_src2));
}
vst1q_s32(buf, v_sum);
r += buf[0] + buf[1] + buf[2] + buf[3];
src1 += blockSize;
src2 += blockSize;
i += blockSize;
}
#endif
return r + dotProd_(src1, src2, len - i);
}
static double dotProd_16u(const ushort* src1, const ushort* src2, int len)
@@ -2914,13 +3000,36 @@ static double dotProd_32s(const int* src1, const int* src2, int len)
static double dotProd_32f(const float* src1, const float* src2, int len)
{
double r = 0.0;
int i = 0;
#if (ARITHM_USE_IPP == 1)
double r = 0;
if (0 <= ippsDotProd_32f64f(src1, src2, len, &r))
return r;
setIppErrorStatus();
#elif CV_NEON
int len0 = len & -4, blockSize0 = (1 << 13), blockSize;
float32x4_t v_zero = vdupq_n_f32(0.0f);
CV_DECL_ALIGNED(16) float buf[4];
while( i < len0 )
{
blockSize = std::min(len0 - i, blockSize0);
float32x4_t v_sum = v_zero;
int j = 0;
for( ; j <= blockSize - 4; j += 4 )
v_sum = vmlaq_f32(v_sum, vld1q_f32(src1 + j), vld1q_f32(src2 + j));
vst1q_f32(buf, v_sum);
r += buf[0] + buf[1] + buf[2] + buf[3];
src1 += blockSize;
src2 += blockSize;
i += blockSize;
}
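// (aside, not in the patch) partial sums are kept in a float32x4 only for blocks
// of at most 2^13 elements and then flushed into the double r, presumably so that
// single-precision rounding error does not accumulate across the whole array.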
#endif
return dotProd_(src1, src2, len);
return r + dotProd_(src1, src2, len - i);
}
static double dotProd_64f(const double* src1, const double* src2, int len)
This diff is collapsed.
@@ -98,6 +98,11 @@ PERF_TEST_P(Size_MatType_BorderType, blur16x16,
Size size = get<0>(GetParam());
int type = get<1>(GetParam());
BorderType btype = get<2>(GetParam());
double eps = 1e-3;
#if CV_NEON
eps = CV_MAT_DEPTH(type) <= CV_32S ? 1 : eps;
#endif
Mat src(size, type);
Mat dst(size, type);
@@ -106,7 +111,7 @@ PERF_TEST_P(Size_MatType_BorderType, blur16x16,
TEST_CYCLE() blur(src, dst, Size(16,16), Point(-1,-1), btype);
SANITY_CHECK(dst, 1e-3);
SANITY_CHECK(dst, eps);
}
PERF_TEST_P(Size_MatType_BorderType3x3, box3x3,
This diff is collapsed.
@@ -361,6 +361,15 @@ void cv::Canny( InputArray _src, OutputArray _dst,
_mm_storeu_si128((__m128i *)(_norm + j + 4), v_norm);
}
}
#elif CV_NEON
for ( ; j <= width - 8; j += 8)
{
int16x8_t v_dx = vld1q_s16(_dx + j), v_dy = vld1q_s16(_dy + j);
vst1q_s32(_norm + j, vaddq_s32(vabsq_s32(vmovl_s16(vget_low_s16(v_dx))),
vabsq_s32(vmovl_s16(vget_low_s16(v_dy)))));
vst1q_s32(_norm + j + 4, vaddq_s32(vabsq_s32(vmovl_s16(vget_high_s16(v_dx))),
vabsq_s32(vmovl_s16(vget_high_s16(v_dy)))));
}
#endif
for ( ; j < width; ++j)
_norm[j] = std::abs(int(_dx[j])) + std::abs(int(_dy[j]));
@@ -386,6 +395,18 @@ void cv::Canny( InputArray _src, OutputArray _dst,
_mm_storeu_si128((__m128i *)(_norm + j + 4), v_norm);
}
}
#elif CV_NEON
for ( ; j <= width - 8; j += 8)
{
int16x8_t v_dx = vld1q_s16(_dx + j), v_dy = vld1q_s16(_dy + j);
int16x4_t v_dxp = vget_low_s16(v_dx), v_dyp = vget_low_s16(v_dy);
int32x4_t v_dst = vmlal_s16(vmull_s16(v_dxp, v_dxp), v_dyp, v_dyp);
vst1q_s32(_norm + j, v_dst);
v_dxp = vget_high_s16(v_dx), v_dyp = vget_high_s16(v_dy);
v_dst = vmlal_s16(vmull_s16(v_dxp, v_dxp), v_dyp, v_dyp);
vst1q_s32(_norm + j + 4, v_dst);
}
#endif
for ( ; j < width; ++j)
_norm[j] = int(_dx[j])*_dx[j] + int(_dy[j])*_dy[j];
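// (aside, not in the patch) the two vectorized loops above compute the L1 and the
// squared L2 gradient magnitude respectively: |dx| + |dy| versus dx*dx + dy*dy.
// For example, dx = 3, dy = -4 gives 7 in the L1 branch and 25 in the L2 branch.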
@@ -233,6 +233,31 @@ namespace
CLAHE_Interpolation_Body(const cv::Mat& src, const cv::Mat& dst, const cv::Mat& lut, const cv::Size& tileSize, const int& tilesX, const int& tilesY) :
src_(src), dst_(dst), lut_(lut), tileSize_(tileSize), tilesX_(tilesX), tilesY_(tilesY)
{
buf.allocate(src.cols << 2);
ind1_p = (int *)buf;
ind2_p = ind1_p + src.cols;
xa_p = (float *)(ind2_p + src.cols);
xa1_p = xa_p + src.cols;
int lut_step = static_cast<int>(lut_.step / sizeof(T));
float inv_tw = 1.0f / tileSize_.width;
for (int x = 0; x < src.cols; ++x)
{
float txf = x * inv_tw - 0.5f;
int tx1 = cvFloor(txf);
int tx2 = tx1 + 1;
xa_p[x] = txf - tx1;
xa1_p[x] = 1.0f - xa_p[x];
tx1 = std::max(tx1, 0);
tx2 = std::min(tx2, tilesX_ - 1);
ind1_p[x] = tx1 * lut_step;
ind2_p[x] = tx2 * lut_step;
}
}
void operator ()(const cv::Range& range) const;
@@ -245,24 +270,28 @@ namespace
cv::Size tileSize_;
int tilesX_;
int tilesY_;
cv::AutoBuffer<int> buf;
int * ind1_p, * ind2_p;
float * xa_p, * xa1_p;
};
template <class T>
void CLAHE_Interpolation_Body<T>::operator ()(const cv::Range& range) const
{
const size_t lut_step = lut_.step / sizeof(T);
float inv_th = 1.0f / tileSize_.height;
for (int y = range.start; y < range.end; ++y)
{
const T* srcRow = src_.ptr<T>(y);
T* dstRow = dst_.ptr<T>(y);
const float tyf = (static_cast<float>(y) / tileSize_.height) - 0.5f;
float tyf = y * inv_th - 0.5f;
int ty1 = cvFloor(tyf);
int ty2 = ty1 + 1;
const float ya = tyf - ty1;
float ya = tyf - ty1, ya1 = 1.0f - ya;
ty1 = std::max(ty1, 0);
ty2 = std::min(ty2, tilesY_ - 1);
@@ -272,27 +301,13 @@ namespace
for (int x = 0; x < src_.cols; ++x)
{
const float txf = (static_cast<float>(x) / tileSize_.width) - 0.5f;
int tx1 = cvFloor(txf);
int tx2 = tx1 + 1;
int srcVal = srcRow[x];
const float xa = txf - tx1;
tx1 = std::max(tx1, 0);
tx2 = std::min(tx2, tilesX_ - 1);
int ind1 = ind1_p[x] + srcVal;
int ind2 = ind2_p[x] + srcVal;
const int srcVal = srcRow[x];
const size_t ind1 = tx1 * lut_step + srcVal;
const size_t ind2 = tx2 * lut_step + srcVal;
float res = 0;
res += lutPlane1[ind1] * ((1.0f - xa) * (1.0f - ya));
res += lutPlane1[ind2] * ((xa) * (1.0f - ya));
res += lutPlane2[ind1] * ((1.0f - xa) * (ya));
res += lutPlane2[ind2] * ((xa) * (ya));
float res = (lutPlane1[ind1] * xa1_p[x] + lutPlane1[ind2] * xa_p[x]) * ya1 +
(lutPlane2[ind1] * xa1_p[x] + lutPlane2[ind2] * xa_p[x]) * ya;
dstRow[x] = cv::saturate_cast<T>(res);
}
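// (aside, not in the patch) with the per-column tables built in the constructor this
// is plain bilinear interpolation between the LUTs of the four neighbouring tiles:
// res = ya1 * (xa1*lutPlane1[ind1] + xa*lutPlane1[ind2])
//     + ya  * (xa1*lutPlane2[ind1] + xa*lutPlane2[ind2]),
// where xa1_p[x] = 1 - xa_p[x] and ya1 = 1 - ya are the precomputed complementary weights.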
@@ -403,7 +418,9 @@ namespace
calcLutBody = cv::makePtr<CLAHE_CalcLut_Body<uchar, 256, 0> >(srcForLut, lut_, tileSize, tilesX_, clipLimit, lutScale);
else if (_src.type() == CV_16UC1)
calcLutBody = cv::makePtr<CLAHE_CalcLut_Body<ushort, 4096, 4> >(srcForLut, lut_, tileSize, tilesX_, clipLimit, lutScale);
CV_Assert(!calcLutBody.empty());
else
CV_Error( CV_StsBadArg, "Unsupported type" );
cv::parallel_for_(cv::Range(0, tilesX_ * tilesY_), *calcLutBody);
cv::Ptr<cv::ParallelLoopBody> interpolationBody;
@@ -411,7 +428,7 @@ namespace
interpolationBody = cv::makePtr<CLAHE_Interpolation_Body<uchar> >(src, dst, lut_, tileSize, tilesX_, tilesY_);
else if (_src.type() == CV_16UC1)
interpolationBody = cv::makePtr<CLAHE_Interpolation_Body<ushort> >(src, dst, lut_, tileSize, tilesX_, tilesY_);
CV_Assert(!interpolationBody.empty());
cv::parallel_for_(cv::Range(0, src.rows), *interpolationBody);
}
This diff is collapsed.
@@ -126,7 +126,7 @@ static void calcHarris( const Mat& _cov, Mat& _dst, double k )
if( simd )
{
__m128 k4 = _mm_set1_ps((float)k);
for( ; j <= size.width - 5; j += 4 )
for( ; j <= size.width - 4; j += 4 )
{
__m128 t0 = _mm_loadu_ps(cov + j*3); // a0 b0 c0 x
__m128 t1 = _mm_loadu_ps(cov + j*3 + 3); // a1 b1 c1 x
@@ -146,6 +146,17 @@ static void calcHarris( const Mat& _cov, Mat& _dst, double k )
_mm_storeu_ps(dst + j, a);
}
}
#elif CV_NEON
float32x4_t v_k = vdupq_n_f32((float)k);
for( ; j <= size.width - 4; j += 4 )
{
float32x4x3_t v_src = vld3q_f32(cov + j * 3);
float32x4_t v_a = v_src.val[0], v_b = v_src.val[1], v_c = v_src.val[2];
float32x4_t v_ac_bb = vmlsq_f32(vmulq_f32(v_a, v_c), v_b, v_b);
float32x4_t v_ac = vaddq_f32(v_a, v_c);
vst1q_f32(dst + j, vmlsq_f32(v_ac_bb, v_k, vmulq_f32(v_ac, v_ac)));
}
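// (aside, not in the patch) each lane evaluates the Harris response
// R = a*c - b*b - k*(a + c)^2, i.e. det(M) - k*trace(M)^2 for the
// 2x2 structure tensor M = [a b; b c] loaded with vld3q_f32.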
#endif
for( ; j < size.width; j++ )
@@ -607,10 +618,13 @@ void cv::preCornerDetect( InputArray _src, OutputArray _dst, int ksize, int bord
if( src.depth() == CV_8U )
factor *= 255;
factor = 1./(factor * factor * factor);
#if CV_NEON || CV_SSE2
float factor_f = (float)factor;
#endif
#if CV_SSE2
volatile bool haveSSE2 = cv::checkHardwareSupport(CV_CPU_SSE2);
__m128 v_factor = _mm_set1_ps((float)factor), v_m2 = _mm_set1_ps(-2.0f);
__m128 v_factor = _mm_set1_ps(factor_f), v_m2 = _mm_set1_ps(-2.0f);
#endif
Size size = src.size();
@@ -641,6 +655,15 @@ void cv::preCornerDetect( InputArray _src, OutputArray _dst, int ksize, int bord
_mm_storeu_ps(dstdata + j, v_s1);
}
}
#elif CV_NEON
for( ; j <= size.width - 4; j += 4 )
{
float32x4_t v_dx = vld1q_f32(dxdata + j), v_dy = vld1q_f32(dydata + j);
float32x4_t v_s = vmulq_f32(v_dx, vmulq_f32(v_dx, vld1q_f32(d2ydata + j)));
v_s = vmlaq_f32(v_s, vld1q_f32(d2xdata + j), vmulq_f32(v_dy, v_dy));
v_s = vmlaq_f32(v_s, vld1q_f32(dxydata + j), vmulq_n_f32(vmulq_f32(v_dy, v_dx), -2));
vst1q_f32(dstdata + j, vmulq_n_f32(v_s, factor_f));
}
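// (aside, not in the patch) each lane computes
// factor * (dx*dx*d2y + dy*dy*d2x - 2*dx*dy*dxy),
// the same per-element expression evaluated by the scalar tail loop.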
#endif
for( ; j < size.width; j++ )
This diff is collapsed.
@@ -203,7 +203,7 @@ static Moments contourMoments( const Mat& contour )
\****************************************************************************************/
template<typename T, typename WT, typename MT>
struct MomentsInTile_SSE
struct MomentsInTile_SIMD
{
int operator() (const T *, int, WT &, WT &, WT &, MT &)
{
@@ -214,9 +214,9 @@ struct MomentsInTile_SSE
#if CV_SSE2
template <>
struct MomentsInTile_SSE<uchar, int, int>
struct MomentsInTile_SIMD<uchar, int, int>
{
MomentsInTile_SSE()
MomentsInTile_SIMD()
{
useSIMD = checkHardwareSupport(CV_CPU_SSE2);
}
@@ -234,17 +234,16 @@ struct MomentsInTile_SSE<uchar, int, int>
for( ; x <= len - 8; x += 8 )
{
__m128i p = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr + x)), z);
qx0 = _mm_add_epi32(qx0, _mm_sad_epu8(p, z));
__m128i px = _mm_mullo_epi16(p, qx);
__m128i sx = _mm_mullo_epi16(qx, qx);
qx0 = _mm_add_epi32(qx0, _mm_sad_epu8(p, z));
qx1 = _mm_add_epi32(qx1, _mm_madd_epi16(p, qx));
qx2 = _mm_add_epi32(qx2, _mm_madd_epi16(p, sx));
qx3 = _mm_add_epi32(qx3, _mm_madd_epi16(px, sx));
qx3 = _mm_add_epi32(qx3, _mm_madd_epi16( _mm_mullo_epi16(p, qx), sx));
qx = _mm_add_epi16(qx, dx);
}
int CV_DECL_ALIGNED(16) buf[4];
_mm_store_si128((__m128i*)buf, qx0);
x0 = buf[0] + buf[1] + buf[2] + buf[3];
_mm_store_si128((__m128i*)buf, qx1);
@@ -258,17 +257,84 @@ struct MomentsInTile_SSE<uchar, int, int>
return x;
}
int CV_DECL_ALIGNED(16) buf[4];
bool useSIMD;
};
#elif CV_NEON
template <>
struct MomentsInTile_SIMD<uchar, int, int>
{
MomentsInTile_SIMD()
{
ushort CV_DECL_ALIGNED(8) init[4] = { 0, 1, 2, 3 };
qx_init = vld1_u16(init);
v_step = vdup_n_u16(4);
}
int operator() (const uchar * ptr, int len, int & x0, int & x1, int & x2, int & x3)
{
int x = 0;
uint32x4_t v_z = vdupq_n_u32(0), v_x0 = v_z, v_x1 = v_z,
v_x2 = v_z, v_x3 = v_z;
uint16x4_t qx = qx_init;
for( ; x <= len - 8; x += 8 )
{
uint16x8_t v_src = vmovl_u8(vld1_u8(ptr + x));
// first part
uint32x4_t v_qx = vmovl_u16(qx);
uint16x4_t v_p = vget_low_u16(v_src);
uint32x4_t v_px = vmull_u16(qx, v_p);
v_x0 = vaddw_u16(v_x0, v_p);
v_x1 = vaddq_u32(v_x1, v_px);
v_px = vmulq_u32(v_px, v_qx);
v_x2 = vaddq_u32(v_x2, v_px);
v_x3 = vaddq_u32(v_x3, vmulq_u32(v_px, v_qx));
qx = vadd_u16(qx, v_step);
// second part
v_qx = vmovl_u16(qx);
v_p = vget_high_u16(v_src);
v_px = vmull_u16(qx, v_p);
v_x0 = vaddw_u16(v_x0, v_p);
v_x1 = vaddq_u32(v_x1, v_px);
v_px = vmulq_u32(v_px, v_qx);
v_x2 = vaddq_u32(v_x2, v_px);
v_x3 = vaddq_u32(v_x3, vmulq_u32(v_px, v_qx));
qx = vadd_u16(qx, v_step);
}
vst1q_u32(buf, v_x0);
x0 = buf[0] + buf[1] + buf[2] + buf[3];
vst1q_u32(buf, v_x1);
x1 = buf[0] + buf[1] + buf[2] + buf[3];
vst1q_u32(buf, v_x2);
x2 = buf[0] + buf[1] + buf[2] + buf[3];
vst1q_u32(buf, v_x3);
x3 = buf[0] + buf[1] + buf[2] + buf[3];
return x;
}
uint CV_DECL_ALIGNED(16) buf[4];
uint16x4_t qx_init, v_step;
};
#endif
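// Illustrative aside, not part of this patch (the function name is hypothetical):
// a scalar counterpart of MomentsInTile_SIMD<uchar, int, int>, showing the raw row
// sums both SIMD specializations accumulate: sum(p), sum(p*x), sum(p*x^2), sum(p*x^3).
static int momentsRowReference(const unsigned char* ptr, int len, int& x0, int& x1, int& x2, int& x3)
{
    x0 = x1 = x2 = x3 = 0;
    for (int x = 0; x < len; ++x)
    {
        int p = ptr[x];
        int xp = x * p, xxp = x * xp;
        x0 += p;            // sum of pixel values
        x1 += xp;           // sum of p * x
        x2 += xxp;          // sum of p * x^2
        x3 += x * xxp;      // sum of p * x^3
    }
    return len;             // the SIMD versions instead return how many pixels they handled
}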
#if CV_SSE4_1
template <>
struct MomentsInTile_SSE<ushort, int, int64>
struct MomentsInTile_SIMD<ushort, int, int64>
{
MomentsInTile_SSE()
MomentsInTile_SIMD()
{
useSIMD = checkHardwareSupport(CV_CPU_SSE4_1);
}
@@ -302,9 +368,6 @@ struct MomentsInTile_SSE<ushort, int, int64>
v_ix1 = _mm_add_epi32(v_ix1, v_delta);
}
int CV_DECL_ALIGNED(16) buf[4];
int64 CV_DECL_ALIGNED(16) buf64[2];
_mm_store_si128((__m128i*)buf, v_x0);
x0 = buf[0] + buf[1] + buf[2] + buf[3];
_mm_store_si128((__m128i*)buf, v_x1);
@@ -319,6 +382,8 @@ struct MomentsInTile_SSE<ushort, int, int64>
return x;
}
int CV_DECL_ALIGNED(16) buf[4];
int64 CV_DECL_ALIGNED(16) buf64[2];
bool useSIMD;
};
@@ -334,7 +399,7 @@ static void momentsInTile( const Mat& img, double* moments )
Size size = img.size();
int x, y;
MT mom[10] = {0,0,0,0,0,0,0,0,0,0};
MomentsInTile_SSE<T, WT, MT> vop;
MomentsInTile_SIMD<T, WT, MT> vop;
for( y = 0; y < size.height; y++ )
{
@@ -178,11 +178,190 @@ struct PyrDownVec_32f
}
};
typedef NoVec<int, ushort> PyrDownVec_32s16u;
typedef NoVec<int, short> PyrDownVec_32s16s;
typedef NoVec<float, float> PyrUpVec_32f;
#elif CV_NEON
struct PyrDownVec_32s8u
{
int operator()(int** src, uchar* dst, int, int width) const
{
int x = 0;
const unsigned int *row0 = (unsigned int*)src[0], *row1 = (unsigned int*)src[1],
*row2 = (unsigned int*)src[2], *row3 = (unsigned int*)src[3],
*row4 = (unsigned int*)src[4];
uint16x8_t v_delta = vdupq_n_u16(128);
for( ; x <= width - 16; x += 16 )
{
uint16x8_t v_r0 = vcombine_u16(vqmovn_u32(vld1q_u32(row0 + x)), vqmovn_u32(vld1q_u32(row0 + x + 4)));
uint16x8_t v_r1 = vcombine_u16(vqmovn_u32(vld1q_u32(row1 + x)), vqmovn_u32(vld1q_u32(row1 + x + 4)));
uint16x8_t v_r2 = vcombine_u16(vqmovn_u32(vld1q_u32(row2 + x)), vqmovn_u32(vld1q_u32(row2 + x + 4)));
uint16x8_t v_r3 = vcombine_u16(vqmovn_u32(vld1q_u32(row3 + x)), vqmovn_u32(vld1q_u32(row3 + x + 4)));
uint16x8_t v_r4 = vcombine_u16(vqmovn_u32(vld1q_u32(row4 + x)), vqmovn_u32(vld1q_u32(row4 + x + 4)));
v_r0 = vqaddq_u16(vqaddq_u16(v_r0, v_r4), vqaddq_u16(v_r2, v_r2));
v_r1 = vqaddq_u16(vqaddq_u16(v_r1, v_r2), v_r3);
uint16x8_t v_dst0 = vqaddq_u16(v_r0, vshlq_n_u16(v_r1, 2));
v_r0 = vcombine_u16(vqmovn_u32(vld1q_u32(row0 + x + 8)), vqmovn_u32(vld1q_u32(row0 + x + 12)));
v_r1 = vcombine_u16(vqmovn_u32(vld1q_u32(row1 + x + 8)), vqmovn_u32(vld1q_u32(row1 + x + 12)));
v_r2 = vcombine_u16(vqmovn_u32(vld1q_u32(row2 + x + 8)), vqmovn_u32(vld1q_u32(row2 + x + 12)));
v_r3 = vcombine_u16(vqmovn_u32(vld1q_u32(row3 + x + 8)), vqmovn_u32(vld1q_u32(row3 + x + 12)));
v_r4 = vcombine_u16(vqmovn_u32(vld1q_u32(row4 + x + 8)), vqmovn_u32(vld1q_u32(row4 + x + 12)));
v_r0 = vqaddq_u16(vqaddq_u16(v_r0, v_r4), vqaddq_u16(v_r2, v_r2));
v_r1 = vqaddq_u16(vqaddq_u16(v_r1, v_r2), v_r3);
uint16x8_t v_dst1 = vqaddq_u16(v_r0, vshlq_n_u16(v_r1, 2));
vst1q_u8(dst + x, vcombine_u8(vqmovn_u16(vshrq_n_u16(vaddq_u16(v_dst0, v_delta), 8)),
vqmovn_u16(vshrq_n_u16(vaddq_u16(v_dst1, v_delta), 8))));
}
return x;
}
};
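// (aside, not in the patch) the arithmetic above is the vertical pass of the
// 5-tap binomial kernel [1 4 6 4 1]:
// (r0 + r4) + 2*r2 + 4*(r1 + r2 + r3) = r0 + 4*r1 + 6*r2 + 4*r3 + r4.
// Combined with the horizontal pass the fixed-point scale is 1/256, hence the
// "+ 128" rounding term and the ">> 8" before narrowing to 8 bits.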
struct PyrDownVec_32s16u
{
int operator()(int** src, ushort* dst, int, int width) const
{
int x = 0;
const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4];
int32x4_t v_delta = vdupq_n_s32(128);
for( ; x <= width - 8; x += 8 )
{
int32x4_t v_r00 = vld1q_s32(row0 + x), v_r01 = vld1q_s32(row0 + x + 4);
int32x4_t v_r10 = vld1q_s32(row1 + x), v_r11 = vld1q_s32(row1 + x + 4);
int32x4_t v_r20 = vld1q_s32(row2 + x), v_r21 = vld1q_s32(row2 + x + 4);
int32x4_t v_r30 = vld1q_s32(row3 + x), v_r31 = vld1q_s32(row3 + x + 4);
int32x4_t v_r40 = vld1q_s32(row4 + x), v_r41 = vld1q_s32(row4 + x + 4);
v_r00 = vaddq_s32(vqaddq_s32(v_r00, v_r40), vqaddq_s32(v_r20, v_r20));
v_r10 = vaddq_s32(vqaddq_s32(v_r10, v_r20), v_r30);
int32x4_t v_dst0 = vshrq_n_s32(vaddq_s32(vqaddq_s32(v_r00, vshlq_n_s32(v_r10, 2)), v_delta), 8);
v_r01 = vaddq_s32(vqaddq_s32(v_r01, v_r41), vqaddq_s32(v_r21, v_r21));
v_r11 = vaddq_s32(vqaddq_s32(v_r11, v_r21), v_r31);
int32x4_t v_dst1 = vshrq_n_s32(vaddq_s32(vqaddq_s32(v_r01, vshlq_n_s32(v_r11, 2)), v_delta), 8);
vst1q_u16(dst + x, vcombine_u16(vqmovun_s32(v_dst0), vqmovun_s32(v_dst1)));
}
return x;
}
};
struct PyrDownVec_32s16s
{
int operator()(int** src, short* dst, int, int width) const
{
int x = 0;
const int *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4];
int32x4_t v_delta = vdupq_n_s32(128);
for( ; x <= width - 8; x += 8 )
{
int32x4_t v_r00 = vld1q_s32(row0 + x), v_r01 = vld1q_s32(row0 + x + 4);
int32x4_t v_r10 = vld1q_s32(row1 + x), v_r11 = vld1q_s32(row1 + x + 4);
int32x4_t v_r20 = vld1q_s32(row2 + x), v_r21 = vld1q_s32(row2 + x + 4);
int32x4_t v_r30 = vld1q_s32(row3 + x), v_r31 = vld1q_s32(row3 + x + 4);
int32x4_t v_r40 = vld1q_s32(row4 + x), v_r41 = vld1q_s32(row4 + x + 4);
v_r00 = vaddq_s32(vqaddq_s32(v_r00, v_r40), vqaddq_s32(v_r20, v_r20));
v_r10 = vaddq_s32(vqaddq_s32(v_r10, v_r20), v_r30);
int32x4_t v_dst0 = vshrq_n_s32(vaddq_s32(vqaddq_s32(v_r00, vshlq_n_s32(v_r10, 2)), v_delta), 8);
v_r01 = vaddq_s32(vqaddq_s32(v_r01, v_r41), vqaddq_s32(v_r21, v_r21));
v_r11 = vaddq_s32(vqaddq_s32(v_r11, v_r21), v_r31);
int32x4_t v_dst1 = vshrq_n_s32(vaddq_s32(vqaddq_s32(v_r01, vshlq_n_s32(v_r11, 2)), v_delta), 8);
vst1q_s16(dst + x, vcombine_s16(vqmovn_s32(v_dst0), vqmovn_s32(v_dst1)));
}
return x;
}
};
struct PyrDownVec_32f
{
int operator()(float** src, float* dst, int, int width) const
{
int x = 0;
const float *row0 = src[0], *row1 = src[1], *row2 = src[2], *row3 = src[3], *row4 = src[4];
float32x4_t v_4 = vdupq_n_f32(4.0f), v_scale = vdupq_n_f32(1.f/256.0f);
for( ; x <= width - 8; x += 8 )
{
float32x4_t v_r0 = vld1q_f32(row0 + x);
float32x4_t v_r1 = vld1q_f32(row1 + x);
float32x4_t v_r2 = vld1q_f32(row2 + x);
float32x4_t v_r3 = vld1q_f32(row3 + x);
float32x4_t v_r4 = vld1q_f32(row4 + x);
v_r0 = vaddq_f32(vaddq_f32(v_r0, v_r4), vaddq_f32(v_r2, v_r2));
v_r1 = vaddq_f32(vaddq_f32(v_r1, v_r2), v_r3);
vst1q_f32(dst + x, vmulq_f32(vmlaq_f32(v_r0, v_4, v_r1), v_scale));
v_r0 = vld1q_f32(row0 + x + 4);
v_r1 = vld1q_f32(row1 + x + 4);
v_r2 = vld1q_f32(row2 + x + 4);
v_r3 = vld1q_f32(row3 + x + 4);
v_r4 = vld1q_f32(row4 + x + 4);
v_r0 = vaddq_f32(vaddq_f32(v_r0, v_r4), vaddq_f32(v_r2, v_r2));
v_r1 = vaddq_f32(vaddq_f32(v_r1, v_r2), v_r3);
vst1q_f32(dst + x + 4, vmulq_f32(vmlaq_f32(v_r0, v_4, v_r1), v_scale));
}
return x;
}
};
struct PyrUpVec_32f
{
int operator()(float** src, float* dst, int, int width) const
{
int x = 0;
float ** dsts = (float **)dst;
const float *row0 = src[0], *row1 = src[1], *row2 = src[2];
float *dst0 = dsts[0], *dst1 = dsts[1];
float32x4_t v_6 = vdupq_n_f32(6.0f), v_scale = vdupq_n_f32(1.f/64.0f), v_scale4 = vmulq_n_f32(v_scale, 4.0f);
for( ; x <= width - 8; x += 8 )
{
float32x4_t v_r0 = vld1q_f32(row0 + x);
float32x4_t v_r1 = vld1q_f32(row1 + x);
float32x4_t v_r2 = vld1q_f32(row2 + x);
vst1q_f32(dst1 + x, vmulq_f32(v_scale4, vaddq_f32(v_r1, v_r2)));
vst1q_f32(dst0 + x, vmulq_f32(v_scale, vaddq_f32(vmlaq_f32(v_r0, v_6, v_r1), v_r2)));
v_r0 = vld1q_f32(row0 + x + 4);
v_r1 = vld1q_f32(row1 + x + 4);
v_r2 = vld1q_f32(row2 + x + 4);
vst1q_f32(dst1 + x + 4, vmulq_f32(v_scale4, vaddq_f32(v_r1, v_r2)));
vst1q_f32(dst0 + x + 4, vmulq_f32(v_scale, vaddq_f32(vmlaq_f32(v_r0, v_6, v_r1), v_r2)));
}
return x;
}
};
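// (aside, not in the patch) PyrUpVec_32f writes two output rows per input row:
// dst0 = (r0 + 6*r1 + r2) / 64 and dst1 = 4*(r1 + r2) / 64, the even/odd rows of
// the pyrUp kernel. The dst argument actually carries an array of the two row
// pointers, which pyrUp_ sets up through the dsts[] cast further below.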
#else
typedef NoVec<int, uchar> PyrDownVec_32s8u;
typedef NoVec<int, ushort> PyrDownVec_32s16u;
typedef NoVec<int, short> PyrDownVec_32s16s;
typedef NoVec<float, float> PyrDownVec_32f;
typedef NoVec<float, float> PyrUpVec_32f;
#endif
template<class CastOp, class VecOp> void
@@ -325,6 +504,7 @@ pyrUp_( const Mat& _src, Mat& _dst, int)
AutoBuffer<int> _dtab(ssize.width*cn);
int* dtab = _dtab;
WT* rows[PU_SZ];
T* dsts[2];
CastOp castOp;
VecOp vecOp;
@@ -385,8 +565,9 @@ pyrUp_( const Mat& _src, Mat& _dst, int)
for( k = 0; k < PU_SZ; k++ )
rows[k] = buf + ((y - PU_SZ/2 + k - sy0) % PU_SZ)*bufstep;
row0 = rows[0]; row1 = rows[1]; row2 = rows[2];
dsts[0] = dst0; dsts[1] = dst1;
x = vecOp(rows, dst0, (int)_dst.step, dsize.width);
x = vecOp(rows, (T*)dsts, (int)_dst.step, dsize.width);
for( ; x < dsize.width; x++ )
{
T t1 = castOp((row1[x] + row2[x])*4);
@@ -561,9 +742,9 @@ void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borde
if( depth == CV_8U )
func = pyrDown_<FixPtCast<uchar, 8>, PyrDownVec_32s8u>;
else if( depth == CV_16S )
func = pyrDown_<FixPtCast<short, 8>, NoVec<int, short> >;
func = pyrDown_<FixPtCast<short, 8>, PyrDownVec_32s16s >;
else if( depth == CV_16U )
func = pyrDown_<FixPtCast<ushort, 8>, NoVec<int, ushort> >;
func = pyrDown_<FixPtCast<ushort, 8>, PyrDownVec_32s16u >;
else if( depth == CV_32F )
func = pyrDown_<FltCast<float, 8>, PyrDownVec_32f>;
else if( depth == CV_64F )
@@ -636,7 +817,7 @@ void cv::pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int borderT
else if( depth == CV_16U )
func = pyrUp_<FixPtCast<ushort, 6>, NoVec<int, ushort> >;
else if( depth == CV_32F )
func = pyrUp_<FltCast<float, 6>, NoVec<float, float> >;
func = pyrUp_<FltCast<float, 6>, PyrUpVec_32f >;
else if( depth == CV_64F )
func = pyrUp_<FltCast<double, 6>, NoVec<double, double> >;
else
This diff is collapsed.
@@ -264,6 +264,74 @@ thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
}
}
}
#elif CV_NEON
uint8x16_t v_thresh = vdupq_n_u8(thresh), v_maxval = vdupq_n_u8(maxval);
switch( type )
{
case THRESH_BINARY:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
vst1q_u8(dst + j_scalar, vandq_u8(vcgtq_u8(vld1q_u8(src + j_scalar), v_thresh), v_maxval));
}
break;
case THRESH_BINARY_INV:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
vst1q_u8(dst + j_scalar, vandq_u8(vcleq_u8(vld1q_u8(src + j_scalar), v_thresh), v_maxval));
}
break;
case THRESH_TRUNC:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
vst1q_u8(dst + j_scalar, vminq_u8(vld1q_u8(src + j_scalar), v_thresh));
}
break;
case THRESH_TOZERO:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
{
uint8x16_t v_src = vld1q_u8(src + j_scalar), v_mask = vcgtq_u8(v_src, v_thresh);
vst1q_u8(dst + j_scalar, vandq_u8(v_mask, v_src));
}
}
break;
case THRESH_TOZERO_INV:
for( i = 0; i < roi.height; i++ )
{
const uchar* src = _src.ptr() + src_step*i;
uchar* dst = _dst.ptr() + dst_step*i;
for ( j_scalar = 0; j_scalar <= roi.width - 16; j_scalar += 16)
{
uint8x16_t v_src = vld1q_u8(src + j_scalar), v_mask = vcleq_u8(v_src, v_thresh);
vst1q_u8(dst + j_scalar, vandq_u8(v_mask, v_src));
}
}
break;
default:
return CV_Error( CV_StsBadArg, "" );
}
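// (aside, not in the patch) the vector comparisons above yield an all-ones byte per
// lane where the condition holds, so ANDing the mask with v_maxval (or with the source
// itself for THRESH_TOZERO*) leaves maxval/src in selected lanes and 0 elsewhere,
// e.g. thresh = 100: src = 150 -> mask 0xFF -> maxval, src = 50 -> mask 0x00 -> 0.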
#endif
if( j_scalar < roi.width )
@@ -382,6 +450,14 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh), v_maxval = vdupq_n_s16(maxval);
for( ; j <= roi.width - 8; j += 8 )
{
uint16x8_t v_mask = vcgtq_s16(vld1q_s16(src + j), v_thresh);
vst1q_s16(dst + j, vandq_s16(vreinterpretq_s16_u16(v_mask), v_maxval));
}
#endif
for( ; j < roi.width; j++ )
@@ -410,6 +486,14 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh), v_maxval = vdupq_n_s16(maxval);
for( ; j <= roi.width - 8; j += 8 )
{
uint16x8_t v_mask = vcleq_s16(vld1q_s16(src + j), v_thresh);
vst1q_s16(dst + j, vandq_s16(vreinterpretq_s16_u16(v_mask), v_maxval));
}
#endif
for( ; j < roi.width; j++ )
@@ -436,6 +520,11 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh);
for( ; j <= roi.width - 8; j += 8 )
vst1q_s16(dst + j, vminq_s16(vld1q_s16(src + j), v_thresh));
#endif
for( ; j < roi.width; j++ )
@@ -462,6 +551,15 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh);
for( ; j <= roi.width - 8; j += 8 )
{
int16x8_t v_src = vld1q_s16(src + j);
uint16x8_t v_mask = vcgtq_s16(v_src, v_thresh);
vst1q_s16(dst + j, vandq_s16(vreinterpretq_s16_u16(v_mask), v_src));
}
#endif
for( ; j < roi.width; j++ )
@@ -491,6 +589,15 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#elif CV_NEON
int16x8_t v_thresh = vdupq_n_s16(thresh);
for( ; j <= roi.width - 8; j += 8 )
{
int16x8_t v_src = vld1q_s16(src + j);
uint16x8_t v_mask = vcleq_s16(v_src, v_thresh);
vst1q_s16(dst + j, vandq_s16(vreinterpretq_s16_u16(v_mask), v_src));
}
#endif
for( ; j < roi.width; j++ )
{
......@@ -576,6 +683,16 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
uint32x4_t v_maxval = vreinterpretq_u32_f32(vdupq_n_f32(maxval));
for( ; j <= roi.width - 4; j += 4 )
{
float32x4_t v_src = vld1q_f32(src + j);
uint32x4_t v_dst = vandq_u32(vcgtq_f32(v_src, v_thresh), v_maxval);
vst1q_f32(dst + j, vreinterpretq_f32_u32(v_dst));
}
#endif
for( ; j < roi.width; j++ )
@@ -604,6 +721,16 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
uint32x4_t v_maxval = vreinterpretq_u32_f32(vdupq_n_f32(maxval));
for( ; j <= roi.width - 4; j += 4 )
{
float32x4_t v_src = vld1q_f32(src + j);
uint32x4_t v_dst = vandq_u32(vcleq_f32(v_src, v_thresh), v_maxval);
vst1q_f32(dst + j, vreinterpretq_f32_u32(v_dst));
}
#endif
for( ; j < roi.width; j++ )
@@ -630,6 +757,11 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
for( ; j <= roi.width - 4; j += 4 )
vst1q_f32(dst + j, vminq_f32(vld1q_f32(src + j), v_thresh));
#endif
for( ; j < roi.width; j++ )
@@ -656,6 +788,16 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
for( ; j <= roi.width - 4; j += 4 )
{
float32x4_t v_src = vld1q_f32(src + j);
uint32x4_t v_dst = vandq_u32(vcgtq_f32(v_src, v_thresh),
vreinterpretq_u32_f32(v_src));
vst1q_f32(dst + j, vreinterpretq_f32_u32(v_dst));
}
#endif
for( ; j < roi.width; j++ )
@@ -685,6 +827,16 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#elif CV_NEON
float32x4_t v_thresh = vdupq_n_f32(thresh);
for( ; j <= roi.width - 4; j += 4 )
{
float32x4_t v_src = vld1q_f32(src + j);
uint32x4_t v_dst = vandq_u32(vcleq_f32(v_src, v_thresh),
vreinterpretq_u32_f32(v_src));
vst1q_f32(dst + j, vreinterpretq_f32_u32(v_dst));
}
#endif
for( ; j < roi.width; j++ )
{
@@ -1545,4 +1545,63 @@ TEST(Imgproc_InitUndistortMap, accuracy) { CV_UndistortMapTest test; test.safe_r
TEST(Imgproc_GetRectSubPix, accuracy) { CV_GetRectSubPixTest test; test.safe_run(); }
TEST(Imgproc_GetQuadSubPix, accuracy) { CV_GetQuadSubPixTest test; test.safe_run(); }
//////////////////////////////////////////////////////////////////////////
template <typename T, typename WT>
void resizeArea(const cv::Mat & src, cv::Mat & dst)
{
int cn = src.channels();
for (int y = 0; y < dst.rows; ++y)
{
const T * sptr0 = src.ptr<T>(y << 1);
const T * sptr1 = src.ptr<T>((y << 1) + 1);
T * dptr = dst.ptr<T>(y);
for (int x = 0; x < dst.cols * cn; x += cn)
{
int x1 = x << 1;
for (int c = 0; c < cn; ++c)
{
WT sum = WT(sptr0[x1 + c]) + WT(sptr0[x1 + c + cn]);
sum += WT(sptr1[x1 + c]) + WT(sptr1[x1 + c + cn]) + (WT)(2);
dptr[x + c] = cv::saturate_cast<T>(sum >> 2);
}
}
}
}
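// (aside, not part of the test) each destination pixel is the rounded average of a
// 2x2 source block: the "+ 2" makes the ">> 2" round to nearest, which is what
// INTER_AREA is expected to produce for an exact half-size resize of integer types.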
TEST(Resize, Area_half)
{
const int size = 10;
int types[] = { CV_8UC1, CV_8UC4, CV_16UC1, CV_16UC4 };
cv::RNG rng(17);
for (int i = 0, _size = sizeof(types) / sizeof(types[0]); i < _size; ++i)
{
int type = types[i], depth = CV_MAT_DEPTH(type);
SCOPED_TRACE(depth);
cv::Mat src(size, size, type), dst_actual(size >> 1, size >> 1, type),
dst_reference(size >> 1, size >> 1, type);
rng.fill(src, cv::RNG::UNIFORM, 0, 1000, true);
if (depth == CV_8U)
resizeArea<uchar, ushort>(src, dst_reference);
else if (depth == CV_16U)
resizeArea<ushort, int>(src, dst_reference);
else
CV_Assert(0);
cv::resize(src, dst_actual, dst_actual.size(), 0, 0, cv::INTER_AREA);
ASSERT_EQ(0, cvtest::norm(dst_reference, dst_actual, cv::NORM_INF));
}
}
/* End of file. */
@@ -733,19 +733,25 @@ void CV_Remap_Test::generate_test_data()
case CV_32FC2:
{
MatIterator_<Vec2f> begin_x = mapx.begin<Vec2f>(), end_x = mapx.end<Vec2f>();
float fscols = static_cast<float>(std::max(src.cols - 1 + n, 0)),
fsrows = static_cast<float>(std::max(src.rows - 1 + n, 0));
for ( ; begin_x != end_x; ++begin_x)
int width = mapx.cols << 1;
for (int y = 0; y < mapx.rows; ++y)
{
begin_x[0] = rng.uniform(_n, fscols);
begin_x[1] = rng.uniform(_n, fsrows);
float * ptr = mapx.ptr<float>(y);
for (int x = 0; x < width; x += 2)
{
ptr[x] = rng.uniform(_n, fscols);
ptr[x + 1] = rng.uniform(_n, fsrows);
}
}
}
break;
default:
assert(0);
CV_Assert(0);
break;
}
}