Commit 756a98a3 authored by Alexander Alekhin

imgproc: keep history of filters files

parents 9dc75540 6eac8f78
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, 2018, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
/****************************************************************************************\
Bilateral Filtering
\****************************************************************************************/
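// For reference, the weight this filter applies to a neighbor q of pixel p is
// the product of a spatial Gaussian and a range (color) Gaussian:
//
//     w(p,q) = exp(-|q - p|^2 / (2*sigma_space^2)) * exp(-d(I(q), I(p))^2 / (2*sigma_color^2))
//     O(p)   = sum_q w(p,q) * I(q) / sum_q w(p,q)
//
// where d() is the absolute intensity difference for single-channel images and,
// in the implementations below, the sum of per-channel absolute differences for
// 3-channel images. Both factors are precomputed: space_weight/space_ofs hold the
// spatial term per kernel offset, while color_weight (8u) and expLUT (32f)
// tabulate the range term so no exp() is evaluated per pixel.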
namespace cv
{
class BilateralFilter_8u_Invoker :
public ParallelLoopBody
{
public:
BilateralFilter_8u_Invoker(Mat& _dest, const Mat& _temp, int _radius, int _maxk,
int* _space_ofs, float *_space_weight, float *_color_weight) :
temp(&_temp), dest(&_dest), radius(_radius),
maxk(_maxk), space_ofs(_space_ofs), space_weight(_space_weight), color_weight(_color_weight)
{
}
virtual void operator() (const Range& range) const CV_OVERRIDE
{
int i, j, cn = dest->channels(), k;
Size size = dest->size();
for( i = range.start; i < range.end; i++ )
{
const uchar* sptr = temp->ptr(i+radius) + radius*cn;
uchar* dptr = dest->ptr(i);
if( cn == 1 )
{
AutoBuffer<float> buf(alignSize(size.width, CV_SIMD_WIDTH) + size.width + CV_SIMD_WIDTH - 1);
memset(buf.data(), 0, buf.size() * sizeof(float));
float *sum = alignPtr(buf.data(), CV_SIMD_WIDTH);
float *wsum = sum + alignSize(size.width, CV_SIMD_WIDTH);
k = 0;
for(; k <= maxk-4; k+=4)
{
const uchar* ksptr0 = sptr + space_ofs[k];
const uchar* ksptr1 = sptr + space_ofs[k+1];
const uchar* ksptr2 = sptr + space_ofs[k+2];
const uchar* ksptr3 = sptr + space_ofs[k+3];
j = 0;
#if CV_SIMD
v_float32 kweight0 = vx_setall_f32(space_weight[k]);
v_float32 kweight1 = vx_setall_f32(space_weight[k+1]);
v_float32 kweight2 = vx_setall_f32(space_weight[k+2]);
v_float32 kweight3 = vx_setall_f32(space_weight[k+3]);
for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes)
{
v_uint32 rval = vx_load_expand_q(sptr + j);
v_uint32 val = vx_load_expand_q(ksptr0 + j);
v_float32 w = kweight0 * v_lut(color_weight, v_reinterpret_as_s32(v_absdiff(val, rval)));
v_float32 v_wsum = vx_load_aligned(wsum + j) + w;
v_float32 v_sum = v_muladd(v_cvt_f32(v_reinterpret_as_s32(val)), w, vx_load_aligned(sum + j));
val = vx_load_expand_q(ksptr1 + j);
w = kweight1 * v_lut(color_weight, v_reinterpret_as_s32(v_absdiff(val, rval)));
v_wsum += w;
v_sum = v_muladd(v_cvt_f32(v_reinterpret_as_s32(val)), w, v_sum);
val = vx_load_expand_q(ksptr2 + j);
w = kweight2 * v_lut(color_weight, v_reinterpret_as_s32(v_absdiff(val, rval)));
v_wsum += w;
v_sum = v_muladd(v_cvt_f32(v_reinterpret_as_s32(val)), w, v_sum);
val = vx_load_expand_q(ksptr3 + j);
w = kweight3 * v_lut(color_weight, v_reinterpret_as_s32(v_absdiff(val, rval)));
v_wsum += w;
v_sum = v_muladd(v_cvt_f32(v_reinterpret_as_s32(val)), w, v_sum);
v_store_aligned(wsum + j, v_wsum);
v_store_aligned(sum + j, v_sum);
}
#endif
#if CV_SIMD128
v_float32x4 kweight4 = v_load(space_weight + k);
#endif
for (; j < size.width; j++)
{
#if CV_SIMD128
v_uint32x4 rval = v_setall_u32(sptr[j]);
v_uint32x4 val(ksptr0[j], ksptr1[j], ksptr2[j], ksptr3[j]);
v_float32x4 w = kweight4 * v_lut(color_weight, v_reinterpret_as_s32(v_absdiff(val, rval)));
wsum[j] += v_reduce_sum(w);
sum[j] += v_reduce_sum(v_cvt_f32(v_reinterpret_as_s32(val)) * w);
#else
int rval = sptr[j];
int val = ksptr0[j];
float w = space_weight[k] * color_weight[std::abs(val - rval)];
wsum[j] += w;
sum[j] += val * w;
val = ksptr1[j];
w = space_weight[k+1] * color_weight[std::abs(val - rval)];
wsum[j] += w;
sum[j] += val * w;
val = ksptr2[j];
w = space_weight[k+2] * color_weight[std::abs(val - rval)];
wsum[j] += w;
sum[j] += val * w;
val = ksptr3[j];
w = space_weight[k+3] * color_weight[std::abs(val - rval)];
wsum[j] += w;
sum[j] += val * w;
#endif
}
}
for(; k < maxk; k++)
{
const uchar* ksptr = sptr + space_ofs[k];
j = 0;
#if CV_SIMD
v_float32 kweight = vx_setall_f32(space_weight[k]);
for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes)
{
v_uint32 val = vx_load_expand_q(ksptr + j);
v_float32 w = kweight * v_lut(color_weight, v_reinterpret_as_s32(v_absdiff(val, vx_load_expand_q(sptr + j))));
v_store_aligned(wsum + j, vx_load_aligned(wsum + j) + w);
v_store_aligned(sum + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val)), w, vx_load_aligned(sum + j)));
}
#endif
for (; j < size.width; j++)
{
int val = ksptr[j];
float w = space_weight[k] * color_weight[std::abs(val - sptr[j])];
wsum[j] += w;
sum[j] += val * w;
}
}
j = 0;
#if CV_SIMD
for (; j <= size.width - 2*v_float32::nlanes; j += 2*v_float32::nlanes)
v_pack_u_store(dptr + j, v_pack(v_round(vx_load_aligned(sum + j ) / vx_load_aligned(wsum + j )),
v_round(vx_load_aligned(sum + j + v_float32::nlanes) / vx_load_aligned(wsum + j + v_float32::nlanes))));
#endif
for (; j < size.width; j++)
{
// overflow is not possible here: the result is a weighted mean of 8-bit values,
// so it stays within [0, 255] and cv::saturate_cast is not needed
CV_DbgAssert(fabs(wsum[j]) > 0);
dptr[j] = (uchar)cvRound(sum[j]/wsum[j]);
}
}
else
{
CV_Assert( cn == 3 );
AutoBuffer<float> buf(alignSize(size.width, CV_SIMD_WIDTH)*3 + size.width + CV_SIMD_WIDTH - 1);
memset(buf.data(), 0, buf.size() * sizeof(float));
float *sum_b = alignPtr(buf.data(), CV_SIMD_WIDTH);
float *sum_g = sum_b + alignSize(size.width, CV_SIMD_WIDTH);
float *sum_r = sum_g + alignSize(size.width, CV_SIMD_WIDTH);
float *wsum = sum_r + alignSize(size.width, CV_SIMD_WIDTH);
k = 0;
for(; k <= maxk-4; k+=4)
{
const uchar* ksptr0 = sptr + space_ofs[k];
const uchar* ksptr1 = sptr + space_ofs[k+1];
const uchar* ksptr2 = sptr + space_ofs[k+2];
const uchar* ksptr3 = sptr + space_ofs[k+3];
const uchar* rsptr = sptr;
j = 0;
#if CV_SIMD
v_float32 kweight0 = vx_setall_f32(space_weight[k]);
v_float32 kweight1 = vx_setall_f32(space_weight[k+1]);
v_float32 kweight2 = vx_setall_f32(space_weight[k+2]);
v_float32 kweight3 = vx_setall_f32(space_weight[k+3]);
for (; j <= size.width - v_uint8::nlanes; j += v_uint8::nlanes, rsptr += 3*v_uint8::nlanes,
ksptr0 += 3*v_uint8::nlanes, ksptr1 += 3*v_uint8::nlanes, ksptr2 += 3*v_uint8::nlanes, ksptr3 += 3*v_uint8::nlanes)
{
v_uint8 kb, kg, kr, rb, rg, rr;
v_load_deinterleave(rsptr, rb, rg, rr);
v_load_deinterleave(ksptr0, kb, kg, kr);
v_uint16 val0, val1, val2, val3, val4;
v_expand(v_absdiff(kb, rb), val0, val1);
v_expand(v_absdiff(kg, rg), val2, val3);
val0 += val2; val1 += val3;
v_expand(v_absdiff(kr, rr), val2, val3);
val0 += val2; val1 += val3;
v_uint32 vall, valh;
v_expand(val0, vall, valh);
v_float32 w0 = kweight0 * v_lut(color_weight, v_reinterpret_as_s32(vall));
v_float32 w1 = kweight0 * v_lut(color_weight, v_reinterpret_as_s32(valh));
v_store_aligned(wsum + j, w0 + vx_load_aligned(wsum + j));
v_store_aligned(wsum + j + v_float32::nlanes, w1 + vx_load_aligned(wsum + j + v_float32::nlanes));
v_expand(kb, val0, val2);
v_expand(val0, vall, valh);
v_store_aligned(sum_b + j , v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_b + j)));
v_store_aligned(sum_b + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_b + j + v_float32::nlanes)));
v_expand(kg, val0, val3);
v_expand(val0, vall, valh);
v_store_aligned(sum_g + j , v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_g + j)));
v_store_aligned(sum_g + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_g + j + v_float32::nlanes)));
v_expand(kr, val0, val4);
v_expand(val0, vall, valh);
v_store_aligned(sum_r + j , v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_r + j)));
v_store_aligned(sum_r + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_r + j + v_float32::nlanes)));
v_expand(val1, vall, valh);
w0 = kweight0 * v_lut(color_weight, v_reinterpret_as_s32(vall));
w1 = kweight0 * v_lut(color_weight, v_reinterpret_as_s32(valh));
v_store_aligned(wsum + j + 2 * v_float32::nlanes, w0 + vx_load_aligned(wsum + j + 2 * v_float32::nlanes));
v_store_aligned(wsum + j + 3 * v_float32::nlanes, w1 + vx_load_aligned(wsum + j + 3 * v_float32::nlanes));
v_expand(val2, vall, valh);
v_store_aligned(sum_b + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_b + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_b + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_b + j + 3 * v_float32::nlanes)));
v_expand(val3, vall, valh);
v_store_aligned(sum_g + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_g + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_g + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_g + j + 3 * v_float32::nlanes)));
v_expand(val4, vall, valh);
v_store_aligned(sum_r + j + 2*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_r + j + 2*v_float32::nlanes)));
v_store_aligned(sum_r + j + 3*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_r + j + 3*v_float32::nlanes)));
v_load_deinterleave(ksptr1, kb, kg, kr);
v_expand(v_absdiff(kb, rb), val0, val1);
v_expand(v_absdiff(kg, rg), val2, val3);
val0 += val2; val1 += val3;
v_expand(v_absdiff(kr, rr), val2, val3);
val0 += val2; val1 += val3;
v_expand(val0, vall, valh);
w0 = kweight1 * v_lut(color_weight, v_reinterpret_as_s32(vall));
w1 = kweight1 * v_lut(color_weight, v_reinterpret_as_s32(valh));
v_store_aligned(wsum + j, w0 + vx_load_aligned(wsum + j));
v_store_aligned(wsum + j + v_float32::nlanes, w1 + vx_load_aligned(wsum + j + v_float32::nlanes));
v_expand(kb, val0, val2);
v_expand(val0, vall, valh);
v_store_aligned(sum_b + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_b + j)));
v_store_aligned(sum_b + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_b + j + v_float32::nlanes)));
v_expand(kg, val0, val3);
v_expand(val0, vall, valh);
v_store_aligned(sum_g + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_g + j)));
v_store_aligned(sum_g + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_g + j + v_float32::nlanes)));
v_expand(kr, val0, val4);
v_expand(val0, vall, valh);
v_store_aligned(sum_r + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_r + j)));
v_store_aligned(sum_r + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_r + j + v_float32::nlanes)));
v_expand(val1, vall, valh);
w0 = kweight1 * v_lut(color_weight, v_reinterpret_as_s32(vall));
w1 = kweight1 * v_lut(color_weight, v_reinterpret_as_s32(valh));
v_store_aligned(wsum + j + 2 * v_float32::nlanes, w0 + vx_load_aligned(wsum + j + 2 * v_float32::nlanes));
v_store_aligned(wsum + j + 3 * v_float32::nlanes, w1 + vx_load_aligned(wsum + j + 3 * v_float32::nlanes));
v_expand(val2, vall, valh);
v_store_aligned(sum_b + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_b + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_b + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_b + j + 3 * v_float32::nlanes)));
v_expand(val3, vall, valh);
v_store_aligned(sum_g + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_g + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_g + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_g + j + 3 * v_float32::nlanes)));
v_expand(val4, vall, valh);
v_store_aligned(sum_r + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_r + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_r + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_r + j + 3 * v_float32::nlanes)));
v_load_deinterleave(ksptr2, kb, kg, kr);
v_expand(v_absdiff(kb, rb), val0, val1);
v_expand(v_absdiff(kg, rg), val2, val3);
val0 += val2; val1 += val3;
v_expand(v_absdiff(kr, rr), val2, val3);
val0 += val2; val1 += val3;
v_expand(val0, vall, valh);
w0 = kweight2 * v_lut(color_weight, v_reinterpret_as_s32(vall));
w1 = kweight2 * v_lut(color_weight, v_reinterpret_as_s32(valh));
v_store_aligned(wsum + j, w0 + vx_load_aligned(wsum + j));
v_store_aligned(wsum + j + v_float32::nlanes, w1 + vx_load_aligned(wsum + j + v_float32::nlanes));
v_expand(kb, val0, val2);
v_expand(val0, vall, valh);
v_store_aligned(sum_b + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_b + j)));
v_store_aligned(sum_b + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_b + j + v_float32::nlanes)));
v_expand(kg, val0, val3);
v_expand(val0, vall, valh);
v_store_aligned(sum_g + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_g + j)));
v_store_aligned(sum_g + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_g + j + v_float32::nlanes)));
v_expand(kr, val0, val4);
v_expand(val0, vall, valh);
v_store_aligned(sum_r + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_r + j)));
v_store_aligned(sum_r + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_r + j + v_float32::nlanes)));
v_expand(val1, vall, valh);
w0 = kweight2 * v_lut(color_weight, v_reinterpret_as_s32(vall));
w1 = kweight2 * v_lut(color_weight, v_reinterpret_as_s32(valh));
v_store_aligned(wsum + j + 2 * v_float32::nlanes, w0 + vx_load_aligned(wsum + j + 2 * v_float32::nlanes));
v_store_aligned(wsum + j + 3 * v_float32::nlanes, w1 + vx_load_aligned(wsum + j + 3 * v_float32::nlanes));
v_expand(val2, vall, valh);
v_store_aligned(sum_b + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_b + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_b + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_b + j + 3 * v_float32::nlanes)));
v_expand(val3, vall, valh);
v_store_aligned(sum_g + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_g + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_g + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_g + j + 3 * v_float32::nlanes)));
v_expand(val4, vall, valh);
v_store_aligned(sum_r + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_r + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_r + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_r + j + 3 * v_float32::nlanes)));
v_load_deinterleave(ksptr3, kb, kg, kr);
v_expand(v_absdiff(kb, rb), val0, val1);
v_expand(v_absdiff(kg, rg), val2, val3);
val0 += val2; val1 += val3;
v_expand(v_absdiff(kr, rr), val2, val3);
val0 += val2; val1 += val3;
v_expand(val0, vall, valh);
w0 = kweight3 * v_lut(color_weight, v_reinterpret_as_s32(vall));
w1 = kweight3 * v_lut(color_weight, v_reinterpret_as_s32(valh));
v_store_aligned(wsum + j, w0 + vx_load_aligned(wsum + j));
v_store_aligned(wsum + j + v_float32::nlanes, w1 + vx_load_aligned(wsum + j + v_float32::nlanes));
v_expand(kb, val0, val2);
v_expand(val0, vall, valh);
v_store_aligned(sum_b + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_b + j)));
v_store_aligned(sum_b + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_b + j + v_float32::nlanes)));
v_expand(kg, val0, val3);
v_expand(val0, vall, valh);
v_store_aligned(sum_g + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_g + j)));
v_store_aligned(sum_g + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_g + j + v_float32::nlanes)));
v_expand(kr, val0, val4);
v_expand(val0, vall, valh);
v_store_aligned(sum_r + j, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_r + j)));
v_store_aligned(sum_r + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_r + j + v_float32::nlanes)));
v_expand(val1, vall, valh);
w0 = kweight3 * v_lut(color_weight, v_reinterpret_as_s32(vall));
w1 = kweight3 * v_lut(color_weight, v_reinterpret_as_s32(valh));
v_store_aligned(wsum + j + 2 * v_float32::nlanes, w0 + vx_load_aligned(wsum + j + 2 * v_float32::nlanes));
v_store_aligned(wsum + j + 3 * v_float32::nlanes, w1 + vx_load_aligned(wsum + j + 3 * v_float32::nlanes));
v_expand(val2, vall, valh);
v_store_aligned(sum_b + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_b + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_b + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_b + j + 3 * v_float32::nlanes)));
v_expand(val3, vall, valh);
v_store_aligned(sum_g + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_g + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_g + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_g + j + 3 * v_float32::nlanes)));
v_expand(val4, vall, valh);
v_store_aligned(sum_r + j + 2 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(vall)), w0, vx_load_aligned(sum_r + j + 2 * v_float32::nlanes)));
v_store_aligned(sum_r + j + 3 * v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(valh)), w1, vx_load_aligned(sum_r + j + 3 * v_float32::nlanes)));
}
#endif
#if CV_SIMD128
v_float32x4 kweight4 = v_load(space_weight + k);
#endif
for(; j < size.width; j++, rsptr += 3, ksptr0 += 3, ksptr1 += 3, ksptr2 += 3, ksptr3 += 3)
{
#if CV_SIMD128
v_uint32x4 rb = v_setall_u32(rsptr[0]);
v_uint32x4 rg = v_setall_u32(rsptr[1]);
v_uint32x4 rr = v_setall_u32(rsptr[2]);
v_uint32x4 b(ksptr0[0], ksptr1[0], ksptr2[0], ksptr3[0]);
v_uint32x4 g(ksptr0[1], ksptr1[1], ksptr2[1], ksptr3[1]);
v_uint32x4 r(ksptr0[2], ksptr1[2], ksptr2[2], ksptr3[2]);
v_float32x4 w = kweight4 * v_lut(color_weight, v_reinterpret_as_s32(v_absdiff(b, rb) + v_absdiff(g, rg) + v_absdiff(r, rr)));
wsum[j] += v_reduce_sum(w);
sum_b[j] += v_reduce_sum(v_cvt_f32(v_reinterpret_as_s32(b)) * w);
sum_g[j] += v_reduce_sum(v_cvt_f32(v_reinterpret_as_s32(g)) * w);
sum_r[j] += v_reduce_sum(v_cvt_f32(v_reinterpret_as_s32(r)) * w);
#else
int rb = rsptr[0], rg = rsptr[1], rr = rsptr[2];
int b = ksptr0[0], g = ksptr0[1], r = ksptr0[2];
float w = space_weight[k]*color_weight[std::abs(b - rb) + std::abs(g - rg) + std::abs(r - rr)];
wsum[j] += w;
sum_b[j] += b*w; sum_g[j] += g*w; sum_r[j] += r*w;
b = ksptr1[0]; g = ksptr1[1]; r = ksptr1[2];
w = space_weight[k+1] * color_weight[std::abs(b - rb) + std::abs(g - rg) + std::abs(r - rr)];
wsum[j] += w;
sum_b[j] += b*w; sum_g[j] += g*w; sum_r[j] += r*w;
b = ksptr2[0]; g = ksptr2[1]; r = ksptr2[2];
w = space_weight[k+2] * color_weight[std::abs(b - rb) + std::abs(g - rg) + std::abs(r - rr)];
wsum[j] += w;
sum_b[j] += b*w; sum_g[j] += g*w; sum_r[j] += r*w;
b = ksptr3[0]; g = ksptr3[1]; r = ksptr3[2];
w = space_weight[k+3] * color_weight[std::abs(b - rb) + std::abs(g - rg) + std::abs(r - rr)];
wsum[j] += w;
sum_b[j] += b*w; sum_g[j] += g*w; sum_r[j] += r*w;
#endif
}
}
for(; k < maxk; k++)
{
const uchar* ksptr = sptr + space_ofs[k];
const uchar* rsptr = sptr;
j = 0;
#if CV_SIMD
v_float32 kweight = vx_setall_f32(space_weight[k]);
for (; j <= size.width - v_uint8::nlanes; j += v_uint8::nlanes, ksptr += 3*v_uint8::nlanes, rsptr += 3*v_uint8::nlanes)
{
v_uint8 kb, kg, kr, rb, rg, rr;
v_load_deinterleave(ksptr, kb, kg, kr);
v_load_deinterleave(rsptr, rb, rg, rr);
v_uint16 b_l, b_h, g_l, g_h, r_l, r_h;
v_expand(v_absdiff(kb, rb), b_l, b_h);
v_expand(v_absdiff(kg, rg), g_l, g_h);
v_expand(v_absdiff(kr, rr), r_l, r_h);
v_uint32 val0, val1, val2, val3;
v_expand(b_l + g_l + r_l, val0, val1);
v_expand(b_h + g_h + r_h, val2, val3);
v_expand(kb, b_l, b_h);
v_expand(kg, g_l, g_h);
v_expand(kr, r_l, r_h);
v_float32 w0 = kweight * v_lut(color_weight, v_reinterpret_as_s32(val0));
v_float32 w1 = kweight * v_lut(color_weight, v_reinterpret_as_s32(val1));
v_float32 w2 = kweight * v_lut(color_weight, v_reinterpret_as_s32(val2));
v_float32 w3 = kweight * v_lut(color_weight, v_reinterpret_as_s32(val3));
v_store_aligned(wsum + j , w0 + vx_load_aligned(wsum + j));
v_store_aligned(wsum + j + v_float32::nlanes, w1 + vx_load_aligned(wsum + j + v_float32::nlanes));
v_store_aligned(wsum + j + 2*v_float32::nlanes, w2 + vx_load_aligned(wsum + j + 2*v_float32::nlanes));
v_store_aligned(wsum + j + 3*v_float32::nlanes, w3 + vx_load_aligned(wsum + j + 3*v_float32::nlanes));
v_expand(b_l, val0, val1);
v_expand(b_h, val2, val3);
v_store_aligned(sum_b + j , v_muladd(v_cvt_f32(v_reinterpret_as_s32(val0)), w0, vx_load_aligned(sum_b + j)));
v_store_aligned(sum_b + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val1)), w1, vx_load_aligned(sum_b + j + v_float32::nlanes)));
v_store_aligned(sum_b + j + 2*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val2)), w2, vx_load_aligned(sum_b + j + 2*v_float32::nlanes)));
v_store_aligned(sum_b + j + 3*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val3)), w3, vx_load_aligned(sum_b + j + 3*v_float32::nlanes)));
v_expand(g_l, val0, val1);
v_expand(g_h, val2, val3);
v_store_aligned(sum_g + j , v_muladd(v_cvt_f32(v_reinterpret_as_s32(val0)), w0, vx_load_aligned(sum_g + j)));
v_store_aligned(sum_g + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val1)), w1, vx_load_aligned(sum_g + j + v_float32::nlanes)));
v_store_aligned(sum_g + j + 2*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val2)), w2, vx_load_aligned(sum_g + j + 2*v_float32::nlanes)));
v_store_aligned(sum_g + j + 3*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val3)), w3, vx_load_aligned(sum_g + j + 3*v_float32::nlanes)));
v_expand(r_l, val0, val1);
v_expand(r_h, val2, val3);
v_store_aligned(sum_r + j , v_muladd(v_cvt_f32(v_reinterpret_as_s32(val0)), w0, vx_load_aligned(sum_r + j)));
v_store_aligned(sum_r + j + v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val1)), w1, vx_load_aligned(sum_r + j + v_float32::nlanes)));
v_store_aligned(sum_r + j + 2*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val2)), w2, vx_load_aligned(sum_r + j + 2*v_float32::nlanes)));
v_store_aligned(sum_r + j + 3*v_float32::nlanes, v_muladd(v_cvt_f32(v_reinterpret_as_s32(val3)), w3, vx_load_aligned(sum_r + j + 3*v_float32::nlanes)));
}
#endif
for(; j < size.width; j++, ksptr += 3, rsptr += 3)
{
int b = ksptr[0], g = ksptr[1], r = ksptr[2];
float w = space_weight[k]*color_weight[std::abs(b - rsptr[0]) + std::abs(g - rsptr[1]) + std::abs(r - rsptr[2])];
wsum[j] += w;
sum_b[j] += b*w; sum_g[j] += g*w; sum_r[j] += r*w;
}
}
j = 0;
#if CV_SIMD
v_float32 v_one = vx_setall_f32(1.f);
for(; j <= size.width - v_uint8::nlanes; j += v_uint8::nlanes, dptr += 3*v_uint8::nlanes)
{
v_float32 w0 = v_one / vx_load_aligned(wsum + j);
v_float32 w1 = v_one / vx_load_aligned(wsum + j + v_float32::nlanes);
v_float32 w2 = v_one / vx_load_aligned(wsum + j + 2*v_float32::nlanes);
v_float32 w3 = v_one / vx_load_aligned(wsum + j + 3*v_float32::nlanes);
v_store_interleave(dptr, v_pack_u(v_pack(v_round(w0 * vx_load_aligned(sum_b + j)),
v_round(w1 * vx_load_aligned(sum_b + j + v_float32::nlanes))),
v_pack(v_round(w2 * vx_load_aligned(sum_b + j + 2*v_float32::nlanes)),
v_round(w3 * vx_load_aligned(sum_b + j + 3*v_float32::nlanes)))),
v_pack_u(v_pack(v_round(w0 * vx_load_aligned(sum_g + j)),
v_round(w1 * vx_load_aligned(sum_g + j + v_float32::nlanes))),
v_pack(v_round(w2 * vx_load_aligned(sum_g + j + 2*v_float32::nlanes)),
v_round(w3 * vx_load_aligned(sum_g + j + 3*v_float32::nlanes)))),
v_pack_u(v_pack(v_round(w0 * vx_load_aligned(sum_r + j)),
v_round(w1 * vx_load_aligned(sum_r + j + v_float32::nlanes))),
v_pack(v_round(w2 * vx_load_aligned(sum_r + j + 2*v_float32::nlanes)),
v_round(w3 * vx_load_aligned(sum_r + j + 3*v_float32::nlanes)))));
}
#endif
for(; j < size.width; j++)
{
CV_DbgAssert(fabs(wsum[j]) > 0);
wsum[j] = 1.f/wsum[j];
*(dptr++) = (uchar)cvRound(sum_b[j]*wsum[j]);
*(dptr++) = (uchar)cvRound(sum_g[j]*wsum[j]);
*(dptr++) = (uchar)cvRound(sum_r[j]*wsum[j]);
}
}
}
#if CV_SIMD
vx_cleanup();
#endif
}
private:
const Mat *temp;
Mat *dest;
int radius, maxk, *space_ofs;
float *space_weight, *color_weight;
};
#ifdef HAVE_OPENCL
static bool ocl_bilateralFilter_8u(InputArray _src, OutputArray _dst, int d,
double sigma_color, double sigma_space,
int borderType)
{
#ifdef __ANDROID__
if (ocl::Device::getDefault().isNVidia())
return false;
#endif
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
int i, j, maxk, radius;
if (depth != CV_8U || cn > 4)
return false;
if (sigma_color <= 0)
sigma_color = 1;
if (sigma_space <= 0)
sigma_space = 1;
double gauss_color_coeff = -0.5 / (sigma_color * sigma_color);
double gauss_space_coeff = -0.5 / (sigma_space * sigma_space);
if ( d <= 0 )
radius = cvRound(sigma_space * 1.5);
else
radius = d / 2;
radius = MAX(radius, 1);
d = radius * 2 + 1;
UMat src = _src.getUMat(), dst = _dst.getUMat(), temp;
if (src.u == dst.u)
return false;
copyMakeBorder(src, temp, radius, radius, radius, radius, borderType);
std::vector<float> _space_weight(d * d);
std::vector<int> _space_ofs(d * d);
float * const space_weight = &_space_weight[0];
int * const space_ofs = &_space_ofs[0];
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
double r = std::sqrt((double)i * i + (double)j * j);
if ( r > radius )
continue;
space_weight[maxk] = (float)std::exp(r * r * gauss_space_coeff);
space_ofs[maxk++] = (int)(i * temp.step + j * cn);
}
char cvt[3][40];
String cnstr = cn > 1 ? format("%d", cn) : "";
String kernelName("bilateral");
size_t sizeDiv = 1;
if ((ocl::Device::getDefault().isIntel()) &&
(ocl::Device::getDefault().type() == ocl::Device::TYPE_GPU))
{
// Intel GPU
if (dst.cols % 4 == 0 && cn == 1) // single-channel images whose width is a multiple of 4
{
kernelName = "bilateral_float4";
sizeDiv = 4;
}
}
ocl::Kernel k(kernelName.c_str(), ocl::imgproc::bilateral_oclsrc,
format("-D radius=%d -D maxk=%d -D cn=%d -D int_t=%s -D uint_t=uint%s -D convert_int_t=%s"
" -D uchar_t=%s -D float_t=%s -D convert_float_t=%s -D convert_uchar_t=%s -D gauss_color_coeff=(float)%f",
radius, maxk, cn, ocl::typeToStr(CV_32SC(cn)), cnstr.c_str(),
ocl::convertTypeStr(CV_8U, CV_32S, cn, cvt[0]),
ocl::typeToStr(type), ocl::typeToStr(CV_32FC(cn)),
ocl::convertTypeStr(CV_32S, CV_32F, cn, cvt[1]),
ocl::convertTypeStr(CV_32F, CV_8U, cn, cvt[2]), gauss_color_coeff));
if (k.empty())
return false;
Mat mspace_weight(1, d * d, CV_32FC1, space_weight);
Mat mspace_ofs(1, d * d, CV_32SC1, space_ofs);
UMat ucolor_weight, uspace_weight, uspace_ofs;
mspace_weight.copyTo(uspace_weight);
mspace_ofs.copyTo(uspace_ofs);
k.args(ocl::KernelArg::ReadOnlyNoSize(temp), ocl::KernelArg::WriteOnly(dst),
ocl::KernelArg::PtrReadOnly(uspace_weight),
ocl::KernelArg::PtrReadOnly(uspace_ofs));
size_t globalsize[2] = { (size_t)dst.cols / sizeDiv, (size_t)dst.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
static void
bilateralFilter_8u( const Mat& src, Mat& dst, int d,
double sigma_color, double sigma_space,
int borderType )
{
int cn = src.channels();
int i, j, maxk, radius;
Size size = src.size();
CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && src.data != dst.data );
if( sigma_color <= 0 )
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
if( d <= 0 )
radius = cvRound(sigma_space*1.5);
else
radius = d/2;
radius = MAX(radius, 1);
d = radius*2 + 1;
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
std::vector<float> _color_weight(cn*256);
std::vector<float> _space_weight(d*d);
std::vector<int> _space_ofs(d*d);
float* color_weight = &_color_weight[0];
float* space_weight = &_space_weight[0];
int* space_ofs = &_space_ofs[0];
// initialize color-related bilateral filter coefficients
for( i = 0; i < 256*cn; i++ )
color_weight[i] = (float)std::exp(i*i*gauss_color_coeff);
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
{
j = -radius;
for( ; j <= radius; j++ )
{
double r = std::sqrt((double)i*i + (double)j*j);
if( r > radius )
continue;
space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
space_ofs[maxk++] = (int)(i*temp.step + j*cn);
}
}
BilateralFilter_8u_Invoker body(dst, temp, radius, maxk, space_ofs, space_weight, color_weight);
parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16));
}
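// The third argument to parallel_for_ above (and in bilateralFilter_32f below)
// is the nstripes hint: dst.total()/2^16 requests roughly one stripe per 64K
// output pixels, so small images are not over-split across threads.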
class BilateralFilter_32f_Invoker :
public ParallelLoopBody
{
public:
BilateralFilter_32f_Invoker(int _cn, int _radius, int _maxk, int *_space_ofs,
const Mat& _temp, Mat& _dest, float _scale_index, float *_space_weight, float *_expLUT) :
cn(_cn), radius(_radius), maxk(_maxk), space_ofs(_space_ofs),
temp(&_temp), dest(&_dest), scale_index(_scale_index), space_weight(_space_weight), expLUT(_expLUT)
{
}
virtual void operator() (const Range& range) const CV_OVERRIDE
{
int i, j, k;
Size size = dest->size();
for( i = range.start; i < range.end; i++ )
{
const float* sptr = temp->ptr<float>(i+radius) + radius*cn;
float* dptr = dest->ptr<float>(i);
if( cn == 1 )
{
AutoBuffer<float> buf(alignSize(size.width, CV_SIMD_WIDTH) + size.width + CV_SIMD_WIDTH - 1);
memset(buf.data(), 0, buf.size() * sizeof(float));
float *sum = alignPtr(buf.data(), CV_SIMD_WIDTH);
float *wsum = sum + alignSize(size.width, CV_SIMD_WIDTH);
#if CV_SIMD
v_float32 v_one = vx_setall_f32(1.f);
v_float32 sindex = vx_setall_f32(scale_index);
#endif
k = 0;
for(; k <= maxk - 4; k+=4)
{
const float* ksptr0 = sptr + space_ofs[k];
const float* ksptr1 = sptr + space_ofs[k + 1];
const float* ksptr2 = sptr + space_ofs[k + 2];
const float* ksptr3 = sptr + space_ofs[k + 3];
j = 0;
#if CV_SIMD
v_float32 kweight0 = vx_setall_f32(space_weight[k]);
v_float32 kweight1 = vx_setall_f32(space_weight[k+1]);
v_float32 kweight2 = vx_setall_f32(space_weight[k+2]);
v_float32 kweight3 = vx_setall_f32(space_weight[k+3]);
for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes)
{
v_float32 rval = vx_load(sptr + j);
v_float32 val = vx_load(ksptr0 + j);
v_float32 knan = v_not_nan(val);
v_float32 alpha = (v_absdiff(val, rval) * sindex) & v_not_nan(rval) & knan;
v_int32 idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
v_float32 w = (kweight0 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one-alpha))) & knan;
v_float32 v_wsum = vx_load_aligned(wsum + j) + w;
v_float32 v_sum = v_muladd(val & knan, w, vx_load_aligned(sum + j));
val = vx_load(ksptr1 + j);
knan = v_not_nan(val);
alpha = (v_absdiff(val, rval) * sindex) & v_not_nan(rval) & knan;
idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
w = (kweight1 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one - alpha))) & knan;
v_wsum += w;
v_sum = v_muladd(val & knan, w, v_sum);
val = vx_load(ksptr2 + j);
knan = v_not_nan(val);
alpha = (v_absdiff(val, rval) * sindex) & v_not_nan(rval) & knan;
idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
w = (kweight2 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one - alpha))) & knan;
v_wsum += w;
v_sum = v_muladd(val & knan, w, v_sum);
val = vx_load(ksptr3 + j);
knan = v_not_nan(val);
alpha = (v_absdiff(val, rval) * sindex) & v_not_nan(rval) & knan;
idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
w = (kweight3 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one - alpha))) & knan;
v_wsum += w;
v_sum = v_muladd(val & knan, w, v_sum);
v_store_aligned(wsum + j, v_wsum);
v_store_aligned(sum + j, v_sum);
}
#endif
#if CV_SIMD128
v_float32x4 v_one4 = v_setall_f32(1.f);
v_float32x4 sindex4 = v_setall_f32(scale_index);
v_float32x4 kweight4 = v_load(space_weight + k);
#endif
for (; j < size.width; j++)
{
#if CV_SIMD128
v_float32x4 rval = v_setall_f32(sptr[j]);
v_float32x4 val(ksptr0[j], ksptr1[j], ksptr2[j], ksptr3[j]);
v_float32x4 knan = v_not_nan(val);
v_float32x4 alpha = (v_absdiff(val, rval) * sindex4) & v_not_nan(rval) & knan;
v_int32x4 idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
v_float32x4 w = (kweight4 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one4 - alpha))) & knan;
wsum[j] += v_reduce_sum(w);
sum[j] += v_reduce_sum((val & knan) * w);
#else
float rval = sptr[j];
float val = ksptr0[j];
float alpha = std::abs(val - rval) * scale_index;
int idx = cvFloor(alpha);
alpha -= idx;
if (!cvIsNaN(val))
{
float w = space_weight[k] * (cvIsNaN(rval) ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum[j] += val * w;
}
val = ksptr1[j];
alpha = std::abs(val - rval) * scale_index;
idx = cvFloor(alpha);
alpha -= idx;
if (!cvIsNaN(val))
{
float w = space_weight[k+1] * (cvIsNaN(rval) ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum[j] += val * w;
}
val = ksptr2[j];
alpha = std::abs(val - rval) * scale_index;
idx = cvFloor(alpha);
alpha -= idx;
if (!cvIsNaN(val))
{
float w = space_weight[k+2] * (cvIsNaN(rval) ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum[j] += val * w;
}
val = ksptr3[j];
alpha = std::abs(val - rval) * scale_index;
idx = cvFloor(alpha);
alpha -= idx;
if (!cvIsNaN(val))
{
float w = space_weight[k+3] * (cvIsNaN(rval) ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum[j] += val * w;
}
#endif
}
}
for(; k < maxk; k++)
{
const float* ksptr = sptr + space_ofs[k];
j = 0;
#if CV_SIMD
v_float32 kweight = vx_setall_f32(space_weight[k]);
for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes)
{
v_float32 val = vx_load(ksptr + j);
v_float32 rval = vx_load(sptr + j);
v_float32 knan = v_not_nan(val);
v_float32 alpha = (v_absdiff(val, rval) * sindex) & v_not_nan(rval) & knan;
v_int32 idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
v_float32 w = (kweight * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one-alpha))) & knan;
v_store_aligned(wsum + j, vx_load_aligned(wsum + j) + w);
v_store_aligned(sum + j, v_muladd(val & knan, w, vx_load_aligned(sum + j)));
}
#endif
for (; j < size.width; j++)
{
float val = ksptr[j];
float rval = sptr[j];
float alpha = std::abs(val - rval) * scale_index;
int idx = cvFloor(alpha);
alpha -= idx;
if (!cvIsNaN(val))
{
float w = space_weight[k] * (cvIsNaN(rval) ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum[j] += val * w;
}
}
}
j = 0;
#if CV_SIMD
for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes)
{
v_float32 v_val = vx_load(sptr + j);
v_store(dptr + j, (vx_load_aligned(sum + j) + (v_val & v_not_nan(v_val))) / (vx_load_aligned(wsum + j) + (v_one & v_not_nan(v_val))));
}
#endif
for (; j < size.width; j++)
{
CV_DbgAssert(fabs(wsum[j]) >= 0);
dptr[j] = cvIsNaN(sptr[j]) ? sum[j] / wsum[j] : (sum[j] + sptr[j]) / (wsum[j] + 1.f);
}
}
else
{
CV_Assert( cn == 3 );
AutoBuffer<float> buf(alignSize(size.width, CV_SIMD_WIDTH)*3 + size.width + CV_SIMD_WIDTH - 1);
memset(buf.data(), 0, buf.size() * sizeof(float));
float *sum_b = alignPtr(buf.data(), CV_SIMD_WIDTH);
float *sum_g = sum_b + alignSize(size.width, CV_SIMD_WIDTH);
float *sum_r = sum_g + alignSize(size.width, CV_SIMD_WIDTH);
float *wsum = sum_r + alignSize(size.width, CV_SIMD_WIDTH);
#if CV_SIMD
v_float32 v_one = vx_setall_f32(1.f);
v_float32 sindex = vx_setall_f32(scale_index);
#endif
k = 0;
for (; k <= maxk-4; k+=4)
{
const float* ksptr0 = sptr + space_ofs[k];
const float* ksptr1 = sptr + space_ofs[k+1];
const float* ksptr2 = sptr + space_ofs[k+2];
const float* ksptr3 = sptr + space_ofs[k+3];
const float* rsptr = sptr;
j = 0;
#if CV_SIMD
v_float32 kweight0 = vx_setall_f32(space_weight[k]);
v_float32 kweight1 = vx_setall_f32(space_weight[k+1]);
v_float32 kweight2 = vx_setall_f32(space_weight[k+2]);
v_float32 kweight3 = vx_setall_f32(space_weight[k+3]);
for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes, rsptr += 3 * v_float32::nlanes,
ksptr0 += 3 * v_float32::nlanes, ksptr1 += 3 * v_float32::nlanes, ksptr2 += 3 * v_float32::nlanes, ksptr3 += 3 * v_float32::nlanes)
{
v_float32 kb, kg, kr, rb, rg, rr;
v_load_deinterleave(rsptr, rb, rg, rr);
v_load_deinterleave(ksptr0, kb, kg, kr);
v_float32 knan = v_not_nan(kb) & v_not_nan(kg) & v_not_nan(kr);
v_float32 alpha = ((v_absdiff(kb, rb) + v_absdiff(kg, rg) + v_absdiff(kr, rr)) * sindex) & v_not_nan(rb) & v_not_nan(rg) & v_not_nan(rr) & knan;
v_int32 idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
v_float32 w = (kweight0 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one - alpha))) & knan;
v_float32 v_wsum = vx_load_aligned(wsum + j) + w;
v_float32 v_sum_b = v_muladd(kb & knan, w, vx_load_aligned(sum_b + j));
v_float32 v_sum_g = v_muladd(kg & knan, w, vx_load_aligned(sum_g + j));
v_float32 v_sum_r = v_muladd(kr & knan, w, vx_load_aligned(sum_r + j));
v_load_deinterleave(ksptr1, kb, kg, kr);
knan = v_not_nan(kb) & v_not_nan(kg) & v_not_nan(kr);
alpha = ((v_absdiff(kb, rb) + v_absdiff(kg, rg) + v_absdiff(kr, rr)) * sindex) & v_not_nan(rb) & v_not_nan(rg) & v_not_nan(rr) & knan;
idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
w = (kweight1 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one - alpha))) & knan;
v_wsum += w;
v_sum_b = v_muladd(kb & knan, w, v_sum_b);
v_sum_g = v_muladd(kg & knan, w, v_sum_g);
v_sum_r = v_muladd(kr & knan, w, v_sum_r);
v_load_deinterleave(ksptr2, kb, kg, kr);
knan = v_not_nan(kb) & v_not_nan(kg) & v_not_nan(kr);
alpha = ((v_absdiff(kb, rb) + v_absdiff(kg, rg) + v_absdiff(kr, rr)) * sindex) & v_not_nan(rb) & v_not_nan(rg) & v_not_nan(rr) & knan;
idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
w = (kweight2 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one - alpha))) & knan;
v_wsum += w;
v_sum_b = v_muladd(kb & knan, w, v_sum_b);
v_sum_g = v_muladd(kg & knan, w, v_sum_g);
v_sum_r = v_muladd(kr & knan, w, v_sum_r);
v_load_deinterleave(ksptr3, kb, kg, kr);
knan = v_not_nan(kb) & v_not_nan(kg) & v_not_nan(kr);
alpha = ((v_absdiff(kb, rb) + v_absdiff(kg, rg) + v_absdiff(kr, rr)) * sindex) & v_not_nan(rb) & v_not_nan(rg) & v_not_nan(rr) & knan;
idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
w = (kweight3 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one - alpha))) & knan;
v_wsum += w;
v_sum_b = v_muladd(kb & knan, w, v_sum_b);
v_sum_g = v_muladd(kg & knan, w, v_sum_g);
v_sum_r = v_muladd(kr & knan, w, v_sum_r);
v_store_aligned(wsum + j, v_wsum);
v_store_aligned(sum_b + j, v_sum_b);
v_store_aligned(sum_g + j, v_sum_g);
v_store_aligned(sum_r + j, v_sum_r);
}
#endif
#if CV_SIMD128
v_float32x4 v_one4 = v_setall_f32(1.f);
v_float32x4 sindex4 = v_setall_f32(scale_index);
v_float32x4 kweight4 = v_load(space_weight + k);
#endif
for (; j < size.width; j++, rsptr += 3, ksptr0 += 3, ksptr1 += 3, ksptr2 += 3, ksptr3 += 3)
{
#if CV_SIMD128
v_float32x4 rb = v_setall_f32(rsptr[0]);
v_float32x4 rg = v_setall_f32(rsptr[1]);
v_float32x4 rr = v_setall_f32(rsptr[2]);
v_float32x4 kb(ksptr0[0], ksptr1[0], ksptr2[0], ksptr3[0]);
v_float32x4 kg(ksptr0[1], ksptr1[1], ksptr2[1], ksptr3[1]);
v_float32x4 kr(ksptr0[2], ksptr1[2], ksptr2[2], ksptr3[2]);
v_float32x4 knan = v_not_nan(kb) & v_not_nan(kg) & v_not_nan(kr);
v_float32x4 alpha = ((v_absdiff(kb, rb) + v_absdiff(kg, rg) + v_absdiff(kr, rr)) * sindex4) & v_not_nan(rb) & v_not_nan(rg) & v_not_nan(rr) & knan;
v_int32x4 idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
v_float32x4 w = (kweight4 * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one4 - alpha))) & knan;
wsum[j] += v_reduce_sum(w);
sum_b[j] += v_reduce_sum((kb & knan) * w);
sum_g[j] += v_reduce_sum((kg & knan) * w);
sum_r[j] += v_reduce_sum((kr & knan) * w);
#else
float rb = rsptr[0], rg = rsptr[1], rr = rsptr[2];
bool r_NAN = cvIsNaN(rb) || cvIsNaN(rg) || cvIsNaN(rr);
float b = ksptr0[0], g = ksptr0[1], r = ksptr0[2];
bool v_NAN = cvIsNaN(b) || cvIsNaN(g) || cvIsNaN(r);
float alpha = (std::abs(b - rb) + std::abs(g - rg) + std::abs(r - rr)) * scale_index;
int idx = cvFloor(alpha);
alpha -= idx;
if (!v_NAN)
{
float w = space_weight[k] * (r_NAN ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum_b[j] += b*w;
sum_g[j] += g*w;
sum_r[j] += r*w;
}
b = ksptr1[0]; g = ksptr1[1]; r = ksptr1[2];
v_NAN = cvIsNaN(b) || cvIsNaN(g) || cvIsNaN(r);
alpha = (std::abs(b - rb) + std::abs(g - rg) + std::abs(r - rr)) * scale_index;
idx = cvFloor(alpha);
alpha -= idx;
if (!v_NAN)
{
float w = space_weight[k+1] * (r_NAN ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum_b[j] += b*w;
sum_g[j] += g*w;
sum_r[j] += r*w;
}
b = ksptr2[0]; g = ksptr2[1]; r = ksptr2[2];
v_NAN = cvIsNaN(b) || cvIsNaN(g) || cvIsNaN(r);
alpha = (std::abs(b - rb) + std::abs(g - rg) + std::abs(r - rr)) * scale_index;
idx = cvFloor(alpha);
alpha -= idx;
if (!v_NAN)
{
float w = space_weight[k+2] * (r_NAN ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum_b[j] += b*w;
sum_g[j] += g*w;
sum_r[j] += r*w;
}
b = ksptr3[0]; g = ksptr3[1]; r = ksptr3[2];
v_NAN = cvIsNaN(b) || cvIsNaN(g) || cvIsNaN(r);
alpha = (std::abs(b - rb) + std::abs(g - rg) + std::abs(r - rr)) * scale_index;
idx = cvFloor(alpha);
alpha -= idx;
if (!v_NAN)
{
float w = space_weight[k+3] * (r_NAN ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum_b[j] += b*w;
sum_g[j] += g*w;
sum_r[j] += r*w;
}
#endif
}
}
for (; k < maxk; k++)
{
const float* ksptr = sptr + space_ofs[k];
const float* rsptr = sptr;
j = 0;
#if CV_SIMD
v_float32 kweight = vx_setall_f32(space_weight[k]);
for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes, ksptr += 3*v_float32::nlanes, rsptr += 3*v_float32::nlanes)
{
v_float32 kb, kg, kr, rb, rg, rr;
v_load_deinterleave(ksptr, kb, kg, kr);
v_load_deinterleave(rsptr, rb, rg, rr);
v_float32 knan = v_not_nan(kb) & v_not_nan(kg) & v_not_nan(kr);
v_float32 alpha = ((v_absdiff(kb, rb) + v_absdiff(kg, rg) + v_absdiff(kr, rr)) * sindex) & v_not_nan(rb) & v_not_nan(rg) & v_not_nan(rr) & knan;
v_int32 idx = v_trunc(alpha);
alpha -= v_cvt_f32(idx);
v_float32 w = (kweight * v_muladd(v_lut(expLUT + 1, idx), alpha, v_lut(expLUT, idx) * (v_one - alpha))) & knan;
v_store_aligned(wsum + j, vx_load_aligned(wsum + j) + w);
v_store_aligned(sum_b + j, v_muladd(kb & knan, w, vx_load_aligned(sum_b + j)));
v_store_aligned(sum_g + j, v_muladd(kg & knan, w, vx_load_aligned(sum_g + j)));
v_store_aligned(sum_r + j, v_muladd(kr & knan, w, vx_load_aligned(sum_r + j)));
}
#endif
for (; j < size.width; j++, ksptr += 3, rsptr += 3)
{
float b = ksptr[0], g = ksptr[1], r = ksptr[2];
bool v_NAN = cvIsNaN(b) || cvIsNaN(g) || cvIsNaN(r);
float rb = rsptr[0], rg = rsptr[1], rr = rsptr[2];
bool r_NAN = cvIsNaN(rb) || cvIsNaN(rg) || cvIsNaN(rr);
float alpha = (std::abs(b - rb) + std::abs(g - rg) + std::abs(r - rr)) * scale_index;
int idx = cvFloor(alpha);
alpha -= idx;
if (!v_NAN)
{
float w = space_weight[k] * (r_NAN ? 1.f : (expLUT[idx] + alpha*(expLUT[idx + 1] - expLUT[idx])));
wsum[j] += w;
sum_b[j] += b*w;
sum_g[j] += g*w;
sum_r[j] += r*w;
}
}
}
j = 0;
#if CV_SIMD
for (; j <= size.width - v_float32::nlanes; j += v_float32::nlanes, sptr += 3*v_float32::nlanes, dptr += 3*v_float32::nlanes)
{
v_float32 b, g, r;
v_load_deinterleave(sptr, b, g, r);
v_float32 mask = v_not_nan(b) & v_not_nan(g) & v_not_nan(r);
v_float32 w = v_one / (vx_load_aligned(wsum + j) + (v_one & mask));
v_store_interleave(dptr, (vx_load_aligned(sum_b + j) + (b & mask)) * w, (vx_load_aligned(sum_g + j) + (g & mask)) * w, (vx_load_aligned(sum_r + j) + (r & mask)) * w);
}
#endif
for (; j < size.width; j++)
{
CV_DbgAssert(fabs(wsum[j]) >= 0);
float b = *(sptr++);
float g = *(sptr++);
float r = *(sptr++);
if (cvIsNaN(b) || cvIsNaN(g) || cvIsNaN(r))
{
wsum[j] = 1.f / wsum[j];
*(dptr++) = sum_b[j] * wsum[j];
*(dptr++) = sum_g[j] * wsum[j];
*(dptr++) = sum_r[j] * wsum[j];
}
else
{
wsum[j] = 1.f / (wsum[j] + 1.f);
*(dptr++) = (sum_b[j] + b) * wsum[j];
*(dptr++) = (sum_g[j] + g) * wsum[j];
*(dptr++) = (sum_r[j] + r) * wsum[j];
}
}
}
}
#if CV_SIMD
vx_cleanup();
#endif
}
private:
int cn, radius, maxk, *space_ofs;
const Mat* temp;
Mat *dest;
float scale_index, *space_weight, *expLUT;
};
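// Note on the 32f invoker above: unlike the 8u path, space_ofs here excludes the
// center offset (see the "i == 0 && j == 0" skip in bilateralFilter_32f below),
// and NaN-valued samples are masked out of the accumulators. The center sample
// is folded back in during the final division with an implicit weight of 1 when
// it is not NaN, which is why that division uses (wsum + 1).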
static void
bilateralFilter_32f( const Mat& src, Mat& dst, int d,
double sigma_color, double sigma_space,
int borderType )
{
int cn = src.channels();
int i, j, maxk, radius;
double minValSrc=-1, maxValSrc=1;
const int kExpNumBinsPerChannel = 1 << 12;
int kExpNumBins = 0;
float lastExpVal = 1.f;
float len, scale_index;
Size size = src.size();
CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) && src.data != dst.data );
if( sigma_color <= 0 )
sigma_color = 1;
if( sigma_space <= 0 )
sigma_space = 1;
double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
if( d <= 0 )
radius = cvRound(sigma_space*1.5);
else
radius = d/2;
radius = MAX(radius, 1);
d = radius*2 + 1;
// compute the min/max range for the input image (even if multichannel)
minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
if(std::abs(minValSrc - maxValSrc) < FLT_EPSILON)
{
src.copyTo(dst);
return;
}
// temporary copy of the image with borders for easy processing
Mat temp;
copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
// allocate lookup tables
std::vector<float> _space_weight(d*d);
std::vector<int> _space_ofs(d*d);
float* space_weight = &_space_weight[0];
int* space_ofs = &_space_ofs[0];
// assign a length which is slightly more than needed
len = (float)(maxValSrc - minValSrc) * cn;
kExpNumBins = kExpNumBinsPerChannel * cn;
std::vector<float> _expLUT(kExpNumBins+2);
float* expLUT = &_expLUT[0];
scale_index = kExpNumBins/len;
// initialize the exp LUT
for( i = 0; i < kExpNumBins+2; i++ )
{
if( lastExpVal > 0.f )
{
double val = i / scale_index;
expLUT[i] = (float)std::exp(val * val * gauss_color_coeff);
lastExpVal = expLUT[i];
}
else
expLUT[i] = 0.f;
}
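// The invokers turn a color distance diff back into a weight by linear
// interpolation in this table: alpha = diff * scale_index, idx = floor(alpha),
// weight ~ expLUT[idx] + (alpha - idx) * (expLUT[idx+1] - expLUT[idx]).
// The "+2" extra bins allocated above keep the expLUT[idx+1] read in bounds at
// the top of the range, and the lastExpVal check clamps the underflowed tail of
// the Gaussian to exact zeros.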
// initialize space-related bilateral filter coefficients
for( i = -radius, maxk = 0; i <= radius; i++ )
for( j = -radius; j <= radius; j++ )
{
double r = std::sqrt((double)i*i + (double)j*j);
if( r > radius || ( i == 0 && j == 0 ) )
continue;
space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn);
}
// process image rows in parallel
BilateralFilter_32f_Invoker body(cn, radius, maxk, space_ofs, temp, dst, scale_index, space_weight, expLUT);
parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16));
}
#ifdef HAVE_IPP
#define IPP_BILATERAL_PARALLEL 1
#ifdef HAVE_IPP_IW
class ipp_bilateralFilterParallel: public ParallelLoopBody
{
public:
ipp_bilateralFilterParallel(::ipp::IwiImage &_src, ::ipp::IwiImage &_dst, int _radius, Ipp32f _valSquareSigma, Ipp32f _posSquareSigma, ::ipp::IwiBorderType _borderType, bool *_ok):
src(_src), dst(_dst)
{
pOk = _ok;
radius = _radius;
valSquareSigma = _valSquareSigma;
posSquareSigma = _posSquareSigma;
borderType = _borderType;
*pOk = true;
}
~ipp_bilateralFilterParallel() {}
virtual void operator() (const Range& range) const CV_OVERRIDE
{
if(*pOk == false)
return;
try
{
::ipp::IwiTile tile = ::ipp::IwiRoi(0, range.start, dst.m_size.width, range.end - range.start);
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, src, dst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), borderType, tile);
}
catch(const ::ipp::IwException &)
{
*pOk = false;
return;
}
}
private:
::ipp::IwiImage &src;
::ipp::IwiImage &dst;
int radius;
Ipp32f valSquareSigma;
Ipp32f posSquareSigma;
::ipp::IwiBorderType borderType;
bool *pOk;
const ipp_bilateralFilterParallel& operator= (const ipp_bilateralFilterParallel&);
};
#endif
static bool ipp_bilateralFilter(Mat &src, Mat &dst, int d, double sigmaColor, double sigmaSpace, int borderType)
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
int radius = IPP_MAX(((d <= 0)?cvRound(sigmaSpace*1.5):d/2), 1);
Ipp32f valSquareSigma = (Ipp32f)((sigmaColor <= 0)?1:sigmaColor*sigmaColor);
Ipp32f posSquareSigma = (Ipp32f)((sigmaSpace <= 0)?1:sigmaSpace*sigmaSpace);
// Acquire data and begin processing
try
{
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiBorderSize borderSize(radius);
::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
if(!ippBorder)
return false;
const int threads = ippiSuggestThreadsNum(iwDst, 2);
if(IPP_BILATERAL_PARALLEL && threads > 1) {
bool ok = true;
Range range(0, (int)iwDst.m_size.height);
ipp_bilateralFilterParallel invoker(iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ippBorder, &ok);
if(!ok)
return false;
parallel_for_(range, invoker, threads*4);
if(!ok)
return false;
} else {
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBilateral, iwSrc, iwDst, radius, valSquareSigma, posSquareSigma, ::ipp::IwDefault(), ippBorder);
}
}
catch (const ::ipp::IwException &)
{
return false;
}
return true;
#else
CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(d); CV_UNUSED(sigmaColor); CV_UNUSED(sigmaSpace); CV_UNUSED(borderType);
return false;
#endif
}
#endif
}
void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d,
double sigmaColor, double sigmaSpace,
int borderType )
{
CV_INSTRUMENT_REGION();
_dst.create( _src.size(), _src.type() );
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
ocl_bilateralFilter_8u(_src, _dst, d, sigmaColor, sigmaSpace, borderType))
Mat src = _src.getMat(), dst = _dst.getMat();
CV_IPP_RUN_FAST(ipp_bilateralFilter(src, dst, d, sigmaColor, sigmaSpace, borderType));
if( src.depth() == CV_8U )
bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );
else if( src.depth() == CV_32F )
bilateralFilter_32f( src, dst, d, sigmaColor, sigmaSpace, borderType );
else
CV_Error( CV_StsUnsupportedFormat,
"Bilateral filtering is only implemented for 8u and 32f images" );
}
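/* A minimal usage sketch of the public API above (illustrative only; the input
   path and parameter values are placeholders, not taken from this file):

       #include <opencv2/imgproc.hpp>
       #include <opencv2/imgcodecs.hpp>

       cv::Mat src = cv::imread("input.png");   // CV_8UC3 (8u and 32f, 1 or 3 channels, are supported)
       cv::Mat dst;
       // d = 9 gives a 9x9 neighborhood; sigmaColor = sigmaSpace = 75
       cv::bilateralFilter(src, dst, 9, 75.0, 75.0);

   Passing d <= 0 derives the radius from sigmaSpace (cvRound(sigmaSpace*1.5)),
   as in the helpers above; a larger sigmaColor smooths across wider intensity
   ranges, and a larger sigmaSpace mixes more distant pixels. */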
/* End of file. */
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, 2018, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
namespace cv
{
/****************************************************************************************\
Box Filter
\****************************************************************************************/
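// The box filter below is separable and incremental: RowSum produces, for each
// output element, the sum of ksize horizontal samples; ColumnSum then sums
// ksize such rows and optionally multiplies by scale (typically
// 1/(ksize.width*ksize.height) for a normalized blur). Both stages slide a
// window, so the per-pixel cost is O(1) in the kernel size.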
template<typename T, typename ST>
struct RowSum :
public BaseRowFilter
{
RowSum( int _ksize, int _anchor ) :
BaseRowFilter()
{
ksize = _ksize;
anchor = _anchor;
}
virtual void operator()(const uchar* src, uchar* dst, int width, int cn) CV_OVERRIDE
{
const T* S = (const T*)src;
ST* D = (ST*)dst;
int i = 0, k, ksz_cn = ksize*cn;
width = (width - 1)*cn;
if( ksize == 3 )
{
for( i = 0; i < width + cn; i++ )
{
D[i] = (ST)S[i] + (ST)S[i+cn] + (ST)S[i+cn*2];
}
}
else if( ksize == 5 )
{
for( i = 0; i < width + cn; i++ )
{
D[i] = (ST)S[i] + (ST)S[i+cn] + (ST)S[i+cn*2] + (ST)S[i + cn*3] + (ST)S[i + cn*4];
}
}
else if( cn == 1 )
{
ST s = 0;
for( i = 0; i < ksz_cn; i++ )
s += (ST)S[i];
D[0] = s;
for( i = 0; i < width; i++ )
{
s += (ST)S[i + ksz_cn] - (ST)S[i];
D[i+1] = s;
}
}
else if( cn == 3 )
{
ST s0 = 0, s1 = 0, s2 = 0;
for( i = 0; i < ksz_cn; i += 3 )
{
s0 += (ST)S[i];
s1 += (ST)S[i+1];
s2 += (ST)S[i+2];
}
D[0] = s0;
D[1] = s1;
D[2] = s2;
for( i = 0; i < width; i += 3 )
{
s0 += (ST)S[i + ksz_cn] - (ST)S[i];
s1 += (ST)S[i + ksz_cn + 1] - (ST)S[i + 1];
s2 += (ST)S[i + ksz_cn + 2] - (ST)S[i + 2];
D[i+3] = s0;
D[i+4] = s1;
D[i+5] = s2;
}
}
else if( cn == 4 )
{
ST s0 = 0, s1 = 0, s2 = 0, s3 = 0;
for( i = 0; i < ksz_cn; i += 4 )
{
s0 += (ST)S[i];
s1 += (ST)S[i+1];
s2 += (ST)S[i+2];
s3 += (ST)S[i+3];
}
D[0] = s0;
D[1] = s1;
D[2] = s2;
D[3] = s3;
for( i = 0; i < width; i += 4 )
{
s0 += (ST)S[i + ksz_cn] - (ST)S[i];
s1 += (ST)S[i + ksz_cn + 1] - (ST)S[i + 1];
s2 += (ST)S[i + ksz_cn + 2] - (ST)S[i + 2];
s3 += (ST)S[i + ksz_cn + 3] - (ST)S[i + 3];
D[i+4] = s0;
D[i+5] = s1;
D[i+6] = s2;
D[i+7] = s3;
}
}
else
for( k = 0; k < cn; k++, S++, D++ )
{
ST s = 0;
for( i = 0; i < ksz_cn; i += cn )
s += (ST)S[i];
D[0] = s;
for( i = 0; i < width; i += cn )
{
s += (ST)S[i + ksz_cn] - (ST)S[i];
D[i+cn] = s;
}
}
}
};
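// Worked example of the sliding-window update in the generic cn == 1 branch:
// with ksize = 4 and S = {1, 2, 3, 4, 5}, ksz_cn = 4, so D[0] = 1+2+3+4 = 10,
// then s += S[0 + 4] - S[0] gives D[1] = 10 + 5 - 1 = 14 = 2+3+4+5. Each
// further output costs one add and one subtract regardless of ksize.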
template<typename ST, typename T>
struct ColumnSum :
public BaseColumnFilter
{
ColumnSum( int _ksize, int _anchor, double _scale ) :
BaseColumnFilter()
{
ksize = _ksize;
anchor = _anchor;
scale = _scale;
sumCount = 0;
}
virtual void reset() CV_OVERRIDE { sumCount = 0; }
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
int i;
ST* SUM;
bool haveScale = scale != 1;
double _scale = scale;
if( width != (int)sum.size() )
{
sum.resize(width);
sumCount = 0;
}
SUM = &sum[0];
if( sumCount == 0 )
{
memset((void*)SUM, 0, width*sizeof(ST));
for( ; sumCount < ksize - 1; sumCount++, src++ )
{
const ST* Sp = (const ST*)src[0];
for( i = 0; i < width; i++ )
SUM[i] += Sp[i];
}
}
else
{
CV_Assert( sumCount == ksize-1 );
src += ksize-1;
}
for( ; count--; src++ )
{
const ST* Sp = (const ST*)src[0];
const ST* Sm = (const ST*)src[1-ksize];
T* D = (T*)dst;
if( haveScale )
{
for( i = 0; i <= width - 2; i += 2 )
{
ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
D[i] = saturate_cast<T>(s0*_scale);
D[i+1] = saturate_cast<T>(s1*_scale);
s0 -= Sm[i]; s1 -= Sm[i+1];
SUM[i] = s0; SUM[i+1] = s1;
}
for( ; i < width; i++ )
{
ST s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<T>(s0*_scale);
SUM[i] = s0 - Sm[i];
}
}
else
{
for( i = 0; i <= width - 2; i += 2 )
{
ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
D[i] = saturate_cast<T>(s0);
D[i+1] = saturate_cast<T>(s1);
s0 -= Sm[i]; s1 -= Sm[i+1];
SUM[i] = s0; SUM[i+1] = s1;
}
for( ; i < width; i++ )
{
ST s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<T>(s0);
SUM[i] = s0 - Sm[i];
}
}
dst += dststep;
}
}
double scale;
int sumCount;
std::vector<ST> sum;
};
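// The specializations below follow the same sliding-sum scheme but vectorize
// the inner loops with the universal intrinsics: a main loop at the native
// SIMD width (vx_load/v_store), an optional extra 128-bit loop when
// CV_SIMD_WIDTH > 16 to shorten the scalar tail, and a plain loop for the rest.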
template<>
struct ColumnSum<int, uchar> :
public BaseColumnFilter
{
ColumnSum( int _ksize, int _anchor, double _scale ) :
BaseColumnFilter()
{
ksize = _ksize;
anchor = _anchor;
scale = _scale;
sumCount = 0;
}
virtual void reset() CV_OVERRIDE { sumCount = 0; }
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
int* SUM;
bool haveScale = scale != 1;
double _scale = scale;
if( width != (int)sum.size() )
{
sum.resize(width);
sumCount = 0;
}
SUM = &sum[0];
if( sumCount == 0 )
{
memset((void*)SUM, 0, width*sizeof(int));
for( ; sumCount < ksize - 1; sumCount++, src++ )
{
const int* Sp = (const int*)src[0];
int i = 0;
#if CV_SIMD
for (; i <= width - v_int32::nlanes; i += v_int32::nlanes)
{
v_store(SUM + i, vx_load(SUM + i) + vx_load(Sp + i));
}
#if CV_SIMD_WIDTH > 16
for (; i <= width - v_int32x4::nlanes; i += v_int32x4::nlanes)
{
v_store(SUM + i, v_load(SUM + i) + v_load(Sp + i));
}
#endif
#endif
for( ; i < width; i++ )
SUM[i] += Sp[i];
}
}
else
{
CV_Assert( sumCount == ksize-1 );
src += ksize-1;
}
for( ; count--; src++ )
{
const int* Sp = (const int*)src[0];
const int* Sm = (const int*)src[1-ksize];
uchar* D = (uchar*)dst;
if( haveScale )
{
int i = 0;
#if CV_SIMD
v_float32 _v_scale = vx_setall_f32((float)_scale);
for( ; i <= width - v_uint16::nlanes; i += v_uint16::nlanes )
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_int32 v_s01 = vx_load(SUM + i + v_int32::nlanes) + vx_load(Sp + i + v_int32::nlanes);
v_uint32 v_s0d = v_reinterpret_as_u32(v_round(v_cvt_f32(v_s0) * _v_scale));
v_uint32 v_s01d = v_reinterpret_as_u32(v_round(v_cvt_f32(v_s01) * _v_scale));
v_uint16 v_dst = v_pack(v_s0d, v_s01d);
v_pack_store(D + i, v_dst);
v_store(SUM + i, v_s0 - vx_load(Sm + i));
v_store(SUM + i + v_int32::nlanes, v_s01 - vx_load(Sm + i + v_int32::nlanes));
}
#if CV_SIMD_WIDTH > 16
v_float32x4 v_scale = v_setall_f32((float)_scale);
for( ; i <= width-v_uint16x8::nlanes; i+=v_uint16x8::nlanes )
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_int32x4 v_s01 = v_load(SUM + i + v_int32x4::nlanes) + v_load(Sp + i + v_int32x4::nlanes);
v_uint32x4 v_s0d = v_reinterpret_as_u32(v_round(v_cvt_f32(v_s0) * v_scale));
v_uint32x4 v_s01d = v_reinterpret_as_u32(v_round(v_cvt_f32(v_s01) * v_scale));
v_uint16x8 v_dst = v_pack(v_s0d, v_s01d);
v_pack_store(D + i, v_dst);
v_store(SUM + i, v_s0 - v_load(Sm + i));
v_store(SUM + i + v_int32x4::nlanes, v_s01 - v_load(Sm + i + v_int32x4::nlanes));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<uchar>(s0*_scale);
SUM[i] = s0 - Sm[i];
}
}
else
{
int i = 0;
#if CV_SIMD
for( ; i <= width-v_uint16::nlanes; i+=v_uint16::nlanes )
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_int32 v_s01 = vx_load(SUM + i + v_int32::nlanes) + vx_load(Sp + i + v_int32::nlanes);
v_uint16 v_dst = v_pack(v_reinterpret_as_u32(v_s0), v_reinterpret_as_u32(v_s01));
v_pack_store(D + i, v_dst);
v_store(SUM + i, v_s0 - vx_load(Sm + i));
v_store(SUM + i + v_int32::nlanes, v_s01 - vx_load(Sm + i + v_int32::nlanes));
}
#if CV_SIMD_WIDTH > 16
for( ; i <= width-v_uint16x8::nlanes; i+=v_uint16x8::nlanes )
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_int32x4 v_s01 = v_load(SUM + i + v_int32x4::nlanes) + v_load(Sp + i + v_int32x4::nlanes);
v_uint16x8 v_dst = v_pack(v_reinterpret_as_u32(v_s0), v_reinterpret_as_u32(v_s01));
v_pack_store(D + i, v_dst);
v_store(SUM + i, v_s0 - v_load(Sm + i));
v_store(SUM + i + v_int32x4::nlanes, v_s01 - v_load(Sm + i + v_int32x4::nlanes));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<uchar>(s0);
SUM[i] = s0 - Sm[i];
}
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
int sumCount;
std::vector<int> sum;
};
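// For the 8-bit path the normalization by 1/(ksize.width*ksize.height) is done
// in integer fixed-point arithmetic: with d = cvRound(1/scale), the constructor
// picks divScale ~= 2^SHIFT/d and a rounding bias divDelta ~= d/2 so that
//
//     (s + divDelta) * divScale >> SHIFT  ~=  s / d   (rounded)
//
// A worked sketch (not from the original source): a normalized 3x3 kernel gives
// d = 9, divScale = 932068 (~2^23/9), divDelta = 4, and then
// (2295 + 4) * 932068 >> 23 == 255, i.e. 2295/9 as expected.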
template<>
struct ColumnSum<ushort, uchar> :
public BaseColumnFilter
{
enum { SHIFT = 23 };
ColumnSum( int _ksize, int _anchor, double _scale ) :
BaseColumnFilter()
{
ksize = _ksize;
anchor = _anchor;
scale = _scale;
sumCount = 0;
divDelta = 0;
divScale = 1;
if( scale != 1 )
{
int d = cvRound(1./scale);
double scalef = ((double)(1 << SHIFT))/d;
divScale = cvFloor(scalef);
scalef -= divScale;
divDelta = d/2;
if( scalef < 0.5 )
divDelta++;
else
divScale++;
}
}
virtual void reset() CV_OVERRIDE { sumCount = 0; }
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
const int ds = divScale;
const int dd = divDelta;
ushort* SUM;
const bool haveScale = scale != 1;
if( width != (int)sum.size() )
{
sum.resize(width);
sumCount = 0;
}
SUM = &sum[0];
if( sumCount == 0 )
{
memset((void*)SUM, 0, width*sizeof(SUM[0]));
for( ; sumCount < ksize - 1; sumCount++, src++ )
{
const ushort* Sp = (const ushort*)src[0];
int i = 0;
#if CV_SIMD
for( ; i <= width - v_uint16::nlanes; i += v_uint16::nlanes )
{
v_store(SUM + i, vx_load(SUM + i) + vx_load(Sp + i));
}
#if CV_SIMD_WIDTH > 16
for( ; i <= width - v_uint16x8::nlanes; i += v_uint16x8::nlanes )
{
v_store(SUM + i, v_load(SUM + i) + v_load(Sp + i));
}
#endif
#endif
for( ; i < width; i++ )
SUM[i] += Sp[i];
}
}
else
{
CV_Assert( sumCount == ksize-1 );
src += ksize-1;
}
for( ; count--; src++ )
{
const ushort* Sp = (const ushort*)src[0];
const ushort* Sm = (const ushort*)src[1-ksize];
uchar* D = (uchar*)dst;
if( haveScale )
{
int i = 0;
#if CV_SIMD
v_uint32 _ds4 = vx_setall_u32((unsigned)ds);
v_uint16 _dd8 = vx_setall_u16((ushort)dd);
for( ; i <= width-v_uint8::nlanes; i+=v_uint8::nlanes )
{
v_uint16 _sm0 = vx_load(Sm + i);
v_uint16 _sm1 = vx_load(Sm + i + v_uint16::nlanes);
v_uint16 _s0 = v_add_wrap(vx_load(SUM + i), vx_load(Sp + i));
v_uint16 _s1 = v_add_wrap(vx_load(SUM + i + v_uint16::nlanes), vx_load(Sp + i + v_uint16::nlanes));
v_uint32 _s00, _s01, _s10, _s11;
v_expand(_s0 + _dd8, _s00, _s01);
v_expand(_s1 + _dd8, _s10, _s11);
_s00 = v_shr<SHIFT>(_s00*_ds4);
_s01 = v_shr<SHIFT>(_s01*_ds4);
_s10 = v_shr<SHIFT>(_s10*_ds4);
_s11 = v_shr<SHIFT>(_s11*_ds4);
v_int16 r0 = v_pack(v_reinterpret_as_s32(_s00), v_reinterpret_as_s32(_s01));
v_int16 r1 = v_pack(v_reinterpret_as_s32(_s10), v_reinterpret_as_s32(_s11));
_s0 = v_sub_wrap(_s0, _sm0);
_s1 = v_sub_wrap(_s1, _sm1);
v_store(D + i, v_pack_u(r0, r1));
v_store(SUM + i, _s0);
v_store(SUM + i + v_uint16::nlanes, _s1);
}
#if CV_SIMD_WIDTH > 16
v_uint32x4 ds4 = v_setall_u32((unsigned)ds);
v_uint16x8 dd8 = v_setall_u16((ushort)dd);
for( ; i <= width-v_uint8x16::nlanes; i+=v_uint8x16::nlanes )
{
v_uint16x8 _sm0 = v_load(Sm + i);
v_uint16x8 _sm1 = v_load(Sm + i + v_uint16x8::nlanes);
v_uint16x8 _s0 = v_add_wrap(v_load(SUM + i), v_load(Sp + i));
v_uint16x8 _s1 = v_add_wrap(v_load(SUM + i + v_uint16x8::nlanes), v_load(Sp + i + v_uint16x8::nlanes));
v_uint32x4 _s00, _s01, _s10, _s11;
v_expand(_s0 + dd8, _s00, _s01);
v_expand(_s1 + dd8, _s10, _s11);
_s00 = v_shr<SHIFT>(_s00*ds4);
_s01 = v_shr<SHIFT>(_s01*ds4);
_s10 = v_shr<SHIFT>(_s10*ds4);
_s11 = v_shr<SHIFT>(_s11*ds4);
v_int16x8 r0 = v_pack(v_reinterpret_as_s32(_s00), v_reinterpret_as_s32(_s01));
v_int16x8 r1 = v_pack(v_reinterpret_as_s32(_s10), v_reinterpret_as_s32(_s11));
_s0 = v_sub_wrap(_s0, _sm0);
_s1 = v_sub_wrap(_s1, _sm1);
v_store(D + i, v_pack_u(r0, r1));
v_store(SUM + i, _s0);
v_store(SUM + i + v_uint16x8::nlanes, _s1);
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = (uchar)((s0 + dd)*ds >> SHIFT);
SUM[i] = (ushort)(s0 - Sm[i]);
}
}
else
{
int i = 0;
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<uchar>(s0);
SUM[i] = (ushort)(s0 - Sm[i]);
}
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
int sumCount;
int divDelta;
int divScale;
std::vector<ushort> sum;
};
template<>
struct ColumnSum<int, short> :
public BaseColumnFilter
{
ColumnSum( int _ksize, int _anchor, double _scale ) :
BaseColumnFilter()
{
ksize = _ksize;
anchor = _anchor;
scale = _scale;
sumCount = 0;
}
virtual void reset() CV_OVERRIDE { sumCount = 0; }
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
int i;
int* SUM;
bool haveScale = scale != 1;
double _scale = scale;
if( width != (int)sum.size() )
{
sum.resize(width);
sumCount = 0;
}
SUM = &sum[0];
if( sumCount == 0 )
{
memset((void*)SUM, 0, width*sizeof(int));
for( ; sumCount < ksize - 1; sumCount++, src++ )
{
const int* Sp = (const int*)src[0];
i = 0;
#if CV_SIMD
for( ; i <= width - v_int32::nlanes; i+=v_int32::nlanes )
{
v_store(SUM + i, vx_load(SUM + i) + vx_load(Sp + i));
}
#if CV_SIMD_WIDTH > 16
for( ; i <= width - v_int32x4::nlanes; i+=v_int32x4::nlanes )
{
v_store(SUM + i, v_load(SUM + i) + v_load(Sp + i));
}
#endif
#endif
for( ; i < width; i++ )
SUM[i] += Sp[i];
}
}
else
{
CV_Assert( sumCount == ksize-1 );
src += ksize-1;
}
for( ; count--; src++ )
{
const int* Sp = (const int*)src[0];
const int* Sm = (const int*)src[1-ksize];
short* D = (short*)dst;
if( haveScale )
{
i = 0;
#if CV_SIMD
v_float32 _v_scale = vx_setall_f32((float)_scale);
for( ; i <= width-v_int16::nlanes; i+=v_int16::nlanes )
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_int32 v_s01 = vx_load(SUM + i + v_int32::nlanes) + vx_load(Sp + i + v_int32::nlanes);
v_int32 v_s0d = v_round(v_cvt_f32(v_s0) * _v_scale);
v_int32 v_s01d = v_round(v_cvt_f32(v_s01) * _v_scale);
v_store(D + i, v_pack(v_s0d, v_s01d));
v_store(SUM + i, v_s0 - vx_load(Sm + i));
v_store(SUM + i + v_int32::nlanes, v_s01 - vx_load(Sm + i + v_int32::nlanes));
}
#if CV_SIMD_WIDTH > 16
v_float32x4 v_scale = v_setall_f32((float)_scale);
for( ; i <= width-v_int16x8::nlanes; i+=v_int16x8::nlanes )
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_int32x4 v_s01 = v_load(SUM + i + v_int32x4::nlanes) + v_load(Sp + i + v_int32x4::nlanes);
v_int32x4 v_s0d = v_round(v_cvt_f32(v_s0) * v_scale);
v_int32x4 v_s01d = v_round(v_cvt_f32(v_s01) * v_scale);
v_store(D + i, v_pack(v_s0d, v_s01d));
v_store(SUM + i, v_s0 - v_load(Sm + i));
v_store(SUM + i + v_int32x4::nlanes, v_s01 - v_load(Sm + i + v_int32x4::nlanes));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<short>(s0*_scale);
SUM[i] = s0 - Sm[i];
}
}
else
{
i = 0;
#if CV_SIMD
for( ; i <= width-v_int16::nlanes; i+=v_int16::nlanes )
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_int32 v_s01 = vx_load(SUM + i + v_int32::nlanes) + vx_load(Sp + i + v_int32::nlanes);
v_store(D + i, v_pack(v_s0, v_s01));
v_store(SUM + i, v_s0 - vx_load(Sm + i));
v_store(SUM + i + v_int32::nlanes, v_s01 - vx_load(Sm + i + v_int32::nlanes));
}
#if CV_SIMD_WIDTH > 16
for( ; i <= width-v_int16x8::nlanes; i+=v_int16x8::nlanes )
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_int32x4 v_s01 = v_load(SUM + i + v_int32x4::nlanes) + v_load(Sp + i + v_int32x4::nlanes);
v_store(D + i, v_pack(v_s0, v_s01));
v_store(SUM + i, v_s0 - v_load(Sm + i));
v_store(SUM + i + v_int32x4::nlanes, v_s01 - v_load(Sm + i + v_int32x4::nlanes));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<short>(s0);
SUM[i] = s0 - Sm[i];
}
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
int sumCount;
std::vector<int> sum;
};
template<>
struct ColumnSum<int, ushort> :
public BaseColumnFilter
{
ColumnSum( int _ksize, int _anchor, double _scale ) :
BaseColumnFilter()
{
ksize = _ksize;
anchor = _anchor;
scale = _scale;
sumCount = 0;
}
virtual void reset() CV_OVERRIDE { sumCount = 0; }
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
int* SUM;
bool haveScale = scale != 1;
double _scale = scale;
if( width != (int)sum.size() )
{
sum.resize(width);
sumCount = 0;
}
SUM = &sum[0];
if( sumCount == 0 )
{
memset((void*)SUM, 0, width*sizeof(int));
for( ; sumCount < ksize - 1; sumCount++, src++ )
{
const int* Sp = (const int*)src[0];
int i = 0;
#if CV_SIMD
for (; i <= width - v_int32::nlanes; i += v_int32::nlanes)
{
v_store(SUM + i, vx_load(SUM + i) + vx_load(Sp + i));
}
#if CV_SIMD_WIDTH > 16
for (; i <= width - v_int32x4::nlanes; i += v_int32x4::nlanes)
{
v_store(SUM + i, v_load(SUM + i) + v_load(Sp + i));
}
#endif
#endif
for( ; i < width; i++ )
SUM[i] += Sp[i];
}
}
else
{
CV_Assert( sumCount == ksize-1 );
src += ksize-1;
}
for( ; count--; src++ )
{
const int* Sp = (const int*)src[0];
const int* Sm = (const int*)src[1-ksize];
ushort* D = (ushort*)dst;
if( haveScale )
{
int i = 0;
#if CV_SIMD
v_float32 _v_scale = vx_setall_f32((float)_scale);
for( ; i <= width-v_uint16::nlanes; i+=v_uint16::nlanes )
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_int32 v_s01 = vx_load(SUM + i + v_int32::nlanes) + vx_load(Sp + i + v_int32::nlanes);
v_uint32 v_s0d = v_reinterpret_as_u32(v_round(v_cvt_f32(v_s0) * _v_scale));
v_uint32 v_s01d = v_reinterpret_as_u32(v_round(v_cvt_f32(v_s01) * _v_scale));
v_store(D + i, v_pack(v_s0d, v_s01d));
v_store(SUM + i, v_s0 - vx_load(Sm + i));
v_store(SUM + i + v_int32::nlanes, v_s01 - vx_load(Sm + i + v_int32::nlanes));
}
#if CV_SIMD_WIDTH > 16
v_float32x4 v_scale = v_setall_f32((float)_scale);
for( ; i <= width-v_uint16x8::nlanes; i+=v_uint16x8::nlanes )
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_int32x4 v_s01 = v_load(SUM + i + v_int32x4::nlanes) + v_load(Sp + i + v_int32x4::nlanes);
v_uint32x4 v_s0d = v_reinterpret_as_u32(v_round(v_cvt_f32(v_s0) * v_scale));
v_uint32x4 v_s01d = v_reinterpret_as_u32(v_round(v_cvt_f32(v_s01) * v_scale));
v_store(D + i, v_pack(v_s0d, v_s01d));
v_store(SUM + i, v_s0 - v_load(Sm + i));
v_store(SUM + i + v_int32x4::nlanes, v_s01 - v_load(Sm + i + v_int32x4::nlanes));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<ushort>(s0*_scale);
SUM[i] = s0 - Sm[i];
}
}
else
{
int i = 0;
#if CV_SIMD
for( ; i <= width-v_uint16::nlanes; i+=v_uint16::nlanes )
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_int32 v_s01 = vx_load(SUM + i + v_int32::nlanes) + vx_load(Sp + i + v_int32::nlanes);
v_store(D + i, v_pack(v_reinterpret_as_u32(v_s0), v_reinterpret_as_u32(v_s01)));
v_store(SUM + i, v_s0 - vx_load(Sm + i));
v_store(SUM + i + v_int32::nlanes, v_s01 - vx_load(Sm + i + v_int32::nlanes));
}
#if CV_SIMD_WIDTH > 16
for( ; i <= width-v_uint16x8::nlanes; i+=v_uint16x8::nlanes )
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_int32x4 v_s01 = v_load(SUM + i + v_int32x4::nlanes) + v_load(Sp + i + v_int32x4::nlanes);
v_store(D + i, v_pack(v_reinterpret_as_u32(v_s0), v_reinterpret_as_u32(v_s01)));
v_store(SUM + i, v_s0 - v_load(Sm + i));
v_store(SUM + i + v_int32x4::nlanes, v_s01 - v_load(Sm + i + v_int32x4::nlanes));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<ushort>(s0);
SUM[i] = s0 - Sm[i];
}
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
int sumCount;
std::vector<int> sum;
};
template<>
struct ColumnSum<int, int> :
public BaseColumnFilter
{
ColumnSum( int _ksize, int _anchor, double _scale ) :
BaseColumnFilter()
{
ksize = _ksize;
anchor = _anchor;
scale = _scale;
sumCount = 0;
}
virtual void reset() CV_OVERRIDE { sumCount = 0; }
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
int* SUM;
bool haveScale = scale != 1;
double _scale = scale;
if( width != (int)sum.size() )
{
sum.resize(width);
sumCount = 0;
}
SUM = &sum[0];
if( sumCount == 0 )
{
memset((void*)SUM, 0, width*sizeof(int));
for( ; sumCount < ksize - 1; sumCount++, src++ )
{
const int* Sp = (const int*)src[0];
int i = 0;
#if CV_SIMD
for( ; i <= width - v_int32::nlanes; i+=v_int32::nlanes )
{
v_store(SUM + i, vx_load(SUM + i) + vx_load(Sp + i));
}
#if CV_SIMD_WIDTH > 16
for( ; i <= width - v_int32x4::nlanes; i+=v_int32x4::nlanes )
{
v_store(SUM + i, v_load(SUM + i) + v_load(Sp + i));
}
#endif
#endif
for( ; i < width; i++ )
SUM[i] += Sp[i];
}
}
else
{
CV_Assert( sumCount == ksize-1 );
src += ksize-1;
}
for( ; count--; src++ )
{
const int* Sp = (const int*)src[0];
const int* Sm = (const int*)src[1-ksize];
int* D = (int*)dst;
if( haveScale )
{
int i = 0;
#if CV_SIMD
v_float32 _v_scale = vx_setall_f32((float)_scale);
for( ; i <= width-v_int32::nlanes; i+=v_int32::nlanes )
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_int32 v_s0d = v_round(v_cvt_f32(v_s0) * _v_scale);
v_store(D + i, v_s0d);
v_store(SUM + i, v_s0 - vx_load(Sm + i));
}
#if CV_SIMD_WIDTH > 16
v_float32x4 v_scale = v_setall_f32((float)_scale);
for( ; i <= width-v_int32x4::nlanes; i+=v_int32x4::nlanes )
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_int32x4 v_s0d = v_round(v_cvt_f32(v_s0) * v_scale);
v_store(D + i, v_s0d);
v_store(SUM + i, v_s0 - v_load(Sm + i));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = saturate_cast<int>(s0*_scale);
SUM[i] = s0 - Sm[i];
}
}
else
{
int i = 0;
#if CV_SIMD
for( ; i <= width-v_int32::nlanes; i+=v_int32::nlanes )
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_store(D + i, v_s0);
v_store(SUM + i, v_s0 - vx_load(Sm + i));
}
#if CV_SIMD_WIDTH > 16
for( ; i <= width-v_int32x4::nlanes; i+=v_int32x4::nlanes )
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_store(D + i, v_s0);
v_store(SUM + i, v_s0 - v_load(Sm + i));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = s0;
SUM[i] = s0 - Sm[i];
}
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
int sumCount;
std::vector<int> sum;
};
template<>
struct ColumnSum<int, float> :
public BaseColumnFilter
{
ColumnSum( int _ksize, int _anchor, double _scale ) :
BaseColumnFilter()
{
ksize = _ksize;
anchor = _anchor;
scale = _scale;
sumCount = 0;
}
virtual void reset() CV_OVERRIDE { sumCount = 0; }
virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
int* SUM;
bool haveScale = scale != 1;
double _scale = scale;
if( width != (int)sum.size() )
{
sum.resize(width);
sumCount = 0;
}
SUM = &sum[0];
if( sumCount == 0 )
{
memset((void*)SUM, 0, width*sizeof(int));
for( ; sumCount < ksize - 1; sumCount++, src++ )
{
const int* Sp = (const int*)src[0];
int i = 0;
#if CV_SIMD
for( ; i <= width - v_int32::nlanes; i+=v_int32::nlanes )
{
v_store(SUM + i, vx_load(SUM + i) + vx_load(Sp + i));
}
#if CV_SIMD_WIDTH > 16
for( ; i <= width - v_int32x4::nlanes; i+=v_int32x4::nlanes )
{
v_store(SUM + i, v_load(SUM + i) + v_load(Sp + i));
}
#endif
#endif
for( ; i < width; i++ )
SUM[i] += Sp[i];
}
}
else
{
CV_Assert( sumCount == ksize-1 );
src += ksize-1;
}
for( ; count--; src++ )
{
const int * Sp = (const int*)src[0];
const int * Sm = (const int*)src[1-ksize];
float* D = (float*)dst;
if( haveScale )
{
int i = 0;
#if CV_SIMD
v_float32 _v_scale = vx_setall_f32((float)_scale);
for (; i <= width - v_int32::nlanes; i += v_int32::nlanes)
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_store(D + i, v_cvt_f32(v_s0) * _v_scale);
v_store(SUM + i, v_s0 - vx_load(Sm + i));
}
#if CV_SIMD_WIDTH > 16
v_float32x4 v_scale = v_setall_f32((float)_scale);
for (; i <= width - v_int32x4::nlanes; i += v_int32x4::nlanes)
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_store(D + i, v_cvt_f32(v_s0) * v_scale);
v_store(SUM + i, v_s0 - v_load(Sm + i));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = (float)(s0*_scale);
SUM[i] = s0 - Sm[i];
}
}
else
{
int i = 0;
#if CV_SIMD
for( ; i <= width-v_int32::nlanes; i+=v_int32::nlanes )
{
v_int32 v_s0 = vx_load(SUM + i) + vx_load(Sp + i);
v_store(D + i, v_cvt_f32(v_s0));
v_store(SUM + i, v_s0 - vx_load(Sm + i));
}
#if CV_SIMD_WIDTH > 16
for( ; i <= width-v_int32x4::nlanes; i+=v_int32x4::nlanes )
{
v_int32x4 v_s0 = v_load(SUM + i) + v_load(Sp + i);
v_store(D + i, v_cvt_f32(v_s0));
v_store(SUM + i, v_s0 - v_load(Sm + i));
}
#endif
#endif
for( ; i < width; i++ )
{
int s0 = SUM[i] + Sp[i];
D[i] = (float)(s0);
SUM[i] = s0 - Sm[i];
}
}
dst += dststep;
}
#if CV_SIMD
vx_cleanup();
#endif
}
double scale;
int sumCount;
std::vector<int> sum;
};
#ifdef HAVE_OPENCL
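// Specialized OpenCL path for 3x3 box filtering of CV_8UC1 images on Intel
// devices: each work item handles a 16x2 pixel tile (hence the cols%16 and
// rows%2 requirements below), which is why the global size is width/16 by
// height/2.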
static bool ocl_boxFilter3x3_8UC1( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor, int borderType, bool normalize )
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
if (ddepth < 0)
ddepth = sdepth;
if (anchor.x < 0)
anchor.x = ksize.width / 2;
if (anchor.y < 0)
anchor.y = ksize.height / 2;
if ( !(dev.isIntel() && (type == CV_8UC1) &&
(_src.offset() == 0) && (_src.step() % 4 == 0) &&
(_src.cols() % 16 == 0) && (_src.rows() % 2 == 0) &&
(anchor.x == 1) && (anchor.y == 1) &&
(ksize.width == 3) && (ksize.height == 3)) )
return false;
float alpha = 1.0f / (ksize.height * ksize.width);
Size size = _src.size();
size_t globalsize[2] = { 0, 0 };
size_t localsize[2] = { 0, 0 };
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
globalsize[0] = size.width / 16;
globalsize[1] = size.height / 2;
char build_opts[1024];
sprintf(build_opts, "-D %s %s", borderMap[borderType], normalize ? "-D NORMALIZE" : "");
ocl::Kernel kernel("boxFilter3x3_8UC1_cols16_rows2", cv::ocl::imgproc::boxFilter3x3_oclsrc, build_opts);
if (kernel.empty())
return false;
UMat src = _src.getUMat();
_dst.create(size, CV_MAKETYPE(ddepth, cn));
if (!(_dst.offset() == 0 && _dst.step() % 4 == 0))
return false;
UMat dst = _dst.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
idxArg = kernel.set(idxArg, (int)src.step);
idxArg = kernel.set(idxArg, ocl::KernelArg::PtrWriteOnly(dst));
idxArg = kernel.set(idxArg, (int)dst.step);
idxArg = kernel.set(idxArg, (int)dst.rows);
idxArg = kernel.set(idxArg, (int)dst.cols);
if (normalize)
idxArg = kernel.set(idxArg, (float)alpha);
return kernel.run(2, globalsize, (localsize[0] == 0) ? NULL : localsize, false);
}
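// Generic OpenCL box filter. Two strategies are used: for small kernels on
// Intel GPUs the "filterSmall" kernel computes several pixels per work item
// from a private-memory cache; otherwise the generic "boxFilter" kernel is
// compiled, shrinking the block size until it fits the device's work-group
// size limit.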
static bool ocl_boxFilter( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor, int borderType, bool normalize, bool sqr = false )
{
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
bool doubleSupport = dev.doubleFPConfig() > 0;
if (ddepth < 0)
ddepth = sdepth;
if (cn > 4 || (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F)) ||
_src.offset() % esz != 0 || _src.step() % esz != 0)
return false;
if (anchor.x < 0)
anchor.x = ksize.width / 2;
if (anchor.y < 0)
anchor.y = ksize.height / 2;
int computeUnits = ocl::Device::getDefault().maxComputeUnits();
float alpha = 1.0f / (ksize.height * ksize.width);
Size size = _src.size(), wholeSize;
bool isolated = (borderType & BORDER_ISOLATED) != 0;
borderType &= ~BORDER_ISOLATED;
int wdepth = std::max(CV_32F, std::max(ddepth, sdepth)),
wtype = CV_MAKE_TYPE(wdepth, cn), dtype = CV_MAKE_TYPE(ddepth, cn);
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
size_t globalsize[2] = { (size_t)size.width, (size_t)size.height };
size_t localsize_general[2] = { 0, 1 }, * localsize = NULL;
UMat src = _src.getUMat();
if (!isolated)
{
Point ofs;
src.locateROI(wholeSize, ofs);
}
int h = isolated ? size.height : wholeSize.height;
int w = isolated ? size.width : wholeSize.width;
size_t maxWorkItemSizes[32];
ocl::Device::getDefault().maxWorkItemSizes(maxWorkItemSizes);
int tryWorkItems = (int)maxWorkItemSizes[0];
ocl::Kernel kernel;
if (dev.isIntel() && !(dev.type() & ocl::Device::TYPE_CPU) &&
((ksize.width < 5 && ksize.height < 5 && esz <= 4) ||
(ksize.width == 5 && ksize.height == 5 && cn == 1)))
{
if (w < ksize.width || h < ksize.height)
return false;
// Figure out what vector size to use for loading the pixels.
int pxLoadNumPixels = cn != 1 || size.width % 4 ? 1 : 4;
int pxLoadVecSize = cn * pxLoadNumPixels;
// Figure out how many pixels per work item to compute in X and Y
// directions. Too many and we run out of registers.
int pxPerWorkItemX = 1, pxPerWorkItemY = 1;
if (cn <= 2 && ksize.width <= 4 && ksize.height <= 4)
{
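// Choose the largest of 8/4/2/1 that evenly divides the width
// (and 2/1 for the height).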
pxPerWorkItemX = size.width % 8 ? size.width % 4 ? size.width % 2 ? 1 : 2 : 4 : 8;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
else if (cn < 4 || (ksize.width <= 4 && ksize.height <= 4))
{
pxPerWorkItemX = size.width % 2 ? 1 : 2;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
globalsize[0] = size.width / pxPerWorkItemX;
globalsize[1] = size.height / pxPerWorkItemY;
// Need some padding in the private array for pixels
int privDataWidth = roundUp(pxPerWorkItemX + ksize.width - 1, pxLoadNumPixels);
// Make the global size a nice round number so the runtime can pick
// from reasonable choices for the workgroup size
const int wgRound = 256;
globalsize[0] = roundUp(globalsize[0], wgRound);
char build_options[1024], cvt[2][40];
sprintf(build_options, "-D cn=%d "
"-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
"-D PX_LOAD_VEC_SIZE=%d -D PX_LOAD_NUM_PX=%d "
"-D PX_PER_WI_X=%d -D PX_PER_WI_Y=%d -D PRIV_DATA_WIDTH=%d -D %s -D %s "
"-D PX_LOAD_X_ITERATIONS=%d -D PX_LOAD_Y_ITERATIONS=%d "
"-D srcT=%s -D srcT1=%s -D dstT=%s -D dstT1=%s -D WT=%s -D WT1=%s "
"-D convertToWT=%s -D convertToDstT=%s%s%s -D PX_LOAD_FLOAT_VEC_CONV=convert_%s -D OP_BOX_FILTER",
cn, anchor.x, anchor.y, ksize.width, ksize.height,
pxLoadVecSize, pxLoadNumPixels,
pxPerWorkItemX, pxPerWorkItemY, privDataWidth, borderMap[borderType],
isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
privDataWidth / pxLoadNumPixels, pxPerWorkItemY + ksize.height - 1,
ocl::typeToStr(type), ocl::typeToStr(sdepth), ocl::typeToStr(dtype),
ocl::typeToStr(ddepth), ocl::typeToStr(wtype), ocl::typeToStr(wdepth),
ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]),
ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
normalize ? " -D NORMALIZE" : "", sqr ? " -D SQR" : "",
ocl::typeToStr(CV_MAKE_TYPE(wdepth, pxLoadVecSize)) //PX_LOAD_FLOAT_VEC_CONV
);
if (!kernel.create("filterSmall", cv::ocl::imgproc::filterSmall_oclsrc, build_options))
return false;
}
else
{
localsize = localsize_general;
for ( ; ; )
{
int BLOCK_SIZE_X = tryWorkItems, BLOCK_SIZE_Y = std::min(ksize.height * 10, size.height);
while (BLOCK_SIZE_X > 32 && BLOCK_SIZE_X >= ksize.width * 2 && BLOCK_SIZE_X > size.width * 2)
BLOCK_SIZE_X /= 2;
while (BLOCK_SIZE_Y < BLOCK_SIZE_X / 8 && BLOCK_SIZE_Y * computeUnits * 32 < size.height)
BLOCK_SIZE_Y *= 2;
if (ksize.width > BLOCK_SIZE_X || w < ksize.width || h < ksize.height)
return false;
char cvt[2][50];
String opts = format("-D LOCAL_SIZE_X=%d -D BLOCK_SIZE_Y=%d -D ST=%s -D DT=%s -D WT=%s -D convertToDT=%s -D convertToWT=%s"
" -D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d -D %s%s%s%s%s"
" -D ST1=%s -D DT1=%s -D cn=%d",
BLOCK_SIZE_X, BLOCK_SIZE_Y, ocl::typeToStr(type), ocl::typeToStr(CV_MAKE_TYPE(ddepth, cn)),
ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
ocl::convertTypeStr(wdepth, ddepth, cn, cvt[0]),
ocl::convertTypeStr(sdepth, wdepth, cn, cvt[1]),
anchor.x, anchor.y, ksize.width, ksize.height, borderMap[borderType],
isolated ? " -D BORDER_ISOLATED" : "", doubleSupport ? " -D DOUBLE_SUPPORT" : "",
normalize ? " -D NORMALIZE" : "", sqr ? " -D SQR" : "",
ocl::typeToStr(sdepth), ocl::typeToStr(ddepth), cn);
localsize[0] = BLOCK_SIZE_X;
globalsize[0] = divUp(size.width, BLOCK_SIZE_X - (ksize.width - 1)) * BLOCK_SIZE_X;
globalsize[1] = divUp(size.height, BLOCK_SIZE_Y);
kernel.create("boxFilter", cv::ocl::imgproc::boxFilter_oclsrc, opts);
if (kernel.empty())
return false;
size_t kernelWorkGroupSize = kernel.workGroupSize();
if (localsize[0] <= kernelWorkGroupSize)
break;
if (BLOCK_SIZE_X < (int)kernelWorkGroupSize)
return false;
tryWorkItems = (int)kernelWorkGroupSize;
}
}
_dst.create(size, CV_MAKETYPE(ddepth, cn));
UMat dst = _dst.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
idxArg = kernel.set(idxArg, (int)src.step);
int srcOffsetX = (int)((src.offset % src.step) / src.elemSize());
int srcOffsetY = (int)(src.offset / src.step);
int srcEndX = isolated ? srcOffsetX + size.width : wholeSize.width;
int srcEndY = isolated ? srcOffsetY + size.height : wholeSize.height;
idxArg = kernel.set(idxArg, srcOffsetX);
idxArg = kernel.set(idxArg, srcOffsetY);
idxArg = kernel.set(idxArg, srcEndX);
idxArg = kernel.set(idxArg, srcEndY);
idxArg = kernel.set(idxArg, ocl::KernelArg::WriteOnly(dst));
if (normalize)
idxArg = kernel.set(idxArg, (float)alpha);
return kernel.run(2, globalsize, localsize, false);
}
#endif
}
cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
{
int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
if( anchor < 0 )
anchor = ksize/2;
if( sdepth == CV_8U && ddepth == CV_32S )
return makePtr<RowSum<uchar, int> >(ksize, anchor);
if( sdepth == CV_8U && ddepth == CV_16U )
return makePtr<RowSum<uchar, ushort> >(ksize, anchor);
if( sdepth == CV_8U && ddepth == CV_64F )
return makePtr<RowSum<uchar, double> >(ksize, anchor);
if( sdepth == CV_16U && ddepth == CV_32S )
return makePtr<RowSum<ushort, int> >(ksize, anchor);
if( sdepth == CV_16U && ddepth == CV_64F )
return makePtr<RowSum<ushort, double> >(ksize, anchor);
if( sdepth == CV_16S && ddepth == CV_32S )
return makePtr<RowSum<short, int> >(ksize, anchor);
if( sdepth == CV_32S && ddepth == CV_32S )
return makePtr<RowSum<int, int> >(ksize, anchor);
if( sdepth == CV_16S && ddepth == CV_64F )
return makePtr<RowSum<short, double> >(ksize, anchor);
if( sdepth == CV_32F && ddepth == CV_64F )
return makePtr<RowSum<float, double> >(ksize, anchor);
if( sdepth == CV_64F && ddepth == CV_64F )
return makePtr<RowSum<double, double> >(ksize, anchor);
CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and buffer format (=%d)",
srcType, sumType));
}
cv::Ptr<cv::BaseColumnFilter> cv::getColumnSumFilter(int sumType, int dstType, int ksize,
int anchor, double scale)
{
int sdepth = CV_MAT_DEPTH(sumType), ddepth = CV_MAT_DEPTH(dstType);
CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(dstType) );
if( anchor < 0 )
anchor = ksize/2;
if( ddepth == CV_8U && sdepth == CV_32S )
return makePtr<ColumnSum<int, uchar> >(ksize, anchor, scale);
if( ddepth == CV_8U && sdepth == CV_16U )
return makePtr<ColumnSum<ushort, uchar> >(ksize, anchor, scale);
if( ddepth == CV_8U && sdepth == CV_64F )
return makePtr<ColumnSum<double, uchar> >(ksize, anchor, scale);
if( ddepth == CV_16U && sdepth == CV_32S )
return makePtr<ColumnSum<int, ushort> >(ksize, anchor, scale);
if( ddepth == CV_16U && sdepth == CV_64F )
return makePtr<ColumnSum<double, ushort> >(ksize, anchor, scale);
if( ddepth == CV_16S && sdepth == CV_32S )
return makePtr<ColumnSum<int, short> >(ksize, anchor, scale);
if( ddepth == CV_16S && sdepth == CV_64F )
return makePtr<ColumnSum<double, short> >(ksize, anchor, scale);
if( ddepth == CV_32S && sdepth == CV_32S )
return makePtr<ColumnSum<int, int> >(ksize, anchor, scale);
if( ddepth == CV_32F && sdepth == CV_32S )
return makePtr<ColumnSum<int, float> >(ksize, anchor, scale);
if( ddepth == CV_32F && sdepth == CV_64F )
return makePtr<ColumnSum<double, float> >(ksize, anchor, scale);
if( ddepth == CV_64F && sdepth == CV_32S )
return makePtr<ColumnSum<int, double> >(ksize, anchor, scale);
if( ddepth == CV_64F && sdepth == CV_64F )
return makePtr<ColumnSum<double, double> >(ksize, anchor, scale);
CV_Error_( CV_StsNotImplemented,
("Unsupported combination of sum format (=%d), and destination format (=%d)",
sumType, dstType));
}
cv::Ptr<cv::FilterEngine> cv::createBoxFilter( int srcType, int dstType, Size ksize,
Point anchor, bool normalize, int borderType )
{
int sdepth = CV_MAT_DEPTH(srcType);
int cn = CV_MAT_CN(srcType), sumType = CV_64F;
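// Pick the narrowest safe accumulator: 16-bit sums suffice for 8u->8u when the
// kernel area is at most 256 (255*256 < 2^16); 32-bit sums are used while the
// worst-case total still fits; otherwise fall back to CV_64F.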
if( sdepth == CV_8U && CV_MAT_DEPTH(dstType) == CV_8U &&
ksize.width*ksize.height <= 256 )
sumType = CV_16U;
else if( sdepth <= CV_32S && (!normalize ||
ksize.width*ksize.height <= (sdepth == CV_8U ? (1<<23) :
sdepth == CV_16U ? (1 << 15) : (1 << 16))) )
sumType = CV_32S;
sumType = CV_MAKETYPE( sumType, cn );
Ptr<BaseRowFilter> rowFilter = getRowSumFilter(srcType, sumType, ksize.width, anchor.x );
Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
dstType, ksize.height, anchor.y, normalize ? 1./(ksize.width*ksize.height) : 1);
return makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
srcType, dstType, sumType, borderType );
}
#ifdef HAVE_OPENVX
namespace cv
{
namespace ovx {
template <> inline bool skipSmallImages<VX_KERNEL_BOX_3x3>(int w, int h) { return w*h < 640 * 480; }
}
static bool openvx_boxfilter(InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType)
{
if (ddepth < 0)
ddepth = CV_8UC1;
if (_src.type() != CV_8UC1 || ddepth != CV_8U || !normalize ||
_src.cols() < 3 || _src.rows() < 3 ||
ksize.width != 3 || ksize.height != 3 ||
(anchor.x >= 0 && anchor.x != 1) ||
(anchor.y >= 0 && anchor.y != 1) ||
ovx::skipSmallImages<VX_KERNEL_BOX_3x3>(_src.cols(), _src.rows()))
return false;
Mat src = _src.getMat();
if ((borderType & BORDER_ISOLATED) == 0 && src.isSubmatrix())
return false; // non-isolated borders on submatrices are not supported
vx_enum border;
switch (borderType & ~BORDER_ISOLATED)
{
case BORDER_CONSTANT:
border = VX_BORDER_CONSTANT;
break;
case BORDER_REPLICATE:
border = VX_BORDER_REPLICATE;
break;
default:
return false;
}
_dst.create(src.size(), CV_8UC1);
Mat dst = _dst.getMat();
try
{
ivx::Context ctx = ovx::getOpenVXContext();
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: changing the VX_CONTEXT_IMMEDIATE_BORDER attribute could lead to strange issues in multi-threaded environments,
//since the OpenVX standard currently says nothing about thread-safety
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(border, (vx_uint8)(0));
ivx::IVX_CHECK_STATUS(vxuBox3x3(ctx, ia, ib));
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
}
#endif
#if defined(HAVE_IPP)
namespace cv
{
static bool ipp_boxfilter(Mat &src, Mat &dst, Size ksize, Point anchor, bool normalize, int borderType)
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201801
// Problem with SSE42 optimization for 16s and some 8u modes
if(ipp::getIppTopFeatures() == ippCPUID_SSE42 && (((src.depth() == CV_16S || src.depth() == CV_16U) && (src.channels() == 3 || src.channels() == 4)) || (src.depth() == CV_8U && src.channels() == 3 && (ksize.width > 5 || ksize.height > 5))))
return false;
// Other optimizations have some degradations too
if((((src.depth() == CV_16S || src.depth() == CV_16U) && (src.channels() == 4)) || (src.depth() == CV_8U && src.channels() == 1 && (ksize.width > 5 || ksize.height > 5))))
return false;
#endif
if(!normalize)
return false;
if(!ippiCheckAnchor(anchor, ksize))
return false;
try
{
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiSize iwKSize = ippiGetSize(ksize);
::ipp::IwiBorderSize borderSize(iwKSize);
::ipp::IwiBorderType ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
if(!ippBorder)
return false;
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterBox, iwSrc, iwDst, iwKSize, ::ipp::IwDefault(), ippBorder);
}
catch (const ::ipp::IwException &)
{
return false;
}
return true;
#else
CV_UNUSED(src); CV_UNUSED(dst); CV_UNUSED(ksize); CV_UNUSED(anchor); CV_UNUSED(normalize); CV_UNUSED(borderType);
return false;
#endif
}
}
#endif
void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType )
{
CV_INSTRUMENT_REGION();
CV_OCL_RUN(_dst.isUMat() &&
(borderType == BORDER_REPLICATE || borderType == BORDER_CONSTANT ||
borderType == BORDER_REFLECT || borderType == BORDER_REFLECT_101),
ocl_boxFilter3x3_8UC1(_src, _dst, ddepth, ksize, anchor, borderType, normalize))
CV_OCL_RUN(_dst.isUMat(), ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize))
Mat src = _src.getMat();
int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
if( ddepth < 0 )
ddepth = sdepth;
_dst.create( src.size(), CV_MAKETYPE(ddepth, cn) );
Mat dst = _dst.getMat();
if( borderType != BORDER_CONSTANT && normalize && (borderType & BORDER_ISOLATED) != 0 )
{
if( src.rows == 1 )
ksize.height = 1;
if( src.cols == 1 )
ksize.width = 1;
}
Point ofs;
Size wsz(src.cols, src.rows);
if(!(borderType&BORDER_ISOLATED))
src.locateROI( wsz, ofs );
CALL_HAL(boxFilter, cv_hal_boxFilter, src.ptr(), src.step, dst.ptr(), dst.step, src.cols, src.rows, sdepth, ddepth, cn,
ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, ksize.width, ksize.height,
anchor.x, anchor.y, normalize, borderType&~BORDER_ISOLATED);
CV_OVX_RUN(true,
openvx_boxfilter(src, dst, ddepth, ksize, anchor, normalize, borderType))
CV_IPP_RUN_FAST(ipp_boxfilter(src, dst, ksize, anchor, normalize, borderType));
borderType = (borderType&~BORDER_ISOLATED);
Ptr<FilterEngine> f = createBoxFilter( src.type(), dst.type(),
ksize, anchor, normalize, borderType );
f->apply( src, dst, wsz, ofs );
}
void cv::blur( InputArray src, OutputArray dst,
Size ksize, Point anchor, int borderType )
{
CV_INSTRUMENT_REGION();
boxFilter( src, dst, -1, ksize, anchor, true, borderType );
}
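// A minimal usage sketch (illustrative only, not part of this file):
//
//     cv::Mat src = cv::imread("input.png"), dst;
//     cv::blur(src, dst, cv::Size(5, 5));             // normalized 5x5 mean
//     cv::boxFilter(src, dst, CV_32S, cv::Size(5, 5),
//                   cv::Point(-1, -1), false);        // unnormalized window sums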
/****************************************************************************************\
Squared Box Filter
\****************************************************************************************/
namespace cv
{
template<typename T, typename ST>
struct SqrRowSum :
public BaseRowFilter
{
SqrRowSum( int _ksize, int _anchor ) :
BaseRowFilter()
{
ksize = _ksize;
anchor = _anchor;
}
virtual void operator()(const uchar* src, uchar* dst, int width, int cn) CV_OVERRIDE
{
const T* S = (const T*)src;
ST* D = (ST*)dst;
int i = 0, k, ksz_cn = ksize*cn;
width = (width - 1)*cn;
for( k = 0; k < cn; k++, S++, D++ )
{
ST s = 0;
for( i = 0; i < ksz_cn; i += cn )
{
ST val = (ST)S[i];
s += val*val;
}
D[0] = s;
for( i = 0; i < width; i += cn )
{
ST val0 = (ST)S[i], val1 = (ST)S[i + ksz_cn];
s += val1*val1 - val0*val0;
D[i+cn] = s;
}
}
}
};
static Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize, int anchor)
{
int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
if( anchor < 0 )
anchor = ksize/2;
if( sdepth == CV_8U && ddepth == CV_32S )
return makePtr<SqrRowSum<uchar, int> >(ksize, anchor);
if( sdepth == CV_8U && ddepth == CV_64F )
return makePtr<SqrRowSum<uchar, double> >(ksize, anchor);
if( sdepth == CV_16U && ddepth == CV_64F )
return makePtr<SqrRowSum<ushort, double> >(ksize, anchor);
if( sdepth == CV_16S && ddepth == CV_64F )
return makePtr<SqrRowSum<short, double> >(ksize, anchor);
if( sdepth == CV_32F && ddepth == CV_64F )
return makePtr<SqrRowSum<float, double> >(ksize, anchor);
if( sdepth == CV_64F && ddepth == CV_64F )
return makePtr<SqrRowSum<double, double> >(ksize, anchor);
CV_Error_( CV_StsNotImplemented,
("Unsupported combination of source format (=%d), and buffer format (=%d)",
srcType, sumType));
}
}
void cv::sqrBoxFilter( InputArray _src, OutputArray _dst, int ddepth,
Size ksize, Point anchor,
bool normalize, int borderType )
{
CV_INSTRUMENT_REGION();
int srcType = _src.type(), sdepth = CV_MAT_DEPTH(srcType), cn = CV_MAT_CN(srcType);
Size size = _src.size();
if( ddepth < 0 )
ddepth = sdepth < CV_32F ? CV_32F : CV_64F;
if( borderType != BORDER_CONSTANT && normalize )
{
if( size.height == 1 )
ksize.height = 1;
if( size.width == 1 )
ksize.width = 1;
}
CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize, true))
int sumDepth = CV_64F;
if( sdepth == CV_8U )
sumDepth = CV_32S;
int sumType = CV_MAKETYPE( sumDepth, cn ), dstType = CV_MAKETYPE(ddepth, cn);
Mat src = _src.getMat();
_dst.create( size, dstType );
Mat dst = _dst.getMat();
Ptr<BaseRowFilter> rowFilter = getSqrRowSumFilter(srcType, sumType, ksize.width, anchor.x );
Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
dstType, ksize.height, anchor.y,
normalize ? 1./(ksize.width*ksize.height) : 1);
Ptr<FilterEngine> f = makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
srcType, dstType, sumType, borderType );
Point ofs;
Size wsz(src.cols, src.rows);
src.locateROI( wsz, ofs );
f->apply( src, dst, wsz, ofs );
}
/* End of file. */
#include "precomp.hpp"
#include <vector>
#include "opencv2/core/hal/intrin.hpp"
#include "opencl_kernels_imgproc.hpp"
#include "opencv2/core/openvx/ovx_defs.hpp"
/*
* This file includes code contributed by Simon Perreault
* (the function icvMedianBlur_8u_O1)
*
* Constant-time median filtering -- http://nomis80.org/ctmf.html
* Copyright (C) 2006 Simon Perreault
*
* Contact:
* Laboratoire de vision et systemes numeriques
* Pavillon Adrien-Pouliot
* Universite Laval
* Sainte-Foy, Quebec, Canada
* G1K 7P4
*
* perreaul@gel.ulaval.ca
*/
/****************************************************************************************\
Median Filter
\****************************************************************************************/
namespace cv
{
static void
medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
{
typedef ushort HT;
/**
* This structure represents a two-tier histogram. The first tier (known as the
* "coarse" level) is 4 bit wide and the second tier (known as the "fine" level)
* is 8 bit wide. Pixels inserted in the fine level also get inserted into the
* coarse bucket designated by the 4 MSBs of the fine bucket value.
*
* The structure is aligned on 16 bytes, which is a prerequisite for SIMD
* instructions. Each bucket is 16 bits wide, which means that extra care must be
* taken to prevent overflow.
*/
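/* For example, inserting pixel value 0xAB increments coarse[0xA] and
 * fine[0xA][0xB]; the median search first scans the 16 coarse buckets to locate
 * the right segment, then only the 16 fine buckets inside it.
 */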
typedef struct
{
HT coarse[16];
HT fine[16][16];
} Histogram;
/**
* HOP is short for Histogram OPeration. This macro applies the operation \a op
* to histogram \a h for pixel value \a x, taking care of both the coarse and fine levels.
*/
#define HOP(h,x,op) \
h.coarse[x>>4] op, \
*((HT*)h.fine + x) op
#define COP(c,j,x,op) \
h_coarse[ 16*(n*c+j) + (x>>4) ] op, \
h_fine[ 16 * (n*(16*c+(x>>4)) + j) + (x & 0xF) ] op
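// COP updates the per-column histograms kept for the current stripe:
// h_coarse stores 16 coarse bins for column j of channel c, and h_fine the
// corresponding 16x16 fine bins; the kernel histogram H is then maintained by
// adding the column entering the window and subtracting the one leaving it.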
int cn = _dst.channels(), m = _dst.rows, r = (ksize-1)/2;
CV_Assert(cn > 0 && cn <= 4);
size_t sstep = _src.step, dstep = _dst.step;
int STRIPE_SIZE = std::min( _dst.cols, 512/cn );
#if defined(CV_SIMD_WIDTH) && CV_SIMD_WIDTH >= 16
# define CV_ALIGNMENT CV_SIMD_WIDTH
#else
# define CV_ALIGNMENT 16
#endif
std::vector<HT> _h_coarse(1 * 16 * (STRIPE_SIZE + 2*r) * cn + CV_ALIGNMENT);
std::vector<HT> _h_fine(16 * 16 * (STRIPE_SIZE + 2*r) * cn + CV_ALIGNMENT);
HT* h_coarse = alignPtr(&_h_coarse[0], CV_ALIGNMENT);
HT* h_fine = alignPtr(&_h_fine[0], CV_ALIGNMENT);
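// The image is processed in vertical stripes of at most STRIPE_SIZE columns so
// that the per-column histograms h_coarse/h_fine stay compact (and, presumably,
// cache resident).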
for( int x = 0; x < _dst.cols; x += STRIPE_SIZE )
{
int i, j, k, c, n = std::min(_dst.cols - x, STRIPE_SIZE) + r*2;
const uchar* src = _src.ptr() + x*cn;
uchar* dst = _dst.ptr() + (x - r)*cn;
memset( h_coarse, 0, 16*n*cn*sizeof(h_coarse[0]) );
memset( h_fine, 0, 16*16*n*cn*sizeof(h_fine[0]) );
// First row initialization
for( c = 0; c < cn; c++ )
{
for( j = 0; j < n; j++ )
COP( c, j, src[cn*j+c], += (HT)(r+2) );
for( i = 1; i < r; i++ )
{
const uchar* p = src + sstep*std::min(i, m-1);
for ( j = 0; j < n; j++ )
COP( c, j, p[cn*j+c], ++ );
}
}
for( i = 0; i < m; i++ )
{
const uchar* p0 = src + sstep * std::max( 0, i-r-1 );
const uchar* p1 = src + sstep * std::min( m-1, i+r );
for( c = 0; c < cn; c++ )
{
Histogram CV_DECL_ALIGNED(CV_ALIGNMENT) H;
HT CV_DECL_ALIGNED(CV_ALIGNMENT) luc[16];
memset(&H, 0, sizeof(H));
memset(luc, 0, sizeof(luc));
// Update column histograms for the entire row.
for( j = 0; j < n; j++ )
{
COP( c, j, p0[j*cn + c], -- );
COP( c, j, p1[j*cn + c], ++ );
}
// First column initialization
for (k = 0; k < 16; ++k)
{
#if CV_SIMD256
v_store(H.fine[k], v_mul_wrap(v256_load(h_fine + 16 * n*(16 * c + k)), v256_setall_u16(2 * r + 1)) + v256_load(H.fine[k]));
#elif CV_SIMD128
v_store(H.fine[k], v_mul_wrap(v_load(h_fine + 16 * n*(16 * c + k)), v_setall_u16((ushort)(2 * r + 1))) + v_load(H.fine[k]));
v_store(H.fine[k] + 8, v_mul_wrap(v_load(h_fine + 16 * n*(16 * c + k) + 8), v_setall_u16((ushort)(2 * r + 1))) + v_load(H.fine[k] + 8));
#else
for (int ind = 0; ind < 16; ++ind)
H.fine[k][ind] = (HT)(H.fine[k][ind] + (2 * r + 1) * h_fine[16 * n*(16 * c + k) + ind]);
#endif
}
#if CV_SIMD256
v_uint16x16 v_coarse = v256_load(H.coarse);
#elif CV_SIMD128
v_uint16x8 v_coarsel = v_load(H.coarse);
v_uint16x8 v_coarseh = v_load(H.coarse + 8);
#endif
HT* px = h_coarse + 16 * n*c;
for( j = 0; j < 2*r; ++j, px += 16 )
{
#if CV_SIMD256
v_coarse += v256_load(px);
#elif CV_SIMD128
v_coarsel += v_load(px);
v_coarseh += v_load(px + 8);
#else
for (int ind = 0; ind < 16; ++ind)
H.coarse[ind] += px[ind];
#endif
}
for( j = r; j < n-r; j++ )
{
int t = 2*r*r + 2*r, b, sum = 0;
HT* segment;
px = h_coarse + 16 * (n*c + std::min(j + r, n - 1));
#if CV_SIMD256
v_coarse += v256_load(px);
v_store(H.coarse, v_coarse);
#elif CV_SIMD128
v_coarsel += v_load(px);
v_coarseh += v_load(px + 8);
v_store(H.coarse, v_coarsel);
v_store(H.coarse + 8, v_coarseh);
#else
for (int ind = 0; ind < 16; ++ind)
H.coarse[ind] += px[ind];
#endif
// Find median at coarse level
for ( k = 0; k < 16 ; ++k )
{
sum += H.coarse[k];
if ( sum > t )
{
sum -= H.coarse[k];
break;
}
}
CV_Assert( k < 16 );
/* Update corresponding histogram segment */
#if CV_SIMD256
v_uint16x16 v_fine;
#elif CV_SIMD128
v_uint16x8 v_finel;
v_uint16x8 v_fineh;
#endif
if ( luc[k] <= j-r )
{
#if CV_SIMD256
v_fine = v256_setzero_u16();
#elif CV_SIMD128
v_finel = v_setzero_u16();
v_fineh = v_setzero_u16();
#else
memset(&H.fine[k], 0, 16 * sizeof(HT));
#endif
px = h_fine + 16 * (n*(16 * c + k) + j - r);
for (luc[k] = HT(j - r); luc[k] < MIN(j + r + 1, n); ++luc[k], px += 16)
{
#if CV_SIMD256
v_fine += v256_load(px);
#elif CV_SIMD128
v_finel += v_load(px);
v_fineh += v_load(px + 8);
#else
for (int ind = 0; ind < 16; ++ind)
H.fine[k][ind] += px[ind];
#endif
}
if ( luc[k] < j+r+1 )
{
px = h_fine + 16 * (n*(16 * c + k) + (n - 1));
#if CV_SIMD256
v_fine += v_mul_wrap(v256_load(px), v256_setall_u16(j + r + 1 - n));
#elif CV_SIMD128
v_finel += v_mul_wrap(v_load(px), v_setall_u16((ushort)(j + r + 1 - n)));
v_fineh += v_mul_wrap(v_load(px + 8), v_setall_u16((ushort)(j + r + 1 - n)));
#else
for (int ind = 0; ind < 16; ++ind)
H.fine[k][ind] = (HT)(H.fine[k][ind] + (j + r + 1 - n) * px[ind]);
#endif
luc[k] = (HT)(j+r+1);
}
}
else
{
#if CV_SIMD256
v_fine = v256_load(H.fine[k]);
#elif CV_SIMD128
v_finel = v_load(H.fine[k]);
v_fineh = v_load(H.fine[k] + 8);
#endif
px = h_fine + 16*n*(16 * c + k);
for ( ; luc[k] < j+r+1; ++luc[k] )
{
#if CV_SIMD256
v_fine = v_fine + v256_load(px + 16 * MIN(luc[k], n - 1)) - v256_load(px + 16 * MAX(luc[k] - 2 * r - 1, 0));
#elif CV_SIMD128
v_finel = v_finel + v_load(px + 16 * MIN(luc[k], n - 1) ) - v_load(px + 16 * MAX(luc[k] - 2 * r - 1, 0));
v_fineh = v_fineh + v_load(px + 16 * MIN(luc[k], n - 1) + 8) - v_load(px + 16 * MAX(luc[k] - 2 * r - 1, 0) + 8);
#else
for (int ind = 0; ind < 16; ++ind)
H.fine[k][ind] += px[16 * MIN(luc[k], n - 1) + ind] - px[16 * MAX(luc[k] - 2 * r - 1, 0) + ind];
#endif
}
}
px = h_coarse + 16 * (n*c + MAX(j - r, 0));
#if CV_SIMD256
v_store(H.fine[k], v_fine);
v_coarse -= v256_load(px);
#elif CV_SIMD128
v_store(H.fine[k], v_finel);
v_store(H.fine[k] + 8, v_fineh);
v_coarsel -= v_load(px);
v_coarseh -= v_load(px + 8);
#else
for (int ind = 0; ind < 16; ++ind)
H.coarse[ind] -= px[ind];
#endif
/* Find median in segment */
segment = H.fine[k];
for ( b = 0; b < 16 ; b++ )
{
sum += segment[b];
if ( sum > t )
{
dst[dstep*i+cn*j+c] = (uchar)(16*k + b);
break;
}
}
CV_Assert( b < 16 );
}
}
}
#if CV_SIMD
vx_cleanup();
#endif
}
#undef HOP
#undef COP
}
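// O(m)-per-pixel median filter in the spirit of Huang's running-histogram
// method: a two-level histogram per channel (zone0: 16 coarse bins, zone1:
// 256 fine bins) is updated as the window slides down each column, and the
// columns are traversed in alternating (serpentine) vertical order, presumably
// for better memory locality.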
static void
medianBlur_8u_Om( const Mat& _src, Mat& _dst, int m )
{
#define N 16
int zone0[4][N];
int zone1[4][N*N];
int x, y;
int n2 = m*m/2;
Size size = _dst.size();
const uchar* src = _src.ptr();
uchar* dst = _dst.ptr();
int src_step = (int)_src.step, dst_step = (int)_dst.step;
int cn = _src.channels();
const uchar* src_max = src + size.height*src_step;
CV_Assert(cn > 0 && cn <= 4);
#define UPDATE_ACC01( pix, cn, op ) \
{ \
int p = (pix); \
zone1[cn][p] op; \
zone0[cn][p >> 4] op; \
}
//CV_Assert( size.height >= nx && size.width >= nx );
for( x = 0; x < size.width; x++, src += cn, dst += cn )
{
uchar* dst_cur = dst;
const uchar* src_top = src;
const uchar* src_bottom = src;
int k, c;
int src_step1 = src_step, dst_step1 = dst_step;
if( x % 2 != 0 )
{
src_bottom = src_top += src_step*(size.height-1);
dst_cur += dst_step*(size.height-1);
src_step1 = -src_step1;
dst_step1 = -dst_step1;
}
// init accumulator
memset( zone0, 0, sizeof(zone0[0])*cn );
memset( zone1, 0, sizeof(zone1[0])*cn );
for( y = 0; y <= m/2; y++ )
{
for( c = 0; c < cn; c++ )
{
if( y > 0 )
{
for( k = 0; k < m*cn; k += cn )
UPDATE_ACC01( src_bottom[k+c], c, ++ );
}
else
{
for( k = 0; k < m*cn; k += cn )
UPDATE_ACC01( src_bottom[k+c], c, += m/2+1 );
}
}
if( (src_step1 > 0 && y < size.height-1) ||
(src_step1 < 0 && size.height-y-1 > 0) )
src_bottom += src_step1;
}
for( y = 0; y < size.height; y++, dst_cur += dst_step1 )
{
// find median
for( c = 0; c < cn; c++ )
{
int s = 0;
for( k = 0; ; k++ )
{
int t = s + zone0[c][k];
if( t > n2 ) break;
s = t;
}
for( k *= N; ;k++ )
{
s += zone1[c][k];
if( s > n2 ) break;
}
dst_cur[c] = (uchar)k;
}
if( y+1 == size.height )
break;
if( cn == 1 )
{
for( k = 0; k < m; k++ )
{
int p = src_top[k];
int q = src_bottom[k];
zone1[0][p]--;
zone0[0][p>>4]--;
zone1[0][q]++;
zone0[0][q>>4]++;
}
}
else if( cn == 3 )
{
for( k = 0; k < m*3; k += 3 )
{
UPDATE_ACC01( src_top[k], 0, -- );
UPDATE_ACC01( src_top[k+1], 1, -- );
UPDATE_ACC01( src_top[k+2], 2, -- );
UPDATE_ACC01( src_bottom[k], 0, ++ );
UPDATE_ACC01( src_bottom[k+1], 1, ++ );
UPDATE_ACC01( src_bottom[k+2], 2, ++ );
}
}
else
{
CV_Assert( cn == 4 );
for( k = 0; k < m*4; k += 4 )
{
UPDATE_ACC01( src_top[k], 0, -- );
UPDATE_ACC01( src_top[k+1], 1, -- );
UPDATE_ACC01( src_top[k+2], 2, -- );
UPDATE_ACC01( src_top[k+3], 3, -- );
UPDATE_ACC01( src_bottom[k], 0, ++ );
UPDATE_ACC01( src_bottom[k+1], 1, ++ );
UPDATE_ACC01( src_bottom[k+2], 2, ++ );
UPDATE_ACC01( src_bottom[k+3], 3, ++ );
}
}
if( (src_step1 > 0 && src_bottom + src_step1 < src_max) ||
(src_step1 < 0 && src_bottom + src_step1 >= src) )
src_bottom += src_step1;
if( y >= m/2 )
src_top += src_step1;
}
}
#undef N
#undef UPDATE_ACC01
}
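// For small apertures the median is found with a branchless sorting network:
// the MinMax* traits implement a scalar compare-exchange and the MinMaxVec*
// traits the SIMD equivalent, so medianBlur_SortNet below can run the classic
// 19-exchange network for the 9 elements of a 3x3 window on whole vectors at a
// time.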
namespace {
struct MinMax8u
{
typedef uchar value_type;
typedef int arg_type;
enum { SIZE = 1 };
arg_type load(const uchar* ptr) { return *ptr; }
void store(uchar* ptr, arg_type val) { *ptr = (uchar)val; }
void operator()(arg_type& a, arg_type& b) const
{
int t = CV_FAST_CAST_8U(a - b);
b += t; a -= t;
}
};
struct MinMax16u
{
typedef ushort value_type;
typedef int arg_type;
enum { SIZE = 1 };
arg_type load(const ushort* ptr) { return *ptr; }
void store(ushort* ptr, arg_type val) { *ptr = (ushort)val; }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = std::min(a, b);
b = std::max(b, t);
}
};
struct MinMax16s
{
typedef short value_type;
typedef int arg_type;
enum { SIZE = 1 };
arg_type load(const short* ptr) { return *ptr; }
void store(short* ptr, arg_type val) { *ptr = (short)val; }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = std::min(a, b);
b = std::max(b, t);
}
};
struct MinMax32f
{
typedef float value_type;
typedef float arg_type;
enum { SIZE = 1 };
arg_type load(const float* ptr) { return *ptr; }
void store(float* ptr, arg_type val) { *ptr = val; }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = std::min(a, b);
b = std::max(b, t);
}
};
#if CV_SIMD
struct MinMaxVec8u
{
typedef uchar value_type;
typedef v_uint8x16 arg_type;
enum { SIZE = v_uint8x16::nlanes };
arg_type load(const uchar* ptr) { return v_load(ptr); }
void store(uchar* ptr, const arg_type &val) { v_store(ptr, val); }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = v_min(a, b);
b = v_max(b, t);
}
#if CV_SIMD_WIDTH > 16
typedef v_uint8 warg_type;
enum { WSIZE = v_uint8::nlanes };
warg_type wload(const uchar* ptr) { return vx_load(ptr); }
void store(uchar* ptr, const warg_type &val) { v_store(ptr, val); }
void operator()(warg_type& a, warg_type& b) const
{
warg_type t = a;
a = v_min(a, b);
b = v_max(b, t);
}
#endif
};
struct MinMaxVec16u
{
typedef ushort value_type;
typedef v_uint16x8 arg_type;
enum { SIZE = v_uint16x8::nlanes };
arg_type load(const ushort* ptr) { return v_load(ptr); }
void store(ushort* ptr, const arg_type &val) { v_store(ptr, val); }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = v_min(a, b);
b = v_max(b, t);
}
#if CV_SIMD_WIDTH > 16
typedef v_uint16 warg_type;
enum { WSIZE = v_uint16::nlanes };
warg_type wload(const ushort* ptr) { return vx_load(ptr); }
void store(ushort* ptr, const warg_type &val) { v_store(ptr, val); }
void operator()(warg_type& a, warg_type& b) const
{
warg_type t = a;
a = v_min(a, b);
b = v_max(b, t);
}
#endif
};
struct MinMaxVec16s
{
typedef short value_type;
typedef v_int16x8 arg_type;
enum { SIZE = v_int16x8::nlanes };
arg_type load(const short* ptr) { return v_load(ptr); }
void store(short* ptr, const arg_type &val) { v_store(ptr, val); }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = v_min(a, b);
b = v_max(b, t);
}
#if CV_SIMD_WIDTH > 16
typedef v_int16 warg_type;
enum { WSIZE = v_int16::nlanes };
warg_type wload(const short* ptr) { return vx_load(ptr); }
void store(short* ptr, const warg_type &val) { v_store(ptr, val); }
void operator()(warg_type& a, warg_type& b) const
{
warg_type t = a;
a = v_min(a, b);
b = v_max(b, t);
}
#endif
};
struct MinMaxVec32f
{
typedef float value_type;
typedef v_float32x4 arg_type;
enum { SIZE = v_float32x4::nlanes };
arg_type load(const float* ptr) { return v_load(ptr); }
void store(float* ptr, const arg_type &val) { v_store(ptr, val); }
void operator()(arg_type& a, arg_type& b) const
{
arg_type t = a;
a = v_min(a, b);
b = v_max(b, t);
}
#if CV_SIMD_WIDTH > 16
typedef v_float32 warg_type;
enum { WSIZE = v_float32::nlanes };
warg_type wload(const float* ptr) { return vx_load(ptr); }
void store(float* ptr, const warg_type &val) { v_store(ptr, val); }
void operator()(warg_type& a, warg_type& b) const
{
warg_type t = a;
a = v_min(a, b);
b = v_max(b, t);
}
#endif
};
#else
typedef MinMax8u MinMaxVec8u;
typedef MinMax16u MinMaxVec16u;
typedef MinMax16s MinMaxVec16s;
typedef MinMax32f MinMaxVec32f;
#endif
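// medianBlur_SortNet computes the median of each 3x3 (m == 3) or 5x5
// (m == 5) neighborhood with a fixed sorting network of compare-exchange
// steps: Op handles the scalar border columns, VecOp the vectorized bulk
// of each row.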
template<class Op, class VecOp>
static void
medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
{
typedef typename Op::value_type T;
typedef typename Op::arg_type WT;
typedef typename VecOp::arg_type VT;
#if CV_SIMD_WIDTH > 16
typedef typename VecOp::warg_type WVT;
#endif
const T* src = _src.ptr<T>();
T* dst = _dst.ptr<T>();
int sstep = (int)(_src.step/sizeof(T));
int dstep = (int)(_dst.step/sizeof(T));
Size size = _dst.size();
int i, j, k, cn = _src.channels();
Op op;
VecOp vop;
if( m == 3 )
{
if( size.width == 1 || size.height == 1 )
{
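// degenerate single-row/column image: median of 3 along the line,
// clamping the window at both ends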
int len = size.width + size.height - 1;
int sdelta = size.height == 1 ? cn : sstep;
int sdelta0 = size.height == 1 ? 0 : sstep - cn;
int ddelta = size.height == 1 ? cn : dstep;
for( i = 0; i < len; i++, src += sdelta0, dst += ddelta )
for( j = 0; j < cn; j++, src++ )
{
WT p0 = src[i > 0 ? -sdelta : 0];
WT p1 = src[0];
WT p2 = src[i < len - 1 ? sdelta : 0];
op(p0, p1); op(p1, p2); op(p0, p1);
dst[j] = (T)p1;
}
return;
}
size.width *= cn;
for( i = 0; i < size.height; i++, dst += dstep )
{
const T* row0 = src + std::max(i - 1, 0)*sstep;
const T* row1 = src + i*sstep;
const T* row2 = src + std::min(i + 1, size.height-1)*sstep;
int limit = cn;
for(j = 0;; )
{
for( ; j < limit; j++ )
{
int j0 = j >= cn ? j - cn : j;
int j2 = j < size.width - cn ? j + cn : j;
WT p0 = row0[j0], p1 = row0[j], p2 = row0[j2];
WT p3 = row1[j0], p4 = row1[j], p5 = row1[j2];
WT p6 = row2[j0], p7 = row2[j], p8 = row2[j2];
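// median-of-9: 19 compare-exchange steps leave the median in p4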
op(p1, p2); op(p4, p5); op(p7, p8); op(p0, p1);
op(p3, p4); op(p6, p7); op(p1, p2); op(p4, p5);
op(p7, p8); op(p0, p3); op(p5, p8); op(p4, p7);
op(p3, p6); op(p1, p4); op(p2, p5); op(p4, p7);
op(p4, p2); op(p6, p4); op(p4, p2);
dst[j] = (T)p4;
}
if( limit == size.width )
break;
#if CV_SIMD_WIDTH > 16
for( ; j <= size.width - VecOp::WSIZE - cn; j += VecOp::WSIZE )
{
WVT p0 = vop.wload(row0+j-cn), p1 = vop.wload(row0+j), p2 = vop.wload(row0+j+cn);
WVT p3 = vop.wload(row1+j-cn), p4 = vop.wload(row1+j), p5 = vop.wload(row1+j+cn);
WVT p6 = vop.wload(row2+j-cn), p7 = vop.wload(row2+j), p8 = vop.wload(row2+j+cn);
vop(p1, p2); vop(p4, p5); vop(p7, p8); vop(p0, p1);
vop(p3, p4); vop(p6, p7); vop(p1, p2); vop(p4, p5);
vop(p7, p8); vop(p0, p3); vop(p5, p8); vop(p4, p7);
vop(p3, p6); vop(p1, p4); vop(p2, p5); vop(p4, p7);
vop(p4, p2); vop(p6, p4); vop(p4, p2);
vop.store(dst+j, p4);
}
#endif
for( ; j <= size.width - VecOp::SIZE - cn; j += VecOp::SIZE )
{
VT p0 = vop.load(row0+j-cn), p1 = vop.load(row0+j), p2 = vop.load(row0+j+cn);
VT p3 = vop.load(row1+j-cn), p4 = vop.load(row1+j), p5 = vop.load(row1+j+cn);
VT p6 = vop.load(row2+j-cn), p7 = vop.load(row2+j), p8 = vop.load(row2+j+cn);
vop(p1, p2); vop(p4, p5); vop(p7, p8); vop(p0, p1);
vop(p3, p4); vop(p6, p7); vop(p1, p2); vop(p4, p5);
vop(p7, p8); vop(p0, p3); vop(p5, p8); vop(p4, p7);
vop(p3, p6); vop(p1, p4); vop(p2, p5); vop(p4, p7);
vop(p4, p2); vop(p6, p4); vop(p4, p2);
vop.store(dst+j, p4);
}
limit = size.width;
}
}
#if CV_SIMD
vx_cleanup();
#endif
}
else if( m == 5 )
{
if( size.width == 1 || size.height == 1 )
{
int len = size.width + size.height - 1;
int sdelta = size.height == 1 ? cn : sstep;
int sdelta0 = size.height == 1 ? 0 : sstep - cn;
int ddelta = size.height == 1 ? cn : dstep;
for( i = 0; i < len; i++, src += sdelta0, dst += ddelta )
for( j = 0; j < cn; j++, src++ )
{
int i1 = i > 0 ? -sdelta : 0;
int i0 = i > 1 ? -sdelta*2 : i1;
int i3 = i < len-1 ? sdelta : 0;
int i4 = i < len-2 ? sdelta*2 : i3;
WT p0 = src[i0], p1 = src[i1], p2 = src[0], p3 = src[i3], p4 = src[i4];
op(p0, p1); op(p3, p4); op(p2, p3); op(p3, p4); op(p0, p2);
op(p2, p4); op(p1, p3); op(p1, p2);
dst[j] = (T)p2;
}
return;
}
size.width *= cn;
for( i = 0; i < size.height; i++, dst += dstep )
{
const T* row[5];
row[0] = src + std::max(i - 2, 0)*sstep;
row[1] = src + std::max(i - 1, 0)*sstep;
row[2] = src + i*sstep;
row[3] = src + std::min(i + 1, size.height-1)*sstep;
row[4] = src + std::min(i + 2, size.height-1)*sstep;
int limit = cn*2;
for(j = 0;; )
{
for( ; j < limit; j++ )
{
WT p[25];
int j1 = j >= cn ? j - cn : j;
int j0 = j >= cn*2 ? j - cn*2 : j1;
int j3 = j < size.width - cn ? j + cn : j;
int j4 = j < size.width - cn*2 ? j + cn*2 : j3;
for( k = 0; k < 5; k++ )
{
const T* rowk = row[k];
p[k*5] = rowk[j0]; p[k*5+1] = rowk[j1];
p[k*5+2] = rowk[j]; p[k*5+3] = rowk[j3];
p[k*5+4] = rowk[j4];
}
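// median-of-25: the compare-exchange network leaves the median in p[12]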
op(p[1], p[2]); op(p[0], p[1]); op(p[1], p[2]); op(p[4], p[5]); op(p[3], p[4]);
op(p[4], p[5]); op(p[0], p[3]); op(p[2], p[5]); op(p[2], p[3]); op(p[1], p[4]);
op(p[1], p[2]); op(p[3], p[4]); op(p[7], p[8]); op(p[6], p[7]); op(p[7], p[8]);
op(p[10], p[11]); op(p[9], p[10]); op(p[10], p[11]); op(p[6], p[9]); op(p[8], p[11]);
op(p[8], p[9]); op(p[7], p[10]); op(p[7], p[8]); op(p[9], p[10]); op(p[0], p[6]);
op(p[4], p[10]); op(p[4], p[6]); op(p[2], p[8]); op(p[2], p[4]); op(p[6], p[8]);
op(p[1], p[7]); op(p[5], p[11]); op(p[5], p[7]); op(p[3], p[9]); op(p[3], p[5]);
op(p[7], p[9]); op(p[1], p[2]); op(p[3], p[4]); op(p[5], p[6]); op(p[7], p[8]);
op(p[9], p[10]); op(p[13], p[14]); op(p[12], p[13]); op(p[13], p[14]); op(p[16], p[17]);
op(p[15], p[16]); op(p[16], p[17]); op(p[12], p[15]); op(p[14], p[17]); op(p[14], p[15]);
op(p[13], p[16]); op(p[13], p[14]); op(p[15], p[16]); op(p[19], p[20]); op(p[18], p[19]);
op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[21], p[23]); op(p[22], p[24]);
op(p[22], p[23]); op(p[18], p[21]); op(p[20], p[23]); op(p[20], p[21]); op(p[19], p[22]);
op(p[22], p[24]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[12], p[18]);
op(p[16], p[22]); op(p[16], p[18]); op(p[14], p[20]); op(p[20], p[24]); op(p[14], p[16]);
op(p[18], p[20]); op(p[22], p[24]); op(p[13], p[19]); op(p[17], p[23]); op(p[17], p[19]);
op(p[15], p[21]); op(p[15], p[17]); op(p[19], p[21]); op(p[13], p[14]); op(p[15], p[16]);
op(p[17], p[18]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[0], p[12]);
op(p[8], p[20]); op(p[8], p[12]); op(p[4], p[16]); op(p[16], p[24]); op(p[12], p[16]);
op(p[2], p[14]); op(p[10], p[22]); op(p[10], p[14]); op(p[6], p[18]); op(p[6], p[10]);
op(p[10], p[12]); op(p[1], p[13]); op(p[9], p[21]); op(p[9], p[13]); op(p[5], p[17]);
op(p[13], p[17]); op(p[3], p[15]); op(p[11], p[23]); op(p[11], p[15]); op(p[7], p[19]);
op(p[7], p[11]); op(p[11], p[13]); op(p[11], p[12]);
dst[j] = (T)p[12];
}
if( limit == size.width )
break;
#if CV_SIMD_WIDTH > 16
for( ; j <= size.width - VecOp::WSIZE - cn*2; j += VecOp::WSIZE )
{
WVT p[25];
for( k = 0; k < 5; k++ )
{
const T* rowk = row[k];
p[k*5] = vop.wload(rowk+j-cn*2); p[k*5+1] = vop.wload(rowk+j-cn);
p[k*5+2] = vop.wload(rowk+j); p[k*5+3] = vop.wload(rowk+j+cn);
p[k*5+4] = vop.wload(rowk+j+cn*2);
}
vop(p[1], p[2]); vop(p[0], p[1]); vop(p[1], p[2]); vop(p[4], p[5]); vop(p[3], p[4]);
vop(p[4], p[5]); vop(p[0], p[3]); vop(p[2], p[5]); vop(p[2], p[3]); vop(p[1], p[4]);
vop(p[1], p[2]); vop(p[3], p[4]); vop(p[7], p[8]); vop(p[6], p[7]); vop(p[7], p[8]);
vop(p[10], p[11]); vop(p[9], p[10]); vop(p[10], p[11]); vop(p[6], p[9]); vop(p[8], p[11]);
vop(p[8], p[9]); vop(p[7], p[10]); vop(p[7], p[8]); vop(p[9], p[10]); vop(p[0], p[6]);
vop(p[4], p[10]); vop(p[4], p[6]); vop(p[2], p[8]); vop(p[2], p[4]); vop(p[6], p[8]);
vop(p[1], p[7]); vop(p[5], p[11]); vop(p[5], p[7]); vop(p[3], p[9]); vop(p[3], p[5]);
vop(p[7], p[9]); vop(p[1], p[2]); vop(p[3], p[4]); vop(p[5], p[6]); vop(p[7], p[8]);
vop(p[9], p[10]); vop(p[13], p[14]); vop(p[12], p[13]); vop(p[13], p[14]); vop(p[16], p[17]);
vop(p[15], p[16]); vop(p[16], p[17]); vop(p[12], p[15]); vop(p[14], p[17]); vop(p[14], p[15]);
vop(p[13], p[16]); vop(p[13], p[14]); vop(p[15], p[16]); vop(p[19], p[20]); vop(p[18], p[19]);
vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[21], p[23]); vop(p[22], p[24]);
vop(p[22], p[23]); vop(p[18], p[21]); vop(p[20], p[23]); vop(p[20], p[21]); vop(p[19], p[22]);
vop(p[22], p[24]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[12], p[18]);
vop(p[16], p[22]); vop(p[16], p[18]); vop(p[14], p[20]); vop(p[20], p[24]); vop(p[14], p[16]);
vop(p[18], p[20]); vop(p[22], p[24]); vop(p[13], p[19]); vop(p[17], p[23]); vop(p[17], p[19]);
vop(p[15], p[21]); vop(p[15], p[17]); vop(p[19], p[21]); vop(p[13], p[14]); vop(p[15], p[16]);
vop(p[17], p[18]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[0], p[12]);
vop(p[8], p[20]); vop(p[8], p[12]); vop(p[4], p[16]); vop(p[16], p[24]); vop(p[12], p[16]);
vop(p[2], p[14]); vop(p[10], p[22]); vop(p[10], p[14]); vop(p[6], p[18]); vop(p[6], p[10]);
vop(p[10], p[12]); vop(p[1], p[13]); vop(p[9], p[21]); vop(p[9], p[13]); vop(p[5], p[17]);
vop(p[13], p[17]); vop(p[3], p[15]); vop(p[11], p[23]); vop(p[11], p[15]); vop(p[7], p[19]);
vop(p[7], p[11]); vop(p[11], p[13]); vop(p[11], p[12]);
vop.store(dst+j, p[12]);
}
#endif
for( ; j <= size.width - VecOp::SIZE - cn*2; j += VecOp::SIZE )
{
VT p[25];
for( k = 0; k < 5; k++ )
{
const T* rowk = row[k];
p[k*5] = vop.load(rowk+j-cn*2); p[k*5+1] = vop.load(rowk+j-cn);
p[k*5+2] = vop.load(rowk+j); p[k*5+3] = vop.load(rowk+j+cn);
p[k*5+4] = vop.load(rowk+j+cn*2);
}
vop(p[1], p[2]); vop(p[0], p[1]); vop(p[1], p[2]); vop(p[4], p[5]); vop(p[3], p[4]);
vop(p[4], p[5]); vop(p[0], p[3]); vop(p[2], p[5]); vop(p[2], p[3]); vop(p[1], p[4]);
vop(p[1], p[2]); vop(p[3], p[4]); vop(p[7], p[8]); vop(p[6], p[7]); vop(p[7], p[8]);
vop(p[10], p[11]); vop(p[9], p[10]); vop(p[10], p[11]); vop(p[6], p[9]); vop(p[8], p[11]);
vop(p[8], p[9]); vop(p[7], p[10]); vop(p[7], p[8]); vop(p[9], p[10]); vop(p[0], p[6]);
vop(p[4], p[10]); vop(p[4], p[6]); vop(p[2], p[8]); vop(p[2], p[4]); vop(p[6], p[8]);
vop(p[1], p[7]); vop(p[5], p[11]); vop(p[5], p[7]); vop(p[3], p[9]); vop(p[3], p[5]);
vop(p[7], p[9]); vop(p[1], p[2]); vop(p[3], p[4]); vop(p[5], p[6]); vop(p[7], p[8]);
vop(p[9], p[10]); vop(p[13], p[14]); vop(p[12], p[13]); vop(p[13], p[14]); vop(p[16], p[17]);
vop(p[15], p[16]); vop(p[16], p[17]); vop(p[12], p[15]); vop(p[14], p[17]); vop(p[14], p[15]);
vop(p[13], p[16]); vop(p[13], p[14]); vop(p[15], p[16]); vop(p[19], p[20]); vop(p[18], p[19]);
vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[21], p[23]); vop(p[22], p[24]);
vop(p[22], p[23]); vop(p[18], p[21]); vop(p[20], p[23]); vop(p[20], p[21]); vop(p[19], p[22]);
vop(p[22], p[24]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[12], p[18]);
vop(p[16], p[22]); vop(p[16], p[18]); vop(p[14], p[20]); vop(p[20], p[24]); vop(p[14], p[16]);
vop(p[18], p[20]); vop(p[22], p[24]); vop(p[13], p[19]); vop(p[17], p[23]); vop(p[17], p[19]);
vop(p[15], p[21]); vop(p[15], p[17]); vop(p[19], p[21]); vop(p[13], p[14]); vop(p[15], p[16]);
vop(p[17], p[18]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[0], p[12]);
vop(p[8], p[20]); vop(p[8], p[12]); vop(p[4], p[16]); vop(p[16], p[24]); vop(p[12], p[16]);
vop(p[2], p[14]); vop(p[10], p[22]); vop(p[10], p[14]); vop(p[6], p[18]); vop(p[6], p[10]);
vop(p[10], p[12]); vop(p[1], p[13]); vop(p[9], p[21]); vop(p[9], p[13]); vop(p[5], p[17]);
vop(p[13], p[17]); vop(p[3], p[15]); vop(p[11], p[23]); vop(p[11], p[15]); vop(p[7], p[19]);
vop(p[7], p[11]); vop(p[11], p[13]); vop(p[11], p[12]);
vop.store(dst+j, p[12]);
}
limit = size.width;
}
}
#if CV_SIMD
vx_cleanup();
#endif
}
}
#ifdef HAVE_OPENCL
#define DIVUP(total, grain) (((total) + (grain) - 1) / (grain))
static bool ocl_medianFilter(InputArray _src, OutputArray _dst, int m)
{
size_t localsize[2] = { 16, 16 };
size_t globalsize[2];
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
if ( !((depth == CV_8U || depth == CV_16U || depth == CV_16S || depth == CV_32F) && cn <= 4 && (m == 3 || m == 5)) )
return false;
Size imgSize = _src.size();
bool useOptimized = (1 == cn) &&
(size_t)imgSize.width >= localsize[0] * 8 &&
(size_t)imgSize.height >= localsize[1] * 8 &&
imgSize.width % 4 == 0 &&
imgSize.height % 4 == 0 &&
(ocl::Device::getDefault().isIntel());
cv::String kname = format( useOptimized ? "medianFilter%d_u" : "medianFilter%d", m);
cv::String kdefs = useOptimized ?
format("-D T=%s -D T1=%s -D T4=%s%d -D cn=%d -D USE_4OPT", ocl::typeToStr(type),
ocl::typeToStr(depth), ocl::typeToStr(depth), cn*4, cn)
:
format("-D T=%s -D T1=%s -D cn=%d", ocl::typeToStr(type), ocl::typeToStr(depth), cn) ;
ocl::Kernel k(kname.c_str(), ocl::imgproc::medianFilter_oclsrc, kdefs.c_str() );
if (k.empty())
return false;
UMat src = _src.getUMat();
_dst.create(src.size(), type);
UMat dst = _dst.getUMat();
k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst));
if( useOptimized )
{
globalsize[0] = DIVUP(src.cols / 4, localsize[0]) * localsize[0];
globalsize[1] = DIVUP(src.rows / 4, localsize[1]) * localsize[1];
}
else
{
globalsize[0] = (src.cols + localsize[0] + 2) / localsize[0] * localsize[0];
globalsize[1] = (src.rows + localsize[1] - 1) / localsize[1] * localsize[1];
}
return k.run(2, globalsize, localsize, false);
}
#undef DIVUP
#endif
#ifdef HAVE_OPENVX
namespace ovx {
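// use OpenVX only for images of at least 1280x720 pixels; smaller images
// stay on the CPU path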
template <> inline bool skipSmallImages<VX_KERNEL_MEDIAN_3x3>(int w, int h) { return w*h < 1280 * 720; }
}
static bool openvx_medianFilter(InputArray _src, OutputArray _dst, int ksize)
{
if (_src.type() != CV_8UC1 || _dst.type() != CV_8U
#ifndef VX_VERSION_1_1
|| ksize != 3
#endif
)
return false;
Mat src = _src.getMat();
Mat dst = _dst.getMat();
if (
#ifdef VX_VERSION_1_1
ksize != 3 ? ovx::skipSmallImages<VX_KERNEL_NON_LINEAR_FILTER>(src.cols, src.rows) :
#endif
ovx::skipSmallImages<VX_KERNEL_MEDIAN_3x3>(src.cols, src.rows)
)
return false;
try
{
ivx::Context ctx = ovx::getOpenVXContext();
#ifdef VX_VERSION_1_1
if ((vx_size)ksize > ctx.nonlinearMaxDimension())
return false;
#endif
Mat a;
if (dst.data != src.data)
a = src;
else
src.copyTo(a);
ivx::Image
ia = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(a.cols, a.rows, 1, (vx_int32)(a.step)), a.data),
ib = ivx::Image::createFromHandle(ctx, VX_DF_IMAGE_U8,
ivx::Image::createAddressing(dst.cols, dst.rows, 1, (vx_int32)(dst.step)), dst.data);
//ATTENTION: changing the VX_CONTEXT_IMMEDIATE_BORDER attribute could lead to strange issues in multi-threaded environments,
//since the OpenVX standard says nothing about thread safety for now
ivx::border_t prevBorder = ctx.immediateBorder();
ctx.setImmediateBorder(VX_BORDER_REPLICATE);
#ifdef VX_VERSION_1_1
if (ksize == 3)
#endif
{
ivx::IVX_CHECK_STATUS(vxuMedian3x3(ctx, ia, ib));
}
#ifdef VX_VERSION_1_1
else
{
ivx::Matrix mtx;
if(ksize == 5)
mtx = ivx::Matrix::createFromPattern(ctx, VX_PATTERN_BOX, ksize, ksize);
else
{
vx_size supportedSize;
ivx::IVX_CHECK_STATUS(vxQueryContext(ctx, VX_CONTEXT_NONLINEAR_MAX_DIMENSION, &supportedSize, sizeof(supportedSize)));
if ((vx_size)ksize > supportedSize)
{
ctx.setImmediateBorder(prevBorder);
return false;
}
Mat mask(ksize, ksize, CV_8UC1, Scalar(255));
mtx = ivx::Matrix::create(ctx, VX_TYPE_UINT8, ksize, ksize);
mtx.copyFrom(mask);
}
ivx::IVX_CHECK_STATUS(vxuNonLinearFilter(ctx, VX_NONLINEAR_FILTER_MEDIAN, ia, mtx, ib));
}
#endif
ctx.setImmediateBorder(prevBorder);
}
catch (const ivx::RuntimeError & e)
{
VX_DbgThrow(e.what());
}
catch (const ivx::WrapperError & e)
{
VX_DbgThrow(e.what());
}
return true;
}
#endif
#ifdef HAVE_IPP
static bool ipp_medianFilter(Mat &src0, Mat &dst, int ksize)
{
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201801
// Performance degrades for big kernels
if(ksize > 7)
return false;
#endif
{
int bufSize;
IppiSize dstRoiSize = ippiSize(dst.cols, dst.rows), maskSize = ippiSize(ksize, ksize);
IppDataType ippType = ippiGetDataType(src0.type());
int channels = src0.channels();
IppAutoBuffer<Ipp8u> buffer;
if(src0.isSubmatrix())
return false;
Mat src;
if(dst.data != src0.data)
src = src0;
else
src0.copyTo(src);
if(ippiFilterMedianBorderGetBufferSize(dstRoiSize, maskSize, ippType, channels, &bufSize) < 0)
return false;
buffer.allocate(bufSize);
switch(ippType)
{
case ipp8u:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_8u_C1R, src.ptr<Ipp8u>(), (int)src.step, dst.ptr<Ipp8u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 3)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_8u_C3R, src.ptr<Ipp8u>(), (int)src.step, dst.ptr<Ipp8u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 4)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_8u_C4R, src.ptr<Ipp8u>(), (int)src.step, dst.ptr<Ipp8u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
case ipp16u:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16u_C1R, src.ptr<Ipp16u>(), (int)src.step, dst.ptr<Ipp16u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 3)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16u_C3R, src.ptr<Ipp16u>(), (int)src.step, dst.ptr<Ipp16u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 4)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16u_C4R, src.ptr<Ipp16u>(), (int)src.step, dst.ptr<Ipp16u>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
case ipp16s:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16s_C1R, src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 3)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16s_C3R, src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else if(channels == 4)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_16s_C4R, src.ptr<Ipp16s>(), (int)src.step, dst.ptr<Ipp16s>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
case ipp32f:
if(channels == 1)
return CV_INSTRUMENT_FUN_IPP(ippiFilterMedianBorder_32f_C1R, src.ptr<Ipp32f>(), (int)src.step, dst.ptr<Ipp32f>(), (int)dst.step, dstRoiSize, maskSize, ippBorderRepl, 0, buffer) >= 0;
else
return false;
default:
return false;
}
}
}
#endif
}
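// medianBlur dispatch order: OpenCL (UMat output), external HAL, OpenVX,
// IPP and Tegra are tried first; otherwise the sorting-network filter
// handles small apertures and the histogram-based O(m)/O(1) filters handle
// 8-bit images with larger ones.
//
// Minimal usage sketch (hypothetical file name):
//   cv::Mat img = cv::imread("input.png"), out;
//   cv::medianBlur(img, out, 5);  // ksize must be odd; ksize <= 1 is a copy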
void medianBlur( InputArray _src0, OutputArray _dst, int ksize )
{
CV_INSTRUMENT_REGION();
CV_Assert( (ksize % 2 == 1) && (_src0.dims() <= 2 ));
if( ksize <= 1 || _src0.empty() )
{
_src0.copyTo(_dst);
return;
}
CV_OCL_RUN(_dst.isUMat(),
ocl_medianFilter(_src0,_dst, ksize))
Mat src0 = _src0.getMat();
_dst.create( src0.size(), src0.type() );
Mat dst = _dst.getMat();
CALL_HAL(medianBlur, cv_hal_medianBlur, src0.data, src0.step, dst.data, dst.step, src0.cols, src0.rows, src0.depth(),
src0.channels(), ksize);
CV_OVX_RUN(true,
openvx_medianFilter(_src0, _dst, ksize))
CV_IPP_RUN_FAST(ipp_medianFilter(src0, dst, ksize));
#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::useTegra() && tegra::medianBlur(src0, dst, ksize))
return;
#endif
bool useSortNet = ksize == 3 || (ksize == 5
#if !(CV_SIMD)
&& ( src0.depth() > CV_8U || src0.channels() == 2 || src0.channels() > 4 )
#endif
);
Mat src;
if( useSortNet )
{
if( dst.data != src0.data )
src = src0;
else
src0.copyTo(src);
if( src.depth() == CV_8U )
medianBlur_SortNet<MinMax8u, MinMaxVec8u>( src, dst, ksize );
else if( src.depth() == CV_16U )
medianBlur_SortNet<MinMax16u, MinMaxVec16u>( src, dst, ksize );
else if( src.depth() == CV_16S )
medianBlur_SortNet<MinMax16s, MinMaxVec16s>( src, dst, ksize );
else if( src.depth() == CV_32F )
medianBlur_SortNet<MinMax32f, MinMaxVec32f>( src, dst, ksize );
else
CV_Error(CV_StsUnsupportedFormat, "");
return;
}
else
{
cv::copyMakeBorder( src0, src, 0, 0, ksize/2, ksize/2, BORDER_REPLICATE|BORDER_ISOLATED);
int cn = src0.channels();
CV_Assert( src.depth() == CV_8U && (cn == 1 || cn == 3 || cn == 4) );
double img_size_mp = (double)(src0.total())/(1 << 20);
if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)*
(CV_SIMD ? 1 : 3))
medianBlur_8u_Om( src, dst, ksize );
else
medianBlur_8u_O1( src, dst, ksize );
}
}
}
/* End of file. */
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <limits.h>
#include "opencl_kernels_imgproc.hpp"
#include <iostream>
#include "hal_replacement.hpp"
#include "opencv2/core/hal/intrin.hpp"
#include <opencv2/core/utils/configuration.private.hpp>
/****************************************************************************************\
Basic Morphological Operations: Erosion & Dilation
\****************************************************************************************/
using namespace std;
namespace cv
{
template<typename T> struct MinOp
{
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(const T a, const T b) const { return std::min(a, b); }
};
template<typename T> struct MaxOp
{
typedef T type1;
typedef T type2;
typedef T rtype;
T operator ()(const T a, const T b) const { return std::max(a, b); }
};
#undef CV_MIN_8U
#undef CV_MAX_8U
#define CV_MIN_8U(a,b) ((a) - CV_FAST_CAST_8U((a) - (b)))
#define CV_MAX_8U(a,b) ((a) + CV_FAST_CAST_8U((b) - (a)))
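// CV_FAST_CAST_8U saturates its argument to [0, 255], so
// CV_MIN_8U(a,b) == a - max(a - b, 0) == min(a, b) and
// CV_MAX_8U(a,b) == a + max(b - a, 0) == max(a, b), with no branches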
template<> inline uchar MinOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MIN_8U(a, b); }
template<> inline uchar MaxOp<uchar>::operator ()(const uchar a, const uchar b) const { return CV_MAX_8U(a, b); }
struct MorphRowNoVec
{
MorphRowNoVec(int, int) {}
int operator()(const uchar*, uchar*, int, int) const { return 0; }
};
struct MorphColumnNoVec
{
MorphColumnNoVec(int, int) {}
int operator()(const uchar**, uchar*, int, int, int) const { return 0; }
};
struct MorphNoVec
{
int operator()(uchar**, int, uchar*, int) const { return 0; }
};
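// the *NoVec stubs report 0 pixels processed, so the scalar loops in the
// filters below then handle the whole row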
#if CV_SIMD
template<class VecUpdate> struct MorphRowVec
{
typedef typename VecUpdate::vtype vtype;
typedef typename vtype::lane_type stype;
MorphRowVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
int operator()(const uchar* src, uchar* dst, int width, int cn) const
{
int i, k, _ksize = ksize*cn;
width *= cn;
VecUpdate updateOp;
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes )
{
vtype s0 = vx_load((const stype*)src + i);
vtype s1 = vx_load((const stype*)src + i + vtype::nlanes);
vtype s2 = vx_load((const stype*)src + i + 2*vtype::nlanes);
vtype s3 = vx_load((const stype*)src + i + 3*vtype::nlanes);
for (k = cn; k < _ksize; k += cn)
{
s0 = updateOp(s0, vx_load((const stype*)src + i + k));
s1 = updateOp(s1, vx_load((const stype*)src + i + k + vtype::nlanes));
s2 = updateOp(s2, vx_load((const stype*)src + i + k + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load((const stype*)src + i + k + 3*vtype::nlanes));
}
v_store((stype*)dst + i, s0);
v_store((stype*)dst + i + vtype::nlanes, s1);
v_store((stype*)dst + i + 2*vtype::nlanes, s2);
v_store((stype*)dst + i + 3*vtype::nlanes, s3);
}
if( i <= width - 2*vtype::nlanes )
{
vtype s0 = vx_load((const stype*)src + i);
vtype s1 = vx_load((const stype*)src + i + vtype::nlanes);
for( k = cn; k < _ksize; k += cn )
{
s0 = updateOp(s0, vx_load((const stype*)src + i + k));
s1 = updateOp(s1, vx_load((const stype*)src + i + k + vtype::nlanes));
}
v_store((stype*)dst + i, s0);
v_store((stype*)dst + i + vtype::nlanes, s1);
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s = vx_load((const stype*)src + i);
for( k = cn; k < _ksize; k += cn )
s = updateOp(s, vx_load((const stype*)src + i + k));
v_store((stype*)dst + i, s);
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s = vx_load_low((const stype*)src + i);
for( k = cn; k < _ksize; k += cn )
s = updateOp(s, vx_load_low((const stype*)src + i + k));
v_store_low((stype*)dst + i, s);
i += vtype::nlanes/2;
}
return i - i % cn;
}
int ksize, anchor;
};
template<class VecUpdate> struct MorphColumnVec
{
typedef typename VecUpdate::vtype vtype;
typedef typename vtype::lane_type stype;
MorphColumnVec(int _ksize, int _anchor) : ksize(_ksize), anchor(_anchor) {}
int operator()(const uchar** _src, uchar* _dst, int dststep, int count, int width) const
{
int i = 0, k, _ksize = ksize;
VecUpdate updateOp;
for( i = 0; i < count + ksize - 1; i++ )
CV_Assert( ((size_t)_src[i] & (CV_SIMD_WIDTH-1)) == 0 );
const stype** src = (const stype**)_src;
stype* dst = (stype*)_dst;
dststep /= sizeof(dst[0]);
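// main pass: produce two output rows per iteration, reusing the reduction
// over the ksize-1 source rows shared by both vertical windows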
for( ; _ksize > 1 && count > 1; count -= 2, dst += dststep*2, src += 2 )
{
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes)
{
const stype* sptr = src[1] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
vtype s2 = vx_load_aligned(sptr + 2*vtype::nlanes);
vtype s3 = vx_load_aligned(sptr + 3*vtype::nlanes);
for( k = 2; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
s2 = updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes));
}
sptr = src[0] + i;
v_store(dst + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
v_store(dst + i + 2*vtype::nlanes, updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes)));
v_store(dst + i + 3*vtype::nlanes, updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes)));
sptr = src[k] + i;
v_store(dst + dststep + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + dststep + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
v_store(dst + dststep + i + 2*vtype::nlanes, updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes)));
v_store(dst + dststep + i + 3*vtype::nlanes, updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes)));
}
if( i <= width - 2*vtype::nlanes )
{
const stype* sptr = src[1] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
for( k = 2; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
}
sptr = src[0] + i;
v_store(dst + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
sptr = src[k] + i;
v_store(dst + dststep + i, updateOp(s0, vx_load_aligned(sptr)));
v_store(dst + dststep + i + vtype::nlanes, updateOp(s1, vx_load_aligned(sptr + vtype::nlanes)));
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s0 = vx_load_aligned(src[1] + i);
for( k = 2; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_aligned(src[k] + i));
v_store(dst + i, updateOp(s0, vx_load_aligned(src[0] + i)));
v_store(dst + dststep + i, updateOp(s0, vx_load_aligned(src[k] + i)));
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s0 = vx_load_low(src[1] + i);
for( k = 2; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_low(src[k] + i));
v_store_low(dst + i, updateOp(s0, vx_load_low(src[0] + i)));
v_store_low(dst + dststep + i, updateOp(s0, vx_load_low(src[k] + i)));
i += vtype::nlanes/2;
}
}
for( ; count > 0; count--, dst += dststep, src++ )
{
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes)
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
vtype s2 = vx_load_aligned(sptr + 2*vtype::nlanes);
vtype s3 = vx_load_aligned(sptr + 3*vtype::nlanes);
for( k = 1; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
s2 = updateOp(s2, vx_load_aligned(sptr + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load_aligned(sptr + 3*vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
v_store(dst + i + 2*vtype::nlanes, s2);
v_store(dst + i + 3*vtype::nlanes, s3);
}
if( i <= width - 2*vtype::nlanes )
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load_aligned(sptr);
vtype s1 = vx_load_aligned(sptr + vtype::nlanes);
for( k = 1; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load_aligned(sptr));
s1 = updateOp(s1, vx_load_aligned(sptr + vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s0 = vx_load_aligned(src[0] + i);
for( k = 1; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_aligned(src[k] + i));
v_store(dst + i, s0);
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s0 = vx_load_low(src[0] + i);
for( k = 1; k < _ksize; k++ )
s0 = updateOp(s0, vx_load_low(src[k] + i));
v_store_low(dst + i, s0);
i += vtype::nlanes/2;
}
}
return i;
}
int ksize, anchor;
};
template<class VecUpdate> struct MorphVec
{
typedef typename VecUpdate::vtype vtype;
typedef typename vtype::lane_type stype;
int operator()(uchar** _src, int nz, uchar* _dst, int width) const
{
const stype** src = (const stype**)_src;
stype* dst = (stype*)_dst;
int i, k;
VecUpdate updateOp;
for( i = 0; i <= width - 4*vtype::nlanes; i += 4*vtype::nlanes )
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load(sptr);
vtype s1 = vx_load(sptr + vtype::nlanes);
vtype s2 = vx_load(sptr + 2*vtype::nlanes);
vtype s3 = vx_load(sptr + 3*vtype::nlanes);
for( k = 1; k < nz; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load(sptr));
s1 = updateOp(s1, vx_load(sptr + vtype::nlanes));
s2 = updateOp(s2, vx_load(sptr + 2*vtype::nlanes));
s3 = updateOp(s3, vx_load(sptr + 3*vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
v_store(dst + i + 2*vtype::nlanes, s2);
v_store(dst + i + 3*vtype::nlanes, s3);
}
if( i <= width - 2*vtype::nlanes )
{
const stype* sptr = src[0] + i;
vtype s0 = vx_load(sptr);
vtype s1 = vx_load(sptr + vtype::nlanes);
for( k = 1; k < nz; k++ )
{
sptr = src[k] + i;
s0 = updateOp(s0, vx_load(sptr));
s1 = updateOp(s1, vx_load(sptr + vtype::nlanes));
}
v_store(dst + i, s0);
v_store(dst + i + vtype::nlanes, s1);
i += 2*vtype::nlanes;
}
if( i <= width - vtype::nlanes )
{
vtype s0 = vx_load(src[0] + i);
for( k = 1; k < nz; k++ )
s0 = updateOp(s0, vx_load(src[k] + i));
v_store(dst + i, s0);
i += vtype::nlanes;
}
if( i <= width - vtype::nlanes/2 )
{
vtype s0 = vx_load_low(src[0] + i);
for( k = 1; k < nz; k++ )
s0 = updateOp(s0, vx_load_low(src[k] + i));
v_store_low(dst + i, s0);
i += vtype::nlanes/2;
}
return i;
}
};
template <typename T> struct VMin
{
typedef T vtype;
vtype operator()(const vtype& a, const vtype& b) const { return v_min(a,b); }
};
template <typename T> struct VMax
{
typedef T vtype;
vtype operator()(const vtype& a, const vtype& b) const { return v_max(a,b); }
};
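// instantiate the SIMD row, column and 2D morphology operations for every
// supported depth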
typedef MorphRowVec<VMin<v_uint8> > ErodeRowVec8u;
typedef MorphRowVec<VMax<v_uint8> > DilateRowVec8u;
typedef MorphRowVec<VMin<v_uint16> > ErodeRowVec16u;
typedef MorphRowVec<VMax<v_uint16> > DilateRowVec16u;
typedef MorphRowVec<VMin<v_int16> > ErodeRowVec16s;
typedef MorphRowVec<VMax<v_int16> > DilateRowVec16s;
typedef MorphRowVec<VMin<v_float32> > ErodeRowVec32f;
typedef MorphRowVec<VMax<v_float32> > DilateRowVec32f;
typedef MorphColumnVec<VMin<v_uint8> > ErodeColumnVec8u;
typedef MorphColumnVec<VMax<v_uint8> > DilateColumnVec8u;
typedef MorphColumnVec<VMin<v_uint16> > ErodeColumnVec16u;
typedef MorphColumnVec<VMax<v_uint16> > DilateColumnVec16u;
typedef MorphColumnVec<VMin<v_int16> > ErodeColumnVec16s;
typedef MorphColumnVec<VMax<v_int16> > DilateColumnVec16s;
typedef MorphColumnVec<VMin<v_float32> > ErodeColumnVec32f;
typedef MorphColumnVec<VMax<v_float32> > DilateColumnVec32f;
typedef MorphVec<VMin<v_uint8> > ErodeVec8u;
typedef MorphVec<VMax<v_uint8> > DilateVec8u;
typedef MorphVec<VMin<v_uint16> > ErodeVec16u;
typedef MorphVec<VMax<v_uint16> > DilateVec16u;
typedef MorphVec<VMin<v_int16> > ErodeVec16s;
typedef MorphVec<VMax<v_int16> > DilateVec16s;
typedef MorphVec<VMin<v_float32> > ErodeVec32f;
typedef MorphVec<VMax<v_float32> > DilateVec32f;
#else
typedef MorphRowNoVec ErodeRowVec8u;
typedef MorphRowNoVec DilateRowVec8u;
typedef MorphColumnNoVec ErodeColumnVec8u;
typedef MorphColumnNoVec DilateColumnVec8u;
typedef MorphRowNoVec ErodeRowVec16u;
typedef MorphRowNoVec DilateRowVec16u;
typedef MorphRowNoVec ErodeRowVec16s;
typedef MorphRowNoVec DilateRowVec16s;
typedef MorphRowNoVec ErodeRowVec32f;
typedef MorphRowNoVec DilateRowVec32f;
typedef MorphColumnNoVec ErodeColumnVec16u;
typedef MorphColumnNoVec DilateColumnVec16u;
typedef MorphColumnNoVec ErodeColumnVec16s;
typedef MorphColumnNoVec DilateColumnVec16s;
typedef MorphColumnNoVec ErodeColumnVec32f;
typedef MorphColumnNoVec DilateColumnVec32f;
typedef MorphNoVec ErodeVec8u;
typedef MorphNoVec DilateVec8u;
typedef MorphNoVec ErodeVec16u;
typedef MorphNoVec DilateVec16u;
typedef MorphNoVec ErodeVec16s;
typedef MorphNoVec DilateVec16s;
typedef MorphNoVec ErodeVec32f;
typedef MorphNoVec DilateVec32f;
#endif
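// no SIMD path for 64-bit floats: the scalar loops are always used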
typedef MorphRowNoVec ErodeRowVec64f;
typedef MorphRowNoVec DilateRowVec64f;
typedef MorphColumnNoVec ErodeColumnVec64f;
typedef MorphColumnNoVec DilateColumnVec64f;
typedef MorphNoVec ErodeVec64f;
typedef MorphNoVec DilateVec64f;
template<class Op, class VecOp> struct MorphRowFilter : public BaseRowFilter
{
typedef typename Op::rtype T;
MorphRowFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
{
ksize = _ksize;
anchor = _anchor;
}
void operator()(const uchar* src, uchar* dst, int width, int cn) CV_OVERRIDE
{
int i, j, k, _ksize = ksize*cn;
const T* S = (const T*)src;
Op op;
T* D = (T*)dst;
if( _ksize == cn )
{
for( i = 0; i < width*cn; i++ )
D[i] = S[i];
return;
}
int i0 = vecOp(src, dst, width, cn);
width *= cn;
for( k = 0; k < cn; k++, S++, D++ )
{
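// two outputs per step: the op over the overlap of two adjacent windows
// (s[cn]..s[_ksize-cn]) is computed once in m, then combined with the
// first sample of the left window and the last sample of the right one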
for( i = i0; i <= width - cn*2; i += cn*2 )
{
const T* s = S + i;
T m = s[cn];
for( j = cn*2; j < _ksize; j += cn )
m = op(m, s[j]);
D[i] = op(m, s[0]);
D[i+cn] = op(m, s[j]);
}
for( ; i < width; i += cn )
{
const T* s = S + i;
T m = s[0];
for( j = cn; j < _ksize; j += cn )
m = op(m, s[j]);
D[i] = m;
}
}
}
VecOp vecOp;
};
template<class Op, class VecOp> struct MorphColumnFilter : public BaseColumnFilter
{
typedef typename Op::rtype T;
MorphColumnFilter( int _ksize, int _anchor ) : vecOp(_ksize, _anchor)
{
ksize = _ksize;
anchor = _anchor;
}
void operator()(const uchar** _src, uchar* dst, int dststep, int count, int width) CV_OVERRIDE
{
int i, k, _ksize = ksize;
const T** src = (const T**)_src;
T* D = (T*)dst;
Op op;
int i0 = vecOp(_src, dst, dststep, count, width);
dststep /= sizeof(D[0]);
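// pair loop: two output rows per pass, reusing the reduction over the
// ksize-1 source rows (src[1]..src[ksize-1]) shared by both windows and
// adding src[0] for the first output row and src[ksize] for the second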
for( ; _ksize > 1 && count > 1; count -= 2, D += dststep*2, src += 2 )
{
i = i0;
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
const T* sptr = src[1] + i;
T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
for( k = 2; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
}
sptr = src[0] + i;
D[i] = op(s0, sptr[0]);
D[i+1] = op(s1, sptr[1]);
D[i+2] = op(s2, sptr[2]);
D[i+3] = op(s3, sptr[3]);
sptr = src[k] + i;
D[i+dststep] = op(s0, sptr[0]);
D[i+dststep+1] = op(s1, sptr[1]);
D[i+dststep+2] = op(s2, sptr[2]);
D[i+dststep+3] = op(s3, sptr[3]);
}
#endif
for( ; i < width; i++ )
{
T s0 = src[1][i];
for( k = 2; k < _ksize; k++ )
s0 = op(s0, src[k][i]);
D[i] = op(s0, src[0][i]);
D[i+dststep] = op(s0, src[k][i]);
}
}
for( ; count > 0; count--, D += dststep, src++ )
{
i = i0;
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
const T* sptr = src[0] + i;
T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
for( k = 1; k < _ksize; k++ )
{
sptr = src[k] + i;
s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
}
D[i] = s0; D[i+1] = s1;
D[i+2] = s2; D[i+3] = s3;
}
#endif
for( ; i < width; i++ )
{
T s0 = src[0][i];
for( k = 1; k < _ksize; k++ )
s0 = op(s0, src[k][i]);
D[i] = s0;
}
}
}
VecOp vecOp;
};
template<class Op, class VecOp> struct MorphFilter : BaseFilter
{
typedef typename Op::rtype T;
MorphFilter( const Mat& _kernel, Point _anchor )
{
anchor = _anchor;
ksize = _kernel.size();
CV_Assert( _kernel.type() == CV_8U );
std::vector<uchar> coeffs; // we do not really need the values of the non-zero
// kernel elements, just their locations
preprocess2DKernel( _kernel, coords, coeffs );
ptrs.resize( coords.size() );
}
void operator()(const uchar** src, uchar* dst, int dststep, int count, int width, int cn) CV_OVERRIDE
{
const Point* pt = &coords[0];
const T** kp = (const T**)&ptrs[0];
int i, k, nz = (int)coords.size();
Op op;
width *= cn;
for( ; count > 0; count--, dst += dststep, src++ )
{
T* D = (T*)dst;
for( k = 0; k < nz; k++ )
kp[k] = (const T*)src[pt[k].y] + pt[k].x*cn;
i = vecOp(&ptrs[0], nz, dst, width);
#if CV_ENABLE_UNROLLED
for( ; i <= width - 4; i += 4 )
{
const T* sptr = kp[0] + i;
T s0 = sptr[0], s1 = sptr[1], s2 = sptr[2], s3 = sptr[3];
for( k = 1; k < nz; k++ )
{
sptr = kp[k] + i;
s0 = op(s0, sptr[0]); s1 = op(s1, sptr[1]);
s2 = op(s2, sptr[2]); s3 = op(s3, sptr[3]);
}
D[i] = s0; D[i+1] = s1;
D[i+2] = s2; D[i+3] = s3;
}
#endif
for( ; i < width; i++ )
{
T s0 = kp[0][i];
for( k = 1; k < nz; k++ )
s0 = op(s0, kp[k][i]);
D[i] = s0;
}
}
}
std::vector<Point> coords;
std::vector<uchar*> ptrs;
VecOp vecOp;
};
}
/////////////////////////////////// External Interface /////////////////////////////////////
cv::Ptr<cv::BaseRowFilter> cv::getMorphologyRowFilter(int op, int type, int ksize, int anchor)
{
int depth = CV_MAT_DEPTH(type);
if( anchor < 0 )
anchor = ksize/2;
CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
return makePtr<MorphRowFilter<MinOp<uchar>,
ErodeRowVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphRowFilter<MinOp<ushort>,
ErodeRowVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphRowFilter<MinOp<short>,
ErodeRowVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphRowFilter<MinOp<float>,
ErodeRowVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphRowFilter<MinOp<double>,
ErodeRowVec64f> >(ksize, anchor);
}
else
{
if( depth == CV_8U )
return makePtr<MorphRowFilter<MaxOp<uchar>,
DilateRowVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphRowFilter<MaxOp<ushort>,
DilateRowVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphRowFilter<MaxOp<short>,
DilateRowVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphRowFilter<MaxOp<float>,
DilateRowVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphRowFilter<MaxOp<double>,
DilateRowVec64f> >(ksize, anchor);
}
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
}
cv::Ptr<cv::BaseColumnFilter> cv::getMorphologyColumnFilter(int op, int type, int ksize, int anchor)
{
int depth = CV_MAT_DEPTH(type);
if( anchor < 0 )
anchor = ksize/2;
CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
return makePtr<MorphColumnFilter<MinOp<uchar>,
ErodeColumnVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphColumnFilter<MinOp<ushort>,
ErodeColumnVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphColumnFilter<MinOp<short>,
ErodeColumnVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphColumnFilter<MinOp<float>,
ErodeColumnVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphColumnFilter<MinOp<double>,
ErodeColumnVec64f> >(ksize, anchor);
}
else
{
if( depth == CV_8U )
return makePtr<MorphColumnFilter<MaxOp<uchar>,
DilateColumnVec8u> >(ksize, anchor);
if( depth == CV_16U )
return makePtr<MorphColumnFilter<MaxOp<ushort>,
DilateColumnVec16u> >(ksize, anchor);
if( depth == CV_16S )
return makePtr<MorphColumnFilter<MaxOp<short>,
DilateColumnVec16s> >(ksize, anchor);
if( depth == CV_32F )
return makePtr<MorphColumnFilter<MaxOp<float>,
DilateColumnVec32f> >(ksize, anchor);
if( depth == CV_64F )
return makePtr<MorphColumnFilter<MaxOp<double>,
DilateColumnVec64f> >(ksize, anchor);
}
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
}
cv::Ptr<cv::BaseFilter> cv::getMorphologyFilter(int op, int type, InputArray _kernel, Point anchor)
{
Mat kernel = _kernel.getMat();
int depth = CV_MAT_DEPTH(type);
anchor = normalizeAnchor(anchor, kernel.size());
CV_Assert( op == MORPH_ERODE || op == MORPH_DILATE );
if( op == MORPH_ERODE )
{
if( depth == CV_8U )
return makePtr<MorphFilter<MinOp<uchar>, ErodeVec8u> >(kernel, anchor);
if( depth == CV_16U )
return makePtr<MorphFilter<MinOp<ushort>, ErodeVec16u> >(kernel, anchor);
if( depth == CV_16S )
return makePtr<MorphFilter<MinOp<short>, ErodeVec16s> >(kernel, anchor);
if( depth == CV_32F )
return makePtr<MorphFilter<MinOp<float>, ErodeVec32f> >(kernel, anchor);
if( depth == CV_64F )
return makePtr<MorphFilter<MinOp<double>, ErodeVec64f> >(kernel, anchor);
}
else
{
if( depth == CV_8U )
return makePtr<MorphFilter<MaxOp<uchar>, DilateVec8u> >(kernel, anchor);
if( depth == CV_16U )
return makePtr<MorphFilter<MaxOp<ushort>, DilateVec16u> >(kernel, anchor);
if( depth == CV_16S )
return makePtr<MorphFilter<MaxOp<short>, DilateVec16s> >(kernel, anchor);
if( depth == CV_32F )
return makePtr<MorphFilter<MaxOp<float>, DilateVec32f> >(kernel, anchor);
if( depth == CV_64F )
return makePtr<MorphFilter<MaxOp<double>, DilateVec64f> >(kernel, anchor);
}
CV_Error_( CV_StsNotImplemented, ("Unsupported data type (=%d)", type));
}
cv::Ptr<cv::FilterEngine> cv::createMorphologyFilter( int op, int type, InputArray _kernel,
Point anchor, int _rowBorderType, int _columnBorderType,
const Scalar& _borderValue )
{
Mat kernel = _kernel.getMat();
anchor = normalizeAnchor(anchor, kernel.size());
Ptr<BaseRowFilter> rowFilter;
Ptr<BaseColumnFilter> columnFilter;
Ptr<BaseFilter> filter2D;
if( countNonZero(kernel) == kernel.rows*kernel.cols )
{
// rectangular structuring element
rowFilter = getMorphologyRowFilter(op, type, kernel.cols, anchor.x);
columnFilter = getMorphologyColumnFilter(op, type, kernel.rows, anchor.y);
}
else
filter2D = getMorphologyFilter(op, type, kernel, anchor);
Scalar borderValue = _borderValue;
if( (_rowBorderType == BORDER_CONSTANT || _columnBorderType == BORDER_CONSTANT) &&
borderValue == morphologyDefaultBorderValue() )
{
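// make the constant border neutral for the operation: the padding gets the
// maximum representable value for erode (a min filter) and the minimum for
// dilate (a max filter), so border pixels never influence the result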
int depth = CV_MAT_DEPTH(type);
CV_Assert( depth == CV_8U || depth == CV_16U || depth == CV_16S ||
depth == CV_32F || depth == CV_64F );
if( op == MORPH_ERODE )
borderValue = Scalar::all( depth == CV_8U ? (double)UCHAR_MAX :
depth == CV_16U ? (double)USHRT_MAX :
depth == CV_16S ? (double)SHRT_MAX :
depth == CV_32F ? (double)FLT_MAX : DBL_MAX);
else
borderValue = Scalar::all( depth == CV_8U || depth == CV_16U ?
0. :
depth == CV_16S ? (double)SHRT_MIN :
depth == CV_32F ? (double)-FLT_MAX : -DBL_MAX);
}
return makePtr<FilterEngine>(filter2D, rowFilter, columnFilter,
type, type, type, _rowBorderType, _columnBorderType, borderValue );
}
cv::Mat cv::getStructuringElement(int shape, Size ksize, Point anchor)
{
int i, j;
int r = 0, c = 0;
double inv_r2 = 0;
CV_Assert( shape == MORPH_RECT || shape == MORPH_CROSS || shape == MORPH_ELLIPSE );
anchor = normalizeAnchor(anchor, ksize);
if( ksize == Size(1,1) )
shape = MORPH_RECT;
if( shape == MORPH_ELLIPSE )
{
r = ksize.height/2;
c = ksize.width/2;
inv_r2 = r ? 1./((double)r*r) : 0;
}
Mat elem(ksize, CV_8U);
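// fill each row: the full row for MORPH_RECT (and the anchor row of
// MORPH_CROSS), a single anchor column for MORPH_CROSS, and for
// MORPH_ELLIPSE the chord |j - c| <= c*sqrt(1 - (i - r)^2/r^2)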
for( i = 0; i < ksize.height; i++ )
{
uchar* ptr = elem.ptr(i);
int j1 = 0, j2 = 0;
if( shape == MORPH_RECT || (shape == MORPH_CROSS && i == anchor.y) )
j2 = ksize.width;
else if( shape == MORPH_CROSS )
j1 = anchor.x, j2 = j1 + 1;
else
{
int dy = i - r;
if( std::abs(dy) <= r )
{
int dx = saturate_cast<int>(c*std::sqrt((r*r - dy*dy)*inv_r2));
j1 = std::max( c - dx, 0 );
j2 = std::min( c + dx + 1, ksize.width );
}
}
for( j = 0; j < j1; j++ )
ptr[j] = 0;
for( ; j < j2; j++ )
ptr[j] = 1;
for( ; j < ksize.width; j++ )
ptr[j] = 0;
}
return elem;
}
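// A typical use (sketch): build a 5x5 elliptical element and erode with it:
//   cv::Mat se = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
//   cv::erode(img, out, se);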
namespace cv
{
// ===== 1. replacement implementation
static bool halMorph(int op, int src_type, int dst_type,
uchar * src_data, size_t src_step,
uchar * dst_data, size_t dst_step,
int width, int height,
int roi_width, int roi_height, int roi_x, int roi_y,
int roi_width2, int roi_height2, int roi_x2, int roi_y2,
int kernel_type, uchar * kernel_data, size_t kernel_step,
int kernel_width, int kernel_height, int anchor_x, int anchor_y,
int borderType, const double borderValue[4], int iterations, bool isSubmatrix)
{
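// try the external HAL: init -> run -> free; returning false here lets the
// caller fall back to the IPP or plain OpenCV implementations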
cvhalFilter2D * ctx;
int res = cv_hal_morphInit(&ctx, op, src_type, dst_type, width, height,
kernel_type, kernel_data, kernel_step, kernel_width, kernel_height,
anchor_x, anchor_y,
borderType, borderValue,
iterations, isSubmatrix, src_data == dst_data);
if (res != CV_HAL_ERROR_OK)
return false;
res = cv_hal_morph(ctx, src_data, src_step, dst_data, dst_step, width, height,
roi_width, roi_height,
roi_x, roi_y,
roi_width2, roi_height2,
roi_x2, roi_y2);
bool success = (res == CV_HAL_ERROR_OK);
res = cv_hal_morphFree(ctx);
if (res != CV_HAL_ERROR_OK)
return false;
return success;
}
// ===== 2. IPP implementation
#ifdef HAVE_IPP
#ifdef HAVE_IPP_IW
static inline IwiMorphologyType ippiGetMorphologyType(int morphOp)
{
return morphOp == MORPH_ERODE ? iwiMorphErode :
morphOp == MORPH_DILATE ? iwiMorphDilate :
morphOp == MORPH_OPEN ? iwiMorphOpen :
morphOp == MORPH_CLOSE ? iwiMorphClose :
morphOp == MORPH_GRADIENT ? iwiMorphGradient :
morphOp == MORPH_TOPHAT ? iwiMorphTophat :
morphOp == MORPH_BLACKHAT ? iwiMorphBlackhat : (IwiMorphologyType)-1;
}
#endif
static bool ippMorph(int op, int src_type, int dst_type,
const uchar * src_data, size_t src_step,
uchar * dst_data, size_t dst_step,
int width, int height,
int roi_width, int roi_height, int roi_x, int roi_y,
int roi_width2, int roi_height2, int roi_x2, int roi_y2,
int kernel_type, uchar * kernel_data, size_t kernel_step,
int kernel_width, int kernel_height, int anchor_x, int anchor_y,
int borderType, const double borderValue[4], int iterations, bool isSubmatrix)
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
#if IPP_VERSION_X100 < 201800
// Problem with SSE42 optimizations performance
if(cv::ipp::getIppTopFeatures() == ippCPUID_SSE42)
return false;
// Different mask flipping
if(op == MORPH_GRADIENT)
return false;
// Integer overflow bug
if(src_step >= IPP_MAX_32S ||
src_step*height >= IPP_MAX_32S)
return false;
#endif
#if IPP_VERSION_X100 < 201801
// Problem with AVX512 optimizations performance
if(cv::ipp::getIppTopFeatures()&ippCPUID_AVX512F)
return false;
// Multiple iterations on a small mask are not effective in the current integration
// In-place imitation for the 3x3 kernel is not efficient
// Advanced morphology for small masks introduces degradations
if((iterations > 1 || src_data == dst_data || (op != MORPH_ERODE && op != MORPH_DILATE)) && kernel_width*kernel_height < 25)
return false;
// Skip even mask sizes for advanced morphology since they can produce out-of-spec writes
if((op != MORPH_ERODE && op != MORPH_DILATE) && (!(kernel_width&1) || !(kernel_height&1)))
return false;
#endif
IppAutoBuffer<Ipp8u> kernelTempBuffer;
::ipp::IwiBorderSize iwBorderSize;
::ipp::IwiBorderSize iwBorderSize2;
::ipp::IwiBorderType iwBorderType;
::ipp::IwiBorderType iwBorderType2;
::ipp::IwiImage iwMask;
::ipp::IwiImage iwInter;
::ipp::IwiSize initSize(width, height);
::ipp::IwiSize kernelSize(kernel_width, kernel_height);
IppDataType type = ippiGetDataType(CV_MAT_DEPTH(src_type));
int channels = CV_MAT_CN(src_type);
IwiMorphologyType morphType = ippiGetMorphologyType(op);
CV_UNUSED(isSubmatrix);
if((int)morphType < 0)
return false;
if(iterations > 1 && morphType != iwiMorphErode && morphType != iwiMorphDilate)
return false;
if(src_type != dst_type)
return false;
if(!ippiCheckAnchor(anchor_x, anchor_y, kernel_width, kernel_height))
return false;
try
{
::ipp::IwiImage iwSrc(initSize, type, channels, ::ipp::IwiBorderSize(roi_x, roi_y, roi_width-roi_x-width, roi_height-roi_y-height), (void*)src_data, src_step);
::ipp::IwiImage iwDst(initSize, type, channels, ::ipp::IwiBorderSize(roi_x2, roi_y2, roi_width2-roi_x2-width, roi_height2-roi_y2-height), (void*)dst_data, dst_step);
iwBorderSize = ::ipp::iwiSizeToBorderSize(kernelSize);
iwBorderType = ippiGetBorder(iwSrc, borderType, iwBorderSize);
if(!iwBorderType)
return false;
if(iterations > 1)
{
// Check dst border for second and later iterations
iwBorderSize2 = ::ipp::iwiSizeToBorderSize(kernelSize);
iwBorderType2 = ippiGetBorder(iwDst, borderType, iwBorderSize2);
if(!iwBorderType2)
return false;
}
if(morphType != iwiMorphErode && morphType != iwiMorphDilate && morphType != iwiMorphGradient)
{
// For now, complex morphology supports only InMem borders on all sides. This will be improved later.
if((iwBorderType&ippBorderInMem) && (iwBorderType&ippBorderInMem) != ippBorderInMem)
return false;
if((iwBorderType&ippBorderInMem) == ippBorderInMem)
{
iwBorderType &= ~ippBorderInMem;
iwBorderType |= ippBorderFirstStageInMem;
}
}
if(iwBorderType.StripFlags() == ippBorderConst)
{
if(Vec<double, 4>(borderValue) == morphologyDefaultBorderValue())
iwBorderType.SetType(ippBorderDefault);
else
iwBorderType.m_value = ::ipp::IwValueFloat(borderValue[0], borderValue[1], borderValue[2], borderValue[3]);
}
iwMask.Init(ippiSize(kernel_width, kernel_height), ippiGetDataType(CV_MAT_DEPTH(kernel_type)), CV_MAT_CN(kernel_type), 0, kernel_data, kernel_step);
::ipp::IwiImage iwMaskLoc = iwMask;
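// OpenCV's dilate applies the structuring element reflected about its
// anchor, so mirror the mask for IPP to produce matching results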
if(morphType == iwiMorphDilate)
{
iwMaskLoc.Alloc(iwMask.m_size, iwMask.m_dataType, iwMask.m_channels);
::ipp::iwiMirror(iwMask, iwMaskLoc, ippAxsBoth);
iwMask = iwMaskLoc;
}
if(iterations > 1)
{
// OpenCV uses the in-memory border from dst for the second and later iterations, so we need to keep this border in the intermediate image
iwInter.Alloc(initSize, type, channels, iwBorderSize2);
::ipp::IwiImage *pSwap[2] = {&iwInter, &iwDst};
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterMorphology, iwSrc, iwInter, morphType, iwMask, ::ipp::IwDefault(), iwBorderType);
// Copy border only
{
if(iwBorderSize2.top)
{
::ipp::IwiRoi borderRoi(-iwBorderSize2.left, -iwBorderSize2.top, iwDst.m_size.width+iwBorderSize2.left+iwBorderSize2.right, iwBorderSize2.top);
::ipp::IwiImage iwInterRoi = iwInter.GetRoiImage(borderRoi);
::ipp::iwiCopy(iwDst.GetRoiImage(borderRoi), iwInterRoi);
}
if(iwBorderSize2.bottom)
{
::ipp::IwiRoi borderRoi(-iwBorderSize2.left, iwDst.m_size.height, iwDst.m_size.width+iwBorderSize2.left+iwBorderSize2.right, iwBorderSize2.bottom);
::ipp::IwiImage iwInterRoi = iwInter.GetRoiImage(borderRoi);
::ipp::iwiCopy(iwDst.GetRoiImage(borderRoi), iwInterRoi);
}
if(iwBorderSize2.left)
{
::ipp::IwiRoi borderRoi(-iwBorderSize2.left, 0, iwBorderSize2.left, iwDst.m_size.height);
::ipp::IwiImage iwInterRoi = iwInter.GetRoiImage(borderRoi);
::ipp::iwiCopy(iwDst.GetRoiImage(borderRoi), iwInterRoi);
}
if(iwBorderSize2.right)
{
::ipp::IwiRoi borderRoi(iwDst.m_size.width, 0, iwBorderSize2.right, iwDst.m_size.height);
::ipp::IwiImage iwInterRoi = iwInter.GetRoiImage(borderRoi);
::ipp::iwiCopy(iwDst.GetRoiImage(borderRoi), iwInterRoi);
}
}
iwBorderType2.SetType(iwBorderType);
for(int i = 0; i < iterations-1; i++)
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterMorphology, *pSwap[i&0x1], *pSwap[(i+1)&0x1], morphType, iwMask, ::ipp::IwDefault(), iwBorderType2);
if(iterations&0x1)
CV_INSTRUMENT_FUN_IPP(::ipp::iwiCopy, iwInter, iwDst);
}
else
{
if(src_data == dst_data)
{
iwInter.Alloc(initSize, type, channels);
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterMorphology, iwSrc, iwInter, morphType, iwMask, ::ipp::IwDefault(), iwBorderType);
CV_INSTRUMENT_FUN_IPP(::ipp::iwiCopy, iwInter, iwDst);
}
else
CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterMorphology, iwSrc, iwDst, morphType, iwMask, ::ipp::IwDefault(), iwBorderType);
}
}
catch(const ::ipp::IwException &)
{
return false;
}
return true;
#else
CV_UNUSED(op); CV_UNUSED(src_type); CV_UNUSED(dst_type); CV_UNUSED(src_data); CV_UNUSED(src_step); CV_UNUSED(dst_data);
CV_UNUSED(dst_step); CV_UNUSED(width); CV_UNUSED(height); CV_UNUSED(roi_width); CV_UNUSED(roi_height);
CV_UNUSED(roi_x); CV_UNUSED(roi_y); CV_UNUSED(roi_width2); CV_UNUSED(roi_height2); CV_UNUSED(roi_x2); CV_UNUSED(roi_y2);
CV_UNUSED(kernel_type); CV_UNUSED(kernel_data); CV_UNUSED(kernel_step); CV_UNUSED(kernel_width); CV_UNUSED(kernel_height);
CV_UNUSED(anchor_x); CV_UNUSED(anchor_y); CV_UNUSED(borderType); CV_UNUSED(borderValue); CV_UNUSED(iterations);
CV_UNUSED(isSubmatrix);
return false;
#endif
}
#endif // HAVE_IPP
// ===== 3. Fallback implementation
static void ocvMorph(int op, int src_type, int dst_type,
uchar * src_data, size_t src_step,
uchar * dst_data, size_t dst_step,
int width, int height,
int roi_width, int roi_height, int roi_x, int roi_y,
int roi_width2, int roi_height2, int roi_x2, int roi_y2,
int kernel_type, uchar * kernel_data, size_t kernel_step,
int kernel_width, int kernel_height, int anchor_x, int anchor_y,
int borderType, const double borderValue[4], int iterations)
{
Mat kernel(Size(kernel_width, kernel_height), kernel_type, kernel_data, kernel_step);
Point anchor(anchor_x, anchor_y);
Vec<double, 4> borderVal(borderValue);
Ptr<FilterEngine> f = createMorphologyFilter(op, src_type, kernel, anchor, borderType, borderType, borderVal);
Mat src(Size(width, height), src_type, src_data, src_step);
Mat dst(Size(width, height), dst_type, dst_data, dst_step);
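// the first iteration reads src through its own ROI; all remaining
// iterations run in place on dst using dst's ROI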
{
Point ofs(roi_x, roi_y);
Size wsz(roi_width, roi_height);
f->apply( src, dst, wsz, ofs );
}
{
Point ofs(roi_x2, roi_y2);
Size wsz(roi_width2, roi_height2);
for( int i = 1; i < iterations; i++ )
f->apply( dst, dst, wsz, ofs );
}
}
// ===== HAL interface implementation
namespace hal {
CV_DEPRECATED Ptr<Morph> Morph::create(int , int , int , int , int ,
int , uchar * , size_t ,
int , int ,
int , int ,
int , const double *,
int , bool , bool ) { return Ptr<hal::Morph>(); }
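// dispatch: the external HAL replacement is tried first, then IPP, then
// the portable FilterEngine-based fallback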
void morph(int op, int src_type, int dst_type,
uchar * src_data, size_t src_step,
uchar * dst_data, size_t dst_step,
int width, int height,
int roi_width, int roi_height, int roi_x, int roi_y,
int roi_width2, int roi_height2, int roi_x2, int roi_y2,
int kernel_type, uchar * kernel_data, size_t kernel_step,
int kernel_width, int kernel_height, int anchor_x, int anchor_y,
int borderType, const double borderValue[4], int iterations, bool isSubmatrix)
{
{
bool res = halMorph(op, src_type, dst_type, src_data, src_step, dst_data, dst_step, width, height,
roi_width, roi_height, roi_x, roi_y,
roi_width2, roi_height2, roi_x2, roi_y2,
kernel_type, kernel_data, kernel_step,
kernel_width, kernel_height, anchor_x, anchor_y,
borderType, borderValue, iterations, isSubmatrix);
if (res)
return;
}
CV_IPP_RUN_FAST(ippMorph(op, src_type, dst_type, src_data, src_step, dst_data, dst_step, width, height,
roi_width, roi_height, roi_x, roi_y,
roi_width2, roi_height2, roi_x2, roi_y2,
kernel_type, kernel_data, kernel_step,
kernel_width, kernel_height, anchor_x, anchor_y,
borderType, borderValue, iterations, isSubmatrix));
ocvMorph(op, src_type, dst_type, src_data, src_step, dst_data, dst_step, width, height,
roi_width, roi_height, roi_x, roi_y,
roi_width2, roi_height2, roi_x2, roi_y2,
kernel_type, kernel_data, kernel_step,
kernel_width, kernel_height, anchor_x, anchor_y,
borderType, borderValue, iterations);
}
} // cv::hal
#ifdef HAVE_OPENCL
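// Rounds sz up to the nearest multiple of n.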
#define ROUNDUP(sz, n) ((sz) + (n) - 1 - (((sz) + (n) - 1) % (n)))
static bool ocl_morph3x3_8UC1( InputArray _src, OutputArray _dst, InputArray _kernel, Point anchor,
int op, int actual_op = -1, InputArray _extraMat = noArray())
{
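// Specialized path: CV_8UC1 only, 3x3 kernel with centered anchor, zero
// source offset, 4-byte-aligned steps, width divisible by 16 and height
// divisible by 2, so that each work item covers a 16x2 pixel tile.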
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
Size ksize = _kernel.size();
Mat kernel8u;
String processing;
bool haveExtraMat = !_extraMat.empty();
CV_Assert(actual_op <= 3 || haveExtraMat);
_kernel.getMat().convertTo(kernel8u, CV_8U);
for (int y = 0; y < kernel8u.rows; ++y)
for (int x = 0; x < kernel8u.cols; ++x)
if (kernel8u.at<uchar>(y, x) != 0)
processing += format("PROCESS(%d,%d)", y, x);
if (anchor.x < 0)
anchor.x = ksize.width / 2;
if (anchor.y < 0)
anchor.y = ksize.height / 2;
if (actual_op < 0)
actual_op = op;
if (type != CV_8UC1 ||
!((_src.offset() == 0) && (_src.step() % 4 == 0)) ||
!((_src.cols() % 16 == 0) && (_src.rows() % 2 == 0)) ||
!(anchor.x == 1 && anchor.y == 1) ||
!(ksize.width == 3 && ksize.height == 3))
return false;
Size size = _src.size();
size_t globalsize[2] = { 0, 0 };
size_t localsize[2] = { 0, 0 };
globalsize[0] = size.width / 16;
globalsize[1] = size.height / 2;
static const char * const op2str[] = { "OP_ERODE", "OP_DILATE", NULL, NULL, "OP_GRADIENT", "OP_TOPHAT", "OP_BLACKHAT" };
String opts = format("-D PROCESS_ELEM_=%s -D %s%s", processing.c_str(), op2str[op],
actual_op == op ? "" : cv::format(" -D %s", op2str[actual_op]).c_str());
ocl::Kernel k;
k.create("morph3x3_8UC1_cols16_rows2", cv::ocl::imgproc::morph3x3_oclsrc, opts);
if (k.empty())
return false;
UMat src = _src.getUMat();
_dst.create(size, CV_MAKETYPE(depth, cn));
if (!(_dst.offset() == 0 && _dst.step() % 4 == 0))
return false;
UMat dst = _dst.getUMat();
UMat extraMat = _extraMat.getUMat();
int idxArg = k.set(0, ocl::KernelArg::PtrReadOnly(src));
idxArg = k.set(idxArg, (int)src.step);
idxArg = k.set(idxArg, ocl::KernelArg::PtrWriteOnly(dst));
idxArg = k.set(idxArg, (int)dst.step);
idxArg = k.set(idxArg, (int)dst.rows);
idxArg = k.set(idxArg, (int)dst.cols);
if (haveExtraMat)
{
idxArg = k.set(idxArg, ocl::KernelArg::ReadOnlyNoSize(extraMat));
}
return k.run(2, globalsize, (localsize[0] == 0) ? NULL : localsize, false);
}
static bool ocl_morphSmall( InputArray _src, OutputArray _dst, InputArray _kernel, Point anchor, int borderType,
int op, int actual_op = -1, InputArray _extraMat = noArray())
{
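// Generic small-kernel OpenCL path built on the filterSmall program; each
// work item computes a small tile of output pixels.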
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
bool doubleSupport = dev.doubleFPConfig() > 0;
if (cn > 4 || (!doubleSupport && depth == CV_64F) ||
_src.offset() % esz != 0 || _src.step() % esz != 0)
return false;
bool haveExtraMat = !_extraMat.empty();
CV_Assert(actual_op <= 3 || haveExtraMat);
Size ksize = _kernel.size();
if (anchor.x < 0)
anchor.x = ksize.width / 2;
if (anchor.y < 0)
anchor.y = ksize.height / 2;
Size size = _src.size(), wholeSize;
bool isolated = (borderType & BORDER_ISOLATED) != 0;
borderType &= ~BORDER_ISOLATED;
int wdepth = depth, wtype = type;
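// CV_8U data gets a CV_32S working type; the wider type is only wired into
// the kernel when an extra matrix is passed (see the build options below),
// to keep the fused subtraction from overflowing.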
if (depth == CV_8U)
{
wdepth = CV_32S;
wtype = CV_MAKETYPE(wdepth, cn);
}
char cvt[2][40];
const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE",
"BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
size_t globalsize[2] = { (size_t)size.width, (size_t)size.height };
UMat src = _src.getUMat();
if (!isolated)
{
Point ofs;
src.locateROI(wholeSize, ofs);
}
int h = isolated ? size.height : wholeSize.height;
int w = isolated ? size.width : wholeSize.width;
if (w < ksize.width || h < ksize.height)
return false;
// Figure out what vector size to use for loading the pixels.
int pxLoadNumPixels = cn != 1 || size.width % 4 ? 1 : 4;
int pxLoadVecSize = cn * pxLoadNumPixels;
// Figure out how many pixels per work item to compute in X and Y
// directions. Too many and we run out of registers.
int pxPerWorkItemX = 1, pxPerWorkItemY = 1;
if (cn <= 2 && ksize.width <= 4 && ksize.height <= 4)
{
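// Pick the largest of 8/4/2/1 that divides the width evenly (and 2/1 for
// the height) so every work item covers whole pixels.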
pxPerWorkItemX = size.width % 8 ? size.width % 4 ? size.width % 2 ? 1 : 2 : 4 : 8;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
else if (cn < 4 || (ksize.width <= 4 && ksize.height <= 4))
{
pxPerWorkItemX = size.width % 2 ? 1 : 2;
pxPerWorkItemY = size.height % 2 ? 1 : 2;
}
globalsize[0] = size.width / pxPerWorkItemX;
globalsize[1] = size.height / pxPerWorkItemY;
// Pad the private pixel array so its width is a multiple of the load vector size
int privDataWidth = ROUNDUP(pxPerWorkItemX + ksize.width - 1, pxLoadNumPixels);
// Make the global size a nice round number so the runtime can pick
// from reasonable choices for the workgroup size
const int wgRound = 256;
globalsize[0] = ROUNDUP(globalsize[0], wgRound);
if (actual_op < 0)
actual_op = op;
// Build a PROCESS(y, x) expansion for every non-zero kernel element
String processing;
Mat kernel8u;
_kernel.getMat().convertTo(kernel8u, CV_8U);
for (int y = 0; y < kernel8u.rows; ++y)
for (int x = 0; x < kernel8u.cols; ++x)
if (kernel8u.at<uchar>(y, x) != 0)
processing += format("PROCESS(%d,%d)", y, x);
static const char * const op2str[] = { "OP_ERODE", "OP_DILATE", NULL, NULL, "OP_GRADIENT", "OP_TOPHAT", "OP_BLACKHAT" };
String opts = format("-D cn=%d "
"-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
"-D PX_LOAD_VEC_SIZE=%d -D PX_LOAD_NUM_PX=%d -D DEPTH_%d "
"-D PX_PER_WI_X=%d -D PX_PER_WI_Y=%d -D PRIV_DATA_WIDTH=%d -D %s -D %s "
"-D PX_LOAD_X_ITERATIONS=%d -D PX_LOAD_Y_ITERATIONS=%d "
"-D srcT=%s -D srcT1=%s -D dstT=srcT -D dstT1=srcT1 -D WT=%s -D WT1=%s "
"-D convertToWT=%s -D convertToDstT=%s -D PX_LOAD_FLOAT_VEC_CONV=convert_%s -D PROCESS_ELEM_=%s -D %s%s",
cn, anchor.x, anchor.y, ksize.width, ksize.height,
pxLoadVecSize, pxLoadNumPixels, depth,
pxPerWorkItemX, pxPerWorkItemY, privDataWidth, borderMap[borderType],
isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
privDataWidth / pxLoadNumPixels, pxPerWorkItemY + ksize.height - 1,
ocl::typeToStr(type), ocl::typeToStr(depth),
haveExtraMat ? ocl::typeToStr(wtype):"srcT",//to prevent overflow - WT
haveExtraMat ? ocl::typeToStr(wdepth):"srcT1",//to prevent overflow - WT1
haveExtraMat ? ocl::convertTypeStr(depth, wdepth, cn, cvt[0]) : "noconvert",//to prevent overflow - src to WT
haveExtraMat ? ocl::convertTypeStr(wdepth, depth, cn, cvt[1]) : "noconvert",//to prevent overflow - WT to dst
ocl::typeToStr(CV_MAKE_TYPE(haveExtraMat ? wdepth : depth, pxLoadVecSize)), //PX_LOAD_FLOAT_VEC_CONV
processing.c_str(), op2str[op],
actual_op == op ? "" : cv::format(" -D %s", op2str[actual_op]).c_str());
ocl::Kernel kernel("filterSmall", cv::ocl::imgproc::filterSmall_oclsrc, opts);
if (kernel.empty())
return false;
_dst.create(size, type);
UMat dst = _dst.getUMat();
UMat source;
if(src.u != dst.u)
source = src;
else
{
Point ofs;
int cols = src.cols, rows = src.rows;
src.locateROI(wholeSize, ofs);
src.adjustROI(ofs.y, wholeSize.height - rows - ofs.y, ofs.x, wholeSize.width - cols - ofs.x);
src.copyTo(source);
src.adjustROI(-ofs.y, -wholeSize.height + rows + ofs.y, -ofs.x, -wholeSize.width + cols + ofs.x);
source.adjustROI(-ofs.y, -wholeSize.height + rows + ofs.y, -ofs.x, -wholeSize.width + cols + ofs.x);
source.locateROI(wholeSize, ofs);
}
UMat extraMat = _extraMat.getUMat();
int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(source));
idxArg = kernel.set(idxArg, (int)source.step);
int srcOffsetX = (int)((source.offset % source.step) / source.elemSize());
int srcOffsetY = (int)(source.offset / source.step);
int srcEndX = isolated ? srcOffsetX + size.width : wholeSize.width;
int srcEndY = isolated ? srcOffsetY + size.height : wholeSize.height;
idxArg = kernel.set(idxArg, srcOffsetX);
idxArg = kernel.set(idxArg, srcOffsetY);
idxArg = kernel.set(idxArg, srcEndX);
idxArg = kernel.set(idxArg, srcEndY);
idxArg = kernel.set(idxArg, ocl::KernelArg::WriteOnly(dst));
if (haveExtraMat)
{
idxArg = kernel.set(idxArg, ocl::KernelArg::ReadOnlyNoSize(extraMat));
}
return kernel.run(2, globalsize, NULL, false);
}
static bool ocl_morphOp(InputArray _src, OutputArray _dst, InputArray _kernel,
Point anchor, int iterations, int op, int borderType,
const Scalar &, int actual_op = -1, InputArray _extraMat = noArray())
{
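// Generic OpenCL morphology: arbitrary kernels and iteration counts,
// constant border only (checked below).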
const ocl::Device & dev = ocl::Device::getDefault();
int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
Mat kernel = _kernel.getMat();
Size ksize = !kernel.empty() ? kernel.size() : Size(3, 3), ssize = _src.size();
bool doubleSupport = dev.doubleFPConfig() > 0;
if ((depth == CV_64F && !doubleSupport) || borderType != BORDER_CONSTANT)
return false;
bool haveExtraMat = !_extraMat.empty();
CV_Assert(actual_op <= 3 || haveExtraMat);
if (kernel.empty())
{
ksize = Size(1+iterations*2,1+iterations*2);
kernel = getStructuringElement(MORPH_RECT, ksize);
anchor = Point(iterations, iterations);
iterations = 1;
CV_DbgAssert(ksize == kernel.size());
}
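// A fully filled rectangular kernel applied N times equals one pass with an
// enlarged kernel: each dimension grows from w to w + (N-1)*(w-1).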
else if( iterations > 1 && countNonZero(kernel) == kernel.rows*kernel.cols )
{
ksize = Size(ksize.width + (iterations-1)*(ksize.width-1),
ksize.height + (iterations-1)*(ksize.height-1));
anchor = Point(anchor.x*iterations, anchor.y*iterations);
kernel = getStructuringElement(MORPH_RECT, ksize, anchor);
iterations = 1;
CV_DbgAssert(ksize == kernel.size());
}
static bool param_use_morph_special_kernels = utils::getConfigurationParameterBool("OPENCV_OPENCL_IMGPROC_MORPH_SPECIAL_KERNEL",
#ifndef __APPLE__
true
#else
false
#endif
);
int esz = CV_ELEM_SIZE(type);
// try to use an OpenCL kernel adapted for small morphology kernels
if (param_use_morph_special_kernels && dev.isIntel() &&
((ksize.width < 5 && ksize.height < 5 && esz <= 4) ||
(ksize.width == 5 && ksize.height == 5 && cn == 1)) &&
(iterations == 1)
)
{
if (ocl_morph3x3_8UC1(_src, _dst, kernel, anchor, op, actual_op, _extraMat))
return true;
if (ocl_morphSmall(_src, _dst, kernel, anchor, borderType, op, actual_op, _extraMat))
return true;
}
if (iterations == 0 || kernel.rows*kernel.cols == 1)
{
_src.copyTo(_dst);
return true;
}
#ifdef __ANDROID__
size_t localThreads[2] = { 16, 8 };
#else
size_t localThreads[2] = { 16, 16 };
#endif
size_t globalThreads[2] = { (size_t)ssize.width, (size_t)ssize.height };
#ifdef __APPLE__
if( actual_op != MORPH_ERODE && actual_op != MORPH_DILATE )
localThreads[0] = localThreads[1] = 4;
#endif
if (localThreads[0]*localThreads[1] * 2 < (localThreads[0] + ksize.width - 1) * (localThreads[1] + ksize.height - 1))
return false;
#ifdef __ANDROID__
if (dev.isNVidia())
return false;
#endif
// Build a PROCESS(y, x) expansion for every non-zero kernel element
String processing;
Mat kernel8u;
kernel.convertTo(kernel8u, CV_8U);
for (int y = 0; y < kernel8u.rows; ++y)
for (int x = 0; x < kernel8u.cols; ++x)
if (kernel8u.at<uchar>(y, x) != 0)
processing += format("PROCESS(%d,%d)", y, x);
static const char * const op2str[] = { "OP_ERODE", "OP_DILATE", NULL, NULL, "OP_GRADIENT", "OP_TOPHAT", "OP_BLACKHAT" };
char cvt[2][50];
int wdepth = std::max(depth, CV_32F), scalarcn = cn == 3 ? 4 : cn;
if (actual_op < 0)
actual_op = op;
std::vector<ocl::Kernel> kernels(iterations);
for (int i = 0; i < iterations; i++)
{
int current_op = iterations == i + 1 ? actual_op : op;
String buildOptions = format("-D RADIUSX=%d -D RADIUSY=%d -D LSIZE0=%d -D LSIZE1=%d -D %s%s"
" -D PROCESS_ELEMS=%s -D T=%s -D DEPTH_%d -D cn=%d -D T1=%s"
" -D convertToWT=%s -D convertToT=%s -D ST=%s%s",
anchor.x, anchor.y, (int)localThreads[0], (int)localThreads[1], op2str[op],
doubleSupport ? " -D DOUBLE_SUPPORT" : "", processing.c_str(),
ocl::typeToStr(type), depth, cn, ocl::typeToStr(depth),
ocl::convertTypeStr(depth, wdepth, cn, cvt[0]),
ocl::convertTypeStr(wdepth, depth, cn, cvt[1]),
ocl::typeToStr(CV_MAKE_TYPE(depth, scalarcn)),
current_op == op ? "" : cv::format(" -D %s", op2str[current_op]).c_str());
kernels[i].create("morph", ocl::imgproc::morph_oclsrc, buildOptions);
if (kernels[i].empty())
return false;
}
UMat src = _src.getUMat(), extraMat = _extraMat.getUMat();
_dst.create(src.size(), src.type());
UMat dst = _dst.getUMat();
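// Fast path: a single iteration with non-aliasing buffers can run directly,
// without copying the source.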
if (iterations == 1 && src.u != dst.u)
{
Size wholesize;
Point ofs;
src.locateROI(wholesize, ofs);
int wholecols = wholesize.width, wholerows = wholesize.height;
if (haveExtraMat)
kernels[0].args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnlyNoSize(dst),
ofs.x, ofs.y, src.cols, src.rows, wholecols, wholerows,
ocl::KernelArg::ReadOnlyNoSize(extraMat));
else
kernels[0].args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnlyNoSize(dst),
ofs.x, ofs.y, src.cols, src.rows, wholecols, wholerows);
return kernels[0].run(2, globalThreads, localThreads, false);
}
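// General path: each iteration copies its input into `source` (unless the
// very first input does not alias dst), with the ROI temporarily widened so
// pixels surrounding a submatrix stay visible to the kernel.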
for (int i = 0; i < iterations; i++)
{
UMat source;
Size wholesize;
Point ofs;
if (i == 0)
{
int cols = src.cols, rows = src.rows;
src.locateROI(wholesize, ofs);
src.adjustROI(ofs.y, wholesize.height - rows - ofs.y, ofs.x, wholesize.width - cols - ofs.x);
if(src.u != dst.u)
source = src;
else
src.copyTo(source);
src.adjustROI(-ofs.y, -wholesize.height + rows + ofs.y, -ofs.x, -wholesize.width + cols + ofs.x);
source.adjustROI(-ofs.y, -wholesize.height + rows + ofs.y, -ofs.x, -wholesize.width + cols + ofs.x);
}
else
{
int cols = dst.cols, rows = dst.rows;
dst.locateROI(wholesize, ofs);
dst.adjustROI(ofs.y, wholesize.height - rows - ofs.y, ofs.x, wholesize.width - cols - ofs.x);
dst.copyTo(source);
dst.adjustROI(-ofs.y, -wholesize.height + rows + ofs.y, -ofs.x, -wholesize.width + cols + ofs.x);
source.adjustROI(-ofs.y, -wholesize.height + rows + ofs.y, -ofs.x, -wholesize.width + cols + ofs.x);
}
source.locateROI(wholesize, ofs);
if (haveExtraMat && iterations == i + 1)
kernels[i].args(ocl::KernelArg::ReadOnlyNoSize(source), ocl::KernelArg::WriteOnlyNoSize(dst),
ofs.x, ofs.y, source.cols, source.rows, wholesize.width, wholesize.height,
ocl::KernelArg::ReadOnlyNoSize(extraMat));
else
kernels[i].args(ocl::KernelArg::ReadOnlyNoSize(source), ocl::KernelArg::WriteOnlyNoSize(dst),
ofs.x, ofs.y, source.cols, source.rows, wholesize.width, wholesize.height);
if (!kernels[i].run(2, globalThreads, localThreads, false))
return false;
}
return true;
}
#endif
static void morphOp( int op, InputArray _src, OutputArray _dst,
InputArray _kernel,
Point anchor, int iterations,
int borderType, const Scalar& borderValue )
{
CV_INSTRUMENT_REGION();
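// Shared implementation behind erode() and dilate(): try OpenCL first, then
// normalize kernel/iterations and hand off to the HAL dispatcher.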
Mat kernel = _kernel.getMat();
Size ksize = !kernel.empty() ? kernel.size() : Size(3,3);
anchor = normalizeAnchor(anchor, ksize);
CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2 && _src.channels() <= 4 &&
borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue() &&
(op == MORPH_ERODE || op == MORPH_DILATE) &&
anchor.x == ksize.width >> 1 && anchor.y == ksize.height >> 1,
ocl_morphOp(_src, _dst, kernel, anchor, iterations, op, borderType, borderValue) )
if (iterations == 0 || kernel.rows*kernel.cols == 1)
{
_src.copyTo(_dst);
return;
}
if (kernel.empty())
{
kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2));
anchor = Point(iterations, iterations);
iterations = 1;
}
else if( iterations > 1 && countNonZero(kernel) == kernel.rows*kernel.cols )
{
anchor = Point(anchor.x*iterations, anchor.y*iterations);
kernel = getStructuringElement(MORPH_RECT,
Size(ksize.width + (iterations-1)*(ksize.width-1),
ksize.height + (iterations-1)*(ksize.height-1)),
anchor);
iterations = 1;
}
Mat src = _src.getMat();
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
Point s_ofs;
Size s_wsz(src.cols, src.rows);
Point d_ofs;
Size d_wsz(dst.cols, dst.rows);
bool isolated = (borderType & BORDER_ISOLATED) != 0;
borderType = (borderType&~BORDER_ISOLATED);
if(!isolated)
{
src.locateROI(s_wsz, s_ofs);
dst.locateROI(d_wsz, d_ofs);
}
hal::morph(op, src.type(), dst.type(),
src.data, src.step,
dst.data, dst.step,
src.cols, src.rows,
s_wsz.width, s_wsz.height, s_ofs.x, s_ofs.y,
d_wsz.width, d_wsz.height, d_ofs.x, d_ofs.y,
kernel.type(), kernel.data, kernel.step, kernel.cols, kernel.rows, anchor.x, anchor.y,
borderType, borderValue.val, iterations,
(src.isSubmatrix() && !isolated));
}
}  // namespace cv
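// Minimal usage sketch for the two basic operations ("input.png" is just a
// placeholder file name):
//   cv::Mat src = cv::imread("input.png", cv::IMREAD_GRAYSCALE), dst;
//   cv::Mat se = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
//   cv::erode(src, dst, se);                        // shrink bright regions
//   cv::dilate(src, dst, se, cv::Point(-1, -1), 2); // grow them, 2 iterations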
void cv::erode( InputArray src, OutputArray dst, InputArray kernel,
Point anchor, int iterations,
int borderType, const Scalar& borderValue )
{
CV_INSTRUMENT_REGION();
morphOp( MORPH_ERODE, src, dst, kernel, anchor, iterations, borderType, borderValue );
}
void cv::dilate( InputArray src, OutputArray dst, InputArray kernel,
Point anchor, int iterations,
int borderType, const Scalar& borderValue )
{
CV_INSTRUMENT_REGION();
morphOp( MORPH_DILATE, src, dst, kernel, anchor, iterations, borderType, borderValue );
}
#ifdef HAVE_OPENCL
namespace cv {
static bool ocl_morphologyEx(InputArray _src, OutputArray _dst, int op,
InputArray kernel, Point anchor, int iterations,
int borderType, const Scalar& borderValue)
{
_dst.createSameSize(_src, _src.type());
bool submat = _dst.isSubmatrix();
UMat temp;
_OutputArray _temp = submat ? _dst : _OutputArray(temp);
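// Composite operations chain erode/dilate passes; gradient, top-hat and
// black-hat hand the last pass an extra matrix so the final subtraction is
// fused into that kernel.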
switch( op )
{
case MORPH_ERODE:
if (!ocl_morphOp( _src, _dst, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
return false;
break;
case MORPH_DILATE:
if (!ocl_morphOp( _src, _dst, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue ))
return false;
break;
case MORPH_OPEN:
if (!ocl_morphOp( _src, _temp, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
return false;
if (!ocl_morphOp( _temp, _dst, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue ))
return false;
break;
case MORPH_CLOSE:
if (!ocl_morphOp( _src, _temp, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue ))
return false;
if (!ocl_morphOp( _temp, _dst, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
return false;
break;
case MORPH_GRADIENT:
if (!ocl_morphOp( _src, temp, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
return false;
if (!ocl_morphOp( _src, _dst, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue, MORPH_GRADIENT, temp ))
return false;
break;
case MORPH_TOPHAT:
if (!ocl_morphOp( _src, _temp, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue ))
return false;
if (!ocl_morphOp( _temp, _dst, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue, MORPH_TOPHAT, _src ))
return false;
break;
case MORPH_BLACKHAT:
if (!ocl_morphOp( _src, _temp, kernel, anchor, iterations, MORPH_DILATE, borderType, borderValue ))
return false;
if (!ocl_morphOp( _temp, _dst, kernel, anchor, iterations, MORPH_ERODE, borderType, borderValue, MORPH_BLACKHAT, _src ))
return false;
break;
default:
CV_Error( CV_StsBadArg, "unknown morphological operation" );
}
return true;
}
}  // namespace cv
#endif
#define IPP_DISABLE_MORPH_ADV 1
#ifdef HAVE_IPP
#if !IPP_DISABLE_MORPH_ADV
namespace cv {
static bool ipp_morphologyEx(int op, InputArray _src, OutputArray _dst,
InputArray _kernel,
Point anchor, int iterations,
int borderType, const Scalar& borderValue)
{
#if defined HAVE_IPP_IW
Mat kernel = _kernel.getMat();
Size ksize = !kernel.empty() ? kernel.size() : Size(3,3);
anchor = normalizeAnchor(anchor, ksize);
if (iterations == 0 || kernel.rows*kernel.cols == 1)
{
_src.copyTo(_dst);
return true;
}
if (kernel.empty())
{
kernel = getStructuringElement(MORPH_RECT, Size(1+iterations*2,1+iterations*2));
anchor = Point(iterations, iterations);
iterations = 1;
}
else if( iterations > 1 && countNonZero(kernel) == kernel.rows*kernel.cols )
{
anchor = Point(anchor.x*iterations, anchor.y*iterations);
kernel = getStructuringElement(MORPH_RECT,
Size(ksize.width + (iterations-1)*(ksize.width-1),
ksize.height + (iterations-1)*(ksize.height-1)),
anchor);
iterations = 1;
}
Mat src = _src.getMat();
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
Point s_ofs;
Size s_wsz(src.cols, src.rows);
Point d_ofs;
Size d_wsz(dst.cols, dst.rows);
bool isolated = (borderType & BORDER_ISOLATED) != 0;
borderType = (borderType&~BORDER_ISOLATED);
if(!isolated)
{
src.locateROI(s_wsz, s_ofs);
dst.locateROI(d_wsz, d_ofs);
}
return ippMorph(op, src.type(), dst.type(),
src.data, src.step,
dst.data, dst.step,
src.cols, src.rows,
s_wsz.width, s_wsz.height, s_ofs.x, s_ofs.y,
d_wsz.width, d_wsz.height, d_ofs.x, d_ofs.y,
kernel.type(), kernel.data, kernel.step, kernel.cols, kernel.rows, anchor.x, anchor.y,
borderType, borderValue.val, iterations,
(src.isSubmatrix() && !isolated));
#else
CV_UNUSED(op); CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(_kernel); CV_UNUSED(anchor);
CV_UNUSED(iterations); CV_UNUSED(borderType); CV_UNUSED(borderValue);
return false;
#endif
}
}  // namespace cv
#endif
#endif
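// Minimal usage sketch: a morphological gradient outlining object boundaries
// (src is assumed to be an 8-bit image):
//   cv::Mat edges;
//   cv::morphologyEx(src, edges, cv::MORPH_GRADIENT,
//                    cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3)));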
void cv::morphologyEx( InputArray _src, OutputArray _dst, int op,
InputArray _kernel, Point anchor, int iterations,
int borderType, const Scalar& borderValue )
{
CV_INSTRUMENT_REGION();
Mat kernel = _kernel.getMat();
if (kernel.empty())
{
kernel = getStructuringElement(MORPH_RECT, Size(3,3), Point(1,1));
}
#ifdef HAVE_OPENCL
Size ksize = kernel.size();
anchor = normalizeAnchor(anchor, ksize);
CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2 && _src.channels() <= 4 &&
anchor.x == ksize.width >> 1 && anchor.y == ksize.height >> 1 &&
borderType == cv::BORDER_CONSTANT && borderValue == morphologyDefaultBorderValue(),
ocl_morphologyEx(_src, _dst, op, kernel, anchor, iterations, borderType, borderValue))
#endif
Mat src = _src.getMat(), temp;
_dst.create(src.size(), src.type());
Mat dst = _dst.getMat();
#if !IPP_DISABLE_MORPH_ADV
CV_IPP_RUN_FAST(ipp_morphologyEx(op, src, dst, kernel, anchor, iterations, borderType, borderValue));
#endif
switch( op )
{
case MORPH_ERODE:
erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
break;
case MORPH_DILATE:
dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
break;
case MORPH_OPEN:
erode( src, dst, kernel, anchor, iterations, borderType, borderValue );
dilate( dst, dst, kernel, anchor, iterations, borderType, borderValue );
break;
case MORPH_CLOSE:
dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
erode( dst, dst, kernel, anchor, iterations, borderType, borderValue );
break;
case MORPH_GRADIENT:
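// gradient(src) = dilate(src) - erode(src)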
erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
dilate( src, dst, kernel, anchor, iterations, borderType, borderValue );
dst -= temp;
break;
case MORPH_TOPHAT:
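// tophat(src) = src - open(src)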
if( src.data != dst.data )
temp = dst;
erode( src, temp, kernel, anchor, iterations, borderType, borderValue );
dilate( temp, temp, kernel, anchor, iterations, borderType, borderValue );
dst = src - temp;
break;
case MORPH_BLACKHAT:
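// blackhat(src) = close(src) - src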
if( src.data != dst.data )
temp = dst;
dilate( src, temp, kernel, anchor, iterations, borderType, borderValue );
erode( temp, temp, kernel, anchor, iterations, borderType, borderValue );
dst = temp - src;
break;
case MORPH_HITMISS:
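// Hit-or-miss: kernel entries of 1 must match foreground, -1 must match
// background, 0 is "don't care".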
CV_Assert(src.type() == CV_8UC1);
if (countNonZero(kernel) <= 0)
{
src.copyTo(dst);
break;
}
{
Mat k1, k2, e1, e2;
k1 = (kernel == 1);
k2 = (kernel == -1);
if (countNonZero(k1) <= 0)
e1 = Mat(src.size(), src.type(), Scalar(255));
else
erode(src, e1, k1, anchor, iterations, borderType, borderValue);
if (countNonZero(k2) <= 0)
e2 = Mat(src.size(), src.type(), Scalar(255));
else
{
Mat src_complement;
bitwise_not(src, src_complement);
erode(src_complement, e2, k2, anchor, iterations, borderType, borderValue);
}
dst = e1 & e2;
}
break;
default:
CV_Error( CV_StsBadArg, "unknown morphological operation" );
}
}
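/****************************************************************************************\
                                      Legacy C API
\****************************************************************************************/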
CV_IMPL IplConvKernel *
cvCreateStructuringElementEx( int cols, int rows,
int anchorX, int anchorY,
int shape, int *values )
{
cv::Size ksize = cv::Size(cols, rows);
cv::Point anchor = cv::Point(anchorX, anchorY);
CV_Assert( cols > 0 && rows > 0 && anchor.inside(cv::Rect(0,0,cols,rows)) &&
(shape != CV_SHAPE_CUSTOM || values != 0));
int i, size = rows * cols;
int element_size = sizeof(IplConvKernel) + size*sizeof(int);
IplConvKernel *element = (IplConvKernel*)cvAlloc(element_size + 32);
element->nCols = cols;
element->nRows = rows;
element->anchorX = anchorX;
element->anchorY = anchorY;
element->nShiftR = shape < CV_SHAPE_ELLIPSE ? shape : CV_SHAPE_CUSTOM;
element->values = (int*)(element + 1);
if( shape == CV_SHAPE_CUSTOM )
{
for( i = 0; i < size; i++ )
element->values[i] = values[i];
}
else
{
cv::Mat elem = cv::getStructuringElement(shape, ksize, anchor);
for( i = 0; i < size; i++ )
element->values[i] = elem.ptr()[i];
}
return element;
}
CV_IMPL void
cvReleaseStructuringElement( IplConvKernel ** element )
{
if( !element )
CV_Error( CV_StsNullPtr, "" );
cvFree( element );
}
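// Converts a legacy IplConvKernel into a binary CV_8U kernel Mat; a NULL
// element yields an empty kernel and the default (1,1) anchor.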
static void convertConvKernel( const IplConvKernel* src, cv::Mat& dst, cv::Point& anchor )
{
if(!src)
{
anchor = cv::Point(1,1);
dst.release();
return;
}
anchor = cv::Point(src->anchorX, src->anchorY);
dst.create(src->nRows, src->nCols, CV_8U);
int i, size = src->nRows*src->nCols;
for( i = 0; i < size; i++ )
dst.ptr()[i] = (uchar)(src->values[i] != 0);
}
CV_IMPL void
cvErode( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
cv::Point anchor;
convertConvKernel( element, kernel, anchor );
cv::erode( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
}
CV_IMPL void
cvDilate( const CvArr* srcarr, CvArr* dstarr, IplConvKernel* element, int iterations )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
cv::Point anchor;
convertConvKernel( element, kernel, anchor );
cv::dilate( src, dst, kernel, anchor, iterations, cv::BORDER_REPLICATE );
}
CV_IMPL void
cvMorphologyEx( const void* srcarr, void* dstarr, void*,
IplConvKernel* element, int op, int iterations )
{
cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), kernel;
CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
cv::Point anchor;
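// A NULL element means a 3x3 rectangle in the legacy API; materialize a
// temporary one so convertConvKernel() always sees a valid kernel.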
IplConvKernel* temp_element = NULL;
if (!element)
{
temp_element = cvCreateStructuringElementEx(3, 3, 1, 1, CV_SHAPE_RECT);
} else {
temp_element = element;
}
convertConvKernel( temp_element, kernel, anchor );
if (!element)
{
cvReleaseStructuringElement(&temp_element);
}
cv::morphologyEx( src, dst, op, kernel, anchor, iterations, cv::BORDER_REPLICATE );
}
/* End of file. */