Commit 3322aeed authored by Vadim Pisarevsky's avatar Vadim Pisarevsky

Merge pull request #52 from ludv1x/edgefilter

Edgefilter
parents 9e5b6684 e622117f
set(the_description "Extended image processing module. It includes edge-aware filters, etc.")
ocv_define_module(ximgproc opencv_imgproc opencv_core opencv_highgui)
target_link_libraries(opencv_ximgproc)
\ No newline at end of file
********************************************
ximgproc. Extended image processing module.
********************************************
.. highlight:: cpp
.. toctree::
:maxdepth: 2
edge_aware_filters
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2009, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __OPENCV_XIMGPROC_HPP__
#define __OPENCV_XIMGPROC_HPP__
#include "ximgproc/edge_filter.hpp"
#endif
\ No newline at end of file
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2009, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __OPENCV_EDGEFILTER_HPP__
#define __OPENCV_EDGEFILTER_HPP__
#ifdef __cplusplus
#include <opencv2/core.hpp>
namespace cv
{
namespace ximgproc
{
enum EdgeAwareFiltersList
{
DTF_NC,
DTF_IC,
DTF_RF,
GUIDED_FILTER,
AM_FILTER
};
/*Interface for DT filters*/
class CV_EXPORTS DTFilter : public Algorithm
{
public:
virtual void filter(InputArray src, OutputArray dst, int dDepth = -1) = 0;
};
typedef Ptr<DTFilter> DTFilterPtr;
/*Factory function for DT filters*/
CV_EXPORTS
Ptr<DTFilter> createDTFilter(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
/*One-line DT filter call*/
CV_EXPORTS
void dtFilter(InputArray guide, InputArray src, OutputArray dst, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
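A minimal usage sketch for the DT filter API above; the image file names and sigma values are placeholders and not part of this change:

#include <opencv2/highgui.hpp>
#include <opencv2/ximgproc.hpp>

void dtFilterUsageSketch()
{
    cv::Mat guide = cv::imread("guide.png"); //hypothetical guide image
    cv::Mat src = cv::imread("src.png");     //hypothetical image to be filtered
    cv::Mat dst;
    //one-line call
    cv::ximgproc::dtFilter(guide, src, dst, 40.0, 30.0, cv::ximgproc::DTF_NC);
    //or build a reusable filter object with the factory function
    cv::Ptr<cv::ximgproc::DTFilter> dtf =
        cv::ximgproc::createDTFilter(guide, 40.0, 30.0, cv::ximgproc::DTF_NC, 3);
    dtf->filter(src, dst);
}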
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/*Interface for Guided Filter*/
class CV_EXPORTS GuidedFilter : public Algorithm
{
public:
virtual void filter(InputArray src, OutputArray dst, int dDepth = -1) = 0;
};
/*Factory function for Guided Filter*/
CV_EXPORTS Ptr<GuidedFilter> createGuidedFilter(InputArray guide, int radius, double eps);
/*One-line Guided Filter call*/
CV_EXPORTS void guidedFilter(InputArray guide, InputArray src, OutputArray dst, int radius, double eps, int dDepth = -1);
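A similar hedged sketch for the Guided Filter declared above (same includes as the DT filter sketch; the radius and eps values are arbitrary placeholders):

void guidedFilterUsageSketch()
{
    cv::Mat guide = cv::imread("guide.png");
    cv::Mat src = cv::imread("src.png");
    cv::Mat dst;
    //one-line call with radius = 8 and eps = 500
    cv::ximgproc::guidedFilter(guide, src, dst, 8, 500.0);
    //or create the filter once and reuse it for several images sharing the same guide
    cv::Ptr<cv::ximgproc::GuidedFilter> gf = cv::ximgproc::createGuidedFilter(guide, 8, 500.0);
    gf->filter(src, dst);
}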
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
class CV_EXPORTS AdaptiveManifoldFilter : public Algorithm
{
public:
/**
* @brief Apply high-dimensional filtering using adaptive manifolds
* @param src Input image to be filtered.
* @param dst Adaptive-manifold filter response.
* @param joint Image for joint filtering (optional).
*/
virtual void filter(InputArray src, OutputArray dst, InputArray joint = noArray()) = 0;
virtual void collectGarbage() = 0;
static Ptr<AdaptiveManifoldFilter> create();
};
//Factory function for the AM filter algorithm
CV_EXPORTS Ptr<AdaptiveManifoldFilter> createAMFilter(double sigma_s, double sigma_r, bool adjust_outliers = false);
//One-line Adaptive Manifold filter call
CV_EXPORTS void amFilter(InputArray joint, InputArray src, OutputArray dst, double sigma_s, double sigma_r, bool adjust_outliers = false);
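An analogous sketch for the adaptive manifold filter; the sigma values below mirror the ranges used by the tests in this change and are otherwise arbitrary:

void amFilterUsageSketch()
{
    cv::Mat joint = cv::imread("guide.png");
    cv::Mat src = cv::imread("src.png");
    cv::Mat dst;
    //one-line call with sigma_s = 16 and sigma_r = 0.3
    cv::ximgproc::amFilter(joint, src, dst, 16.0, 0.3);
    //the factory form also exposes collectGarbage() to release internal buffers
    cv::Ptr<cv::ximgproc::AdaptiveManifoldFilter> amf = cv::ximgproc::createAMFilter(16.0, 0.3);
    amf->filter(src, dst, joint);
    amf->collectGarbage();
}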
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
CV_EXPORTS
void jointBilateralFilter(InputArray joint, InputArray src, OutputArray dst, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT);
}
}
#endif
#endif
#include "perf_precomp.hpp"
namespace cvtest
{
using std::tr1::tuple;
using std::tr1::get;
using namespace perf;
using namespace testing;
using namespace cv;
using namespace cv::ximgproc;
typedef tuple<bool, Size, int, int, MatType> AMPerfTestParam;
typedef TestBaseWithParam<AMPerfTestParam> AdaptiveManifoldPerfTest;
PERF_TEST_P( AdaptiveManifoldPerfTest, perf,
Combine(
Values(true, false), //adjust_outliers flag
Values(sz1080p, sz720p), //size
Values(1, 3, 8), //joint channels num
Values(1, 3), //source channels num
Values(CV_8U, CV_32F) //source and joint depth
)
)
{
AMPerfTestParam params = GetParam();
bool adjustOutliers = get<0>(params);
Size sz = get<1>(params);
int jointCnNum = get<2>(params);
int srcCnNum = get<3>(params);
int depth = get<4>(params);
Mat joint(sz, CV_MAKE_TYPE(depth, jointCnNum));
Mat src(sz, CV_MAKE_TYPE(depth, srcCnNum));
Mat dst(sz, CV_MAKE_TYPE(depth, srcCnNum));
cv::setNumThreads(cv::getNumberOfCPUs());
declare.in(joint, src, WARMUP_RNG).out(dst).tbb_threads(cv::getNumberOfCPUs());
double sigma_s = 16;
double sigma_r = 0.5;
TEST_CYCLE_N(3)
{
Mat res;
amFilter(joint, src, res, sigma_s, sigma_r, adjustOutliers);
//slightly vary the sigmas at each cycle so that consecutive runs differ
sigma_s *= 1.38;
sigma_r /= 1.38;
}
SANITY_CHECK(dst);
}
}
\ No newline at end of file
#include "perf_precomp.hpp"
namespace cvtest
{
using std::tr1::tuple;
using std::tr1::get;
using namespace perf;
using namespace testing;
using namespace cv;
using namespace cv::ximgproc;
CV_ENUM(GuideMatType, CV_8UC1, CV_8UC3, CV_32FC1, CV_32FC3) //reduced set
CV_ENUM(SourceMatType, CV_8UC1, CV_8UC2, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC2, CV_32FC3, CV_32FC4) //full set of supported types
CV_ENUM(DTFMode, DTF_NC, DTF_IC, DTF_RF)
typedef tuple<GuideMatType, SourceMatType, Size, double, double, DTFMode> DTTestParams;
typedef TestBaseWithParam<DTTestParams> DomainTransformTest;
PERF_TEST_P( DomainTransformTest, perf,
Combine(
GuideMatType::all(),
SourceMatType::all(),
Values(szVGA, sz720p),
Values(10.0, 80.0),
Values(30.0, 50.0),
DTFMode::all()
)
)
{
int guideType = get<0>(GetParam());
int srcType = get<1>(GetParam());
Size size = get<2>(GetParam());
double sigmaSpatial = get<3>(GetParam());
double sigmaColor = get<4>(GetParam());
int dtfType = get<5>(GetParam());
Mat guide(size, guideType);
Mat src(size, srcType);
Mat dst(size, srcType);
declare.in(guide, src, WARMUP_RNG).out(dst).tbb_threads(cv::getNumberOfCPUs());
cv::setNumThreads(cv::getNumberOfCPUs());
TEST_CYCLE_N(5)
{
dtFilter(guide, src, dst, sigmaSpatial, sigmaColor, dtfType);
}
SANITY_CHECK(dst);
}
}
\ No newline at end of file
#include "perf_precomp.hpp"
namespace cvtest
{
using std::tr1::tuple;
using std::tr1::get;
using namespace perf;
using namespace testing;
using namespace cv;
using namespace cv::ximgproc;
CV_ENUM(GuideTypes, CV_8UC1, CV_8UC2, CV_8UC3, CV_32FC1, CV_32FC2, CV_32FC3);
CV_ENUM(SrcTypes, CV_8UC1, CV_8UC3, CV_32FC1, CV_32FC3);
typedef tuple<GuideTypes, SrcTypes, Size> GFParams;
typedef TestBaseWithParam<GFParams> GuidedFilterPerfTest;
PERF_TEST_P( GuidedFilterPerfTest, perf, Combine(GuideTypes::all(), SrcTypes::all(), Values(sz1080p, sz2K)) )
{
RNG rng(0);
GFParams params = GetParam();
int guideType = get<0>(params);
int srcType = get<1>(params);
Size sz = get<2>(params);
Mat guide(sz, guideType);
Mat src(sz, srcType);
Mat dst(sz, srcType);
declare.in(guide, src, WARMUP_RNG).out(dst).tbb_threads(cv::getNumberOfCPUs());
cv::setNumThreads(cv::getNumberOfCPUs());
TEST_CYCLE_N(3)
{
int radius = rng.uniform(5, 30);
double eps = rng.uniform(0.1, 1e5);
guidedFilter(guide, src, dst, radius, eps);
}
SANITY_CHECK(dst);
}
}
\ No newline at end of file
#include "perf_precomp.hpp"
CV_PERF_TEST_MAIN(edgefilter)
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include <opencv2/ts.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/ximgproc.hpp>
#endif
#include "perf_precomp.hpp"
namespace cvtest
{
using std::tr1::tuple;
using std::tr1::get;
using namespace perf;
using namespace testing;
using namespace cv;
using namespace cv::ximgproc;
typedef tuple<double, Size, MatType, int, int> JBFTestParam;
typedef TestBaseWithParam<JBFTestParam> JointBilateralFilterTest;
PERF_TEST_P(JointBilateralFilterTest, perf,
Combine(
Values(2.0, 4.0, 6.0, 10.0),
SZ_TYPICAL,
Values(CV_8U, CV_32F),
Values(1, 3),
Values(1, 3))
)
{
JBFTestParam params = GetParam();
double sigmaS = get<0>(params);
Size sz = get<1>(params);
int depth = get<2>(params);
int jCn = get<3>(params);
int srcCn = get<4>(params);
Mat joint(sz, CV_MAKE_TYPE(depth, jCn));
Mat src(sz, CV_MAKE_TYPE(depth, srcCn));
Mat dst(sz, src.type());
cv::setNumThreads(cv::getNumberOfCPUs());
declare.in(joint, src, WARMUP_RNG).out(dst).tbb_threads(cv::getNumberOfCPUs());
RNG rnd(cvRound(10*sigmaS) + sz.height + depth + jCn + srcCn);
double sigmaC = rnd.uniform(1.0, 255.0);
TEST_CYCLE_N(1)
{
jointBilateralFilter(joint, src, dst, 0, sigmaC, sigmaS);
}
SANITY_CHECK(dst);
}
}
\ No newline at end of file
cmake_minimum_required(VERSION 2.8)
project(live_demo)
find_package(OpenCV 3.0 REQUIRED)
set(SOURCES live_demo.cpp)
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(live_demo ${SOURCES} ${HEADERS})
target_link_libraries(live_demo ${OpenCV_LIBS})
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/ximgproc.hpp>
using namespace cv;
using namespace cv::ximgproc;
#include <iostream>
using namespace std;
typedef void(*FilteringOperation)(const Mat& src, Mat& dst);
//currently selected filtering operation (mode)
FilteringOperation g_filterOp = NULL;
//list of filtering operations
void filterDoNothing(const Mat& frame, Mat& dst);
void filterBlurring(const Mat& frame, Mat& dst);
void filterStylize(const Mat& frame, Mat& dst);
void filterDetailEnhancement(const Mat& frame8u, Mat& dst);
//common sliders for every mode
int g_sigmaColor = 25;
int g_sigmaSpatial = 10;
//for Stylizing mode
int g_edgesGamma = 100;
//for Details Enhancement mode
int g_contrastBase = 100;
int g_detailsLevel = 100;
int g_numberOfCPUs = cv::getNumberOfCPUs();
//We will use two callbacks to change parameters
void changeModeCallback(int state, void *filter);
void changeNumberOfCpuCallback(int count, void*);
void splitScreen(const Mat& rawFrame, Mat& outputFrame, Mat& srcFrame, Mat& processedFrame);
//trivial filter
void filterDoNothing(const Mat& frame, Mat& dst)
{
frame.copyTo(dst);
}
//simple edge-aware blurring
void filterBlurring(const Mat& frame, Mat& dst)
{
dtFilter(frame, frame, dst, g_sigmaSpatial, g_sigmaColor, DTF_RF);
}
//stylizing filter
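//Roughly: dst(x) = filtered(x) / (1 + |grad(filteredGray)(x)|^gamma), with gamma = g_edgesGamma/100;
//flat regions keep their blurred color while strong edges are darkened, giving a cartoon-like look.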
void filterStylize(const Mat& frame, Mat& dst)
{
//blur frame
Mat filtered;
dtFilter(frame, frame, filtered, g_sigmaSpatial, g_sigmaColor, DTF_NC);
//compute grayscale blurred frame
Mat filteredGray;
cvtColor(filtered, filteredGray, COLOR_BGR2GRAY);
//find gradients of blurred image
Mat gradX, gradY;
Sobel(filteredGray, gradX, CV_32F, 1, 0, 3, 1.0/255);
Sobel(filteredGray, gradY, CV_32F, 0, 1, 3, 1.0/255);
//compute the gradient magnitude and raise it to the power given by the gamma parameter
Mat gradMagnitude;
magnitude(gradX, gradY, gradMagnitude);
cv::pow(gradMagnitude, g_edgesGamma/100.0, gradMagnitude);
//multiply the blurred frame by a value inversely proportional to the gradient magnitude
Mat multiplier = 1.0/(1.0 + gradMagnitude);
cvtColor(multiplier, multiplier, COLOR_GRAY2BGR);
multiply(filtered, multiplier, dst, 1, dst.type());
}
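//details enhancement filter
//Roughly: the lightness channel L0 is smoothed into layers L1 and L2 with DT filters and recombined as
//  Lnew = cBase*(L2 - mean(L0)) + mean(L0) + cDetails1*(L0 - L1) + cDetails2*(L1 - L2),
//i.e. the base layer contrast is rescaled and the two detail layers are amplified independently.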
void filterDetailEnhancement(const Mat& frame8u, Mat& dst)
{
Mat frame;
frame8u.convertTo(frame, CV_32F, 1.0/255);
//Decompose the image into its 3 Lab channels
Mat frameLab, frameLabCn[3];
cvtColor(frame, frameLab, COLOR_BGR2Lab);
split(frameLab, frameLabCn);
//Generate progressively smoother versions of the lightness channel
Mat layer0 = frameLabCn[0]; //first channel is original lightness
Mat layer1, layer2;
dtFilter(layer0, layer0, layer1, g_sigmaSpatial, g_sigmaColor, DTF_IC);
dtFilter(layer1, layer1, layer2, 2*g_sigmaSpatial, g_sigmaColor, DTF_IC);
//Compute detail layers
Mat detailLayer1 = layer0 - layer1;
Mat detailLayer2 = layer1 - layer2;
double cBase = g_contrastBase / 100.0;
double cDetails1 = g_detailsLevel / 100.0;
double cDetails2 = 2.0 - g_detailsLevel / 100.0;
//Recompose the lightness channel
double meanLightness = mean(frameLabCn[0])[0];
frameLabCn[0] = cBase*(layer2 - meanLightness) + meanLightness; //fit contrast of base (most blurred) layer
frameLabCn[0] += cDetails1*detailLayer1; //add weighted sum of detail layers to new lightness
frameLabCn[0] += cDetails2*detailLayer2;
//Merge the channels back and convert to BGR
merge(frameLabCn, 3, frameLab);
cvtColor(frameLab, frame, COLOR_Lab2BGR);
frame.convertTo(dst, CV_8U, 255);
}
void changeModeCallback(int state, void *filter)
{
if (state == 1)
g_filterOp = (FilteringOperation) filter;
}
void changeNumberOfCpuCallback(int count, void*)
{
count = std::max(1, count);
cv::setNumThreads(count);
g_numberOfCPUs = count;
}
//divide the output frame into two parts: srcFrame and processedFrame
void splitScreen(const Mat& rawFrame, Mat& outputFrame, Mat& srcFrame, Mat& processedFrame)
{
int h = rawFrame.rows;
int w = rawFrame.cols;
int cn = rawFrame.channels();
outputFrame.create(h, 2 * w, CV_MAKE_TYPE(CV_8U, cn));
srcFrame = outputFrame(Range::all(), Range(0, w));
processedFrame = outputFrame(Range::all(), Range(w, 2 * w));
rawFrame.convertTo(srcFrame, srcFrame.type());
}
int main()
{
VideoCapture cap(0);
if (!cap.isOpened())
{
cerr << "Capture device was not found" << endl;
return -1;
}
namedWindow("Demo");
displayOverlay("Demo", "Press Ctrl+P to show property window", 5000);
//Trackbar to control the number of worker threads
cv::setNumThreads(g_numberOfCPUs); //speedup filtering
createTrackbar("Threads", NULL, &g_numberOfCPUs, cv::getNumberOfCPUs(), changeNumberOfCpuCallback);
//Buttons to choose different modes
createButton("Mode Details Enhancement", changeModeCallback, (void*)filterDetailEnhancement, QT_RADIOBOX, true);
createButton("Mode Stylizing", changeModeCallback, (void*)filterStylize, QT_RADIOBOX, false);
createButton("Mode Blurring", changeModeCallback, (void*)filterBlurring, QT_RADIOBOX, false);
createButton("Mode DoNothing", changeModeCallback, (void*)filterDoNothing, QT_RADIOBOX, false);
//sliders for Details Enhancement mode
g_filterOp = filterDetailEnhancement; //set Details Enhancement as default filter
createTrackbar("Detail contrast", NULL, &g_contrastBase, 200);
createTrackbar("Detail level" , NULL, &g_detailsLevel, 200);
//sliders for Stylizing mode
createTrackbar("Style gamma", NULL, &g_edgesGamma, 300);
//sliders for every mode
createTrackbar("Sigma Spatial", NULL, &g_sigmaSpatial, 200);
createTrackbar("Sigma Color" , NULL, &g_sigmaColor, 200);
Mat rawFrame, outputFrame;
Mat srcFrame, processedFrame;
for (;;)
{
do
{
cap >> rawFrame;
} while (rawFrame.empty());
splitScreen(rawFrame, outputFrame, srcFrame, processedFrame);
g_filterOp(srcFrame, processedFrame);
imshow("Demo", outputFrame);
if (waitKey(1) == 27) break;
}
return 0;
}
#include "precomp.hpp"
#include "dtfilter_cpu.hpp"
namespace cv
{
namespace ximgproc
{
CV_EXPORTS_W
Ptr<DTFilter> createDTFilter(InputArray guide, double sigmaSpatial, double sigmaColor, int mode, int numIters)
{
return Ptr<DTFilter>(DTFilterCPU::create(guide, sigmaSpatial, sigmaColor, mode, numIters));
}
CV_EXPORTS_W
void dtFilter(InputArray guide, InputArray src, OutputArray dst, double sigmaSpatial, double sigmaColor, int mode, int numIters)
{
Ptr<DTFilterCPU> dtf = DTFilterCPU::create(guide, sigmaSpatial, sigmaColor, mode, numIters);
dtf->setSingleFilterCall(true);
dtf->filter(src, dst);
}
}
}
\ No newline at end of file
#include "precomp.hpp"
#include "dtfilter_cpu.hpp"
namespace cv
{
namespace ximgproc
{
typedef Vec<uchar, 1> Vec1b;
typedef Vec<float, 1> Vec1f;
Ptr<DTFilterCPU> DTFilterCPU::create(InputArray guide, double sigmaSpatial, double sigmaColor, int mode, int numIters)
{
Ptr<DTFilterCPU> dtf(new DTFilterCPU());
dtf->init(guide, sigmaSpatial, sigmaColor, mode, numIters);
return dtf;
}
Ptr<DTFilterCPU> DTFilterCPU::createRF(InputArray adistHor, InputArray adistVert, double sigmaSpatial, double sigmaColor, int numIters /*= 3*/)
{
Mat adh = adistHor.getMat();
Mat adv = adistVert.getMat();
CV_Assert(adh.type() == CV_32FC1 && adv.type() == CV_32FC1 && adh.rows == adv.rows + 1 && adh.cols == adv.cols - 1);
Ptr<DTFilterCPU> dtf(new DTFilterCPU());
dtf->release();
dtf->mode = DTF_RF;
dtf->numIters = std::max(1, numIters);
dtf->h = adh.rows;
dtf->w = adh.cols + 1;
dtf->sigmaSpatial = std::max(1.0f, (float)sigmaSpatial);
dtf->sigmaColor = std::max(0.01f, (float)sigmaColor);
dtf->a0distHor = adh;
dtf->a0distVert = adv;
return dtf;
}
void DTFilterCPU::init(InputArray guide_, double sigmaSpatial_, double sigmaColor_, int mode_, int numIters_)
{
Mat guide = guide_.getMat();
int cn = guide.channels();
int depth = guide.depth();
CV_Assert(cn <= 4);
CV_Assert((depth == CV_8U || depth == CV_32F) && !guide.empty());
#define CREATE_DTF(Vect) init_<Vect>(guide, sigmaSpatial_, sigmaColor_, mode_, numIters_);
if (cn == 1)
{
if (depth == CV_8U)
CREATE_DTF(Vec1b);
if (depth == CV_32F)
CREATE_DTF(Vec1f);
}
else if (cn == 2)
{
if (depth == CV_8U)
CREATE_DTF(Vec2b);
if (depth == CV_32F)
CREATE_DTF(Vec2f);
}
else if (cn == 3)
{
if (depth == CV_8U)
CREATE_DTF(Vec3b);
if (depth == CV_32F)
CREATE_DTF(Vec3f);
}
else if (cn == 4)
{
if (depth == CV_8U)
CREATE_DTF(Vec4b);
if (depth == CV_32F)
CREATE_DTF(Vec4f);
}
#undef CREATE_DTF
}
void DTFilterCPU::filter(InputArray src_, OutputArray dst_, int dDepth)
{
Mat src = src_.getMat();
dst_.create(src.size(), src.type());
Mat& dst = dst_.getMatRef();
int cn = src.channels();
int depth = src.depth();
CV_Assert(cn <= 4 && (depth == CV_8U || depth == CV_32F));
if (cn == 1)
{
if (depth == CV_8U)
filter_<Vec1b>(src, dst, dDepth);
if (depth == CV_32F)
filter_<Vec1f>(src, dst, dDepth);
}
else if (cn == 2)
{
if (depth == CV_8U)
filter_<Vec2b>(src, dst, dDepth);
if (depth == CV_32F)
filter_<Vec2f>(src, dst, dDepth);
}
else if (cn == 3)
{
if (depth == CV_8U)
filter_<Vec3b>(src, dst, dDepth);
if (depth == CV_32F)
filter_<Vec3f>(src, dst, dDepth);
}
else if (cn == 4)
{
if (depth == CV_8U)
filter_<Vec4b>(src, dst, dDepth);
if (depth == CV_32F)
filter_<Vec4f>(src, dst, dDepth);
}
}
void DTFilterCPU::setSingleFilterCall(bool value)
{
singleFilterCall = value;
}
void DTFilterCPU::release()
{
if (mode == -1) return;
idistHor.release();
idistVert.release();
distHor.release();
distVert.release();
a0distHor.release();
a0distVert.release();
adistHor.release();
adistVert.release();
}
Mat DTFilterCPU::getWExtendedMat(int h, int w, int type, int brdleft /*= 0*/, int brdRight /*= 0*/, int cacheAlign /*= 0*/)
{
int wrapperCols = w + brdleft + brdRight;
if (cacheAlign > 0)
wrapperCols = ((wrapperCols + cacheAlign - 1) / cacheAlign) * cacheAlign; //round the width up to a multiple of cacheAlign
Mat mat(h, wrapperCols, type);
return mat(Range::all(), Range(brdleft, w + brdleft));
}
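//Split itemsRange into roughly equal per-thread chunks; the chunk whose thread range reaches
//declaredNumThreads also absorbs the remainder, so the union of the chunks covers the whole range.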
Range DTFilterCPU::getWorkRangeByThread(const Range& itemsRange, const Range& rangeThread, int declaredNumThreads)
{
if (declaredNumThreads <= 0)
declaredNumThreads = cv::getNumThreads();
int chunk = itemsRange.size() / declaredNumThreads;
int start = itemsRange.start + chunk * rangeThread.start;
int end = itemsRange.start + ((rangeThread.end >= declaredNumThreads) ? itemsRange.size() : chunk * rangeThread.end);
return Range(start, end);
}
Range DTFilterCPU::getWorkRangeByThread(int items, const Range& rangeThread, int declaredNumThreads)
{
return getWorkRangeByThread(Range(0, items), rangeThread, declaredNumThreads);
}
}
}
\ No newline at end of file
#ifndef __OPENCV_DTFILTER_HPP__
#define __OPENCV_DTFILTER_HPP__
#include "precomp.hpp"
#ifdef _MSC_VER
#pragma warning(disable: 4512)
#pragma warning(disable: 4127)
#endif
#define CV_GET_NUM_THREAD_WORKS_PROPERLY
#undef CV_GET_NUM_THREAD_WORKS_PROPERLY
namespace cv
{
namespace ximgproc
{
class DTFilterCPU : public DTFilter
{
public: /*Non-template methods*/
static Ptr<DTFilterCPU> create(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
static Ptr<DTFilterCPU> createRF(InputArray adistHor, InputArray adistVert, double sigmaSpatial, double sigmaColor, int numIters = 3);
void filter(InputArray src, OutputArray dst, int dDepth = -1);
void setSingleFilterCall(bool value);
public: /*Template methods*/
/*Use these static methods instead of the constructor*/
template<typename GuideVec>
static DTFilterCPU* create_p_(const Mat& guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
template<typename GuideVec>
static DTFilterCPU create_(const Mat& guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
template<typename GuideVec>
void init_(Mat& guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
template<typename SrcVec>
void filter_(const Mat& src, Mat& dst, int dDepth = -1);
protected: /*Typedefs declarations*/
typedef float IDistType;
typedef Vec<IDistType, 1> IDistVec;
typedef float DistType;
typedef Vec<DistType, 1> DistVec;
typedef float WorkType;
public: /*Members declarations*/
int h, w, mode;
float sigmaSpatial, sigmaColor;
bool singleFilterCall;
int numFilterCalls;
Mat idistHor, idistVert;
Mat distHor, distVert;
Mat a0distHor, a0distVert;
Mat adistHor, adistVert;
int numIters;
protected: /*Functions declarations*/
DTFilterCPU() : mode(-1), singleFilterCall(false), numFilterCalls(0) {}
void init(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
void release();
template<typename GuideVec>
inline IDistType getTransformedDistance(const GuideVec &l, const GuideVec &r)
{
return (IDistType)(1.0f + sigmaSpatial / sigmaColor * norm1<IDistType>(l, r));
}
inline double getIterSigmaH(int iterNum)
{
return sigmaSpatial * std::pow(2.0, numIters - iterNum) / sqrt(std::pow(4.0, numIters) - 1);
}
inline IDistType getIterRadius(int iterNum)
{
return (IDistType)(3.0*getIterSigmaH(iterNum));
}
inline float getIterAlpha(int iterNum)
{
return (float)std::exp(-std::sqrt(2.0 / 3.0) / getIterSigmaH(iterNum));
}
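/*For reference, what the three helpers above compute for iteration iterNum of numIters
  (per-iteration parameters of the iterated domain transform filtering scheme):
      sigmaH(i) = sigmaSpatial * 2^(numIters - i) / sqrt(4^numIters - 1)
      radius(i) = 3 * sigmaH(i)
      alpha(i)  = exp(-sqrt(2/3) / sigmaH(i))
*/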
protected: /*Wrappers for parallelization*/
template <typename WorkVec>
struct FilterNC_horPass : public ParallelLoopBody
{
Mat &src, &idist, &dst;
float radius;
FilterNC_horPass(Mat& src_, Mat& idist_, Mat& dst_);
void operator() (const Range& range) const;
};
template <typename WorkVec>
struct FilterIC_horPass : public ParallelLoopBody
{
Mat &src, &idist, &dist, &dst, isrcBuf;
float radius;
FilterIC_horPass(Mat& src_, Mat& idist_, Mat& dist_, Mat& dst_);
void operator() (const Range& range) const;
};
template <typename WorkVec>
struct FilterRF_horPass : public ParallelLoopBody
{
Mat &res, &alphaD;
int iteration;
FilterRF_horPass(Mat& res_, Mat& alphaD_, int iteration_);
void operator() (const Range& range) const;
Range getRange() const { return Range(0, res.rows); }
};
template <typename WorkVec>
struct FilterRF_vertPass : public ParallelLoopBody
{
Mat &res, &alphaD;
int iteration;
FilterRF_vertPass(Mat& res_, Mat& alphaD_, int iteration_);
void operator() (const Range& range) const;
#ifdef CV_GET_NUM_THREAD_WORKS_PROPERLY
Range getRange() const { return Range(0, cv::getNumThreads()); }
#else
Range getRange() const { return Range(0, res.cols); }
#endif
};
template <typename GuideVec>
struct ComputeIDTHor_ParBody: public ParallelLoopBody
{
DTFilterCPU &dtf;
Mat &guide, &dst;
ComputeIDTHor_ParBody(DTFilterCPU& dtf_, Mat& guide_, Mat& dst_);
void operator() (const Range& range) const;
Range getRange() { return Range(0, guide.rows); }
};
template <typename GuideVec>
struct ComputeDTandIDTHor_ParBody : public ParallelLoopBody
{
DTFilterCPU &dtf;
Mat &guide, &dist, &idist;
IDistType maxRadius;
ComputeDTandIDTHor_ParBody(DTFilterCPU& dtf_, Mat& guide_, Mat& dist_, Mat& idist_);
void operator() (const Range& range) const;
Range getRange() { return Range(0, guide.rows); }
};
template <typename GuideVec>
struct ComputeA0DTHor_ParBody : public ParallelLoopBody
{
DTFilterCPU &dtf;
Mat &guide;
float lna;
ComputeA0DTHor_ParBody(DTFilterCPU& dtf_, Mat& guide_);
void operator() (const Range& range) const;
Range getRange() { return Range(0, guide.rows); }
~ComputeA0DTHor_ParBody();
};
template <typename GuideVec>
struct ComputeA0DTVert_ParBody : public ParallelLoopBody
{
DTFilterCPU &dtf;
Mat &guide;
float lna;
ComputeA0DTVert_ParBody(DTFilterCPU& dtf_, Mat& guide_);
void operator() (const Range& range) const;
Range getRange() const { return Range(0, guide.rows - 1); }
~ComputeA0DTVert_ParBody();
};
protected: /*Auxiliary implementation functions*/
static Range getWorkRangeByThread(const Range& itemsRange, const Range& rangeThread, int maxThreads = 0);
static Range getWorkRangeByThread(int items, const Range& rangeThread, int maxThreads = 0);
template<typename SrcVec>
static void prepareSrcImg_IC(const Mat& src, Mat& inner, Mat& outer);
static Mat getWExtendedMat(int h, int w, int type, int brdleft = 0, int brdRight = 0, int cacheAlign = 0);
template<typename SrcVec, typename SrcWorkVec>
static void integrateSparseRow(const SrcVec *src, const float *dist, SrcWorkVec *dst, int cols);
template<typename SrcVec, typename SrcWorkVec>
static void integrateRow(const SrcVec *src, SrcWorkVec *dst, int cols);
inline static int getLeftBound(IDistType *idist, int pos, IDistType searchValue)
{
while (idist[pos] < searchValue)
pos++;
return pos;
}
inline static int getRightBound(IDistType *idist, int pos, IDistType searchValue)
{
while (idist[pos + 1] < searchValue)
pos++;
return pos;
}
template <typename T, typename T1, typename T2, int n>
inline static T norm1(const cv::Vec<T1, n>& v1, const cv::Vec<T2, n>& v2)
{
T sum = (T) 0;
for (int i = 0; i < n; i++)
sum += std::abs( (T)v1[i] - (T)v2[i] );
return sum;
}
};
/*One-line template wrappers for DT call*/
template<typename GuideVec, typename SrcVec>
void domainTransformFilter( const Mat_<GuideVec>& guide,
const Mat_<SrcVec>& source,
Mat& dst,
double sigmaSpatial, double sigmaColor,
int mode = DTF_NC, int numPasses = 3
);
template<typename GuideVec, typename SrcVec>
void domainTransformFilter( const Mat& guide,
const Mat& source,
Mat& dst,
double sigmaSpatial, double sigmaColor,
int mode = DTF_NC, int numPasses = 3
);
}
}
#include "dtfilter_cpu.inl.hpp"
#endif
\ No newline at end of file
#ifndef __EDGEAWAREFILTERS_COMMON_HPP__
#define __EDGEAWAREFILTERS_COMMON_HPP__
#ifdef __cplusplus
namespace cv
{
namespace ximgproc
{
Ptr<DTFilter> createDTFilterRF(InputArray adistHor, InputArray adistVert, double sigmaSpatial, double sigmaColor, int numIters);
int getTotalNumberOfChannels(InputArrayOfArrays src);
void checkSameSizeAndDepth(InputArrayOfArrays src, Size &sz, int &depth);
namespace intrinsics
{
void add_(register float *dst, register float *src1, int w);
void mul(register float *dst, register float *src1, register float *src2, int w);
void mul(register float *dst, register float *src1, float src2, int w);
//dst = alpha*src1 + beta
void mad(register float *dst, register float *src1, float alpha, float beta, int w);
void add_mul(register float *dst, register float *src1, register float *src2, int w);
void sub_mul(register float *dst, register float *src1, register float *src2, int w);
void sub_mad(register float *dst, register float *src1, register float *src2, float c0, int w);
void det_2x2(register float *dst, register float *a00, register float *a01, register float *a10, register float *a11, int w);
void div_det_2x2(register float *a00, register float *a01, register float *a11, int w);
void div_1x(register float *a1, register float *b1, int w);
void inv_self(register float *src, int w);
void sqr_(register float *dst, register float *src1, int w);
void sqrt_(register float *dst, register float *src, int w);
void sqr_dif(register float *dst, register float *src1, register float *src2, int w);
void add_sqr_dif(register float *dst, register float *src1, register float *src2, int w);
void add_sqr(register float *dst, register float *src1, int w);
void min_(register float *dst, register float *src1, register float *src2, int w);
void rf_vert_row_pass(register float *curRow, register float *prevRow, float alphaVal, int w);
}
}
}
#endif
#endif
\ No newline at end of file
#ifndef _OPENCV_EDGEFILTER_PRECOMP_HPP_
#define _OPENCV_EDGEFILTER_PRECOMP_HPP_
#include <opencv2/core.hpp>
#include <opencv2/core/ocl.hpp>
#include <opencv2/core/base.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/core/cvdef.h>
#include <opencv2/core/core_c.h>
#include <opencv2/core/private.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/ximgproc.hpp>
#endif
\ No newline at end of file
#include "test_precomp.hpp"
namespace cvtest
{
using namespace std;
using namespace std::tr1;
using namespace testing;
using namespace cv;
using namespace cv::ximgproc;
#ifndef SQR
#define SQR(x) ((x)*(x))
#endif
static string getOpenCVExtraDir()
{
return cvtest::TS::ptr()->get_data_path();
}
static void checkSimilarity(InputArray res, InputArray ref)
{
double normInf = cvtest::norm(res, ref, NORM_INF);
double normL2 = cvtest::norm(res, ref, NORM_L2) / res.total();
EXPECT_LE(normInf, 1);
EXPECT_LE(normL2, 1.0 / 64);
}
TEST(AdaptiveManifoldTest, SplatSurfaceAccuracy)
{
RNG rnd(0);
for (int i = 0; i < 10; i++)
{
Size sz(rnd.uniform(512, 1024), rnd.uniform(512, 1024));
int guideCn = rnd.uniform(1, 8);
Mat guide(sz, CV_MAKE_TYPE(CV_32F, guideCn));
randu(guide, 0, 1);
Scalar surfaceValue;
int srcCn = rnd.uniform(1, 4);
rnd.fill(surfaceValue, RNG::UNIFORM, 0, 255);
Mat src(sz, CV_MAKE_TYPE(CV_8U, srcCn), surfaceValue);
double sigma_s = rnd.uniform(1.0, 50.0);
double sigma_r = rnd.uniform(0.1, 0.9);
Mat res;
amFilter(guide, src, res, sigma_s, sigma_r, false);
double normInf = cvtest::norm(src, res, NORM_INF);
EXPECT_EQ(normInf, 0);
}
}
TEST(AdaptiveManifoldTest, AuthorsReferenceAccuracy)
{
String srcImgPath = "cv/edgefilter/kodim23.png";
String refPaths[] =
{
"cv/edgefilter/amf/kodim23_amf_ss5_sr0.3_ref.png",
"cv/edgefilter/amf/kodim23_amf_ss30_sr0.1_ref.png",
"cv/edgefilter/amf/kodim23_amf_ss50_sr0.3_ref.png"
};
pair<double, double> refParams[] =
{
make_pair(5.0, 0.3),
make_pair(30.0, 0.1),
make_pair(50.0, 0.3)
};
String refOutliersPaths[] =
{
"cv/edgefilter/amf/kodim23_amf_ss5_sr0.1_outliers_ref.png",
"cv/edgefilter/amf/kodim23_amf_ss15_sr0.3_outliers_ref.png",
"cv/edgefilter/amf/kodim23_amf_ss50_sr0.5_outliers_ref.png"
};
pair<double, double> refOutliersParams[] =
{
make_pair(5.0, 0.1),
make_pair(15.0, 0.3),
make_pair(50.0, 0.5),
};
Mat srcImg = imread(getOpenCVExtraDir() + srcImgPath);
ASSERT_TRUE(!srcImg.empty());
for (int i = 0; i < 3; i++)
{
Mat refRes = imread(getOpenCVExtraDir() + refPaths[i]);
double sigma_s = refParams[i].first;
double sigma_r = refParams[i].second;
ASSERT_TRUE(!refRes.empty());
Mat res;
Ptr<AdaptiveManifoldFilter> amf = createAMFilter(sigma_s, sigma_r, false);
amf->setBool("use_RNG", false);
amf->filter(srcImg, res, srcImg);
amf->collectGarbage();
checkSimilarity(res, refRes);
}
for (int i = 0; i < 3; i++)
{
Mat refRes = imread(getOpenCVExtraDir() + refOutliersPaths[i]);
double sigma_s = refOutliersParams[i].first;
double sigma_r = refOutliersParams[i].second;
ASSERT_TRUE(!refRes.empty());
Mat res;
Ptr<AdaptiveManifoldFilter> amf = createAMFilter(sigma_s, sigma_r, true);
amf->setBool("use_RNG", false);
amf->filter(srcImg, res, srcImg);
amf->collectGarbage();
checkSimilarity(res, refRes);
}
}
typedef tuple<string, string> AMRefTestParams;
typedef TestWithParam<AMRefTestParams> AdaptiveManifoldRefImplTest;
Ptr<AdaptiveManifoldFilter> createAMFilterRefImpl(double sigma_s, double sigma_r, bool adjust_outliers = false);
TEST_P(AdaptiveManifoldRefImplTest, RefImplAccuracy)
{
AMRefTestParams params = GetParam();
string guideFileName = get<0>(params);
string srcFileName = get<1>(params);
Mat guide = imread(getOpenCVExtraDir() + guideFileName);
Mat src = imread(getOpenCVExtraDir() + srcFileName);
ASSERT_TRUE(!guide.empty() && !src.empty());
int seed = 10 * (int)guideFileName.length() + (int)srcFileName.length();
RNG rnd(seed);
//the reference implementation uses inconsistent downsample/upsample operations, so round the size up to a multiple of 16
Size dstSize((guide.cols + 15) & ~15, (guide.rows + 15) & ~15);
resize(guide, guide, dstSize);
resize(src, src, dstSize);
for (int iter = 0; iter < 6; iter++)
{
double sigma_s = rnd.uniform(1.0, 50.0);
double sigma_r = rnd.uniform(0.1, 0.9);
bool adjust_outliers = (iter % 2 == 0);
Mat res;
amFilter(guide, src, res, sigma_s, sigma_r, adjust_outliers);
Mat resRef;
Ptr<AdaptiveManifoldFilter> amf = createAMFilterRefImpl(sigma_s, sigma_r, adjust_outliers);
amf->filter(src, resRef, guide);
checkSimilarity(res, resRef);
}
}
INSTANTIATE_TEST_CASE_P(TypicalSet, AdaptiveManifoldRefImplTest,
Combine(
Values("cv/shared/lena.png", "cv/edgefilter/kodim23.png", "cv/npr/test4.png"),
Values("cv/shared/lena.png", "cv/edgefilter/kodim23.png", "cv/npr/test4.png")
));
}
\ No newline at end of file
#include "test_precomp.hpp"
namespace cvtest
{
using namespace std;
using namespace std::tr1;
using namespace testing;
using namespace perf;
using namespace cv;
using namespace cv::ximgproc;
static string getOpenCVExtraDir()
{
return cvtest::TS::ptr()->get_data_path();
}
CV_ENUM(SupportedTypes, CV_8UC1, CV_8UC2, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC2, CV_32FC3, CV_32FC4);
CV_ENUM(ModeType, DTF_NC, DTF_IC, DTF_RF)
typedef tuple<Size, ModeType, SupportedTypes, SupportedTypes> DTParams;
Mat convertTypeAndSize(Mat src, int dstType, Size dstSize)
{
Mat dst;
CV_Assert(src.channels() == 3);
int dstChannels = CV_MAT_CN(dstType);
if (dstChannels == 1)
{
cvtColor(src, dst, COLOR_BGR2GRAY);
}
else if (dstChannels == 2)
{
Mat srcCn[3];
split(src, srcCn);
merge(srcCn, 2, dst);
}
else if (dstChannels == 3)
{
dst = src.clone();
}
else if (dstChannels == 4)
{
Mat srcCn[4];
split(src, srcCn);
srcCn[3] = srcCn[0].clone();
merge(srcCn, 4, dst);
}
dst.convertTo(dst, dstType);
resize(dst, dst, dstSize);
return dst;
}
TEST(DomainTransformTest, SplatSurfaceAccuracy)
{
static int dtModes[] = {DTF_NC, DTF_RF, DTF_IC};
RNG rnd(0);
for (int i = 0; i < 15; i++)
{
Size sz(rnd.uniform(512, 1024), rnd.uniform(512, 1024));
int guideCn = rnd.uniform(1, 4);
Mat guide(sz, CV_MAKE_TYPE(CV_32F, guideCn));
randu(guide, 0, 255);
Scalar surfaceValue;
int srcCn = rnd.uniform(1, 4);
rnd.fill(surfaceValue, RNG::UNIFORM, 0, 255);
Mat src(sz, CV_MAKE_TYPE(CV_8U, srcCn), surfaceValue);
double sigma_s = rnd.uniform(1.0, 100.0);
double sigma_r = rnd.uniform(1.0, 100.0);
int mode = dtModes[i%3];
Mat res;
dtFilter(guide, src, res, sigma_s, sigma_r, mode, 1);
double normL1 = cvtest::norm(src, res, NORM_L1)/src.total()/src.channels();
EXPECT_LE(normL1, 1.0/64);
}
}
typedef TestWithParam<DTParams> DomainTransformTest;
TEST_P(DomainTransformTest, MultiThreadReproducibility)
{
if (cv::getNumberOfCPUs() == 1)
return;
double MAX_DIF = 1.0;
double MAX_MEAN_DIF = 1.0 / 256.0;
int loopsCount = 2;
RNG rng(0);
DTParams params = GetParam();
Size size = get<0>(params);
int mode = get<1>(params);
int guideType = get<2>(params);
int srcType = get<3>(params);
Mat original = imread(getOpenCVExtraDir() + "cv/edgefilter/statue.png");
Mat guide = convertTypeAndSize(original, guideType, size);
Mat src = convertTypeAndSize(original, srcType, size);
for (int iter = 0; iter <= loopsCount; iter++)
{
double ss = rng.uniform(0.0, 100.0);
double sc = rng.uniform(0.0, 100.0);
cv::setNumThreads(cv::getNumberOfCPUs());
Mat resMultithread;
dtFilter(guide, src, resMultithread, ss, sc, mode);
cv::setNumThreads(1);
Mat resSingleThread;
dtFilter(guide, src, resSingleThread, ss, sc, mode);
EXPECT_LE(cv::norm(resSingleThread, resMultithread, NORM_INF), MAX_DIF);
EXPECT_LE(cv::norm(resSingleThread, resMultithread, NORM_L1), MAX_MEAN_DIF*src.total());
}
}
INSTANTIATE_TEST_CASE_P(FullSet, DomainTransformTest,
Combine(Values(szODD, szQVGA), ModeType::all(), SupportedTypes::all(), SupportedTypes::all())
);
template<typename SrcVec>
Mat getChessMat1px(Size sz, double whiteIntensity = 255)
{
typedef typename DataType<SrcVec>::channel_type SrcType;
Mat dst(sz, DataType<SrcVec>::type);
SrcVec black = SrcVec::all(0);
SrcVec white = SrcVec::all((SrcType)whiteIntensity);
for (int i = 0; i < dst.rows; i++)
for (int j = 0; j < dst.cols; j++)
dst.at<SrcVec>(i, j) = ((i + j) % 2) ? white : black;
return dst;
}
TEST(DomainTransformTest, ChessBoard_NC_accuracy)
{
RNG rng(0);
double MAX_DIF = 1;
Size sz = szVGA;
double ss = 80;
double sc = 60;
Mat srcb = randomMat(rng, sz, CV_8UC4, 0, 255, true);
Mat srcf = randomMat(rng, sz, CV_32FC4, 0, 255, true);
Mat chessb = getChessMat1px<Vec3b>(sz);
Mat dstb, dstf;
dtFilter(chessb, srcb.clone(), dstb, ss, sc, DTF_NC);
dtFilter(chessb, srcf.clone(), dstf, ss, sc, DTF_NC);
EXPECT_LE(cv::norm(srcb, dstb, NORM_INF), MAX_DIF);
EXPECT_LE(cv::norm(srcf, dstf, NORM_INF), MAX_DIF);
}
TEST(DomainTransformTest, BoxFilter_NC_accuracy)
{
double MAX_DIF = 1;
int radius = 5;
double sc = 1.0;
double ss = 1.01*radius / sqrt(3.0);
Mat src = imread(getOpenCVExtraDir() + "cv/edgefilter/statue.png");
ASSERT_TRUE(!src.empty());
Mat1b guide(src.size(), 200);
Mat res_dt, res_box;
blur(src, res_box, Size(2 * radius + 1, 2 * radius + 1));
dtFilter(guide, src, res_dt, ss, sc, DTF_NC, 1);
EXPECT_LE(cv::norm(res_dt, res_box, NORM_L2), MAX_DIF*src.total());
}
TEST(DomainTransformTest, AuthorReferenceAccuracy)
{
string dir = getOpenCVExtraDir() + "cv/edgefilter";
double ss = 30;
double sc = 0.2 * 255;
Mat src = imread(dir + "/statue.png");
Mat ref_NC = imread(dir + "/dt/authors_statue_NC_ss30_sc0.2.png");
Mat ref_IC = imread(dir + "/dt/authors_statue_IC_ss30_sc0.2.png");
Mat ref_RF = imread(dir + "/dt/authors_statue_RF_ss30_sc0.2.png");
ASSERT_FALSE(src.empty());
ASSERT_FALSE(ref_NC.empty());
ASSERT_FALSE(ref_IC.empty());
ASSERT_FALSE(ref_RF.empty());
cv::setNumThreads(cv::getNumberOfCPUs());
Mat res_NC, res_IC, res_RF;
dtFilter(src, src, res_NC, ss, sc, DTF_NC);
dtFilter(src, src, res_IC, ss, sc, DTF_IC);
dtFilter(src, src, res_RF, ss, sc, DTF_RF);
double totalMaxError = 1.0/64.0*src.total();
EXPECT_LE(cvtest::norm(res_NC, ref_NC, NORM_L2), totalMaxError);
EXPECT_LE(cvtest::norm(res_NC, ref_NC, NORM_INF), 1);
EXPECT_LE(cvtest::norm(res_IC, ref_IC, NORM_L2), totalMaxError);
EXPECT_LE(cvtest::norm(res_IC, ref_IC, NORM_INF), 1);
EXPECT_LE(cvtest::norm(res_RF, ref_RF, NORM_L2), totalMaxError);
EXPECT_LE(cvtest::norm(res_RF, ref_RF, NORM_INF), 1);
}
}
\ No newline at end of file
#include "test_precomp.hpp"
CV_TEST_MAIN("")
\ No newline at end of file
#ifdef __GNUC__
# pragma GCC diagnostic ignored "-Wmissing-declarations"
# if defined __clang__ || defined __APPLE__
# pragma GCC diagnostic ignored "-Wmissing-prototypes"
# pragma GCC diagnostic ignored "-Wextra"
# endif
#endif
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include <opencv2/ts.hpp>
#include <opencv2/ts/ts_perf.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/ximgproc.hpp>
#endif
\ No newline at end of file