Commit b0e450d2 authored by vludv

Added edge-aware filters.

parent 079ff5c0
set(the_description "Extended image processing module. It includes edge-aware filters and other functions.")
ocv_define_module(ximgproc opencv_imgproc opencv_core opencv_highgui)
target_link_libraries(opencv_ximgproc)
*******************************************
ximgproc. Extended image processing module.
*******************************************
.. highlight:: cpp

.. toctree::
    :maxdepth: 2

    edge_aware_filters
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2009, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __OPENCV_EDGEFILTER_HPP__
#define __OPENCV_EDGEFILTER_HPP__
#ifdef __cplusplus
#include <opencv2/core.hpp>
namespace cv
{
enum EdgeAwareFiltersList
{
DTF_NC,
DTF_IC,
DTF_RF,
GUIDED_FILTER,
AM_FILTER
};
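/* Mode names follow the underlying algorithms: DTF_NC, DTF_IC and DTF_RF are the
Normalized Convolution, Interpolated Convolution and Recursive Filtering variants of the
domain transform filter, GUIDED_FILTER is the guided filter, and AM_FILTER is the
adaptive manifold filter. */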
/*Interface for DT filters*/
class CV_EXPORTS_W DTFilter : public Algorithm
{
public:
CV_WRAP virtual void filter(InputArray src, OutputArray dst, int dDepth = -1) = 0;
};
typedef Ptr<DTFilter> DTFilterPtr;
/*Factory function for DT filters*/
CV_EXPORTS_W
Ptr<DTFilter> createDTFilter(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
/*One-line DT filter call*/
CV_EXPORTS_W
void dtFilter(InputArray guide, InputArray src, OutputArray dst, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
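/* Minimal usage sketch (not part of the original header; assumes an 8-bit BGR image
"img" is already loaded and used as its own guide, with sigmas matching the live_demo
defaults further below):

    Mat img = imread("input.png"), smoothed;
    dtFilter(img, img, smoothed, 10.0, 25.0, DTF_RF);

When several images share one guide, the factory form avoids recomputing the domain
transform:

    Ptr<DTFilter> dtf = createDTFilter(img, 10.0, 25.0, DTF_NC);
    dtf->filter(img, smoothed);
*/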
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/*Interface for Guided Filter*/
class CV_EXPORTS_W GuidedFilter : public Algorithm
{
public:
CV_WRAP virtual void filter(InputArray src, OutputArray dst, int dDepth = -1) = 0;
};
/*Factory function for Guided Filter*/
CV_EXPORTS_W Ptr<GuidedFilter> createGuidedFilter(InputArray guide, int radius, double eps);
/*One-line Guided Filter call*/
CV_EXPORTS_W void guidedFilter(InputArray guide, InputArray src, OutputArray dst, int radius, double eps, int dDepth = -1);
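/* Usage sketch (illustrative values, not from the original header): radius is in pixels
and eps acts as a squared color-variance threshold, so values of a few hundred are
reasonable for 8-bit guides:

    Mat smoothed;
    guidedFilter(guide, img, smoothed, 8, 400.0);
*/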
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
class CV_EXPORTS_W AdaptiveManifoldFilter : public Algorithm
{
public:
/**
* @brief Apply high-dimensional filtering using adaptive manifolds.
* @param src Input image to be filtered.
* @param dst Adaptive-manifold filter response.
* @param joint Image for joint filtering (optional).
*/
CV_WRAP virtual void filter(InputArray src, OutputArray dst, InputArray joint = noArray()) = 0;
CV_WRAP virtual void collectGarbage() = 0;
static Ptr<AdaptiveManifoldFilter> create();
};
//Factory function for the Adaptive Manifold filter
CV_EXPORTS_W Ptr<AdaptiveManifoldFilter> createAMFilter(double sigma_s, double sigma_r, bool adjust_outliers = false);
//One-line Adaptive Manifold filter call
CV_EXPORTS_W void amFilter(InputArray joint, InputArray src, OutputArray dst, double sigma_s, double sigma_r, bool adjust_outliers = false);
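/* Usage sketch mirroring the parameters of the performance test added in this commit
(sigma_s is the spatial sigma, in pixels; sigma_r is the range sigma):

    Mat response;
    amFilter(joint, img, response, 16.0, 0.5);
*/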
//////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/*Interface for Weighted Median Filter*/
class CV_EXPORTS_W WeightedMedian : public Algorithm
{
public:
CV_WRAP virtual void filter(InputArray src, OutputArray dst, double rangeQuantizer = 1, int dDepth = -1) = 0;
};
/*Factory function for Weighted Median Filter*/
CV_EXPORTS_W
Ptr<WeightedMedian> createWeightedMedianFilter(InputArray guide, double spatialSize, double colorSize, int filterType = DTF_NC);
/*One-line Weighted Median Filter call*/
CV_EXPORTS_W void weightedMedianFilter(InputArray guide, InputArray src, OutputArray dst, double spatialSize, double colorSize, int filterType = DTF_NC, double rangeQuantizer = 1);
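/* Usage sketch (illustrative values): spatialSize and colorSize play the same role as the
spatial and color sigmas of the weighting filter selected by filterType (DTF_NC by default):

    Mat median;
    weightedMedianFilter(guide, img, median, 10.0, 25.0);
*/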
}
#endif
#endif
#include "perf_precomp.hpp"
using namespace std;
using namespace std::tr1;
using namespace cv;
using namespace perf;
using namespace testing;
namespace cvtest
{
typedef tuple<bool, Size, int, int, MatType> AMPerfTestParam;
typedef TestBaseWithParam<AMPerfTestParam> AdaptiveManifoldPerfTest;
PERF_TEST_P( AdaptiveManifoldPerfTest, perf,
Combine(
Values(true, false), //adjust_outliers flag
Values(sz1080p, sz720p), //size
Values(1, 3, 8), //joint channels num
Values(1, 3), //source channels num
Values(CV_8U, CV_32F) //source and joint depth
)
)
{
AMPerfTestParam params = GetParam();
bool adjustOutliers = get<0>(params);
Size sz = get<1>(params);
int jointCnNum = get<2>(params);
int srcCnNum = get<3>(params);
int depth = get<4>(params);
Mat joint(sz, CV_MAKE_TYPE(depth, jointCnNum));
Mat src(sz, CV_MAKE_TYPE(depth, srcCnNum));
Mat dst(sz, CV_MAKE_TYPE(depth, srcCnNum));
cv::setNumThreads(cv::getNumberOfCPUs());
declare.in(joint, src, WARMUP_RNG).out(dst).tbb_threads(cv::getNumberOfCPUs());
double sigma_s = 16;
double sigma_r = 0.5;
TEST_CYCLE_N(3)
{
Mat res;
amFilter(joint, src, res, sigma_s, sigma_r, adjustOutliers);
//vary sigma_s and sigma_r between cycles so successive runs exercise different manifold tree depths
sigma_s *= 1.38;
sigma_r /= 1.38;
}
SANITY_CHECK(dst);
}
}
#include "perf_precomp.hpp"
using namespace std;
using namespace std::tr1;
using namespace cv;
using namespace perf;
using namespace testing;
namespace cvtest
{
CV_ENUM(GuideMatType, CV_8UC1, CV_8UC3, CV_32FC1, CV_32FC3) //reduced set
CV_ENUM(SourceMatType, CV_8UC1, CV_8UC2, CV_8UC3, CV_8UC4, CV_32FC1, CV_32FC2, CV_32FC3, CV_32FC4) //full supported set
CV_ENUM(DTFMode, DTF_NC, DTF_IC, DTF_RF)
typedef tuple<GuideMatType, SourceMatType, Size, double, double, DTFMode> DTTestParams;
typedef TestBaseWithParam<DTTestParams> DomainTransformTest;
PERF_TEST_P( DomainTransformTest, perf,
Combine(
GuideMatType::all(),
SourceMatType::all(),
Values(szVGA, sz720p),
Values(10.0, 80.0),
Values(30.0, 50.0),
DTFMode::all()
)
)
{
int guideType = get<0>(GetParam());
int srcType = get<1>(GetParam());
Size size = get<2>(GetParam());
double sigmaSpatial = get<3>(GetParam());
double sigmaColor = get<4>(GetParam());
int dtfType = get<5>(GetParam());
Mat guide(size, guideType);
Mat src(size, srcType);
Mat dst(size, srcType);
declare.in(guide, src, WARMUP_RNG).out(dst).tbb_threads(cv::getNumberOfCPUs());
cv::setNumThreads(cv::getNumberOfCPUs());
TEST_CYCLE_N(5)
{
dtFilter(guide, src, dst, sigmaSpatial, sigmaColor, dtfType);
}
SANITY_CHECK(dst);
}
}
#include "perf_precomp.hpp"
using namespace std;
using namespace std::tr1;
using namespace cv;
using namespace perf;
using namespace testing;
namespace cvtest
{
CV_ENUM(GuideTypes, CV_8UC1, CV_8UC2, CV_8UC3, CV_32FC1, CV_32FC2, CV_32FC3);
CV_ENUM(SrcTypes, CV_8UC1, CV_8UC3, CV_32FC1, CV_32FC3);
typedef tuple<GuideTypes, SrcTypes, Size> GFParams;
typedef TestBaseWithParam<GFParams> GuidedFilterPerfTest;
PERF_TEST_P( GuidedFilterPerfTest, perf, Combine(GuideTypes::all(), SrcTypes::all(), Values(sz1080p, sz2K)) )
{
RNG rng(0);
GFParams params = GetParam();
int guideType = get<0>(params);
int srcType = get<1>(params);
Size sz = get<2>(params);
Mat guide(sz, guideType);
Mat src(sz, srcType);
Mat dst(sz, srcType);
declare.in(guide, src, WARMUP_RNG).out(dst).tbb_threads(cv::getNumberOfCPUs());
cv::setNumThreads(cv::getNumberOfCPUs());
TEST_CYCLE_N(3)
{
int radius = rng.uniform(5, 30);
double eps = rng.uniform(0.1, 1e5);
guidedFilter(guide, src, dst, radius, eps);
}
SANITY_CHECK(dst);
}
}
#include "perf_precomp.hpp"
CV_PERF_TEST_MAIN(edgefilter)
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include <opencv2/ts.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/edge_filter.hpp>
#endif
cmake_minimum_required(VERSION 2.8)
project(live_demo)
find_package(OpenCV 3.0 REQUIRED)
set(SOURCES live_demo.cpp)
include_directories(${OpenCV_INCLUDE_DIRS})
add_executable(live_demo ${SOURCES} ${HEADERS})
target_link_libraries(live_demo ${OpenCV_LIBS})
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/edge_filter.hpp>
using namespace cv;
#include <iostream>
using namespace std;
typedef void(*FilteringOperation)(const Mat& src, Mat& dst);
//current mode (filtering operation example)
FilteringOperation g_filterOp = NULL;
//common sliders for every mode
int g_sigmaColor = 25;
int g_sigmaSpatial = 10;
//for Stylizing mode
int g_edgesGamma = 100;
//for Details Enhancement mode
int g_contrastBase = 100;
int g_detailsLevel = 100;
int g_numberOfCPUs = cv::getNumberOfCPUs();
//trivial filter
void filterDoNothing(const Mat& frame, Mat& dst)
{
frame.copyTo(dst);
}
//simple edge-aware blurring
void filterBlurring(const Mat& frame, Mat& dst)
{
dtFilter(frame, frame, dst, g_sigmaSpatial, g_sigmaColor, DTF_RF);
}
//stylizing filter
void filterStylize(const Mat& frame, Mat& dst)
{
//blur frame
Mat filtered;
dtFilter(frame, frame, filtered, g_sigmaSpatial, g_sigmaColor, DTF_NC);
//compute grayscale blurred frame
Mat filteredGray;
cvtColor(filtered, filteredGray, COLOR_BGR2GRAY);
//find gradients of blurred image
Mat gradX, gradY;
Sobel(filteredGray, gradX, CV_32F, 1, 0, 3, 1.0/255);
Sobel(filteredGray, gradY, CV_32F, 0, 1, 3, 1.0/255);
//compute the gradient magnitude and raise it to the power given by the gamma parameter
Mat gradMagnitude;
magnitude(gradX, gradY, gradMagnitude);
cv::pow(gradMagnitude, g_edgesGamma/100.0, gradMagnitude);
//multiply the blurred frame by a factor inversely proportional to the gradient magnitude
Mat multiplier = 1.0/(1.0 + gradMagnitude);
cvtColor(multiplier, multiplier, COLOR_GRAY2BGR);
multiply(filtered, multiplier, dst, 1, dst.type());
}
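//Detail enhancement: split the lightness channel into a base layer and two detail layers
//with iterated DT filtering, rescale the base contrast around the mean lightness, and add
//the detail layers back with user-controlled weights.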
void filterDetailEnhancement(const Mat& frame8u, Mat& dst)
{
Mat frame;
frame8u.convertTo(frame, CV_32F, 1.0/255);
//Decompose the image into its 3 Lab channels
Mat frameLab, frameLabCn[3];
cvtColor(frame, frameLab, COLOR_BGR2Lab);
split(frameLab, frameLabCn);
//Generate progressively smoother versions of the lightness channel
Mat layer0 = frameLabCn[0]; //first channel is original lightness
Mat layer1, layer2;
dtFilter(layer0, layer0, layer1, g_sigmaSpatial, g_sigmaColor, DTF_IC);
dtFilter(layer1, layer1, layer2, 2*g_sigmaSpatial, g_sigmaColor, DTF_IC);
//Compute detail layers
Mat detailLayer1 = layer0 - layer1;
Mat detailLayer2 = layer1 - layer2;
double cBase = g_contrastBase / 100.0;
double cDetails1 = g_detailsLevel / 100.0;
double cDetails2 = 2.0 - g_detailsLevel / 100.0;
//Generate the new lightness channel
double meanLightness = mean(frameLabCn[0])[0];
frameLabCn[0] = cBase*(layer2 - meanLightness) + meanLightness; //adjust contrast of the base (most blurred) layer
frameLabCn[0] += cDetails1*detailLayer1; //add weighted detail layers back to the new lightness
frameLabCn[0] += cDetails2*detailLayer2;
//Update new lightness
merge(frameLabCn, 3, frameLab);
cvtColor(frameLab, frame, COLOR_Lab2BGR);
frame.convertTo(dst, CV_8U, 255);
}
void splitScreen(const Mat& rawFrame, Mat& outputFrame, Mat& srcFrame, Mat& processedFrame)
{
int h = rawFrame.rows;
int w = rawFrame.cols;
int cn = rawFrame.channels();
outputFrame.create(h, 2 * w, CV_MAKE_TYPE(CV_8U, cn));
srcFrame = outputFrame(Range::all(), Range(0, w));
processedFrame = outputFrame(Range::all(), Range(w, 2*w));
rawFrame.convertTo(srcFrame, srcFrame.type());
}
void changeModeCallback(int state, void *filter)
{
if (state == 1)
g_filterOp = (FilteringOperation) filter;
}
void changeNumberOfCpuCallback(int count, void *data)
{
count = std::max(1, count);
cv::setNumThreads(count);
g_numberOfCPUs = count;
}
int main(int argc, char **argv)
{
VideoCapture cap(0);
if (!cap.isOpened())
{
cerr << "Capture device was not found" << endl;
return -1;
}
namedWindow("Demo");
displayOverlay("Demo", "Press Ctrl+P to show property window", 5000);
//Thread trackbar
cv::setNumThreads(g_numberOfCPUs); //speedup filtering
createTrackbar("Threads", NULL, &g_numberOfCPUs, cv::getNumberOfCPUs(), changeNumberOfCpuCallback);
//Buttons to choose different modes
createButton("Mode Details Enhancement", changeModeCallback, filterDetailEnhancement, QT_RADIOBOX, true);
createButton("Mode Stylizing", changeModeCallback, filterStylize, QT_RADIOBOX, false);
createButton("Mode Blurring", changeModeCallback, filterBlurring, QT_RADIOBOX, false);
createButton("Mode DoNothing", changeModeCallback, filterDoNothing, QT_RADIOBOX, false);
//sliders for Details Enhancement mode
g_filterOp = filterDetailEnhancement; //set Details Enhancement as default filter
createTrackbar("Detail contrast", NULL, &g_contrastBase, 200);
createTrackbar("Detail level" , NULL, &g_detailsLevel, 200);
//sliders for Stylizing mode
createTrackbar("Style gamma", NULL, &g_edgesGamma, 300);
//sliders for every mode
createTrackbar("Sigma Spatial", NULL, &g_sigmaSpatial, 200);
createTrackbar("Sigma Color" , NULL, &g_sigmaColor, 200);
Mat rawFrame, outputFrame;
Mat srcFrame, processedFrame;
for (;;)
{
do
{
cap >> rawFrame;
} while (rawFrame.empty());
splitScreen(rawFrame, outputFrame, srcFrame, processedFrame);
g_filterOp(srcFrame, processedFrame);
imshow("Demo", outputFrame);
if (waitKey(1) == 27) break;
}
return 0;
}
#include "precomp.hpp"
#include "dtfilter_cpu.hpp"
#include "dtfilter_ocl.hpp"
namespace cv
{
static bool dtUseOpenCLVersion(InputArray guide, InputArray src, int mode, int numIters)
{
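//Note: the leading 'false' in the condition below keeps the OpenCL branch disabled,
//so dtFilter() currently always falls back to the CPU implementation.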
return
false && guide.isUMat() && ocl::useOpenCL() &&
(guide.cols() >= 256 && guide.rows() >= 256) &&
(guide.depth() == CV_32F || guide.depth() == CV_8U) &&
(src.depth() == CV_32F || src.depth() == CV_8U) &&
(numIters <= 3);
}
CV_EXPORTS_W
Ptr<DTFilter> createDTFilter(InputArray guide, double sigmaSpatial, double sigmaColor, int mode, int numIters)
{
return Ptr<DTFilter>(DTFilterCPU::create(guide, sigmaSpatial, sigmaColor, mode, numIters));
}
CV_EXPORTS_W
void dtFilter(InputArray guide, InputArray src, OutputArray dst, double sigmaSpatial, double sigmaColor, int mode, int numIters)
{
if (dtUseOpenCLVersion(guide, src, mode, numIters))
{
Ptr<DTFilterOCL> dtf = DTFilterOCL::create(guide, sigmaSpatial, sigmaColor, mode, numIters);
dtf->setSingleFilterCall(true);
dtf->filter(src, dst);
}
else
{
Ptr<DTFilterCPU> dtf = DTFilterCPU::create(guide, sigmaSpatial, sigmaColor, mode, numIters);
dtf->setSingleFilterCall(true);
dtf->filter(src, dst);
}
}
CV_EXPORTS_W
Ptr<DTFilter> createDTFilterOCL(InputArray guide, double sigmaSpatial, double sigmaColor, int mode, int numIters)
{
Ptr<DTFilterOCL> dtf = DTFilterOCL::create(guide, sigmaSpatial, sigmaColor, mode, numIters);
return Ptr<DTFilter>(dtf);
}
}
#include "precomp.hpp"
#include "dtfilter_cpu.hpp"
namespace cv
{
typedef Vec<uchar, 1> Vec1b;
typedef Vec<float, 1> Vec1f;
Ptr<DTFilterCPU> DTFilterCPU::create(InputArray guide, double sigmaSpatial, double sigmaColor, int mode, int numIters)
{
Ptr<DTFilterCPU> dtf(new DTFilterCPU());
dtf->init(guide, sigmaSpatial, sigmaColor, mode, numIters);
return dtf;
}
Ptr<DTFilterCPU> DTFilterCPU::createRF(InputArray adistHor, InputArray adistVert, double sigmaSpatial, double sigmaColor, int numIters /*= 3*/)
{
Mat adh = adistHor.getMat();
Mat adv = adistVert.getMat();
CV_Assert(adh.type() == CV_32FC1 && adv.type() == CV_32FC1 && adh.rows == adv.rows + 1 && adh.cols == adv.cols - 1);
Ptr<DTFilterCPU> dtf(new DTFilterCPU());
dtf->release();
dtf->mode = DTF_RF;
dtf->numIters = std::max(1, numIters);
dtf->h = adh.rows;
dtf->w = adh.cols + 1;
dtf->sigmaSpatial = std::max(1.0f, (float)sigmaSpatial);
dtf->sigmaColor = std::max(0.01f, (float)sigmaColor);
dtf->a0distHor = adh;
dtf->a0distVert = adv;
return dtf;
}
void DTFilterCPU::init(InputArray guide_, double sigmaSpatial, double sigmaColor, int mode, int numIters)
{
Mat guide = guide_.getMat();
int cn = guide.channels();
int depth = guide.depth();
CV_Assert(cn <= 4);
CV_Assert((depth == CV_8U || depth == CV_32F) && !guide.empty());
#define CREATE_DTF(Vect) init_<Vect>(guide, sigmaSpatial, sigmaColor, mode, numIters);
if (cn == 1)
{
if (depth == CV_8U)
CREATE_DTF(Vec1b);
if (depth == CV_32F)
CREATE_DTF(Vec1f);
}
else if (cn == 2)
{
if (depth == CV_8U)
CREATE_DTF(Vec2b);
if (depth == CV_32F)
CREATE_DTF(Vec2f);
}
else if (cn == 3)
{
if (depth == CV_8U)
CREATE_DTF(Vec3b);
if (depth == CV_32F)
CREATE_DTF(Vec3f);
}
else if (cn == 4)
{
if (depth == CV_8U)
CREATE_DTF(Vec4b);
if (depth == CV_32F)
CREATE_DTF(Vec4f);
}
#undef CREATE_DTF
}
void DTFilterCPU::filter(InputArray src_, OutputArray dst_, int dDepth)
{
Mat src = src_.getMat();
dst_.create(src.size(), src.type());
Mat& dst = dst_.getMatRef();
int cn = src.channels();
int depth = src.depth();
CV_Assert(cn <= 4 && (depth == CV_8U || depth == CV_32F));
if (cn == 1)
{
if (depth == CV_8U)
filter_<Vec1b>(src, dst, dDepth);
if (depth == CV_32F)
filter_<Vec1f>(src, dst, dDepth);
}
else if (cn == 2)
{
if (depth == CV_8U)
filter_<Vec2b>(src, dst, dDepth);
if (depth == CV_32F)
filter_<Vec2f>(src, dst, dDepth);
}
else if (cn == 3)
{
if (depth == CV_8U)
filter_<Vec3b>(src, dst, dDepth);
if (depth == CV_32F)
filter_<Vec3f>(src, dst, dDepth);
}
else if (cn == 4)
{
if (depth == CV_8U)
filter_<Vec4b>(src, dst, dDepth);
if (depth == CV_32F)
filter_<Vec4f>(src, dst, dDepth);
}
}
void DTFilterCPU::setSingleFilterCall(bool value)
{
singleFilterCall = value;
}
void DTFilterCPU::release()
{
if (mode == -1) return;
idistHor.release();
idistVert.release();
distHor.release();
distVert.release();
a0distHor.release();
a0distVert.release();
adistHor.release();
adistVert.release();
}
Mat DTFilterCPU::getWExtendedMat(int h, int w, int type, int brdleft /*= 0*/, int brdRight /*= 0*/, int cacheAlign /*= 0*/)
{
int wrapperCols = w + brdleft + brdRight;
if (cacheAlign > 0)
wrapperCols += ((wrapperCols + cacheAlign-1) / cacheAlign)*cacheAlign;
Mat mat(h, wrapperCols, type);
return mat(Range::all(), Range(brdleft, w + brdleft));
}
Range DTFilterCPU::getWorkRangeByThread(const Range& itemsRange, const Range& rangeThread, int declaredNumThreads)
{
if (declaredNumThreads <= 0)
declaredNumThreads = cv::getNumThreads();
int chunk = itemsRange.size() / declaredNumThreads;
int start = itemsRange.start + chunk * rangeThread.start;
int end = itemsRange.start + ((rangeThread.end >= declaredNumThreads) ? itemsRange.size() : chunk * rangeThread.end);
return Range(start, end);
}
Range DTFilterCPU::getWorkRangeByThread(int items, const Range& rangeThread, int declaredNumThreads)
{
return getWorkRangeByThread(Range(0, items), rangeThread, declaredNumThreads);
}
}
#ifndef __OPENCV_DTFILTER_HPP__
#define __OPENCV_DTFILTER_HPP__
#include "precomp.hpp"
#ifdef _MSC_VER
#pragma warning(disable: 4512)
#pragma warning(disable: 4127)
#endif
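//When defined, vertical RF passes are split into cv::getNumThreads() work ranges instead of
//one range per column (see FilterRF_vertPass::getRange below); left undefined in this revision.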
#define CV_GET_NUM_THREAD_WORKS_PROPERLY
#undef CV_GET_NUM_THREAD_WORKS_PROPERLY
namespace cv
{
class DTFilterCPU : public DTFilter
{
public: /*Non-template methods*/
static Ptr<DTFilterCPU> create(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
static Ptr<DTFilterCPU> createRF(InputArray adistHor, InputArray adistVert, double sigmaSpatial, double sigmaColor, int numIters = 3);
void filter(InputArray src, OutputArray dst, int dDepth = -1);
void setSingleFilterCall(bool value);
public: /*Template methods*/
/*Use these static methods instead of the constructor*/
template<typename GuideVec>
static DTFilterCPU* create_p_(const Mat& guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
template<typename GuideVec>
static DTFilterCPU create_(const Mat& guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
template<typename GuideVec>
void init_(Mat& guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
template<typename SrcVec>
void filter_(const Mat& src, Mat& dst, int dDepth = -1);
protected: /*Typedefs declarations*/
typedef float IDistType;
typedef Vec<IDistType, 1> IDistVec;
typedef float DistType;
typedef Vec<DistType, 1> DistVec;
typedef float WorkType;
public: /*Members declarations*/
int h, w, mode;
float sigmaSpatial, sigmaColor;
bool singleFilterCall;
int numFilterCalls;
Mat idistHor, idistVert;
Mat distHor, distVert;
Mat a0distHor, a0distVert;
Mat adistHor, adistVert;
int numIters;
protected: /*Functions declarations*/
DTFilterCPU() : mode(-1), singleFilterCall(false), numFilterCalls(0) {}
void init(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
void release();
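/*Domain transform of the step between neighboring guide pixels:
1 + (sigmaSpatial/sigmaColor) * L1 distance of the guide values.*/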
template<typename GuideVec>
inline IDistType getTransformedDistance(const GuideVec &l, const GuideVec &r)
{
return (IDistType)(1.0f + sigmaSpatial / sigmaColor * norm1<IDistType>(l, r));
}
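/*Per-iteration recursion parameters: getIterSigmaH() shrinks the spatial sigma geometrically
across iterations so the composed passes approximate a single filter with sigmaSpatial, and
getIterAlpha() turns it into the feedback coefficient of the recursive pass.*/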
inline double getIterSigmaH(int iterNum)
{
return sigmaSpatial * std::pow(2.0, numIters - iterNum) / sqrt(std::pow(4.0, numIters) - 1);
}
inline IDistType getIterRadius(int iterNum)
{
return (IDistType)(3.0*getIterSigmaH(iterNum));
}
inline float getIterAlpha(int iterNum)
{
return (float)std::exp(-std::sqrt(2.0 / 3.0) / getIterSigmaH(iterNum));
}
protected: /*Wrappers for parallelization*/
template <typename WorkVec>
struct FilterNC_horPass : public ParallelLoopBody
{
Mat &src, &idist, &dst;
float radius;
FilterNC_horPass(Mat& src_, Mat& idist_, Mat& dst_);
void operator() (const Range& range) const;
};
template <typename WorkVec>
struct FilterIC_horPass : public ParallelLoopBody
{
Mat &src, &idist, &dist, &dst, isrcBuf;
float radius;
FilterIC_horPass(Mat& src_, Mat& idist_, Mat& dist_, Mat& dst_);
void operator() (const Range& range) const;
};
template <typename WorkVec>
struct FilterRF_horPass : public ParallelLoopBody
{
Mat &res, &alphaD;
int iteration;
FilterRF_horPass(Mat& res_, Mat& alphaD_, int iteration_);
void operator() (const Range& range) const;
Range getRange() const { return Range(0, res.rows); }
};
template <typename WorkVec>
struct FilterRF_vertPass : public ParallelLoopBody
{
Mat &res, &alphaD;
int iteration;
FilterRF_vertPass(Mat& res_, Mat& alphaD_, int iteration_);
void operator() (const Range& range) const;
#ifdef CV_GET_NUM_THREAD_WORKS_PROPERLY
Range getRange() const { return Range(0, cv::getNumThreads()); }
#else
Range getRange() const { return Range(0, res.cols); }
#endif
};
template <typename GuideVec>
struct ComputeIDTHor_ParBody: public ParallelLoopBody
{
DTFilterCPU &dtf;
Mat &guide, &dst;
ComputeIDTHor_ParBody(DTFilterCPU& dtf_, Mat& guide_, Mat& dst_);
void operator() (const Range& range) const;
Range getRange() { return Range(0, guide.rows); }
};
template <typename GuideVec>
struct ComputeDTandIDTHor_ParBody : public ParallelLoopBody
{
DTFilterCPU &dtf;
Mat &guide, &dist, &idist;
IDistType maxRadius;
ComputeDTandIDTHor_ParBody(DTFilterCPU& dtf_, Mat& guide_, Mat& dist_, Mat& idist_);
void operator() (const Range& range) const;
Range getRange() { return Range(0, guide.rows); }
};
template <typename GuideVec>
struct ComputeA0DTHor_ParBody : public ParallelLoopBody
{
DTFilterCPU &dtf;
Mat &guide;
float lna;
ComputeA0DTHor_ParBody(DTFilterCPU& dtf_, Mat& guide_);
void operator() (const Range& range) const;
Range getRange() { return Range(0, guide.rows); }
~ComputeA0DTHor_ParBody();
};
template <typename GuideVec>
struct ComputeA0DTVert_ParBody : public ParallelLoopBody
{
DTFilterCPU &dtf;
Mat &guide;
float lna;
ComputeA0DTVert_ParBody(DTFilterCPU& dtf_, Mat& guide_);
void operator() (const Range& range) const;
Range getRange() const { return Range(0, guide.rows - 1); }
~ComputeA0DTVert_ParBody();
};
protected: /*Auxiliary implementation functions*/
static Range getWorkRangeByThread(const Range& itemsRange, const Range& rangeThread, int maxThreads = 0);
static Range getWorkRangeByThread(int items, const Range& rangeThread, int maxThreads = 0);
template<typename SrcVec>
static void prepareSrcImg_IC(const Mat& src, Mat& inner, Mat& outer);
static Mat getWExtendedMat(int h, int w, int type, int brdleft = 0, int brdRight = 0, int cacheAlign = 0);
template<typename SrcVec, typename SrcWorkVec>
static void integrateSparseRow(const SrcVec *src, const float *dist, SrcWorkVec *dst, int cols);
template<typename SrcVec, typename SrcWorkVec>
static void integrateRow(const SrcVec *src, SrcWorkVec *dst, int cols);
inline static int getLeftBound(IDistType *idist, int pos, IDistType searchValue)
{
while (idist[pos] < searchValue)
pos++;
return pos;
}
inline static int getRightBound(IDistType *idist, int pos, IDistType searchValue)
{
while (idist[pos + 1] < searchValue)
pos++;
return pos;
}
template <typename T, typename T1, typename T2, int n>
inline static T norm1(const cv::Vec<T1, n>& v1, const cv::Vec<T2, n>& v2)
{
T sum = (T) 0;
for (int i = 0; i < n; i++)
sum += std::abs( (T)v1[i] - (T)v2[i] );
return sum;
}
};
/*One-line template wrappers for DT call*/
template<typename GuideVec, typename SrcVec>
void domainTransformFilter( const Mat_<GuideVec>& guide,
const Mat_<SrcVec>& source,
Mat& dst,
double sigmaSpatial, double sigmaColor,
int mode = cv::DTF_NC, int numPasses = 3
);
template<typename GuideVec, typename SrcVec>
void domainTransformFilter( const Mat& guide,
const Mat& source,
Mat& dst,
double sigmaSpatial, double sigmaColor,
int mode = cv::DTF_NC, int numPasses = 3
);
}
#include "dtfilter_cpu.inl.hpp"
#endif
#pragma once
#include "precomp.hpp"
namespace cv
{
class DTFilterOCL : public DTFilter
{
public:
static Ptr<DTFilterOCL> create(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
void filter(InputArray src, OutputArray dst, int dDepth = -1);
void setSingleFilterCall(bool flag);
~DTFilterOCL();
protected: /*Members declarations*/
UMat idistHor, idistVert;
UMat distHor, distVert;
UMat distHorT, distVertT;
UMat &a0distHor, &a0distVert; //aliases for distHor and distVert
UMat distIndexHor, distIndexVert;
UMat tailVert, tailHor;
int mode, numIters;
float sigmaSpatial, sigmaColor;
bool singleFilterCall;
int h, w;
int workType;
double meanDist;
static int64 totalKernelsTime;
int64 kernelsTime;
ocl::ProgramSource kerProgSrcDT;
ocl::ProgramSource kerProgSrcFilter;
cv::String buildOptionsFlt;
cv::String buildOptionsDT;
int NC_ocl_implememtation;
enum NCOclImplementation
{
ALG_PER_ROW,
ALG_PER_PIXEL,
ALG_INDEX,
};
protected: /*Functions declarations*/
DTFilterOCL();
void init(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
inline double getIterSigmaH(int iterNum)
{
return sigmaSpatial * std::pow(2.0, numIters - iterNum) / sqrt(std::pow(4.0, numIters) - 1);
}
inline float getIterRadius(int iterNum)
{
return (float)(3.0*getIterSigmaH(iterNum));
}
inline float getIterAlpha(int iterNum)
{
return (float)std::exp(-std::sqrt(2.0 / 3.0) / getIterSigmaH(iterNum));
}
int getMaxWorkGropSize();
int getCacheLineSize();
void initProgram();
void setBuildOptionsFlt(InputArray src_);
void setBuildOptionsDT(UMat& guide);
void initDT_NC(UMat& guide);
void initDT_RF(UMat& guide);
void initDT_IC(UMat& guide);
void computeDT(UMat& guide, UMat& distHorOuter, UMat& distVertOuter);
void computeIDT(UMat& guide, UMat& guideT);
void filterNC_IndexAlg(UMat& src, UMat& dst);
void filterNC_perRowAlg(UMat& src, UMat& dst);
void filterNC_PerPixelAlg(UMat& src, UMat& dst);
void filterIC(InputArray src_, OutputArray dst_, int dDepth);
void filterRF(InputArray src_, OutputArray dst_, int dDepth);
void filterRF_naiveAlg(UMat& res, UMat& resT, UMat& adistHor, UMat& adistVert);
void filterRF_blockAlg(UMat& res, UMat& resT, UMat& adistHor, UMat& adistVert);
void filterRF_iter_vert_pass(UMat& res, UMat& adist, UMat& weights);
void computeIndexMat(UMat& idist, UMat& distIndex, bool useIDT = true);
void integrateCols(UMat& src, UMat& isrc);
void computeIndexAndTailMat(UMat& dist, UMat& distIndex, UMat& tail);
void duplicateVerticalBorders(UMat& srcOuter);
void integrateColsIC(UMat& src, UMat& dist, UMat& isrc);
static UMat UMatAligned(int rows, int cols, int type, int align = 16, int usageFlags = USAGE_DEFAULT);
static void createAligned(UMat& src, int rows, int cols, int type, int align = 16, int usageFlags = USAGE_DEFAULT);
};
}
#ifndef __EDGEAWAREFILTERS_COMMON_HPP__
#define __EDGEAWAREFILTERS_COMMON_HPP__
#ifdef __cplusplus
namespace cv
{
Ptr<DTFilter> createDTFilterRF(InputArray adistHor, InputArray adistVert, double sigmaSpatial, double sigmaColor, int numIters);
int getTotalNumberOfChannels(InputArrayOfArrays src);
void checkSameSizeAndDepth(InputArrayOfArrays src, Size &sz, int &depth);
namespace eaf
{
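//Low-level row helpers shared by the filter implementations; each processes w consecutive floats.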
void add_(register float *dst, register float *src1, int w);
void mul(register float *dst, register float *src1, register float *src2, int w);
void mul(register float *dst, register float *src1, float src2, int w);
//dst = alpha*src + beta
void mad(register float *dst, register float *src1, float alpha, float beta, int w);
void add_mul(register float *dst, register float *src1, register float *src2, int w);
void sub_mul(register float *dst, register float *src1, register float *src2, int w);
void sub_mad(register float *dst, register float *src1, register float *src2, float c0, int w);
void det_2x2(register float *dst, register float *a00, register float *a01, register float *a10, register float *a11, int w);
void div_det_2x2(register float *a00, register float *a01, register float *a11, int w);
void div_1x(register float *a1, register float *b1, int w);
void inv_self(register float *src, int w);
void sqr_(register float *dst, register float *src1, int w);
void sqrt_(register float *dst, register float *src, int w);
void sqr_dif(register float *dst, register float *src1, register float *src2, int w);
void add_sqr_dif(register float *dst, register float *src1, register float *src2, int w);
void add_sqr(register float *dst, register float *src1, int w);
void min_(register float *dst, register float *src1, register float *src2, int w);
void rf_vert_row_pass(register float *curRow, register float *prevRow, float alphaVal, int w);
}
}
#endif
#endif
#ifndef _OPENCV_EDGEFILTER_PRECOMP_HPP_
#define _OPENCV_EDGEFILTER_PRECOMP_HPP_
#include <opencv2/edge_filter.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/ocl.hpp>
#include <opencv2/core/base.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/core/cvdef.h>
#include <opencv2/core/core_c.h>
#include <opencv2/core/private.hpp>
#include <opencv2/imgproc.hpp>
#include <limits>
#endif
#include "test_precomp.hpp"
CV_TEST_MAIN("")
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include <opencv2/ts.hpp>
#include <opencv2/ts/ts_perf.hpp>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/edge_filter.hpp>
#endif