Commit 2ce3606e authored by Vadim Pisarevsky

Merge pull request #108 from ludv1x/master

Adaptive manifold filter improvements
parents 38bdd6f3 b1b36cd2
@@ -64,8 +64,6 @@ public:
CV_WRAP virtual void filter(InputArray src, OutputArray dst, int dDepth = -1) = 0;
};
typedef Ptr<DTFilter> DTFilterPtr;
/*Factory function for DT filters*/
CV_EXPORTS_W
Ptr<DTFilter> createDTFilter(InputArray guide, double sigmaSpatial, double sigmaColor, int mode = DTF_NC, int numIters = 3);
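As a usage sketch (not part of this diff; image paths and parameter values are illustrative), the factory is typically called once per guide image and the returned filter reused:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/ximgproc/edge_filter.hpp>

int main()
{
    cv::Mat guide = cv::imread("guide.png"); // hypothetical input paths
    cv::Mat src = cv::imread("src.png");
    cv::Mat dst;
    // Build the domain-transform filter once from the guide image...
    cv::Ptr<cv::ximgproc::DTFilter> dtf =
        cv::ximgproc::createDTFilter(guide, 10.0, 25.0, cv::ximgproc::DTF_NC, 3);
    // ...then apply it to any number of images; dDepth = -1 keeps the source depth.
    dtf->filter(src, dst);
    return 0;
}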
@@ -40,6 +40,10 @@
#include <cstring>
#include <limits>
#ifdef _MSC_VER
# pragma warning(disable: 4512)
#endif
namespace
{
@@ -53,8 +57,6 @@ using namespace cv::ximgproc::intrinsics;
#define SQR(x) ((x)*(x))
#endif
void computeEigenVector(const Mat1f& X, const Mat1b& mask, Mat1f& dst, int num_pca_iterations, const Mat1f& rand_vec);
inline double Log2(double n)
{
return log(n) / log(2.0);
@@ -176,40 +178,28 @@ private: /*inline functions*/
return Size( cvRound(srcSize.width * (1.0/df)), cvRound(srcSize.height * (1.0/df)) );
}
void downsample(InputArray src, OutputArray dst)
void downsample(const Mat& src, Mat& dst)
{
if (src.isMatVector())
{
vector<Mat>& srcv = *static_cast< vector<Mat>* >(src.getObj());
vector<Mat>& dstv = *static_cast< vector<Mat>* >(dst.getObj());
dstv.resize(srcv.size());
for (int i = 0; i < (int)srcv.size(); i++)
downsample(srcv[i], dstv[i]);
}
else
{
double df = getResizeRatio();
CV_DbgAssert(src.empty() || src.size() == srcSize);
resize(src, dst, Size(), 1.0 / df, 1.0 / df, INTER_LINEAR);
CV_DbgAssert(dst.size() == smallSize);
}
double df = getResizeRatio();
CV_DbgAssert(src.empty() || src.size() == srcSize);
resize(src, dst, Size(), 1.0 / df, 1.0 / df, INTER_LINEAR);
CV_DbgAssert(dst.size() == smallSize);
}
void upsample(InputArray src, OutputArray dst)
void upsample(const Mat& src, Mat& dst)
{
if (src.isMatVector())
{
vector<Mat>& srcv = *static_cast< vector<Mat>* >(src.getObj());
vector<Mat>& dstv = *static_cast< vector<Mat>* >(dst.getObj());
dstv.resize(srcv.size());
for (int i = 0; i < (int)srcv.size(); i++)
upsample(srcv[i], dstv[i]);
}
else
{
CV_DbgAssert(src.empty() || src.size() == smallSize);
resize(src, dst, srcSize, 0, 0);
}
CV_DbgAssert(src.empty() || src.size() == smallSize);
resize(src, dst, srcSize, 0, 0);
}
void downsample(const vector<Mat>& srcv, vector<Mat>& dstv)
{
mapParallel(&AdaptiveManifoldFilterN::downsample, srcv, dstv);
}
void upsample(const vector<Mat>& srcv, vector<Mat>& dstv)
{
mapParallel(&AdaptiveManifoldFilterN::upsample, srcv, dstv);
}
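A self-contained sketch of the resampling pattern above (standalone code, not the class; df stands for the value returned by getResizeRatio()): downsampling scales by 1/df, while upsampling passes the original size explicitly, since rescaling by df could be off by one pixel after rounding, which the debug assertions guard against.

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

static void resampleRoundTrip(const cv::Mat& src, double df)
{
    cv::Mat small, restored;
    // Downsample by the ratio df (bilinear, as in downsample() above).
    cv::resize(src, small, cv::Size(), 1.0 / df, 1.0 / df, cv::INTER_LINEAR);
    // Upsample back to the exact original size, not by a scale factor,
    // so the round trip reproduces src.size() exactly.
    cv::resize(small, restored, src.size(), 0, 0, cv::INTER_LINEAR);
    CV_Assert(restored.size() == src.size());
}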
private:
@@ -236,6 +226,39 @@ private:
static void computeDTHor(vector<Mat>& srcCn, Mat& dst, float ss, float sr);
static void computeDTVer(vector<Mat>& srcCn, Mat& dst, float ss, float sr);
static void computeEigenVector(const vector<Mat>& X, const Mat1b& mask, Mat1f& vecDst, int num_pca_iterations, const Mat1f& vecRand);
static void computeOrientation(const vector<Mat>& X, const Mat1f& vec, Mat1f& dst);
private: /*Parallelization routines*/
typedef void (AdaptiveManifoldFilterN::*MapFunc)(const Mat& src, Mat& dst);
void mapParallel(MapFunc func, const vector<Mat>& srcv, vector<Mat>& dstv)
{
dstv.resize(srcv.size());
parallel_for_(Range(0, (int)srcv.size()), MapParallelLoopBody(this, func, srcv, dstv));
}
struct MapParallelLoopBody : public cv::ParallelLoopBody
{
MapParallelLoopBody(AdaptiveManifoldFilterN *_instancePtr, MapFunc _transform, const vector<Mat>& _srcv, vector<Mat>& _dstv)
: instancePtr(_instancePtr), transform(_transform), srcv(_srcv), dstv(_dstv)
{}
AdaptiveManifoldFilterN *instancePtr;
MapFunc transform;
const vector<Mat>& srcv;
vector<Mat>& dstv;
void operator () (const Range& range) const
{
for (int i = range.start; i < range.end; i++)
(instancePtr->*transform)(srcv[i], dstv[i]);
}
};
};
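The dispatch pattern above as a standalone sketch (illustrative names, not the module's API): cv::parallel_for_ splits the index range across threads, and the stored pointer-to-member lets a single loop body parallelize any per-Mat transform of the owning class. The reference members in the body struct are also what triggers MSVC warning C4512 (assignment operator could not be generated), disabled near the top of this file.

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>

struct Mapper
{
    typedef void (Mapper::*MapFunc)(const cv::Mat& src, cv::Mat& dst);

    // Example per-Mat member transform: halve the intensity.
    void scaleOne(const cv::Mat& src, cv::Mat& dst) { src.convertTo(dst, -1, 0.5); }

    struct Body : public cv::ParallelLoopBody
    {
        Mapper *ptr;
        MapFunc func;
        const std::vector<cv::Mat>& srcv;
        std::vector<cv::Mat>& dstv;

        Body(Mapper *p, MapFunc f, const std::vector<cv::Mat>& s, std::vector<cv::Mat>& d)
            : ptr(p), func(f), srcv(s), dstv(d) {}

        void operator () (const cv::Range& range) const
        {
            for (int i = range.start; i < range.end; i++)
                (ptr->*func)(srcv[i], dstv[i]); // one Mat per loop index
        }
    };

    void mapParallel(MapFunc func, const std::vector<cv::Mat>& srcv, std::vector<cv::Mat>& dstv)
    {
        dstv.resize(srcv.size());
        cv::parallel_for_(cv::Range(0, (int)srcv.size()), Body(this, func, srcv, dstv));
    }
};

Usage would then read mapParallel(&Mapper::scaleOne, srcChannels, dstChannels), mirroring the downsample/upsample wrappers above.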
CV_INIT_ALGORITHM(AdaptiveManifoldFilterN, "AdaptiveManifoldFilter",
@@ -660,36 +683,36 @@ void AdaptiveManifoldFilterN::RFFilterPass(vector<Mat>& joint, vector<Mat>& Psi_
void AdaptiveManifoldFilterN::computeClusters(Mat1b& cluster, Mat1b& cluster_minus, Mat1b& cluster_plus)
{
Mat difEtaSrc;
Mat1f difOrientation;
if (jointCnNum > 1)
{
vector<Mat> eta_difCn(jointCnNum);
Mat1f initVec(1, jointCnNum);
if (useRNG)
{
rnd.fill(initVec, RNG::UNIFORM, -0.5, 0.5);
}
else
{
for (int i = 0; i < (int)initVec.total(); i++)
initVec(0, i) = (i % 2 == 0) ? 0.5f : -0.5f;
}
vector<Mat> difEtaSrc(jointCnNum);
for (int i = 0; i < jointCnNum; i++)
subtract(jointCn[i], etaFull[i], eta_difCn[i]);
subtract(jointCn[i], etaFull[i], difEtaSrc[i]);
merge(eta_difCn, difEtaSrc);
difEtaSrc = difEtaSrc.reshape(1, (int)difEtaSrc.total());
CV_DbgAssert(difEtaSrc.cols == jointCnNum);
}
Mat1f eigenVec(1, jointCnNum);
computeEigenVector(difEtaSrc, cluster, eigenVec, num_pca_iterations_, initVec);
Mat1f initVec(1, jointCnNum);
if (useRNG)
{
rnd.fill(initVec, RNG::UNIFORM, -0.5, 0.5);
computeOrientation(difEtaSrc, eigenVec, difOrientation);
CV_DbgAssert(difOrientation.size() == srcSize);
}
else
{
for (int i = 0; i < (int)initVec.total(); i++)
initVec(0, i) = (i % 2 == 0) ? 0.5f : -0.5f;
subtract(jointCn[0], etaFull[0], difOrientation);
}
Mat1f eigenVec(1, jointCnNum);
computeEigenVector(difEtaSrc, cluster, eigenVec, num_pca_iterations_, initVec);
Mat1f difOrientation;
gemm(difEtaSrc, eigenVec, 1, noArray(), 0, difOrientation, GEMM_2_T);
difOrientation = difOrientation.reshape(1, srcSize.height);
CV_DbgAssert(difOrientation.size() == srcSize);
compare(difOrientation, 0, cluster_minus, CMP_LT);
bitwise_and(cluster_minus, cluster, cluster_minus);
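Above, compare and bitwise_and extract the sub-cluster whose projection onto the dominant eigenvector is negative; the plus side is handled symmetrically. A compact sketch of the whole split (names and the exact >=/< boundary are illustrative):

#include <opencv2/core.hpp>

static void splitBySign(const cv::Mat1f& proj, const cv::Mat1b& cluster,
                        cv::Mat1b& clusterMinus, cv::Mat1b& clusterPlus)
{
    // Cluster pixels with a negative projection form the minus part...
    cv::compare(proj, 0, clusterMinus, cv::CMP_LT);
    cv::bitwise_and(clusterMinus, cluster, clusterMinus);
    // ...and the remaining cluster pixels form the plus part.
    cv::compare(proj, 0, clusterPlus, cv::CMP_GE);
    cv::bitwise_and(clusterPlus, cluster, clusterPlus);
}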
@@ -721,59 +744,101 @@ void AdaptiveManifoldFilterN::computeEta(Mat& teta, Mat1b& cluster, vector<Mat>&
}
}
void computeEigenVector(const Mat1f& X, const Mat1b& mask, Mat1f& dst, int num_pca_iterations, const Mat1f& rand_vec)
void AdaptiveManifoldFilterN::computeEigenVector(const vector<Mat>& X, const Mat1b& mask, Mat1f& vecDst, int num_pca_iterations, const Mat1f& vecRand)
{
CV_DbgAssert( X.cols == rand_vec.cols );
CV_DbgAssert( X.rows == mask.size().area() );
CV_DbgAssert( rand_vec.rows == 1 );
dst.create(rand_vec.size());
rand_vec.copyTo(dst);
Mat1f t(X.size());
int cnNum = (int)X.size();
int height = X[0].rows;
int width = X[0].cols;
float* dst_row = dst[0];
vecDst.create(1, cnNum);
CV_Assert(vecRand.size() == Size(cnNum, 1) && vecDst.size() == Size(cnNum, 1));
CV_Assert(mask.rows == height && mask.cols == width);
const float *pVecRand = vecRand.ptr<float>();
Mat1d vecDstd(1, cnNum, 0.0);
double *pVecDst = vecDstd.ptr<double>();
Mat1f Xw(height, width);
for (int i = 0; i < num_pca_iterations; ++i)
for (int iter = 0; iter < num_pca_iterations; iter++)
{
t.setTo(Scalar::all(0));
for (int y = 0, ind = 0; y < mask.rows; ++y)
for (int i = 0; i < height; i++)
{
const uchar* mask_row = mask[y];
const uchar *maskRow = mask.ptr<uchar>(i);
float *mulRow = Xw.ptr<float>(i);
for (int x = 0; x < mask.cols; ++x, ++ind)
//first multiplication: Xw(i,j) = dot product of pixel (i,j) with the vector
for (int cn = 0; cn < cnNum; cn++)
{
if (mask_row[x])
const float *srcRow = X[cn].ptr<float>(i);
const float cnVal = pVecRand[cn];
if (cn == 0)
{
for (int j = 0; j < width; j++)
mulRow[j] = cnVal*srcRow[j];
}
else
{
const float* X_row = X[ind];
float* t_row = t[ind];
for (int j = 0; j < width; j++)
mulRow[j] += cnVal*srcRow[j];
}
}
float dots = 0.0;
for (int c = 0; c < X.cols; ++c)
dots += dst_row[c] * X_row[c];
for (int j = 0; j < width; j++)
if (!maskRow[j]) mulRow[j] = 0.0f;
for (int c = 0; c < X.cols; ++c)
t_row[c] = dots * X_row[c];
}
//second multiplication: accumulate per-channel sums of Xw against X (an X^T*(X*v) step)
for (int cn = 0; cn < cnNum; cn++)
{
float curCnSum = 0.0f;
const float *srcRow = X[cn].ptr<float>(i);
for (int j = 0; j < width; j++)
curCnSum += mulRow[j]*srcRow[j];
//TODO: parallel reduce
pVecDst[cn] += curCnSum;
}
}
}
divide(vecDstd, norm(vecDstd), vecDst);
}
dst.setTo(0.0);
for (int k = 0; k < X.rows; ++k)
void AdaptiveManifoldFilterN::computeOrientation(const vector<Mat>& X, const Mat1f& vec, Mat1f& dst)
{
int height = X[0].rows;
int width = X[0].cols;
int cnNum = (int)X.size();
dst.create(height, width);
CV_DbgAssert(vec.rows == 1 && vec.cols == cnNum);
const float *pVec = vec.ptr<float>();
for (int i = 0; i < height; i++)
{
float *dstRow = dst.ptr<float>(i);
for (int cn = 0; cn < cnNum; cn++)
{
const float* t_row = t[k];
const float *srcRow = X[cn].ptr<float>(i);
const float cnVal = pVec[cn];
for (int c = 0; c < X.cols; ++c)
if (cn == 0)
{
dst_row[c] += t_row[c];
for (int j = 0; j < width; j++)
dstRow[j] = cnVal*srcRow[j];
}
else
{
for (int j = 0; j < width; j++)
dstRow[j] += cnVal*srcRow[j];
}
}
}
double n = norm(dst);
divide(dst, n, dst);
}
}
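Conceptually, computeEigenVector runs a power iteration: with X the n x c matrix of masked pixel differences (one row per pixel, one column per channel), the dominant eigenvector of X^T X emerges from repeated multiplication and renormalization, and computeOrientation then projects every pixel onto that vector. A dense-matrix sketch of the same idea (the per-channel loops above compute these products without materializing X as one big matrix):

#include <opencv2/core.hpp>

static cv::Mat1f dominantEigenVector(const cv::Mat1f& X, const cv::Mat1f& v0, int iters)
{
    cv::Mat1f v = v0.clone(); // 1 x c starting vector (e.g. random or alternating +-0.5)
    for (int it = 0; it < iters; it++)
    {
        cv::Mat1f Xv, XtXv;
        cv::gemm(X, v, 1, cv::noArray(), 0, Xv, cv::GEMM_2_T);    // n x 1: project each pixel
        cv::gemm(Xv, X, 1, cv::noArray(), 0, XtXv, cv::GEMM_1_T); // 1 x c: accumulate back
        v = XtXv / cv::norm(XtXv);                                // renormalize
    }
    return v;
}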
@@ -54,19 +54,21 @@ static string getOpenCVExtraDir()
return cvtest::TS::ptr()->get_data_path();
}
static void checkSimilarity(InputArray res, InputArray ref)
static void checkSimilarity(InputArray res, InputArray ref, double maxNormInf = 1, double maxNormL2 = 1.0 / 64)
{
double normInf = cvtest::norm(res, ref, NORM_INF);
double normL2 = cvtest::norm(res, ref, NORM_L2) / res.total();
EXPECT_LE(normInf, 1);
EXPECT_LE(normL2, 1.0 / 64);
if (maxNormInf >= 0) EXPECT_LE(normInf, maxNormInf);
if (maxNormL2 >= 0) EXPECT_LE(normL2, maxNormL2);
}
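With the added defaults existing call sites stay unchanged, while a test can now relax a single bound or pass a negative threshold to skip that check entirely, e.g.:

checkSimilarity(res, resRef);               // default bounds: inf <= 1, L2/total() <= 1/64
checkSimilarity(res, resRef, 25);           // relax only the inf-norm bound
checkSimilarity(res, resRef, -1, 1.0 / 64); // negative value disables the inf-norm check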
TEST(AdaptiveManifoldTest, SplatSurfaceAccuracy)
{
RNG rnd(0);
cv::setNumThreads(cv::getNumberOfCPUs());
for (int i = 0; i < 10; i++)
{
Size sz(rnd.uniform(512, 1024), rnd.uniform(512, 1024));
@@ -126,6 +128,8 @@ TEST(AdaptiveManifoldTest, AuthorsReferenceAccuracy)
Mat srcImg = imread(getOpenCVExtraDir() + srcImgPath);
ASSERT_TRUE(!srcImg.empty());
cv::setNumThreads(cv::getNumberOfCPUs());
for (int i = 0; i < 3; i++)
{
Mat refRes = imread(getOpenCVExtraDir() + refPaths[i]);
@@ -190,14 +194,19 @@ TEST_P(AdaptiveManifoldRefImplTest, RefImplAccuracy)
double sigma_r = rnd.uniform(0.1, 0.9);
bool adjust_outliers = (iter % 2 == 0);
cv::setNumThreads(cv::getNumberOfCPUs());
Mat res;
amFilter(guide, src, res, sigma_s, sigma_r, adjust_outliers);
cv::setNumThreads(1);
Mat resRef;
Ptr<AdaptiveManifoldFilter> amf = createAMFilterRefImpl(sigma_s, sigma_r, adjust_outliers);
amf->filter(src, resRef, guide);
checkSimilarity(res, resRef);
//results of the reference implementation may differ in small isolated regions when sigma_s is small,
//due to the limited accuracy of single-precision floating-point arithmetic;
//therefore the inf-norm threshold was increased
checkSimilarity(res, resRef, 25);
}
}