Commit de774825 authored by Alexander Alekhin

Merge pull request #2474 from alalek:stereo_fix

parents 4d57438c f7c14d90
set(the_description "Stereo Correspondence")
ocv_define_module(stereo opencv_imgproc opencv_features2d opencv_core opencv_calib3d)
ocv_define_module(stereo opencv_core opencv_imgproc opencv_calib3d)
......@@ -45,10 +45,7 @@
#define __OPENCV_STEREO_HPP__
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/core/affine.hpp"
#include "opencv2/stereo/descriptor.hpp"
#include "opencv2/stereo/matching.hpp"
/**
@defgroup stereo Stereo Correspondence Algorithms
......@@ -61,8 +58,6 @@ namespace cv
{
//! @addtogroup stereo
//! @{
// void correctMatches( InputArray F, InputArray points1, InputArray points2,
// OutputArray newPoints1, OutputArray newPoints2 );
/** @brief Filters off small noise blobs (speckles) in the disparity map
@param img The input 16-bit signed disparity image
@param newVal The disparity value used to paint-off the speckles
......
//By downloading, copying, installing or using the software you agree to this license.
//If you do not agree to this license, do not download, install,
//copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
// (3-clause BSD License)
//
//Copyright (C) 2000-2015, Intel Corporation, all rights reserved.
//Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
//Copyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.
//Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
//Copyright (C) 2015, OpenCV Foundation, all rights reserved.
//Copyright (C) 2015, Itseez Inc., all rights reserved.
//Third party copyrights are property of their respective owners.
//
//Redistribution and use in source and binary forms, with or without modification,
//are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the names of the copyright holders nor the names of the contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
//This software is provided by the copyright holders and contributors "as is" and
//any express or implied warranties, including, but not limited to, the implied
//warranties of merchantability and fitness for a particular purpose are disclaimed.
//In no event shall copyright holders or contributors be liable for any direct,
//indirect, incidental, special, exemplary, or consequential damages
//(including, but not limited to, procurement of substitute goods or services;
//loss of use, data, or profits; or business interruption) however caused
//and on any theory of liability, whether in contract, strict liability,
//or tort (including negligence or otherwise) arising in any way out of
//the use of this software, even if advised of the possibility of such damage.
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*****************************************************************************************************************\
* The interface contains the main descriptors that will be implemented in the descriptor class *
\*****************************************************************************************************************/
#ifndef _OPENCV_STEREO_DESCRIPTOR_HPP_
#define _OPENCV_STEREO_DESCRIPTOR_HPP_
#include <stdint.h>
#ifndef _OPENCV_DESCRIPTOR_HPP_
#define _OPENCV_DESCRIPTOR_HPP_
#ifdef __cplusplus
namespace cv { namespace stereo {
namespace cv
{
namespace stereo
{
//types of supported kernels
enum {
CV_DENSE_CENSUS, CV_SPARSE_CENSUS,
CV_CS_CENSUS, CV_MODIFIED_CS_CENSUS, CV_MODIFIED_CENSUS_TRANSFORM,
CV_MEAN_VARIATION, CV_STAR_KERNEL
};
//!Mean Variation is a robust kernel that compares a pixel
//!not just with the center but also with the mean of the window
template<int num_images>
struct MVKernel
{
uint8_t *image[num_images];
int *integralImage[num_images];
int stop;
MVKernel(){}
MVKernel(uint8_t **images, int **integral)
{
for(int i = 0; i < num_images; i++)
{
image[i] = images[i];
integralImage[i] = integral[i];
}
stop = num_images;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
CV_UNUSED(w2);
for (int i = 0; i < stop; i++)
{
if (image[i][rrWidth + jj] > image[i][rWidth + j])
{
c[i] = c[i] + 1;
}
c[i] = c[i] << 1;
if (integralImage[i][rrWidth + jj] > image[i][rWidth + j])
{
c[i] = c[i] + 1;
}
c[i] = c[i] << 1;
}
}
};
//!Compares pixels from a patch giving high weights to pixels in which
//!the intensity is higher. The other pixels receive a lower weight
template <int num_images>
struct MCTKernel
{
uint8_t *image[num_images];
int t,imageStop;
MCTKernel(){}
MCTKernel(uint8_t ** images, int threshold)
{
for(int i = 0; i < num_images; i++)
{
image[i] = images[i];
}
imageStop = num_images;
t = threshold;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
CV_UNUSED(w2);
for(int i = 0; i < imageStop; i++)
{
if (image[i][rrWidth + jj] > image[i][rWidth + j] - t)
{
c[i] = c[i] << 1;
c[i] = c[i] + 1;
c[i] = c[i] << 1;
c[i] = c[i] + 1;
}
else if (image[i][rWidth + j] - t < image[i][rrWidth + jj] && image[i][rWidth + j] + t >= image[i][rrWidth + jj])
{
c[i] = c[i] << 2;
c[i] = c[i] + 1;
}
else
{
c[i] <<= 2;
}
}
}
};
//!A modified CS census that compares a pixel with its immediate neighbour starting
//!from the center
template<int num_images>
struct ModifiedCsCensus
{
uint8_t *image[num_images];
int n2;
int imageStop;
ModifiedCsCensus(){}
ModifiedCsCensus(uint8_t **images, int ker)
{
for(int i = 0; i < num_images; i++)
image[i] = images[i];
imageStop = num_images;
n2 = ker;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
CV_UNUSED(j);
CV_UNUSED(rWidth);
for(int i = 0; i < imageStop; i++)
{
if (image[i][(rrWidth + jj)] > image[i][(w2 + (jj + n2))])
{
c[i] = c[i] + 1;
}
c[i] = c[i] * 2;
}
}
};
//!A kernel in which a pixel is compared with the center of the window
template<int num_images>
struct CensusKernel
{
uint8_t *image[num_images];
int imageStop;
CensusKernel(){}
CensusKernel(uint8_t **images)
{
for(int i = 0; i < num_images; i++)
image[i] = images[i];
imageStop = num_images;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
CV_UNUSED(w2);
for(int i = 0; i < imageStop; i++)
{
////compare a pixel with the center from the kernel
if (image[i][rrWidth + jj] > image[i][rWidth + j])
{
c[i] += 1;
}
c[i] <<= 1;
}
}
};
//template class which efficiently combines the descriptors
template <int step_start, int step_end, int step_inc,int nr_img, typename Kernel>
class CombinedDescriptor:public ParallelLoopBody
{
private:
int width, height,n2;
int stride_;
int *dst[nr_img];
Kernel kernel_;
int n2_stop;
public:
CombinedDescriptor(int w, int h,int stride, int k2, int **distance, Kernel kernel,int k2Stop)
{
width = w;
height = h;
n2 = k2;
stride_ = stride;
for(int i = 0; i < nr_img; i++)
dst[i] = distance[i];
kernel_ = kernel;
n2_stop = k2Stop;
}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i <= r.end ; i++)
{
int rWidth = i * stride_;
for (int j = n2 + 2; j <= width - n2 - 2; j++)
{
int c[nr_img];
memset(c, 0, sizeof(c[0]) * nr_img);
for(int step = step_start; step <= step_end; step += step_inc)
{
for (int ii = - n2; ii <= + n2_stop; ii += step)
{
int rrWidth = (ii + i) * stride_;
int rrWidthC = (ii + i + n2) * stride_;
for (int jj = j - n2; jj <= j + n2; jj += step)
{
if (ii != i || jj != j)
{
kernel_(rrWidth,rrWidthC, rWidth, jj, j,c);
}
}
}
}
for(int l = 0; l < nr_img; l++)
dst[l][rWidth + j] = c[l];
}
}
}
};
//!calculate the mean of every windowSize x windowSize block from the integral image
//!this is a preprocessing step for the MV kernel
class MeanKernelIntegralImage : public ParallelLoopBody
{
private:
int *img;
int windowSize,width;
float scalling;
int *c;
public:
MeanKernelIntegralImage(const cv::Mat &image, int window,float scale, int *cost):
img((int *)image.data),windowSize(window) ,width(image.cols) ,scalling(scale) , c(cost){};
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i <= r.end; i++)
{
int iw = i * width;
for (int j = windowSize + 1; j <= width - windowSize - 1; j++)
{
c[iw + j] = (int)((img[(i + windowSize - 1) * width + j + windowSize - 1] + img[(i - windowSize - 1) * width + j - windowSize - 1]
- img[(i + windowSize) * width + j - windowSize] - img[(i - windowSize) * width + j + windowSize]) * scalling);
}
}
}
};
//!implementation for the star kernel descriptor
template<int num_images>
class StarKernelCensus:public ParallelLoopBody
{
private:
uint8_t *image[num_images];
int *dst[num_images];
int n2, width, height, im_num,stride_;
public:
StarKernelCensus(const cv::Mat *img, int k2, int **distance)
{
for(int i = 0; i < num_images; i++)
{
image[i] = img[i].data;
dst[i] = distance[i];
}
n2 = k2;
width = img[0].cols;
height = img[0].rows;
im_num = num_images;
stride_ = (int)img[0].step;
}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i <= r.end ; i++)
{
int rWidth = i * stride_;
for (int j = n2; j <= width - n2; j++)
{
for(int d = 0 ; d < im_num; d++)
{
int c = 0;
for(int step = 4; step > 0; step--)
{
for (int ii = i - step; ii <= i + step; ii += step)
{
int rrWidth = ii * stride_;
for (int jj = j - step; jj <= j + step; jj += step)
{
if (image[d][rrWidth + jj] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
}
for (int ii = -1; ii <= +1; ii++)
{
int rrWidth = (ii + i) * stride_;
if (i == -1)
{
if (ii + i != i)
{
if (image[d][rrWidth + j] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
else if (i == 0)
{
for (int j2 = -1; j2 <= 1; j2 += 2)
{
if (ii + i != i)
{
if (image[d][rrWidth + j + j2] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
}
else
{
if (ii + i != i)
{
if (image[d][rrWidth + j] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
}
dst[d][rWidth + j] = c;
}
}
}
}
};
//!parallel implementation of the center symmetric census
template <int num_images>
class SymetricCensus:public ParallelLoopBody
{
private:
uint8_t *image[num_images];
int *dst[num_images];
int n2, width, height, im_num,stride_;
public:
SymetricCensus(const cv::Mat *img, int k2, int **distance)
{
for(int i = 0; i < num_images; i++)
{
image[i] = img[i].data;
dst[i] = distance[i];
}
n2 = k2;
width = img[0].cols;
height = img[0].rows;
im_num = num_images;
stride_ = (int)img[0].step;
}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i <= r.end ; i++)
{
int distV = i*stride_;
for (int j = n2; j <= width - n2; j++)
{
for(int d = 0; d < im_num; d++)
{
int c = 0;
//the classic center symmetric census which compares the current pixel with its symmetric counterpart, not the center.
for (int ii = -n2; ii <= 0; ii++)
{
int rrWidth = (ii + i) * stride_;
for (int jj = -n2; jj <= +n2; jj++)
{
if (image[d][(rrWidth + (jj + j))] > image[d][((ii * (-1) + i) * width + (-1 * jj) + j)])
{
c = c + 1;
}
c = c * 2;
if(ii == 0 && jj < 0)
{
if (image[d][(i * width + (jj + j))] > image[d][(i * width + (-1 * jj) + j)])
{
c = c + 1;
}
c = c * 2;
}
}
}
dst[d][(distV + j)] = c;
}
}
}
}
};
/**
Two variations of census applied on the input images
Implementation of a census transform which takes into account just some of the pixels from the census kernel, thus allowing for larger block sizes
**/
//void applyCensusOnImages(const cv::Mat &im1,const cv::Mat &im2, int kernelSize, cv::Mat &dist, cv::Mat &dist2, const int type);
CV_EXPORTS void censusTransform(const cv::Mat &image1, const cv::Mat &image2, int kernelSize, cv::Mat &dist1, cv::Mat &dist2, const int type);
//single image census transform
CV_EXPORTS void censusTransform(const cv::Mat &image1, int kernelSize, cv::Mat &dist1, const int type);
/**
STANDARD_MCT - Modified census which stores 2 bits per pixel and includes a tolerance in the pixel comparison
MCT_MEAN_VARIATION - Implementation of a modified census transform which also takes into account the variation from the mean of the window, not just the center pixel
**/
CV_EXPORTS void modifiedCensusTransform(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist1,cv::Mat &dist2, const int type, int t = 0 , const cv::Mat &IntegralImage1 = cv::Mat::zeros(100,100,CV_8UC1), const cv::Mat &IntegralImage2 = cv::Mat::zeros(100,100,CV_8UC1));
//single version of modified census transform descriptor
CV_EXPORTS void modifiedCensusTransform(const cv::Mat &img1, int kernelSize, cv::Mat &dist, const int type, int t = 0 ,const cv::Mat &IntegralImage = cv::Mat::zeros(100,100,CV_8UC1));
/**The classical center symmetric census
A modified version of the CS census which compares a pixel with its counterpart mirrored about the center
**/
CV_EXPORTS void symetricCensusTransform(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist1, cv::Mat &dist2, const int type);
//single version of census transform
CV_EXPORTS void symetricCensusTransform(const cv::Mat &img1, int kernelSize, cv::Mat &dist1, const int type);
//in a 9x9 kernel only certain positions are chosen
CV_EXPORTS void starCensusTransform(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist1,cv::Mat &dist2);
//single image version of star kernel
CV_EXPORTS void starCensusTransform(const cv::Mat &img1, int kernelSize, cv::Mat &dist);
//integral image computation used in the Mean Variation Census Transform
void imageMeanKernelSize(const cv::Mat &img, int windowSize, cv::Mat &c);
}
}
#endif
// FIXIT deprecate and remove CV_ prefix
/// types of supported kernels
enum {
CV_DENSE_CENSUS, CV_SPARSE_CENSUS,
CV_CS_CENSUS, CV_MODIFIED_CS_CENSUS, CV_MODIFIED_CENSUS_TRANSFORM,
CV_MEAN_VARIATION, CV_STAR_KERNEL
};
/**
Two variations of census applied on the input images
Implementation of a census transform which takes into account just some of the pixels from the census kernel, thus allowing for larger block sizes
**/
CV_EXPORTS void censusTransform(const Mat &image1, const Mat &image2, int kernelSize, Mat &dist1, Mat &dist2, const int type);
/// single image census transform
CV_EXPORTS void censusTransform(const Mat &image1, int kernelSize, Mat &dist1, const int type);
/**
STANDARD_MCT - Modified census which stores 2 bits per pixel and includes a tolerance in the pixel comparison
MCT_MEAN_VARIATION - Implementation of a modified census transform which also takes into account the variation from the mean of the window, not just the center pixel
**/
CV_EXPORTS void modifiedCensusTransform(const Mat &img1, const Mat &img2, int kernelSize, Mat &dist1, Mat &dist2, const int type, int t = 0, const Mat &integralImage1 = Mat(), const Mat &integralImage2 = Mat());
///single version of modified census transform descriptor
CV_EXPORTS void modifiedCensusTransform(const Mat &img1, int kernelSize, Mat &dist, const int type, int t = 0, const Mat &integralImage = Mat());
/**The classical center symmetric census
A modified version of the CS census which compares a pixel with its counterpart mirrored about the center
**/
CV_EXPORTS void symetricCensusTransform(const Mat &img1, const Mat &img2, int kernelSize, Mat &dist1, Mat &dist2, const int type);
///single version of census transform
CV_EXPORTS void symetricCensusTransform(const Mat &img1, int kernelSize, Mat &dist1, const int type);
///in a 9x9 kernel only certain positions are chosen
CV_EXPORTS void starCensusTransform(const Mat &img1, const Mat &img2, int kernelSize, Mat &dist1, Mat &dist2);
///single image version of star kernel
CV_EXPORTS void starCensusTransform(const Mat &img1, int kernelSize, Mat &dist);
}} // namespace
#endif
/*End of file*/
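For orientation, a minimal usage sketch of the census descriptor API declared in this header; the image file names, the use of the umbrella header "opencv2/stereo.hpp", and the choice of preallocated CV_32SC1 result buffers are assumptions for illustration, not taken from this diff.

#include <opencv2/imgcodecs.hpp>
#include <opencv2/stereo.hpp>

int main()
{
    // Assumed file names; the descriptor declarations are assumed to be reachable
    // through the module header "opencv2/stereo.hpp".
    cv::Mat left  = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
    cv::Mat right = cv::imread("right.png", cv::IMREAD_GRAYSCALE);
    CV_Assert(!left.empty() && !right.empty() && left.size() == right.size());
    // The implementation writes through (int*)dist.data, so the result buffers are
    // preallocated here as 32-bit integer images of the same size (an assumption).
    cv::Mat censusLeft(left.size(), CV_32SC1), censusRight(right.size(), CV_32SC1);
    cv::stereo::censusTransform(left, right, 9, censusLeft, censusRight,
                                cv::stereo::CV_SPARSE_CENSUS);
    return 0;
}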
......@@ -64,7 +64,9 @@ PERF_TEST_P( s_bm, sgm_perf,
Mat out1(sz, sdepth);
Ptr<StereoBinarySGBM> sgbm = StereoBinarySGBM::create(0, 16, 5);
sgbm->setBinaryKernelType(CV_DENSE_CENSUS);
declare.in(left, WARMUP_RNG)
declare
.in(left, WARMUP_RNG)
.in(right, WARMUP_RNG)
.out(out1)
.time(0.1)
.iterations(20);
......@@ -72,7 +74,7 @@ PERF_TEST_P( s_bm, sgm_perf,
{
sgbm->compute(left, right, out1);
}
SANITY_CHECK(out1);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P( s_bm, bm_perf,
testing::Combine(
......@@ -103,7 +105,9 @@ PERF_TEST_P( s_bm, bm_perf,
sbm->setSpekleRemovalTechnique(CV_SPECKLE_REMOVAL_AVG_ALGORITHM);
sbm->setUsePrefilter(false);
declare.in(left, WARMUP_RNG)
declare
.in(left, WARMUP_RNG)
.in(right, WARMUP_RNG)
.out(out1)
.time(0.1)
.iterations(20);
......@@ -111,7 +115,7 @@ PERF_TEST_P( s_bm, bm_perf,
{
sbm->compute(left, right, out1);
}
SANITY_CHECK(out1);
SANITY_CHECK_NOTHING();
}
......
......@@ -66,7 +66,7 @@ PERF_TEST_P( descript_params, census_sparse_descriptor,
{
censusTransform(left,9,out1,CV_SPARSE_CENSUS);
}
SANITY_CHECK(out1);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P( descript_params, star_census_transform,
testing::Combine(
......@@ -88,7 +88,7 @@ PERF_TEST_P( descript_params, star_census_transform,
{
starCensusTransform(left,9,out1);
}
SANITY_CHECK(out1);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P( descript_params, modified_census_transform,
testing::Combine(
......@@ -112,7 +112,7 @@ PERF_TEST_P( descript_params, modified_census_transform,
{
modifiedCensusTransform(left,9,out1,CV_MODIFIED_CENSUS_TRANSFORM);
}
SANITY_CHECK(out1);
SANITY_CHECK_NOTHING();
}
PERF_TEST_P( descript_params, center_symetric_census,
testing::Combine(
......@@ -136,7 +136,7 @@ PERF_TEST_P( descript_params, center_symetric_census,
{
symetricCensusTransform(left,7,out1,CV_CS_CENSUS);
}
SANITY_CHECK(out1);
SANITY_CHECK_NOTHING();
}
......
......@@ -6,9 +6,6 @@
#include "opencv2/ts.hpp"
#include "opencv2/stereo.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/calib3d.hpp"
namespace opencv_test {
using namespace cv::stereo;
......
......@@ -64,12 +64,12 @@ namespace cv
int stride = (int)image1.step;
if(type == CV_DENSE_CENSUS)
{
parallel_for_( Range(n2, image1.rows - n2),
parallel_for_(Range(0, image1.rows),
CombinedDescriptor<1,1,1,2,CensusKernel<2> >(image1.cols, image1.rows,stride,n2,costs,CensusKernel<2>(images),n2));
}
else if(type == CV_SPARSE_CENSUS)
{
parallel_for_( Range(n2, image1.rows - n2),
parallel_for_(Range(0, image1.rows),
CombinedDescriptor<2,2,1,2,CensusKernel<2> >(image1.cols, image1.rows, stride,n2,costs,CensusKernel<2>(images),n2));
}
}
......@@ -87,12 +87,12 @@ namespace cv
int stride = (int)image1.step;
if(type == CV_DENSE_CENSUS)
{
parallel_for_( Range(n2, image1.rows - n2),
parallel_for_(Range(0, image1.rows),
CombinedDescriptor<1,1,1,1,CensusKernel<1> >(image1.cols, image1.rows,stride,n2,costs,CensusKernel<1>(images),n2));
}
else if(type == CV_SPARSE_CENSUS)
{
parallel_for_( Range(n2, image1.rows - n2),
parallel_for_(Range(0, image1.rows),
CombinedDescriptor<2,2,1,1,CensusKernel<1> >(image1.cols, image1.rows,stride,n2,costs,CensusKernel<1>(images),n2));
}
}
......@@ -106,7 +106,7 @@ namespace cv
int n2 = (kernelSize) >> 1;
Mat images[] = {img1, img2};
int *date[] = { (int *)dist1.data, (int *)dist2.data};
parallel_for_(Range(n2, img1.rows - n2), StarKernelCensus<2>(images, n2,date));
parallel_for_(Range(0, img1.rows), StarKernelCensus<2>(images, n2,date));
}
//single version of star census
CV_EXPORTS void starCensusTransform(const Mat &img1, int kernelSize, Mat &dist)
......@@ -118,14 +118,14 @@ namespace cv
int n2 = (kernelSize) >> 1;
Mat images[] = {img1};
int *date[] = { (int *)dist.data};
parallel_for_(Range(n2, img1.rows - n2), StarKernelCensus<1>(images, n2,date));
parallel_for_(Range(0, img1.rows), StarKernelCensus<1>(images, n2,date));
}
//Modified census transforms
//the first one deals with small illumination changes
//the second modified census transform is invariant to noise; i.e.
//if the current pixel with which we are doing the comparison is noise, this descriptor will provide a better result by comparing with the mean of the window
//otherwise, if the pixel is not noise, the information is strengthened
CV_EXPORTS void modifiedCensusTransform(const Mat &img1, const Mat &img2, int kernelSize, Mat &dist1,Mat &dist2, const int type, int t, const Mat &IntegralImage1, const Mat &IntegralImage2 )
CV_EXPORTS void modifiedCensusTransform(const Mat &img1, const Mat &img2, int kernelSize, Mat &dist1,Mat &dist2, const int type, int t, const Mat& integralImage1, const Mat& integralImage2)
{
CV_Assert(img1.size() == img2.size());
CV_Assert(kernelSize % 2 != 0);
......@@ -139,20 +139,31 @@ namespace cv
if(type == CV_MODIFIED_CENSUS_TRANSFORM)
{
//MCT
parallel_for_( Range(n2, img1.rows - n2),
parallel_for_(Range(0, img1.rows),
CombinedDescriptor<2,4,2, 2,MCTKernel<2> >(img1.cols, img1.rows,stride,n2,date,MCTKernel<2>(images,t),n2));
}
else if(type == CV_MEAN_VARIATION)
{
//MV
int *integral[2];
integral[0] = (int *)IntegralImage1.data;
integral[1] = (int *)IntegralImage2.data;
parallel_for_( Range(n2, img1.rows - n2),
CV_Assert(!integralImage1.empty());
CV_Assert(!integralImage1.isContinuous());
CV_CheckTypeEQ(integralImage1.type(), CV_32SC1, "");
CV_CheckGE(integralImage1.cols, img1.cols, "");
CV_CheckGE(integralImage1.rows, img1.rows, "");
CV_Assert(!integralImage2.empty());
CV_Assert(!integralImage2.isContinuous());
CV_CheckTypeEQ(integralImage2.type(), CV_32SC1, "");
CV_CheckGE(integralImage2.cols, img2.cols, "");
CV_CheckGE(integralImage2.rows, img2.rows, "");
int *integral[2] = {
(int *)integralImage1.data,
(int *)integralImage2.data
};
parallel_for_(Range(0, img1.rows),
CombinedDescriptor<2,3,2,2, MVKernel<2> >(img1.cols, img1.rows,stride,n2,date,MVKernel<2>(images,integral),n2));
}
}
CV_EXPORTS void modifiedCensusTransform(const Mat &img1, int kernelSize, Mat &dist, const int type, int t , Mat const &IntegralImage)
CV_EXPORTS void modifiedCensusTransform(const Mat &img1, int kernelSize, Mat &dist, const int type, int t , Mat const &integralImage)
{
CV_Assert(img1.size() == dist.size());
CV_Assert(kernelSize % 2 != 0);
......@@ -166,14 +177,19 @@ namespace cv
if(type == CV_MODIFIED_CENSUS_TRANSFORM)
{
//MCT
parallel_for_(Range(n2, img1.rows - n2),
parallel_for_(Range(0, img1.rows),
CombinedDescriptor<2,4,2, 1,MCTKernel<1> >(img1.cols, img1.rows,stride,n2,date,MCTKernel<1>(images,t),n2));
}
else if(type == CV_MEAN_VARIATION)
{
//MV
int *integral[] = { (int *)IntegralImage.data};
parallel_for_(Range(n2, img1.rows - n2),
CV_Assert(!integralImage.empty());
CV_Assert(!integralImage.isContinuous());
CV_CheckTypeEQ(integralImage.type(), CV_32SC1, "");
CV_CheckGE(integralImage.cols, img1.cols, "");
CV_CheckGE(integralImage.rows, img1.rows, "");
int *integral[] = { (int *)integralImage.data};
parallel_for_(Range(0, img1.rows),
CombinedDescriptor<2,3,2,1, MVKernel<1> >(img1.cols, img1.rows,stride,n2,date,MVKernel<1>(images,integral),n2));
}
}
......@@ -193,11 +209,11 @@ namespace cv
int stride = (int)img1.step;
if(type == CV_CS_CENSUS)
{
parallel_for_(Range(n2, img1.rows - n2), SymetricCensus<2>(imag, n2,date));
parallel_for_(Range(0, img1.rows), SymetricCensus<2>(imag, n2,date));
}
else if(type == CV_MODIFIED_CS_CENSUS)
{
parallel_for_(Range(n2, img1.rows - n2),
parallel_for_(Range(0, img1.rows),
CombinedDescriptor<1,1,1,2,ModifiedCsCensus<2> >(img1.cols, img1.rows,stride,n2,date,ModifiedCsCensus<2>(images,n2),1));
}
}
......@@ -215,26 +231,13 @@ namespace cv
int stride = (int)img1.step;
if(type == CV_CS_CENSUS)
{
parallel_for_( Range(n2, img1.rows - n2), SymetricCensus<1>(imag, n2,date));
parallel_for_(Range(0, img1.rows), SymetricCensus<1>(imag, n2,date));
}
else if(type == CV_MODIFIED_CS_CENSUS)
{
parallel_for_( Range(n2, img1.rows - n2),
parallel_for_( Range(0, img1.rows),
CombinedDescriptor<1,1,1,1,ModifiedCsCensus<1> >(img1.cols, img1.rows,stride,n2,date,ModifiedCsCensus<1>(images,n2),1));
}
}
//integral image computation used in the Mean Variation Census Transform
void imageMeanKernelSize(const Mat &image, int windowSize, Mat &cost)
{
CV_Assert(!image.empty());
CV_Assert(!cost.empty());
CV_Assert(windowSize % 2 != 0);
int win = windowSize / 2;
float scalling = ((float) 1) / (windowSize * windowSize);
int height = image.rows;
cost.setTo(0);
int *c = (int *)cost.data;
parallel_for_(Range(win + 1, height - win - 1),MeanKernelIntegralImage(image,win,scalling,c));
}
}
}
//By downloading, copying, installing or using the software you agree to this license.
//If you do not agree to this license, do not download, install,
//copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
// (3-clause BSD License)
//
//Copyright (C) 2000-2015, Intel Corporation, all rights reserved.
//Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
//Copyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.
//Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
//Copyright (C) 2015, OpenCV Foundation, all rights reserved.
//Copyright (C) 2015, Itseez Inc., all rights reserved.
//Third party copyrights are property of their respective owners.
//
//Redistribution and use in source and binary forms, with or without modification,
//are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the names of the copyright holders nor the names of the contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
//This software is provided by the copyright holders and contributors "as is" and
//any express or implied warranties, including, but not limited to, the implied
//warranties of merchantability and fitness for a particular purpose are disclaimed.
//In no event shall copyright holders or contributors be liable for any direct,
//indirect, incidental, special, exemplary, or consequential damages
//(including, but not limited to, procurement of substitute goods or services;
//loss of use, data, or profits; or business interruption) however caused
//and on any theory of liability, whether in contract, strict liability,
//or tort (including negligence or otherwise) arising in any way out of
//the use of this software, even if advised of the possibility of such damage.
/*****************************************************************************************************************\
* The interface contains the main descriptors that will be implemented in the descriptor class *
\*****************************************************************************************************************/
#include <stdint.h>
#ifndef _OPENCV_DESCRIPTOR_HPP_
#define _OPENCV_DESCRIPTOR_HPP_
#ifdef __cplusplus
namespace cv
{
namespace stereo
{
//!Mean Variation is a robust kernel that compares a pixel
//!not just with the center but also with the mean of the window
template<int num_images>
struct MVKernel
{
uint8_t *image[num_images];
int *integralImage[num_images];
int stop;
MVKernel(){}
MVKernel(uint8_t **images, int **integral)
{
for(int i = 0; i < num_images; i++)
{
image[i] = images[i];
integralImage[i] = integral[i];
}
stop = num_images;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
CV_UNUSED(w2);
for (int i = 0; i < stop; i++)
{
if (image[i][rrWidth + jj] > image[i][rWidth + j])
{
c[i] = c[i] + 1;
}
c[i] = c[i] << 1;
if (integralImage[i][rrWidth + jj] > image[i][rWidth + j])
{
c[i] = c[i] + 1;
}
c[i] = c[i] << 1;
}
}
};
//!Compares pixels from a patch giving high weights to pixels in which
//!the intensity is higher. The other pixels receive a lower weight
template <int num_images>
struct MCTKernel
{
uint8_t *image[num_images];
int t,imageStop;
MCTKernel(){}
MCTKernel(uint8_t ** images, int threshold)
{
for(int i = 0; i < num_images; i++)
{
image[i] = images[i];
}
imageStop = num_images;
t = threshold;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
CV_UNUSED(w2);
for(int i = 0; i < imageStop; i++)
{
if (image[i][rrWidth + jj] > image[i][rWidth + j] - t)
{
c[i] = c[i] << 1;
c[i] = c[i] + 1;
c[i] = c[i] << 1;
c[i] = c[i] + 1;
}
else if (image[i][rWidth + j] - t < image[i][rrWidth + jj] && image[i][rWidth + j] + t >= image[i][rrWidth + jj])
{
c[i] = c[i] << 2;
c[i] = c[i] + 1;
}
else
{
c[i] <<= 2;
}
}
}
};
//!A modified CS census that compares a pixel with its immediate neighbour starting
//!from the center
template<int num_images>
struct ModifiedCsCensus
{
uint8_t *image[num_images];
int n2;
int imageStop;
ModifiedCsCensus(){}
ModifiedCsCensus(uint8_t **images, int ker)
{
for(int i = 0; i < num_images; i++)
image[i] = images[i];
imageStop = num_images;
n2 = ker;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
CV_UNUSED(j);
CV_UNUSED(rWidth);
for(int i = 0; i < imageStop; i++)
{
if (image[i][(rrWidth + jj)] > image[i][(w2 + (jj + n2))])
{
c[i] = c[i] + 1;
}
c[i] = c[i] * 2;
}
}
};
//!A kernel in which a pixel is compared with the center of the window
template<int num_images>
struct CensusKernel
{
uint8_t *image[num_images];
int imageStop;
CensusKernel(){}
CensusKernel(uint8_t **images)
{
for(int i = 0; i < num_images; i++)
image[i] = images[i];
imageStop = num_images;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
CV_UNUSED(w2);
for(int i = 0; i < imageStop; i++)
{
////compare a pixel with the center from the kernel
if (image[i][rrWidth + jj] > image[i][rWidth + j])
{
c[i] += 1;
}
c[i] <<= 1;
}
}
};
//template class which efficiently combines the descriptors
template <int step_start, int step_end, int step_inc,int nr_img, typename Kernel>
class CombinedDescriptor:public ParallelLoopBody
{
private:
int width, height,n2;
int stride_;
int *dst[nr_img];
Kernel kernel_;
int n2_stop;
public:
CombinedDescriptor(int w, int h,int stride, int k2, int **distance, Kernel kernel,int k2Stop)
{
width = w;
height = h;
n2 = k2;
stride_ = stride;
for(int i = 0; i < nr_img; i++)
dst[i] = distance[i];
kernel_ = kernel;
n2_stop = k2Stop;
}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i < r.end ; i++)
{
int rWidth = i * stride_;
for (int j = 0; j < width; j++)
{
if (i < n2 || i >= height - n2 || j < n2 + 2 || j >= width - n2 - 2)
{
for(int l = 0; l < nr_img; l++)
dst[l][rWidth + j] = 0; // TODO out of range value?
continue;
}
int c[nr_img];
memset(c, 0, sizeof(c[0]) * nr_img);
for(int step = step_start; step <= step_end; step += step_inc)
{
for (int ii = - n2; ii <= + n2_stop; ii += step)
{
int rrWidth = (ii + i) * stride_;
int rrWidthC = (ii + i + n2) * stride_;
for (int jj = j - n2; jj <= j + n2; jj += step)
{
if (ii != i || jj != j)
{
kernel_(rrWidth,rrWidthC, rWidth, jj, j,c);
}
}
}
}
for(int l = 0; l < nr_img; l++)
dst[l][rWidth + j] = c[l];
}
}
}
};
//!implementation for the star kernel descriptor
template<int num_images>
class StarKernelCensus:public ParallelLoopBody
{
private:
uint8_t *image[num_images];
int *dst[num_images];
int n2, width, height, im_num,stride_;
public:
StarKernelCensus(const cv::Mat *img, int k2, int **distance)
{
for(int i = 0; i < num_images; i++)
{
image[i] = img[i].data;
dst[i] = distance[i];
}
n2 = k2;
width = img[0].cols;
height = img[0].rows;
im_num = num_images;
stride_ = (int)img[0].step;
}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i < r.end; i++)
{
int rWidth = i * stride_;
for (int j = 0; j < width; j++)
{
for(int d = 0 ; d < im_num; d++)
{
if (i < n2 || i >= height - n2 || j < n2 || j >= width - n2)
{
dst[d][rWidth + j] = 0; // TODO out of range value?
continue;
}
int c = 0;
for(int step = 4; step > 0; step--)
{
for (int ii = i - step; ii <= i + step; ii += step)
{
int rrWidth = ii * stride_;
for (int jj = j - step; jj <= j + step; jj += step)
{
if (image[d][rrWidth + jj] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
}
for (int ii = -1; ii <= +1; ii++)
{
int rrWidth = (ii + i) * stride_;
if (i == -1)
{
if (ii + i != i)
{
if (image[d][rrWidth + j] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
else if (i == 0)
{
for (int j2 = -1; j2 <= 1; j2 += 2)
{
if (ii + i != i)
{
if (image[d][rrWidth + j + j2] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
}
else
{
if (ii + i != i)
{
if (image[d][rrWidth + j] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
}
dst[d][rWidth + j] = c;
}
}
}
}
};
//!parallel implementation of the center symmetric census
template <int num_images>
class SymetricCensus:public ParallelLoopBody
{
private:
uint8_t *image[num_images];
int *dst[num_images];
int n2, width, height, im_num,stride_;
public:
SymetricCensus(const cv::Mat *img, int k2, int **distance)
{
for(int i = 0; i < num_images; i++)
{
image[i] = img[i].data;
dst[i] = distance[i];
}
n2 = k2;
width = img[0].cols;
height = img[0].rows;
im_num = num_images;
stride_ = (int)img[0].step;
}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i < r.end ; i++)
{
int distV = i*stride_;
for (int j = 0; j < width; j++)
{
for(int d = 0; d < im_num; d++)
{
if (i < n2 || i >= height - n2 || j < n2 || j >= width - n2)
{
dst[d][distV + j] = 0; // TODO out of range value?
continue;
}
int c = 0;
//the classic center symmetric census which compares the current pixel with its symmetric counterpart, not the center.
for (int ii = -n2; ii <= 0; ii++)
{
int rrWidth = (ii + i) * stride_;
for (int jj = -n2; jj <= +n2; jj++)
{
if (image[d][(rrWidth + (jj + j))] > image[d][((ii * (-1) + i) * width + (-1 * jj) + j)])
{
c = c + 1;
}
c = c * 2;
if(ii == 0 && jj < 0)
{
if (image[d][(i * width + (jj + j))] > image[d][(i * width + (-1 * jj) + j)])
{
c = c + 1;
}
c = c * 2;
}
}
}
dst[d][(distV + j)] = c;
}
}
}
}
};
}
}
#endif
#endif
/*End of file*/
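The recurring change across the two descriptor files above is that parallel_for_ now runs over the full row range (Range(0, rows)) while each worker skips or zero-fills the border pixels itself, instead of the caller shrinking the range. The following standalone sketch shows that pattern; the RowWorker class and its variable names are illustrative and not part of the module.

#include <opencv2/core.hpp>
#include <opencv2/core/utility.hpp>

// Illustrative pattern only: process every row, write a defined value on the border
// inside the body instead of narrowing the parallel_for_ range (as the hunks above do).
class RowWorker : public cv::ParallelLoopBody
{
public:
    RowWorker(cv::Mat& dst, int border) : dst_(dst), border_(border) {}
    void operator()(const cv::Range& r) const CV_OVERRIDE
    {
        for (int i = r.start; i < r.end; i++)
        {
            int* row = dst_.ptr<int>(i);
            for (int j = 0; j < dst_.cols; j++)
            {
                if (i < border_ || i >= dst_.rows - border_ ||
                    j < border_ || j >= dst_.cols - border_)
                {
                    row[j] = 0;             // border pixel: fill with a defined value
                    continue;
                }
                row[j] = i * dst_.cols + j; // interior: the real descriptor work goes here
            }
        }
    }
private:
    cv::Mat& dst_;
    int border_;
};

// usage: cv::Mat dst(rows, cols, CV_32SC1);
//        cv::parallel_for_(cv::Range(0, dst.rows), RowWorker(dst, n2));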
......@@ -64,7 +64,7 @@ namespace cv
//!the confidence to which a min disparity found is good or not
double confidenceCheck;
//!the LUT used in case SSE is not available
int hamLut[65537];
int hamLut[65536]; // FIXIT use predefined 8-bit lookup table for hamming
//!function used for getting the minimum disparity from the cost volume
static int minim(short *c, int iwpj, int widthDisp,const double confidence, const int search_region)
{
......@@ -131,7 +131,7 @@ namespace cv
//!a pre-processing function that generates the Hamming LUT in case the algorithm is ever used on a platform where SSE is not available
void hammingLut()
{
for (int i = 0; i <= 65536; i++)
for (int i = 0; i < 65536; i++)
{
int dist = 0;
int j = i;
......@@ -157,19 +157,16 @@ namespace cv
hammingDistance(const Mat &leftImage, const Mat &rightImage, short *cost, int maxDisp, int kerSize, int *hammingLUT):
left((int *)leftImage.data), right((int *)rightImage.data), c(cost), v(maxDisp),kernelSize(kerSize),width(leftImage.cols), MASK(65535), hammLut(hammingLUT){}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i <= r.end ; i++)
for (int i = r.start; i < r.end ; i++)
{
int iw = i * width;
for (int j = kernelSize; j < width - kernelSize; j++)
{
int j2;
int xorul;
int iwj;
iwj = iw + j;
int iwj = iw + j;
for (int d = 0; d <= v; d++)
{
j2 = (0 > j - d) ? (0) : (j - d);
xorul = left[(iwj)] ^ right[(iw + j2)];
int j2 = std::max(0, j - d);
int xorul = left[(iwj)] ^ right[(iw + j2)];
#if CV_POPCNT
if (checkHardwareSupport(CV_CPU_POPCNT))
{
......@@ -203,16 +200,25 @@ namespace cv
parSum = (short *)partialSums.data;
}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i <= r.end; i++)
for (int i = r.start; i < r.end; i++)
{
int iwi = (i - 1) * width;
for (int j = win + 1; j <= width - win - 1; j++)
int iwi = i * width;
for (int j = 0; j <= width; j++)
{
int w = (iwi + j) * (maxDisp + 1);
if (i < win + 1 || i >= height - win - 1 || j < win + 1 || j >= width - win - 1)
{
for (int d = 0; d <= maxDisp; d++)
{
c[w + d] = 0;
}
continue;
}
int w1 = ((i + win + 1) * width + j + win) * (maxDisp + 1);
int w2 = ((i - win) * width + j - win - 1) * (maxDisp + 1);
int w3 = ((i + win + 1) * width + j - win - 1) * (maxDisp + 1);
int w4 = ((i - win) * width + j + win) * (maxDisp + 1);
int w = (iwi + j - 1) * (maxDisp + 1);
for (int d = 0; d <= maxDisp; d++)
{
c[w + d] = parSum[w1 + d] + parSum[w2 + d]
......@@ -244,7 +250,7 @@ namespace cv
confCheck = confidence;
}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int i = r.start; i <= r.end ; i++)
for (int i = r.start; i < r.end ; i++)
{
int lr;
int v = -1;
......@@ -301,10 +307,16 @@ namespace cv
width = originalImage.cols;
}
void operator()(const cv::Range &r) const CV_OVERRIDE {
for (int m = r.start; m <= r.end; m++)
for (int m = r.start; m < r.end; m++)
{
for (int n = 4; n < width - 4; ++n)
for (int n = 0; n < width; ++n)
{
if (m < 1 || m >= height - 1 || n < 4 || n >= width - 4)
{
filtered[m * width + n] = original[m * width + n]; // FIXIT replace with OpenCV function
continue;
}
int k = 0;
T window[9];
for (int i = n - 4; i <= n + 4; ++i)
......@@ -341,10 +353,16 @@ namespace cv
width = originalImage.cols;
}
void operator()(const Range &r) const CV_OVERRIDE {
for (int n = r.start; n <= r.end; ++n)
for (int n = r.start; n < r.end; ++n)
{
for (int m = 4; m < height - 4; ++m)
{
if (m < 4 || m >= height - 4 || n < 1 || n >= width - 1)
{
filtered[m * width + n] = original[m * width + n]; // FIXIT replace with OpenCV function
continue;
}
int k = 0;
T window[9];
for (int i = m - 4; i <= m + 4; ++i)
......@@ -430,11 +448,11 @@ namespace cv
short *c = (short *)cost.data;
short *ham = (short *)hammingDistanceCost.data;
memset(c, 0, sizeof(c[0]) * (width + 1) * (height + 1) * (maxDisp + 1));
for (int i = 1; i <= height; i++)
for (int i = 1; i < height; i++)
{
int iw = i * width;
int iwi = (i - 1) * width;
for (int j = 1; j <= width; j++)
for (int j = 1; j < width; j++)
{
int iwj = (iw + j) * (maxDisp + 1);
int iwjmu = (iw + j - 1) * (maxDisp + 1);
......@@ -445,9 +463,9 @@ namespace cv
}
}
}
for (int i = 1; i <= height; i++)
for (int i = 1; i < height; i++)
{
for (int j = 1; j <= width; j++)
for (int j = 1; j < width; j++)
{
int iwj = (i * width + j) * (maxDisp + 1);
int iwjmu = ((i - 1) * width + j) * (maxDisp + 1);
......@@ -464,18 +482,18 @@ namespace cv
CV_Assert(windowSize % 2 != 0);
CV_Assert(partialSums.rows == cost.rows);
CV_Assert(partialSums.cols == cost.cols);
int win = windowSize / 2;
short *c = (short *)cost.data;
int maxDisp = maxDisparity;
int width = cost.cols / ( maxDisp + 1) - 1;
int height = cost.rows - 1;
memset(c, 0, sizeof(c[0]) * width * height * (maxDisp + 1));
parallel_for_(cv::Range(win + 1,height - win - 1), agregateCost(partialSums,windowSize,maxDisp,cost));
parallel_for_(cv::Range(0, height), agregateCost(partialSums,windowSize, maxDisp, cost));
}
//!remove small regions that have an area smaller than t; the region is filled with the average of the good pixels around it
template <typename T>
void smallRegionRemoval(const Mat &currentMap, int t, Mat &out)
{
CV_Assert(currentMap.data != out.data && "inplace is not supported");
CV_Assert(currentMap.cols == out.cols);
CV_Assert(currentMap.rows == out.rows);
CV_Assert(t >= 0);
......@@ -494,16 +512,22 @@ namespace cv
int speckle_size = 0;
st = 0;
dr = 0;
for (int i = 1; i < height - 1; i++)
for (int i = 0; i < height; i++)
{
int iw = i * width;
for (int j = 1; j < width - 1; j++)
for (int j = 0; j < width; j++)
{
if (i < 1 || i >= height - 1 || j < 1 || j >= width - 1)
{
outputMap[iw + j] = 0;
continue;
}
if (map[iw + j] != 0)
{
outputMap[iw + j] = map[iw + j];
}
else if (map[iw + j] == 0)
else // if (map[iw + j] == 0)
{
T nr = 1;
T avg = 0;
......@@ -571,7 +595,7 @@ namespace cv
int width = costVolume.cols / ( disparity + 1) - 1;
int height = costVolume.rows - 1;
memset(map, 0, sizeof(map[0]) * width * height);
parallel_for_(Range(0,height - 1), makeMap(costVolume,th,disparity,confidenceCheck,scallingFactor,mapFinal));
parallel_for_(Range(0, height), makeMap(costVolume,th,disparity,confidenceCheck,scallingFactor,mapFinal));
}
public:
//!a median filter of 1x9 and 9x1
......@@ -581,7 +605,7 @@ namespace cv
{
CV_Assert(originalImage.rows == filteredImage.rows);
CV_Assert(originalImage.cols == filteredImage.cols);
parallel_for_(Range(1,originalImage.rows - 2), Median1x9<T>(originalImage,filteredImage));
parallel_for_(Range(0, originalImage.rows), Median1x9<T>(originalImage,filteredImage));
}
//!9x1 median filter
template<typename T>
......@@ -589,7 +613,7 @@ namespace cv
{
CV_Assert(originalImage.cols == filteredImage.cols);
CV_Assert(originalImage.cols == filteredImage.cols);
parallel_for_(Range(1,originalImage.cols - 2), Median9x1<T>(originalImage,filteredImage));
parallel_for_(Range(0, originalImage.cols), Median9x1<T>(originalImage,filteredImage));
}
//!constructor for the matching class
//!maxDisp - represents the maximum disparity
......
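For context on the hamLut bounds fix above (the table has 65536 entries, indexed 0..65535), here is a standalone sketch of how such a 16-bit Hamming-distance lookup table can be built and queried; the helper names are illustrative only and not part of the module.

#include <cstdint>

static int hamLut[65536];

// Build the table once: hamLut[i] = number of set bits in the 16-bit value i.
static void buildHammingLut()
{
    for (int i = 0; i < 65536; i++)   // note: < 65536, matching the corrected loop bound
    {
        int dist = 0;
        for (int j = i; j != 0; j >>= 1)
            dist += (j & 1);
        hamLut[i] = dist;
    }
}

// Hamming distance between two 16-bit census codes a and b via XOR + lookup.
static int hammingDistance16(uint16_t a, uint16_t b)
{
    return hamLut[a ^ b];
}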
......@@ -42,16 +42,13 @@
#ifndef __OPENCV_STEREO_PRECOMP_H__
#define __OPENCV_STEREO_PRECOMP_H__
#include "opencv2/stereo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/core/private.hpp"
#include "opencv2/core/cvdef.h"
#include "opencv2/imgproc.hpp"
#include "opencv2/calib3d.hpp"
#include <algorithm>
#include <cmath>
#include "opencv2/stereo.hpp"
#include "descriptor.hpp"
#include "matching.hpp"
#endif
......@@ -384,15 +384,10 @@ namespace cv
}
else if(params.kernelType == CV_MEAN_VARIATION)
{
parSumsIntensityImage[0].create(left0.rows, left0.cols,CV_32SC4);
parSumsIntensityImage[1].create(left0.rows, left0.cols,CV_32SC4);
Integral[0].create(left0.rows,left0.cols,CV_32SC4);
Integral[1].create(left0.rows,left0.cols,CV_32SC4);
integral(left, parSumsIntensityImage[0],CV_32S);
integral(right, parSumsIntensityImage[1],CV_32S);
imageMeanKernelSize(parSumsIntensityImage[0], params.kernelSize,Integral[0]);
imageMeanKernelSize(parSumsIntensityImage[1], params.kernelSize, Integral[1]);
modifiedCensusTransform(left,right,params.kernelSize,censusImage[0],censusImage[1],CV_MEAN_VARIATION,0,Integral[0], Integral[1]);
Mat blurLeft; blur(left, blurLeft, Size(params.kernelSize, params.kernelSize));
Mat blurRight; blur(right, blurRight, Size(params.kernelSize, params.kernelSize));
modifiedCensusTransform(left, right, params.kernelSize, censusImage[0], censusImage[1], CV_MEAN_VARIATION, 0,
blurLeft, blurRight);
}
else if(params.kernelType == CV_STAR_KERNEL)
{
......@@ -407,7 +402,7 @@ namespace cv
if(params.regionRemoval == CV_SPECKLE_REMOVAL_AVG_ALGORITHM)
{
smallRegionRemoval<uint8_t>(disp0,params.speckleWindowSize,disp0);
smallRegionRemoval<uint8_t>(disp0.clone(),params.speckleWindowSize,disp0);
}
else if(params.regionRemoval == CV_SPECKLE_REMOVAL_ALGORITHM)
{
......@@ -502,8 +497,6 @@ namespace cv
StereoBinaryBMParams params;
Mat preFilteredImg0, preFilteredImg1, cost, dispbuf;
Mat slidingSumBuf;
Mat parSumsIntensityImage[2];
Mat Integral[2];
Mat censusImage[2];
Mat hammingDistance;
Mat partialSumsLR;
......
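In the CV_MEAN_VARIATION branch above, the removed integral-image pipeline is replaced by box-filtered mean images that are passed to modifiedCensusTransform. The sketch below restates that preparation as a standalone function; the function name, the assumption that "opencv2/stereo.hpp" exposes the declaration, and the preallocated census buffers are mine, not from this diff.

#include <opencv2/imgproc.hpp>
#include <opencv2/stereo.hpp>

// Sketch only: mirrors the new CV_MEAN_VARIATION preparation shown in the hunk above.
// 'prepareMeanVariation' is an illustrative name; censusLeft/censusRight are assumed to
// be preallocated 32-bit integer buffers of the same size as the inputs.
void prepareMeanVariation(const cv::Mat& left, const cv::Mat& right, int kernelSize,
                          cv::Mat& censusLeft, cv::Mat& censusRight)
{
    cv::Mat blurLeft;  cv::blur(left,  blurLeft,  cv::Size(kernelSize, kernelSize));
    cv::Mat blurRight; cv::blur(right, blurRight, cv::Size(kernelSize, kernelSize));
    cv::stereo::modifiedCensusTransform(left, right, kernelSize, censusLeft, censusRight,
                                        cv::stereo::CV_MEAN_VARIATION, 0,
                                        blurLeft, blurRight);
}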
......@@ -669,15 +669,10 @@ namespace cv
}
else if(params.kernelType == CV_MEAN_VARIATION)
{
parSumsIntensityImage[0].create(left.rows, left.cols,CV_32SC4);
parSumsIntensityImage[1].create(left.rows, left.cols,CV_32SC4);
Integral[0].create(left.rows,left.cols,CV_32SC4);
Integral[1].create(left.rows,left.cols,CV_32SC4);
integral(left, parSumsIntensityImage[0],CV_32S);
integral(right, parSumsIntensityImage[1],CV_32S);
imageMeanKernelSize(parSumsIntensityImage[0], params.kernelSize,Integral[0]);
imageMeanKernelSize(parSumsIntensityImage[1], params.kernelSize, Integral[1]);
modifiedCensusTransform(left,right,params.kernelSize,censusImageLeft,censusImageRight,CV_MEAN_VARIATION,0,Integral[0], Integral[1]);
Mat blurLeft; blur(left, blurLeft, Size(params.kernelSize, params.kernelSize));
Mat blurRight; blur(right, blurRight, Size(params.kernelSize, params.kernelSize));
modifiedCensusTransform(left, right, params.kernelSize, censusImageLeft, censusImageRight, CV_MEAN_VARIATION, 0,
blurLeft, blurRight);
}
else if(params.kernelType == CV_STAR_KERNEL)
{
......@@ -702,7 +697,7 @@ namespace cv
aux.create(height,width,CV_16S);
Median1x9Filter<short>(disp, aux);
Median9x1Filter<short>(aux,disp);
smallRegionRemoval<short>(disp, params.speckleWindowSize, disp);
smallRegionRemoval<short>(disp.clone(), params.speckleWindowSize, disp);
}
else if(params.regionRemoval == CV_SPECKLE_REMOVAL_ALGORITHM)
{
......@@ -800,8 +795,6 @@ namespace cv
Mat partialSumsLR;
Mat agregatedHammingLRCost;
Mat hamDist;
Mat parSumsIntensityImage[2];
Mat Integral[2];
};
const char* StereoBinarySGBMImpl::name_ = "StereoBinaryMatcher.SGBM";
......
......@@ -6,8 +6,6 @@
#include "opencv2/ts.hpp"
#include "opencv2/stereo.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
namespace opencv_test {
using namespace cv::stereo;
......