Commit 54d41251 authored by Muresan Mircea Paul's avatar Muresan Mircea Paul

added single kernel matchings

fixed warnings and error

replaced cols with strides

fixed warnings

replaced cols with strides everywhere

the number of images is established at run time

fixed bracket warnings

replaced all enums with a single enum

removed whitespaces

fixed last issues

the matching class
this class contains the most important functions used in stereo correspondence
(StereoBM and StereoSGBM) call methods from these classes

fixed shadowing warning problems

included intrin.h header for popcnt instruction

replaced __popcnt with _mm_popcnt_u32() equivalent

fixed some warnings

fixed index warning
parent 7d13acdb
......@@ -41,91 +41,201 @@
//the use of this software, even if advised of the possibility of such damage.
/*****************************************************************************************************************\
* The file contains the implemented descriptors *
* The file contains the implemented descriptors *
\******************************************************************************************************************/
#include "precomp.hpp"
#include "descriptor.hpp"
using namespace cv;
using namespace stereo;
//!Census transform applied to a single 8-bit grayscale image.
//!img        - input image (CV_8UC1)
//!kernelSize - census window size, at most 5
//!dist       - output matrix receiving one 32-bit descriptor per pixel
//!type       - descriptor variant selector, must be 0 or 1
void cv::stereo::applyCensusOnImage(const cv::Mat &img, int kernelSize, cv::Mat &dist, const int type)
{
    CV_Assert(img.type() == CV_8UC1);
    CV_Assert(kernelSize <= 5);
    CV_Assert(type >= 0 && type < 2);
    const int halfKernel = (kernelSize - 1) / 2;
    const cv::Range rowRange(halfKernel, img.rows - halfKernel);
    //rows closer than halfKernel to the border are skipped so the window always fits
    parallel_for_(rowRange, singleImageCensus(img.data, img.cols, img.rows, halfKernel, (int *)dist.data, type));
}
//!Census transform computed simultaneously on two images.
//!Dense_Census visits every pixel of the window, Sparse_Census every second pixel,
//!which permits the larger kernel bound (10 instead of 5).
void cv::stereo::applyCensusOnImages(const cv::Mat &im1,const cv::Mat &im2, int kernelSize, cv::Mat &dist, cv::Mat &dist2, const int type)
{
    CV_Assert(im1.size() == im2.size());
    CV_Assert(im1.type() == CV_8UC1 && im2.type() == CV_8UC1);
    CV_Assert(type >= 0 && type < 2);
    CV_Assert(kernelSize <= (type == 0 ? 5 : 10));
    const int half = (kernelSize - 1) / 2;
    const cv::Range rowRange(half, im1.rows - half);
    if(type == Dense_Census)
    {
        parallel_for_(rowRange,
            CombinedDescriptor<1,1,1,CensusKernel>(im1.cols, im1.rows, half,
                (int *)dist.data, (int *)dist2.data, CensusKernel(im1.data, im2.data), half));
    }
    else if(type == Sparse_Census)
    {
        parallel_for_(rowRange,
            CombinedDescriptor<2,2,1,CensusKernel>(im1.cols, im1.rows, half,
                (int *)dist.data, (int *)dist2.data, CensusKernel(im1.data, im2.data), half));
    }
}
//!Modified census transform on an image pair.
//!StandardMct compares window pixels against the center with a tolerance t;
//!the MeanVariation branch (MV) is intentionally not implemented here.
void cv::stereo::applyMCTOnImages(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, int t, cv::Mat &dist, cv::Mat &dist2, const int type)
{
    CV_Assert(img1.size() == img2.size());
    CV_Assert(img1.type() == CV_8UC1 && img2.type() == CV_8UC1);
    CV_Assert(type >= 0 && type < 2);
    CV_Assert(kernelSize <= 9);
    const int half = (kernelSize - 1) >> 1;
    if(type == StandardMct)
    {
        const cv::Range rowRange(half, img1.rows - half);
        parallel_for_(rowRange,
            CombinedDescriptor<2,3,2,MCTKernel>(img1.cols, img1.rows, half,
                (int *)dist.data, (int *)dist2.data, MCTKernel(img1.data, img2.data, t), half));
    }
    //else: MeanVariation (MV) variant is a no-op for now
}
void cv::stereo::applySimetricCensus(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist, cv::Mat &dist2, const int type)
namespace cv
{
CV_Assert(img1.size() == img2.size());
CV_Assert(img1.type() == CV_8UC1 && img2.type() == CV_8UC1);
CV_Assert(type < 2 && type >= 0);
CV_Assert(kernelSize <= 7);
int n2 = (kernelSize - 1) >> 1;
if(type == ClassicCenterSymetricCensus)
namespace stereo
{
parallel_for_(cv::Range(n2, img1.rows - n2), parallelSymetricCensus(img1.data, img2.data, img1.cols, img2.rows, n2, (int *)dist.data, (int *)dist2.data, type));
//function that performs the census transform on two images.
//Two variants of census are offered: a sparse version which takes every second pixel as well as a dense version
//image1/image2 - CV_8UC1 inputs of identical size; dist1/dist2 - int cost outputs; kernelSize must be odd
void censusTransform(const Mat &image1, const Mat &image2, int kernelSize, Mat &dist1, Mat &dist2, const int type)
{
    CV_Assert(image1.size() == image2.size());
    CV_Assert(kernelSize % 2 != 0);
    CV_Assert(image1.type() == CV_8UC1 && image2.type() == CV_8UC1);
    //fixed: the original check (type != CV_DENSE_CENSUS || type != CV_SPARSE_CENSUS) is a
    //tautology that can never fail; require exactly one of the two census variants
    CV_Assert(type == CV_DENSE_CENSUS || type == CV_SPARSE_CENSUS);
    //the sparse variant samples fewer pixels, so it tolerates a larger window
    CV_Assert(kernelSize <= ((type == CV_DENSE_CENSUS) ? 5 : 11));
    int n2 = (kernelSize) / 2;
    uint8_t *images[] = {image1.data, image2.data};
    int *costs[] = {(int *)dist1.data,(int *)dist2.data};
    int stride = (int)image1.step;
    if(type == CV_DENSE_CENSUS)
    {
        parallel_for_( Range(n2, image1.rows - n2),
            CombinedDescriptor<1,1,1,2,CensusKernel<2> >(image1.cols, image1.rows,stride,n2,costs,CensusKernel<2>(images),n2));
    }
    else if(type == CV_SPARSE_CENSUS)
    {
        parallel_for_( Range(n2, image1.rows - n2),
            CombinedDescriptor<2,2,1,2,CensusKernel<2> >(image1.cols, image1.rows, stride,n2,costs,CensusKernel<2>(images),n2));
    }
}
//function that performs census on one image
//image1 - CV_8UC1 input; dist1 - int cost output of the same size; kernelSize must be odd
void censusTransform(const Mat &image1, int kernelSize, Mat &dist1, const int type)
{
    CV_Assert(image1.size() == dist1.size());
    CV_Assert(kernelSize % 2 != 0);
    CV_Assert(image1.type() == CV_8UC1);
    //fixed: (type != CV_DENSE_CENSUS || type != CV_SPARSE_CENSUS) was always true and
    //never rejected an invalid type; require one of the two census variants
    CV_Assert(type == CV_DENSE_CENSUS || type == CV_SPARSE_CENSUS);
    //the sparse variant samples fewer pixels, so it tolerates a larger window
    CV_Assert(kernelSize <= ((type == CV_DENSE_CENSUS) ? 5 : 11));
    int n2 = (kernelSize) / 2;
    uint8_t *images[] = {image1.data};
    int *costs[] = {(int *)dist1.data};
    int stride = (int)image1.step;
    if(type == CV_DENSE_CENSUS)
    {
        parallel_for_( Range(n2, image1.rows - n2),
            CombinedDescriptor<1,1,1,1,CensusKernel<1> >(image1.cols, image1.rows,stride,n2,costs,CensusKernel<1>(images),n2));
    }
    else if(type == CV_SPARSE_CENSUS)
    {
        parallel_for_( Range(n2, image1.rows - n2),
            CombinedDescriptor<2,2,1,1,CensusKernel<1> >(image1.cols, image1.rows,stride,n2,costs,CensusKernel<1>(images),n2));
    }
}
//in a 9x9 kernel only certain positions are choosen for comparison
void starCensusTransform(const Mat &img1, const Mat &img2, int kernelSize, Mat &dist1, Mat &dist2)
{
CV_Assert(img1.size() == img2.size());
CV_Assert(kernelSize % 2 != 0);
CV_Assert(img1.type() == CV_8UC1 && img2.type() == CV_8UC1);
CV_Assert(kernelSize >= 7);
int n2 = (kernelSize) >> 1;
Mat images[] = {img1, img2};
int *date[] = { (int *)dist1.data, (int *)dist2.data};
parallel_for_(Range(n2, img1.rows - n2), StarKernelCensus<2>(images, n2,date));
}
//single version of star census
void starCensusTransform(const Mat &img1, int kernelSize, Mat &dist)
{
CV_Assert(img1.size() == dist.size());
CV_Assert(kernelSize % 2 != 0);
CV_Assert(img1.type() == CV_8UC1);
CV_Assert(kernelSize >= 7);
int n2 = (kernelSize) >> 1;
Mat images[] = {img1};
int *date[] = { (int *)dist.data};
parallel_for_(Range(n2, img1.rows - n2), StarKernelCensus<1>(images, n2,date));
}
//Modified census transforms
//the first one deals with small illumination changes
//the sencond modified census transform is invariant to noise; i.e.
//if the current pixel with whom we are dooing the comparison is a noise, this descriptor will provide a better result by comparing with the mean of the window
//otherwise if the pixel is not noise the information is strengthend
void modifiedCensusTransform(const Mat &img1, const Mat &img2, int kernelSize, Mat &dist1,Mat &dist2, const int type, int t, const Mat &IntegralImage1, const Mat &IntegralImage2 )
{
CV_Assert(img1.size() == img2.size());
CV_Assert(kernelSize % 2 != 0);
CV_Assert(img1.type() == CV_8UC1 && img2.type() == CV_8UC1);
CV_Assert(type != CV_MODIFIED_CENSUS_TRANSFORM || type != CV_MEAN_VARIATION);
CV_Assert(kernelSize <= 9);
int n2 = (kernelSize - 1) >> 1;
uint8_t *images[] = {img1.data, img2.data};
int *date[] = { (int *)dist1.data, (int *)dist2.data};
int stride = (int)img1.cols;
if(type == CV_MODIFIED_CENSUS_TRANSFORM)
{
//MCT
parallel_for_( Range(n2, img1.rows - n2),
CombinedDescriptor<2,4,2, 2,MCTKernel<2> >(img1.cols, img1.rows,stride,n2,date,MCTKernel<2>(images,t),n2));
}
else if(type == CV_MEAN_VARIATION)
{
//MV
int *integral[2];
integral[0] = (int *)IntegralImage1.data;
integral[1] = (int *)IntegralImage2.data;
parallel_for_( Range(n2, img1.rows - n2),
CombinedDescriptor<2,3,2,2, MVKernel<2> >(img1.cols, img1.rows,stride,n2,date,MVKernel<2>(images,integral),n2));
}
}
//single image version of the modified census transform
//img1 - CV_8UC1 input; dist - int output of the same size; t - comparison tolerance;
//IntegralImage is only read by the CV_MEAN_VARIATION variant
void modifiedCensusTransform(const Mat &img1, int kernelSize, Mat &dist, const int type, int t , Mat const &IntegralImage)
{
    CV_Assert(img1.size() == dist.size());
    CV_Assert(kernelSize % 2 != 0);
    CV_Assert(img1.type() == CV_8UC1);
    //fixed: (type != CV_MODIFIED_CENSUS_TRANSFORM || type != CV_MEAN_VARIATION) could
    //never fail; require one of the two supported variants
    CV_Assert(type == CV_MODIFIED_CENSUS_TRANSFORM || type == CV_MEAN_VARIATION);
    CV_Assert(kernelSize <= 9);
    int n2 = (kernelSize - 1) >> 1;
    uint8_t *images[] = {img1.data};
    int *date[] = { (int *)dist.data};
    int stride = (int)img1.step;
    if(type == CV_MODIFIED_CENSUS_TRANSFORM)
    {
        //MCT: comparison with a tolerance t around the center pixel
        parallel_for_(Range(n2, img1.rows - n2),
            CombinedDescriptor<2,3,2, 1,MCTKernel<1> >(img1.cols, img1.rows,stride,n2,date,MCTKernel<1>(images,t),n2));
    }
    else if(type == CV_MEAN_VARIATION)
    {
        //MV: also compares against the window mean taken from the integral image
        int *integral[] = { (int *)IntegralImage.data};
        parallel_for_(Range(n2, img1.rows - n2),
            CombinedDescriptor<2,3,2,1, MVKernel<1> >(img1.cols, img1.rows,stride,n2,date,MVKernel<1>(images,integral),n2));
    }
}
//different versions of symetric census
//Since these variants do not compare with the center pixel they are invariant to noise
void symetricCensusTransform(const Mat &img1, const Mat &img2, int kernelSize, Mat &dist1, Mat &dist2, const int type)
{
    CV_Assert(img1.size() == img2.size());
    CV_Assert(kernelSize % 2 != 0);
    CV_Assert(img1.type() == CV_8UC1 && img2.type() == CV_8UC1);
    //fixed: (type != CV_CS_CENSUS || type != CV_MODIFIED_CS_CENSUS) was always true;
    //require one of the two center-symetric variants
    CV_Assert(type == CV_CS_CENSUS || type == CV_MODIFIED_CS_CENSUS);
    CV_Assert(kernelSize <= 7);
    int n2 = kernelSize >> 1;
    uint8_t *images[] = {img1.data, img2.data};
    Mat imag[] = {img1, img2};
    int *date[] = { (int *)dist1.data, (int *)dist2.data};
    int stride = (int)img1.step;
    if(type == CV_CS_CENSUS)
    {
        //classic variant: each pixel is compared with its point-symmetric counterpart
        parallel_for_(Range(n2, img1.rows - n2), SymetricCensus<2>(imag, n2,date));
    }
    else if(type == CV_MODIFIED_CS_CENSUS)
    {
        //modified variant: each pixel is compared with its correspondent at the same offset past the center
        parallel_for_(Range(n2, img1.rows - n2),
            CombinedDescriptor<1,1,1,2,ModifiedCsCensus<2> >(img1.cols, img1.rows,stride,n2,date,ModifiedCsCensus<2>(images,n2),1));
    }
}
//single image version of the symetric census transform
//img1 - CV_8UC1 input; dist1 - int output of the same size; kernelSize must be odd and at most 7
void symetricCensusTransform(const Mat &img1, int kernelSize, Mat &dist1, const int type)
{
    CV_Assert(img1.size() == dist1.size());
    CV_Assert(kernelSize % 2 != 0);
    CV_Assert(img1.type() == CV_8UC1);
    //fixed: (type != CV_MODIFIED_CS_CENSUS || type != CV_CS_CENSUS) was a tautology;
    //require one of the two center-symetric variants
    CV_Assert(type == CV_CS_CENSUS || type == CV_MODIFIED_CS_CENSUS);
    CV_Assert(kernelSize <= 7);
    int n2 = kernelSize >> 1;
    uint8_t *images[] = {img1.data};
    Mat imag[] = {img1};
    int *date[] = { (int *)dist1.data};
    int stride = (int)img1.step;
    if(type == CV_CS_CENSUS)
    {
        //classic variant: each pixel is compared with its point-symmetric counterpart
        parallel_for_( Range(n2, img1.rows - n2), SymetricCensus<1>(imag, n2,date));
    }
    else if(type == CV_MODIFIED_CS_CENSUS)
    {
        //modified variant: each pixel is compared with its correspondent at the same offset past the center
        parallel_for_( Range(n2, img1.rows - n2),
            CombinedDescriptor<1,1,1,1,ModifiedCsCensus<1> >(img1.cols, img1.rows,stride,n2,date,ModifiedCsCensus<1>(images,n2),1));
    }
}
//integral image computation used in the Mean Variation Census Transform
void imageMeanKernelSize(const Mat &image, int windowSize, Mat &cost)
{
CV_Assert(image.size > 0);
CV_Assert(cost.size > 0);
CV_Assert(windowSize % 2 != 0);
int win = windowSize / 2;
float scalling = ((float) 1) / (windowSize * windowSize);
int height = image.rows;
cost.setTo(0);
int *c = (int *)cost.data;
parallel_for_(Range(win + 1, height - win - 1),MeanKernelIntegralImage(image,win,scalling,c));
}
}
else if(type == ModifiedCenterSymetricCensus)
{
parallel_for_(cv::Range(n2, img1.rows - n2),
CombinedDescriptor<1,1,1,ModifiedCsCensus>(img1.cols, img1.rows,n2,(int *)dist.data,(int *)dist2.data,ModifiedCsCensus(img1.data, img2.data,n2),1));
}
}
//!brief binary descriptor - not implemented yet
//!The parameters are consumed with void casts so the stub compiles without
//!unused-parameter warnings until the implementation lands.
void cv::stereo::applyBrifeDescriptor(const cv::Mat &image1, const cv::Mat &image2, int kernelSize, cv::Mat &dist, cv::Mat &dist2)
{
    //TO DO: implement the BRIEF descriptor
    (void)kernelSize;
    (void)image1;
    (void)image2;
    (void)dist;
    (void)dist2;
}
//!classical rank transform descriptor - not implemented yet
//!The parameters are consumed with void casts so the stub compiles without
//!unused-parameter warnings until the implementation lands.
void cv::stereo::applyRTDescriptor(const cv::Mat &image1, const cv::Mat &image2, int kernelSize, cv::Mat &dist, cv::Mat &dist2)
{
    //TO DO: implement the rank transform
    (void)kernelSize;
    (void)image1;
    (void)image2;
    (void)dist;
    (void)dist2;
}
......@@ -42,7 +42,7 @@
/*****************************************************************************************************************\
* The interface contains the main descriptors that will be implemented in the descriptor class *
\******************************************************************************************************************/
\*****************************************************************************************************************/
#include "precomp.hpp"
#include <stdint.h>
......@@ -54,194 +54,260 @@ namespace cv
{
namespace stereo
{
enum { Dense_Census, Sparse_Census, StarCensus};
enum {ClassicCenterSymetricCensus, ModifiedCenterSymetricCensus};
enum {StandardMct,MeanVariation};
enum {SSE, NonSSE};
//types of supported kernels
enum {
CV_DENSE_CENSUS, CV_SPARSE_CENSUS, //plain census: dense visits every window pixel, sparse every second one
CV_CS_CENSUS, CV_MODIFIED_CS_CENSUS, CV_MODIFIED_CENSUS_TRANSFORM, //center-symetric variants and the tolerance-based MCT
CV_MEAN_VARIATION //MCT variant that also compares against the window mean (requires an integral image)
};
//!Mean Variation is a robust kernel that compares a pixel
//!not just with the center but also with the mean of the window
template<int num_images>
struct MVKernel
{
uint8_t *image1;
uint8_t *image2;
uint8_t *integralLeft;
uint8_t *integralRight;
MVKernel(uint8_t *img, uint8_t *img2, uint8_t *integralL, uint8_t *integralR): image1(img),image2(img2),integralLeft(integralL), integralRight(integralR){}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int &c, int &c2) const
uint8_t *image[num_images];
int *integralImage[num_images];
int stop;
MVKernel(){}
MVKernel(uint8_t **images, int **integral)
{
for(int i = 0; i < num_images; i++)
{
image[i] = images[i];
integralImage[i] = integral[i];
}
stop = num_images;
}
};
//!kernel that takes the pixels from certain positions from a patch
//!offers verry good results
struct StarKernel
{
uint8_t *image1;
uint8_t *image2;
StarKernel(uint8_t *img, uint8_t *img2): image1(img),image2(img2){}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int &c, int &c2) const
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
(void)w2;
for (int i = 0; i < stop; i++)
{
if (image[i][rrWidth + jj] > image[i][rWidth + j])
{
c[i] = c[i] + 1;
}
c[i] = c[i] << 1;
if (integralImage[i][rrWidth + jj] > image[i][rWidth + j])
{
c[i] = c[i] + 1;
}
c[i] = c[i] << 1;
}
}
};
//!Compares pixels from a patch giving high weights to pixels in which
//!the intensity is higher. The other pixels receive a lower weight
template <int num_images>
struct MCTKernel
{
uint8_t *image1;
uint8_t *image2;
int t;
MCTKernel(uint8_t * img,uint8_t *img2, int threshold) : image1(img),image2(img2), t(threshold) {}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int &c, int &c2) const
uint8_t *image[num_images];
int t,imageStop;
MCTKernel(){}
MCTKernel(uint8_t ** images, int threshold)
{
if (image1[rrWidth + jj] > image1[rWidth + j] - t)
for(int i = 0; i < num_images; i++)
{
c <<= 2;
c |= 0x3;
image[i] = images[i];
}
else if (image1[rWidth + j] - t < image1[rrWidth + jj] && image1[rWidth + j] + t >= image1[rrWidth + jj])
{
c <<= 2;
c = c + 1;
}
else
{
c <<= 2;
}
if (image2[rrWidth + jj] > image2[rWidth + j] - t)
{
c2 <<= 2;
c2 |= 0x3;
}
else if (image2[rWidth + j] - t < image2[rrWidth + jj] && image2[rWidth + j] + t >= image2[rrWidth + jj])
{
c2 <<= 2;
c2 = c2 + 1;
}
else
imageStop = num_images;
t = threshold;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
(void)w2;
for(int i = 0; i < imageStop; i++)
{
c2 <<= 2;
if (image[i][rrWidth + jj] > image[i][rWidth + j] - t)
{
c[i] = c[i] << 1;
c[i] = c[i] + 1;
c[i] = c[i] << 1;
c[i] = c[i] + 1;
}
else if (image[i][rWidth + j] - t < image[i][rrWidth + jj] && image[i][rWidth + j] + t >= image[i][rrWidth + jj])
{
c[i] = c[i] << 2;
c[i] = c[i] + 1;
}
else
{
c[i] <<= 2;
}
}
}
};
//!A madified cs census that compares a pixel with the imediat neightbour starting
//!from the center
template<int num_images>
struct ModifiedCsCensus
{
uint8_t *image1;
uint8_t *image2;
uint8_t *image[num_images];
int n2;
ModifiedCsCensus(uint8_t *im1, uint8_t *im2, int ker):image1(im1),image2(im2),n2(ker){}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int &c, int &c2) const
int imageStop;
ModifiedCsCensus(){}
ModifiedCsCensus(uint8_t **images, int ker)
{
if (image1[(rrWidth + jj)] > image1[(w2 + (jj + n2))])
{
c = c + 1;
}
c = c * 2;
if (image2[(rrWidth + jj)] > image2[(w2 + (jj + n2))])
for(int i = 0; i < num_images; i++)
image[i] = images[i];
imageStop = num_images;
n2 = ker;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
(void)j;
(void)rWidth;
for(int i = 0; i < imageStop; i++)
{
c2 = c2 + 1;
if (image[i][(rrWidth + jj)] > image[i][(w2 + (jj + n2))])
{
c[i] = c[i] + 1;
}
c[i] = c[i] * 2;
}
c2 = c2 * 2;
}
};
//!A kernel in which a pixel is compared with the center of the window
template<int num_images>
struct CensusKernel
{
uint8_t *image1;
uint8_t *image2;
CensusKernel(uint8_t *im1, uint8_t *im2):image1(im1),image2(im2){}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int &c, int &c2) const
uint8_t *image[num_images];
int imageStop;
CensusKernel(){}
CensusKernel(uint8_t **images)
{
//compare a pixel with the center from the kernel
if (image1[rrWidth + jj] > image1[rWidth + j])
{
c = c + 1;
}
c = c * 2;
//compare pixel with center for image 2
if (image2[rrWidth + jj] > image2[rWidth + j])
for(int i = 0; i < num_images; i++)
image[i] = images[i];
imageStop = num_images;
}
void operator()(int rrWidth,int w2, int rWidth, int jj, int j, int c[num_images]) const
{
(void)w2;
for(int i = 0; i < imageStop; i++)
{
c2 = c2 + 1;
////compare a pixel with the center from the kernel
if (image[i][rrWidth + jj] > image[i][rWidth + j])
{
c[i] += 1;
}
c[i] <<= 1;
}
c2 = c2 * 2;
}
};
//template clas which efficiently combines the descriptors
template <int step_start, int step_end, int step_inc, typename Kernel>
template <int step_start, int step_end, int step_inc,int nr_img, typename Kernel>
class CombinedDescriptor:public ParallelLoopBody
{
private:
uint8_t *image1, *image2;
int *dst1, *dst2;
int n2 , width, height;
int n2_stop;
int width, height,n2;
int stride_;
int *dst[nr_img];
Kernel kernel_;
int n2_stop;
public:
CombinedDescriptor(int w, int h, int k2, int * distance1, int * distance2, Kernel kernel,int k2Stop) :
width(w), height(h), n2(k2),dst1(distance1), dst2(distance2), kernel_(kernel), n2_stop(k2Stop){}
CombinedDescriptor(int w, int h,int stride, int k2, int **distance, Kernel kernel,int k2Stop)
{
width = w;
height = h;
n2 = k2;
stride_ = stride;
for(int i = 0; i < nr_img; i++)
dst[i] = distance[i];
kernel_ = kernel;
n2_stop = k2Stop;
}
void operator()(const cv::Range &r) const {
for (int i = r.start; i <= r.end ; i++)
{
int rWidth = i * width;
int rWidth = i * stride_;
for (int j = n2 + 2; j <= width - n2 - 2; j++)
{
int c = 0;
int c2 = 0;
int c[nr_img];
memset(c,0,nr_img);
for(int step = step_start; step <= step_end; step += step_inc)
{
for (int ii = - n2; ii <= + n2_stop; ii += step)
{
int rrWidth = (ii + i) * width;
int rrWidthC = (ii + i + n2) * width;
int rrWidth = (ii + i) * stride_;
int rrWidthC = (ii + i + n2) * stride_;
for (int jj = j - n2; jj <= j + n2; jj += step)
{
if (ii != i || jj != j)
{
kernel_(rrWidth,rrWidthC, rWidth, jj, j, c,c2);
kernel_(rrWidth,rrWidthC, rWidth, jj, j,c);
}
}
}
}
dst1[rWidth + j] = c;
dst2[rWidth + j] = c2;
for(int l = 0; l < nr_img; l++)
dst[l][rWidth + j] = c[l];
}
}
}
};
//!class that implemented the census descriptor on single images
class singleImageCensus : public ParallelLoopBody
//!calculate the mean of every windowSizexWindwoSize block from the integral Image
//!this is a preprocessing for MV kernel
class MeanKernelIntegralImage : public ParallelLoopBody
{
private:
uint8_t *image;
int *dst;
int n2, width, height, type;
int *img;
int windowSize,width;
float scalling;
int *c;
public:
singleImageCensus(uint8_t * img1, int w, int h, int k2, int * distance1,const int t) :
image(img1), dst(distance1), n2(k2), width(w), height(h), type(t){}
MeanKernelIntegralImage(const cv::Mat &image, int window,float scale, int *cost):
img((int *)image.data),windowSize(window) ,width(image.cols) ,scalling(scale) , c(cost){};
void operator()(const cv::Range &r) const{
for (int i = r.start; i <= r.end; i++)
{
int iw = i * width;
for (int j = windowSize + 1; j <= width - windowSize - 1; j++)
{
c[iw + j] = (int)((img[(i + windowSize - 1) * width + j + windowSize - 1] + img[(i - windowSize - 1) * width + j - windowSize - 1]
- img[(i + windowSize) * width + j - windowSize] - img[(i - windowSize) * width + j + windowSize]) * scalling);
}
}
}
};
//!implementation for the star kernel descriptor
template<int num_images>
class StarKernelCensus:public ParallelLoopBody
{
private:
uint8_t *image[num_images];
int *dst[num_images];
int n2, width, height, im_num,stride_;
public:
StarKernelCensus(const cv::Mat *img, int k2, int **distance)
{
for(int i = 0; i < num_images; i++)
{
image[i] = img[i].data;
dst[i] = distance[i];
}
n2 = k2;
width = img[0].cols;
height = img[0].rows;
im_num = num_images;
stride_ = (int)img[0].step;
}
void operator()(const cv::Range &r) const {
for (int i = r.start; i <= r.end ; i++)
{
int rWidth = i * width;
int rWidth = i * stride_;
for (int j = n2; j <= width - n2; j++)
{
if (type == SSE)
{
//to do
}
else
for(int d = 0 ; d < im_num; d++)
{
int c = 0;
for (int ii = i - n2; ii <= i + n2; ii++)
for(int step = 4; step > 0; step--)
{
int rrWidth = ii * width;
for (int jj = j - n2; jj <= j + n2; jj++)
for (int ii = i - step; ii <= i + step; ii += step)
{
if (ii != i || jj != j)
int rrWidth = ii * stride_;
for (int jj = j - step; jj <= j + step; jj += step)
{
if (image[(rrWidth + jj)] > image[(rWidth + j)])
if (image[d][rrWidth + jj] > image[d][rWidth + j])
{
c = c + 1;
}
......@@ -249,89 +315,137 @@ namespace cv
}
}
}
dst[(rWidth + j)] = c;
for (int ii = -1; ii <= +1; ii++)
{
int rrWidth = (ii + i) * stride_;
if (i == -1)
{
if (ii + i != i)
{
if (image[d][rrWidth + j] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
else if (i == 0)
{
for (int j2 = -1; j2 <= 1; j2 += 2)
{
if (ii + i != i)
{
if (image[d][rrWidth + j + j2] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
}
else
{
if (ii + i != i)
{
if (image[d][rrWidth + j] > image[d][rWidth + j])
{
c = c + 1;
}
c = c * 2;
}
}
}
dst[d][rWidth + j] = c;
}
}
}
}
};
//!paralel implementation of the center symetric census
class parallelSymetricCensus:public ParallelLoopBody
template <int num_images>
class SymetricCensus:public ParallelLoopBody
{
private:
uint8_t *image1, *image2;
int *dst1, *dst2;
int n2, width, height, type;
uint8_t *image[num_images];
int *dst[num_images];
int n2, width, height, im_num,stride_;
public:
parallelSymetricCensus(uint8_t * img1, uint8_t * img2, int w, int h, int k2, int * distance1, int * distance2,const int t) :
image1(img1), image2(img2), dst1(distance1), dst2(distance2), n2(k2), width(w), height(h), type(t){}
SymetricCensus(const cv::Mat *img, int k2, int **distance)
{
for(int i = 0; i < num_images; i++)
{
image[i] = img[i].data;
dst[i] = distance[i];
}
n2 = k2;
width = img[0].cols;
height = img[0].rows;
im_num = num_images;
stride_ = (int)img[0].step;
}
void operator()(const cv::Range &r) const {
for (int i = r.start; i <= r.end ; i++)
{
int distV = (i)* width;
int distV = i*stride_;
for (int j = n2; j <= width - n2; j++)
{
int c = 0;
int c2 = 0;
//the classic center symetric census which compares the curent pixel with its symetric not its center.
for (int ii = -n2; ii < 0; ii++)
for(int d = 0; d < im_num; d++)
{
int rrWidth = (ii + i) * width;
for (int jj = -n2; jj <= +n2; jj++)
int c = 0;
//the classic center symetric census which compares the curent pixel with its symetric not its center.
for (int ii = -n2; ii <= 0; ii++)
{
if (image1[(rrWidth + (jj + j))] > image1[((ii * (-1) + i) * width + (-1 * jj) + j)])
{
c = c + 1;
}
c = c * 2;
if (image2[(rrWidth + (jj + j))] > image2[((ii * (-1) + i) * width + (-1 * jj) + j)])
int rrWidth = (ii + i) * stride_;
for (int jj = -n2; jj <= +n2; jj++)
{
c2 = c2 + 1;
if (image[d][(rrWidth + (jj + j))] > image[d][((ii * (-1) + i) * width + (-1 * jj) + j)])
{
c = c + 1;
}
c = c * 2;
if(ii == 0 && jj < 0)
{
if (image[d][(i * width + (jj + j))] > image[d][(i * width + (-1 * jj) + j)])
{
c = c + 1;
}
c = c * 2;
}
}
c2 = c2 * 2;
}
dst[d][(distV + j)] = c;
}
for (int jj = -n2; jj < 0; jj++)
{
if (image1[(i * width + (jj + j))] > image1[(i * width + (-1 * jj) + j)])
{
c = c + 1;
}
c = c * 2;
if (image2[(i * width + (jj + j))] > image2[(i * width + (-1 * jj) + j)])
{
c2 = c2 + 1;
}
c2 = c2 * 2;
}//a modified version of cs census which compares each pixel with its correspondent from
//the same distance from the center
dst1[(distV + j)] = c;
dst2[(distV + j)] = c2;
}
}
}
};
//!Implementation for computing the Census transform on the given image
void applyCensusOnImage(const cv::Mat &img, int kernelSize, cv::Mat &dist, const int type);
/**
Two variations of census applied on input images
Implementation of a census transform which is taking into account just the some pixels from the census kernel thus allowing for larger block sizes
**/
void applyCensusOnImages(const cv::Mat &im1,const cv::Mat &im2, int kernelSize, cv::Mat &dist, cv::Mat &dist2, const int type);
//void applyCensusOnImages(const cv::Mat &im1,const cv::Mat &im2, int kernelSize, cv::Mat &dist, cv::Mat &dist2, const int type);
void censusTransform(const cv::Mat &image1, const cv::Mat &image2, int kernelSize, cv::Mat &dist1, cv::Mat &dist2, const int type);
//single image census transform
void censusTransform(const cv::Mat &image1, int kernelSize, cv::Mat &dist1, const int type);
/**
STANDARD_MCT - Modified census which is memorizing for each pixel 2 bits and includes a tolerance to the pixel comparison
MCT_MEAN_VARIATION - Implementation of a modified census transform which is also taking into account the variation to the mean of the window not just the center pixel
**/
void applyMCTOnImages(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, int t, cv::Mat &dist, cv::Mat &dist2, const int type);
void modifiedCensusTransform(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist1,cv::Mat &dist2, const int type, int t = 0 , const cv::Mat &IntegralImage1 = cv::Mat::zeros(100,100,CV_8UC1), const cv::Mat &IntegralImage2 = cv::Mat::zeros(100,100,CV_8UC1));
//single version of modified census transform descriptor
void modifiedCensusTransform(const cv::Mat &img1, int kernelSize, cv::Mat &dist, const int type, int t = 0 ,const cv::Mat &IntegralImage = cv::Mat::zeros(100,100,CV_8UC1));
/**The classical center symetric census
A modified version of cs census which is comparing a pixel with its correspondent after the center
**/
void applySimetricCensus(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist, cv::Mat &dist2, const int type);
//!brief binary descriptor used in stereo correspondence
void applyBrifeDescriptor(const cv::Mat &image1, const cv::Mat &image2, int kernelSize, cv::Mat &dist, cv::Mat &dist2);
//The classical Rank Transform
void applyRTDescriptor(const cv::Mat &image1, const cv::Mat &image2, int kernelSize, cv::Mat &dist, cv::Mat &dist2);
void symetricCensusTransform(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist1, cv::Mat &dist2, const int type);
//single version of census transform
void symetricCensusTransform(const cv::Mat &img1, int kernelSize, cv::Mat &dist1, const int type);
//in a 9x9 kernel only certain positions are choosen
void starCensusTransform(const cv::Mat &img1, const cv::Mat &img2, int kernelSize, cv::Mat &dist1,cv::Mat &dist2);
//single image version of star kernel
void starCensusTransform(const cv::Mat &img1, int kernelSize, cv::Mat &dist);
//integral image computation used in the Mean Variation Census Transform
void imageMeanKernelSize(const cv::Mat &img, int windowSize, cv::Mat &c);
}
}
#endif
......
//By downloading, copying, installing or using the software you agree to this license.
//If you do not agree to this license, do not download, install,
//copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
// (3-clause BSD License)
//
//Copyright (C) 2000-2015, Intel Corporation, all rights reserved.
//Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
//Copyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.
//Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
//Copyright (C) 2015, OpenCV Foundation, all rights reserved.
//Copyright (C) 2015, Itseez Inc., all rights reserved.
//Third party copyrights are property of their respective owners.
//
//Redistribution and use in source and binary forms, with or without modification,
//are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the names of the copyright holders nor the names of the contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
//This software is provided by the copyright holders and contributors "as is" and
//any express or implied warranties, including, but not limited to, the implied
//warranties of merchantability and fitness for a particular purpose are disclaimed.
//In no event shall copyright holders or contributors be liable for any direct,
//indirect, incidental, special, exemplary, or consequential damages
//(including, but not limited to, procurement of substitute goods or services;
//loss of use, data, or profits; or business interruption) however caused
//and on any theory of liability, whether in contract, strict liability,
//or tort (including negligence or otherwise) arising in any way out of
//the use of this software, even if advised of the possibility of such damage.
#include "precomp.hpp"
#include "matching.hpp"
namespace cv
{
namespace stereo
{
//null constructor - members stay uninitialized; callers are expected to use the
//parameterized constructor or the setters before matching
Matching::Matching(void)
{
}
//empty destructor - the class releases no resources of its own
Matching::~Matching(void)
{
}
//constructor for the matching class
//maxDisp    - maximum disparity (must exceed 10)
//scalling   - non-zero factor applied when scaling the disparity output
//confidence - threshold (>= 1) used by the confidence check when picking the minimum cost
Matching::Matching(int maxDisp, int scalling, int confidence)
{
    CV_Assert(maxDisp > 10);
    CV_Assert(scalling != 0);
    CV_Assert(confidence >= 1);
    scallingFactor = scalling;
    maxDisparity = maxDisp;
    confidenceCheck = confidence;
    //precompute the Hamming weight lookup table, used when SSE popcount is unavailable
    hammingLut();
}
//!method for setting the maximum disparity
void Matching::setMaxDisparity(int val)
{
CV_Assert(val > 10);
this->maxDisparity = val;
}
//!method for getting the disparity
int Matching::getMaxDisparity()
{
return this->maxDisparity;
}
void Matching::setScallingFactor(int val)
{
CV_Assert(val > 0);
scallingFactor = val;
}
//!method for getting the scalling factor
int Matching::getScallingFactor()
{
return scallingFactor;
}
//! Hamming distance computation method
//! leftImage and rightImage are the two transformed images
//! the cost is the resulted cost volume and kernel Size is the size of the matching window
void Matching::hammingDistanceBlockMatching(const Mat &leftImage, const Mat &rightImage, Mat &cost, const int kernelSize)
{
CV_Assert(leftImage.cols == rightImage.cols);
CV_Assert(leftImage.rows == rightImage.rows);
CV_Assert(kernelSize % 2 != 0);
CV_Assert(cost.rows == leftImage.rows);
//the cost volume packs (maxDisparity + 1) entries per pixel along the column direction
CV_Assert(cost.cols / (maxDisparity + 1) == leftImage.cols);
// cost.setTo(0);
int *c = (int *)cost.data;
//clear the whole volume: rows * cols * (maxDisparity + 1) int entries
memset(c, 0, sizeof(c[0]) * leftImage.cols * leftImage.rows * (maxDisparity + 1));
//only rows where the full kernel fits are processed; hamLut provides the popcount fallback
parallel_for_(cv::Range(kernelSize / 2,leftImage.rows - kernelSize / 2), hammingDistance(leftImage,rightImage,c,maxDisparity,kernelSize / 2,hamLut));
}
//preprocessing the cost volume in order to get it ready for aggregation
//horizontal prefix sums are computed in parallel, then the vertical accumulation
//runs sequentially because each row depends on the previous one
void Matching::costGathering(const Mat &hammingDistanceCost, Mat &cost)
{
    //fixed: the original assert compared hammingDistanceCost.rows with itself and
    //could never fail; the intent is that input and output have matching row counts
    CV_Assert(hammingDistanceCost.rows == cost.rows);
    CV_Assert(hammingDistanceCost.type() == CV_32SC4);
    CV_Assert(cost.type() == CV_32SC4);
    int maxDisp = maxDisparity;
    int width = cost.cols / ( maxDisp + 1) - 1;
    int height = cost.rows - 1;
    int *c = (int *)cost.data;
    //clear the full (width + 1) x (height + 1) x (maxDisp + 1) volume before accumulating
    memset(c, 0, sizeof(c[0]) * (width + 1) * (height + 1) * (maxDisp + 1));
    parallel_for_(cv::Range(1,height), costGatheringHorizontal(hammingDistanceCost,maxDisparity,cost));
    for (int i = 1; i <= height; i++)
    {
        for (int j = 1; j <= width; j++)
        {
            int iwj = (i * width + j) * (maxDisp + 1);
            int iwjmu = ((i - 1) * width + j) * (maxDisp + 1);
            for (int d = 0; d <= maxDisp; d++)
            {
                c[iwj + d] += c[iwjmu + d];
            }
        }
    }
}
//!The aggregation on the cost volume
//!The aggregation on the cost volume: sums the per-pixel costs over a
//!windowSize x windowSize neighbourhood using the integral volume
void Matching::blockAgregation(const Mat &partialSums, int windowSize, Mat &cost)
{
    CV_Assert(windowSize % 2 != 0);
    CV_Assert(partialSums.rows == cost.rows);
    CV_Assert(partialSums.cols == cost.cols);
    int win = windowSize / 2;
    int *c = (int *)cost.data;
    int maxDisp = maxDisparity;
    int width = cost.cols / ( maxDisp + 1) - 1;
    int height = cost.rows - 1;
    // zero the FULL buffer: it spans (width + 1) x (height + 1) cells, exactly
    // as in costGathering; the previous extent of width * height left a stale
    // tail unzeroed.  size_t arithmetic avoids int overflow of the byte count.
    memset(c, 0, sizeof(c[0]) * (size_t)(width + 1) * (height + 1) * (maxDisp + 1));
    parallel_for_(cv::Range(win + 1,height - win - 1), agregateCost(partialSums,windowSize,maxDisp,cost));
}
//!Finding the correct disparity from the cost volume, we also make a confidence check
//! Scans all candidate disparities for one pixel and returns the index of the
//! smallest cost, or -1 when the third-best cost is too close to the best
//! (confidence check fails).  search_region selects vertical (0) or
//! diagonal (1) traversal of the cost volume.
int Matching::minim(int *c, int iwpj, int widthDisp, const double confidenceCheck, const int search_region)
{
    // track the three smallest costs seen so far
    double best = DBL_MAX, second = DBL_MAX, third = DBL_MAX;
    int bestIdx = 0;
    const int stride = widthDisp;         // cells per pixel in the volume
    const int lastDisp = widthDisp - 1;   // highest disparity candidate
    for (int d = 0; d <= lastDisp; d++)
    {
        const int val = c[(iwpj + d * search_region) * stride + d];
        if (val < best)
        {
            third = second;
            second = best;
            best = val;
            bestIdx = d;
        }
        else if (val < second)
        {
            third = second;
            second = val;
        }
        else if (val < third)
        {
            third = val;
        }
    }
    // accept the minimum only when it is distinct enough from the third-best
    if (third / best <= confidenceCheck)
        return bestIdx;
    return -1;
}
//!Interpolate in order to obtain better results
//! Refines a winning disparity to sub-pixel precision by fitting a symmetric
//! V through the cost at winDisp and its two neighbours.  Returns the refined
//! disparity, or the raw offset when it falls outside [-0.5, 0.5].
double Matching::symetricVInterpolation(int *c, int iwjp, int widthDisp, int winDisp, const int search_region)
{
    // at the ends of the disparity range there is no neighbour to fit against
    if (winDisp == 0 || winDisp == widthDisp - 1)
        return winDisp;
    // costs at the winner and at its two neighbouring disparities
    const double atPrev = c[(iwjp + (winDisp - 1) * search_region) * widthDisp + winDisp - 1];
    const double atNext = c[(iwjp + (winDisp + 1) * search_region)* widthDisp + winDisp + 1];
    const double atWin = c[(iwjp + winDisp * search_region) * widthDisp + winDisp];
    const double dPrev = atPrev - atWin;
    const double dNext = atNext - atWin;
    // a flat neighbour gives no slope information
    if (dPrev == 0 || dNext == 0)
        return winDisp;
    double offset = 0;
    if (atPrev > atNext)
    {
        offset = (0.5 - 0.25 * ((dNext * dNext) / (dPrev * dPrev) + (dNext / dPrev)));
    }
    else
    {
        offset = -1 * (0.5 - 0.25 * ((dPrev * dPrev) / (dNext * dNext) + (dPrev / dNext)));
    }
    if (offset >= -0.5 && offset <= 0.5)
        offset = winDisp + offset;
    return offset;
}
//!Generate the hamming LUT
void Matching::hammingLut()
{
    // entry n of the table holds popcount(n); the bits are cleared one at a
    // time with Kernighan's v &= v - 1 trick
    for (int n = 0; n <= 65536; n++)
    {
        int bits = 0;
        for (int v = n; v != 0; v = v & (v - 1))
        {
            bits++;
        }
        hamLut[n] = bits;
    }
}
//!remove small regions that have an area smaller than t, we fill the region with the average of the good pixels around it
void Matching::smallRegionRemoval(const Mat &currentMap, int t, Mat &out)
{
    // A BFS flood fill collects each connected region of zero-valued pixels;
    // regions of at most t pixels are filled with the average of the valid
    // (value in (2, 250)) pixels found on their border, larger ones are left 0.
    CV_Assert(currentMap.cols == out.cols);
    CV_Assert(currentMap.rows == out.rows);
    CV_Assert(t > 0);
    // clear the visited flags
    // NOTE(review): only 2000000 of the 2000001 entries of pus are zeroed here
    memset(pus, 0, 2000000 * sizeof(pus[0]));
    // NOTE(review): labels are written into currentMap's pixel buffer below
    // (map[iw + j] = k), so the input is mutated despite the const reference
    uint8_t *map = currentMap.data;
    uint8_t *outputMap = out.data;
    int height = currentMap.rows;
    int width = currentMap.cols;
    uint8_t k = 1;   // label stored into visited speckle pixels (never changes)
    int st, dr;      // BFS queue head (st) and tail (dr) into specklePointX/Y
    int di[] = { -1, -1, -1, 0, 1, 1, 1, 0 },
        dj[] = { -1, 0, 1, 1, 1, 0, -1, -1 };
    int speckle_size = 0;  // queue position where the current region starts
    st = 0;
    dr = 0;
    for (int i = 1; i < height - 1; i++)
    {
        int iw = i * width;
        for (int j = 1; j < width - 1; j++)
        {
            if (map[iw + j] != 0)
            {
                // valid disparity: copy it through unchanged
                outputMap[iw + j] = map[iw + j];
            }
            else if (map[iw + j] == 0)
            {
                int nr = 1;   // number of contributing border pixels
                int avg = 0;  // running sum of the valid border pixel values
                speckle_size = dr;
                specklePointX[dr] = i;
                specklePointY[dr] = j;
                pus[i * width + j] = 1;
                dr++;
                map[iw + j] = k;
                // breadth-first expansion of the zero-valued region
                while (st < dr)
                {
                    int ii = specklePointX[st];
                    int jj = specklePointY[st];
                    //going on 8 directions
                    for (int d = 0; d < 8; d++)
                    {//if inside the image and not yet visited
                        if (ii + di[d] >= 0 && ii + di[d] < height && jj + dj[d] >= 0 && jj + dj[d] < width &&
                            pus[(ii + di[d]) * width + jj + dj[d]] == 0)
                        {
                            int val = map[(ii + di[d]) * width + jj + dj[d]];
                            if (val == 0)
                            {
                                // still inside the hole: label it and enqueue it
                                map[(ii + di[d]) * width + jj + dj[d]] = k;
                                specklePointX[dr] = (ii + di[d]);
                                specklePointY[dr] = (jj + dj[d]);
                                dr++;
                                pus[(ii + di[d]) * width + jj + dj[d]] = 1;
                            }//this means that my point is a good point to be used in computing the final filling value
                            else if (val > 2 && val < 250)
                            {
                                avg += val;
                                nr++;
                            }
                        }
                    }
                    st++;
                }//if hole size is smaller than a specified threshold we fill the respective hole with the average of the good neighbours
                if (st - speckle_size <= t)
                {
                    uint8_t fillValue = (uint8_t)(avg / nr);
                    while (speckle_size < st)
                    {
                        int ii = specklePointX[speckle_size];
                        int jj = specklePointY[speckle_size];
                        outputMap[ii * width + jj] = fillValue;
                        speckle_size++;
                    }
                }
            }
        }
    }
}
//!setting the confidence to a certain value
void Matching ::setConfidence(double val)
{
CV_Assert(val >= 1);
confidenceCheck = val;
}
//getter for confidence check
double Matching ::getConfidence()
{
return confidenceCheck;
}
//!Method responsible for generating the disparity map
//!Method responsible for generating the disparity map from the aggregated
//!cost volume: per-row winner selection with left-right consistency check,
//!sub-pixel refinement and scaling.  th is the LR agreement threshold.
void Matching::dispartyMapFormation(const Mat &costVolume, Mat &mapFinal, int th)
{
    uint8_t *map = mapFinal.data;
    int disparity = maxDisparity;
    int width = costVolume.cols / ( disparity + 1) - 1;
    int height = costVolume.rows - 1;
    // size_t arithmetic so the byte count cannot overflow int for large maps
    memset(map, 0, sizeof(map[0]) * (size_t)width * height);
    parallel_for_(Range(0,height - 1), makeMap(costVolume,th,disparity,confidenceCheck,scallingFactor,mapFinal));
}
//!1x9 median filter
void Matching::Median1x9Filter(const Mat &originalMap, Mat &map)
{
    // source and destination maps must have identical dimensions
    CV_Assert(originalMap.cols == map.cols);
    CV_Assert(originalMap.rows == map.rows);
    parallel_for_(Range(1,originalMap.rows - 2), Median1x9(originalMap,map));
}
//!9x1 median filter
void Matching::Median9x1Filter(const Mat &originalMap, Mat &map)
{
CV_Assert(originalMap.cols == map.cols);
CV_Assert(originalMap.cols == map.cols);
parallel_for_(Range(1,originalMap.cols - 2), Median9x1(originalMap,map));
}
}
}
//By downloading, copying, installing or using the software you agree to this license.
//If you do not agree to this license, do not download, install,
//copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
// (3-clause BSD License)
//
//Copyright (C) 2000-2015, Intel Corporation, all rights reserved.
//Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
//Copyright (C) 2009-2015, NVIDIA Corporation, all rights reserved.
//Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
//Copyright (C) 2015, OpenCV Foundation, all rights reserved.
//Copyright (C) 2015, Itseez Inc., all rights reserved.
//Third party copyrights are property of their respective owners.
//
//Redistribution and use in source and binary forms, with or without modification,
//are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the names of the copyright holders nor the names of the contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
//This software is provided by the copyright holders and contributors "as is" and
//any express or implied warranties, including, but not limited to, the implied
//warranties of merchantability and fitness for a particular purpose are disclaimed.
//In no event shall copyright holders or contributors be liable for any direct,
//indirect, incidental, special, exemplary, or consequential damages
//(including, but not limited to, procurement of substitute goods or services;
//loss of use, data, or profits; or business interruption) however caused
//and on any theory of liability, whether in contract, strict liability,
//or tort (including negligence or otherwise) arising in any way out of
//the use of this software, even if advised of the possibility of such damage.
/*****************************************************************************************************************\
* The interface contains the main methods for computing the matching between the left and right images *
* *
\******************************************************************************************************************/
#include "precomp.hpp"
#include <stdint.h>
#ifndef _OPENCV_MATCHING_HPP_
#define _OPENCV_MATCHING_HPP_
#ifdef __cplusplus
namespace cv
{
namespace stereo
{
class Matching
{
private:
    //!arrays used in the small region removal: BFS queue coordinates and visited flags
    int specklePointX[1000001];
    int specklePointY[1000001];
    long long pus[2000001];
    //!The maximum disparity
    int maxDisparity;
    //!the factor by which we are multiplying the disparity in the output map
    int scallingFactor;
    //!the confidence to which a min disparity found is good or not
    double confidenceCheck;
    //!the LUT used in case SSE is not available (popcount of every 16-bit value)
    int hamLut[65537];
    //!function for refining the disparity at sub pixel using symmetric v interpolation
    static double symetricVInterpolation(int *c, int iwjp, int widthDisp, int winDisp,const int search_region);
    //!function used for getting the minimum disparity from the cost volume; returns -1 when the confidence check fails
    static int minim(int *c, int iwpj, int widthDisp,const double confidenceCheck, const int search_region);
    //!a pre processing function that generates the Hamming LUT in case the algorithm will ever be used on a platform where SSE is not available
    void hammingLut();
    //!the class used in computing the hamming distance between census-transformed pixels
    class hammingDistance : public ParallelLoopBody
    {
    private:
        int *left, *right, *c;       // census images and output cost volume
        int v,kernelSize, width, height;  // v is the maximum disparity
        int MASK;                    // 0xFFFF, selects the low 16 bits for the LUT
        int *hammLut;
    public :
        hammingDistance(const Mat &leftImage, const Mat &rightImage, int *cost, int maxDisp, int kerSize, int *hammingLUT):
            left((int *)leftImage.data), right((int *)rightImage.data), c(cost), v(maxDisp),kernelSize(kerSize),width(leftImage.cols), height(leftImage.rows), MASK(65535), hammLut(hammingLUT){}
        //fills c[pixel * (v + 1) + d] with popcount(left ^ right shifted by d) for the rows in r
        //NOTE(review): cv::Range is conventionally half-open, yet the loop uses i <= r.end;
        //the same pattern recurs in every ParallelLoopBody below — confirm it is intended
        void operator()(const cv::Range &r) const {
            for (int i = r.start; i <= r.end ; i++)
            {
                int iw = i * width;
                for (int j = kernelSize; j < width - kernelSize; j++)
                {
                    int j2;
                    int xorul;
                    int iwj;
                    iwj = iw + j;
                    for (int d = 0; d <= v; d++)
                    {
                        //clamp the right-image column at 0 for columns smaller than d
                        j2 = (0 > j - d) ? (0) : (j - d);
                        xorul = left[(iwj)] ^ right[(iw + j2)];
#if CV_SSE4_1
                        c[(iwj)* (v + 1) + d] = _mm_popcnt_u32(xorul);
#else
                        //count the set bits via the LUT, 16 bits at a time
                        c[(iwj)* (v + 1) + d] = hammLut[xorul & MASK] + hammLut[xorul >> 16];
#endif
                    }
                }
            }
        }
    };
    //!preprocessing used for aggregation: horizontal prefix sums of the cost volume
    class costGatheringHorizontal:public ParallelLoopBody
    {
    private:
        int *c, *ham;   // output partial sums and input per-pixel cost
        int width, maxDisp;
    public:
        costGatheringHorizontal(const Mat &hamimg,const int maxDispa, Mat &output)
        {
            ham = (int *)hamimg.data;
            c = (int *)output.data;
            maxDisp = maxDispa;
            width = output.cols / ( maxDisp + 1) - 1;
        }
        //for every row in r accumulates the cost from left to right, per disparity
        void operator()(const cv::Range &r) const {
            for (int i = r.start; i <= r.end; i++)
            {
                int iw = i * width;
                int iwi = (i - 1) * width;
                for (int j = 1; j <= width; j++)
                {
                    int iwj = (iw + j) * (maxDisp + 1);
                    int iwjmu = (iw + j - 1) * (maxDisp + 1);
                    int iwijmu = (iwi + j - 1) * (maxDisp + 1);
                    for (int d = 0; d <= maxDisp; d++)
                    {
                        c[iwj + d] = ham[iwijmu + d] + c[iwjmu + d];
                    }
                }
            }
        }
    };
    //!cost aggregation: box sums over the matching window via the integral volume
    class agregateCost:public ParallelLoopBody
    {
    private:
        int win;            // half of the aggregation window
        int *c, *parSum;    // output aggregated cost and input partial sums
        int maxDisp,width, height;
    public:
        agregateCost(const Mat &partialSums, int windowSize, int maxDispa, Mat &cost)
        {
            win = windowSize / 2;
            c = (int *)cost.data;
            maxDisp = maxDispa;
            width = cost.cols / ( maxDisp + 1) - 1;
            height = cost.rows - 1;
            parSum = (int *)partialSums.data;
        }
        //window sum = sum of the four corner entries of the integral volume
        void operator()(const cv::Range &r) const {
            for (int i = r.start; i <= r.end; i++)
            {
                int iwi = (i - 1) * width;
                for (int j = win + 1; j <= width - win - 1; j++)
                {
                    //the four corners of the aggregation window in the integral volume
                    int w1 = ((i + win + 1) * width + j + win) * (maxDisp + 1);
                    int w2 = ((i - win) * width + j - win - 1) * (maxDisp + 1);
                    int w3 = ((i + win + 1) * width + j - win - 1) * (maxDisp + 1);
                    int w4 = ((i - win) * width + j + win) * (maxDisp + 1);
                    int w = (iwi + j - 1) * (maxDisp + 1);
                    for (int d = 0; d <= maxDisp; d++)
                    {
                        c[w + d] = parSum[w1 + d] + parSum[w2 + d]
                        - parSum[w3 + d] - parSum[w4 + d];
                    }
                }
            }
        }
    };
    //!class that is responsible for generating the disparity map
    class makeMap:public ParallelLoopBody
    {
    private:
        //enum used to notify whether we are searching on the vertical ie (lr) or diagonal (rl)
        enum {CV_VERTICAL_SEARCH, CV_DIAGONAL_SEARCH};
        int width,disparity,scallingFact,th;
        double confCheck;
        uint8_t *map;
        int *c;
    public:
        makeMap(const Mat &costVolume, int threshold, int maxDisp, double confidence,int scale, Mat &mapFinal)
        {
            c = (int *)costVolume.data;
            map = mapFinal.data;
            disparity = maxDisp;
            width = costVolume.cols / ( disparity + 1) - 1;
            th = threshold;
            scallingFact = scale;
            confCheck = confidence;
        }
        //winner selection with left-right consistency check and sub-pixel refinement
        void operator()(const cv::Range &r) const {
            for (int i = r.start; i <= r.end ; i++)
            {
                int lr;
                int v = -1;
                double p1, p2;
                int iw = i * width;
                for (int j = 0; j < width; j++)
                {
                    //winning disparity as seen from the left image
                    lr = Matching:: minim(c, iw + j, disparity + 1, confCheck,CV_VERTICAL_SEARCH);
                    if (lr != -1)
                    {
                        //winning disparity as seen from the right image
                        v = Matching::minim(c, iw + j - lr, disparity + 1, confCheck,CV_DIAGONAL_SEARCH);
                        if (v != -1)
                        {
                            p1 = Matching::symetricVInterpolation(c, iw + j - lr, disparity + 1, v,CV_DIAGONAL_SEARCH);
                            p2 = Matching::symetricVInterpolation(c, iw + j, disparity + 1, lr,CV_VERTICAL_SEARCH);
                            //keep the pixel only when both refined estimates agree within th
                            if (abs(p1 - p2) <= th)
                                map[iw + j] = (uint8_t)((p2)* scallingFact);
                            else
                            {
                                map[iw + j] = 0;
                            }
                        }
                        else
                        {
                            //near the right border the diagonal search cannot succeed
                            if (width - j <= disparity)
                            {
                                p2 = Matching::symetricVInterpolation(c, iw + j, disparity + 1, lr,CV_VERTICAL_SEARCH);
                                map[iw + j] = (uint8_t)(p2* scallingFact);
                            }
                        }
                    }
                    else
                    {
                        map[iw + j] = 0;
                    }
                }
            }
        }
    };
    //!median 1x9 parallelized filter
    class Median1x9:public ParallelLoopBody
    {
    private:
        uint8_t *harta;        // source map ("harta" = map)
        uint8_t *mapModified;  // destination map
        int height, width;
    public:
        Median1x9(const Mat &hartaOriginala, Mat &map)
        {
            harta = hartaOriginala.data;
            mapModified = map.data;
            height = hartaOriginala.rows;
            width = hartaOriginala.cols;
        }
        //horizontal 9-tap median per row; a partial selection sort orders the
        //first five elements, the fifth of which is the median
        void operator()(const cv::Range &r) const{
            for (int m = r.start; m <= r.end; m++)
            {
                for (int n = 4; n < width - 4; ++n)
                {
                    int k = 0;
                    uint8_t window[9];
                    for (int i = n - 4; i <= n + 4; ++i)
                        window[k++] = harta[m * width + i];
                    for (int j = 0; j < 5; ++j)
                    {
                        int min = j;
                        for (int l = j + 1; l < 9; ++l)
                            if (window[l] < window[min])
                                min = l;
                        const uint8_t temp = window[j];
                        window[j] = window[min];
                        window[min] = temp;
                    }
                    mapModified[m * width + n] = window[4];
                }
            }
        }
    };
    //!median 9x1 parallelized filter
    class Median9x1:public ParallelLoopBody
    {
    private:
        uint8_t *harta;        // source map
        uint8_t *mapModified;  // destination map
        int height, width;
    public:
        Median9x1(const Mat &hartaOriginala, Mat &map)
        {
            harta = hartaOriginala.data;
            mapModified = map.data;
            height = hartaOriginala.rows;
            width = hartaOriginala.cols;
        }
        //vertical 9-tap median per column; same partial selection sort as Median1x9
        void operator()(const Range &r) const{
            for (int n = r.start; n <= r.end; ++n)
            {
                for (int m = 4; m < height - 4; ++m)
                {
                    int k = 0;
                    uint8_t window[9];
                    for (int i = m - 4; i <= m + 4; ++i)
                        window[k++] = harta[i * width + n];
                    for (int j = 0; j < 5; j++)
                    {
                        int min = j;
                        for (int l = j + 1; l < 9; ++l)
                            if (window[l] < window[min])
                                min = l;
                        const uint8_t temp = window[j];
                        window[j] = window[min];
                        window[min] = temp;
                    }
                    mapModified[m * width + n] = window[4];
                }
            }
        }
    };
public:
    //!method for setting the maximum disparity
    void setMaxDisparity(int val);
    //!method for getting the disparity
    int getMaxDisparity();
    //!method for setting the scalling factor
    void setScallingFactor(int val);
    //!method for getting the scalling factor
    int getScallingFactor();
    //!setter for the confidence check
    void setConfidence(double val);
    //!getter for confidence check
    double getConfidence();
    //!method for computing the hamming difference
    void hammingDistanceBlockMatching(const Mat &left, const Mat &right, Mat &c, const int kernelSize = 9);
    //!precomputation done on the cost volume to efficiently compute the block matching
    void costGathering(const Mat &hammingDistanceCost, Mat &c);
    //!the aggregation on the cost volume
    void blockAgregation(const Mat &parSum, int windowSize, Mat &c);
    //!function for generating disparity maps at sub pixel level
    /* costVolume - represents the cost volume
    * width, height - represent the width and height of the image
    *disparity - represents the maximum disparity
    *map - is the disparity map that will result
    *th - is the LR threshold
    */
    void dispartyMapFormation(const Mat &costVolume, Mat &map, int th);
    //!constructor for the matching class
    //!maxDisp - represents the maximum disparity
    //!a median filter that has proven to work a bit better especially when applied on disparity maps
    static void Median1x9Filter(const Mat &hartaOriginala, Mat &map);
    static void Median9x1Filter(const Mat &hartaOriginala, Mat &map);
    //!fills regions smaller than t pixels with the average of their valid neighbours
    void smallRegionRemoval(const Mat &harta, int t, Mat &out);
    Matching(int maxDisp, int scallingFactor = 4,int confidenceCheck = 6);
    Matching(void);
    ~Matching(void);
};
}
}
#endif
#endif
/*End of file*/
\ No newline at end of file
......@@ -47,816 +47,788 @@ by Heiko Hirschmuller.
We match blocks rather than individual pixels, thus the algorithm is called
SGBM (Semi-global block matching)
*/
#include "precomp.hpp"
#include <limits.h>
namespace cv
{
namespace stereo
{
typedef uchar PixType;
typedef short CostType;
typedef short DispType;
enum { NR = 16, NR2 = NR/2 };
//! Plain parameter bag for the binary SGBM matcher.
struct StereoBinarySGBMParams
{
    //! default-constructs with every numeric parameter zeroed and MODE_SGBM
    StereoBinarySGBMParams()
    {
        minDisparity = numDisparities = 0;
        SADWindowSize = 0;
        P1 = P2 = 0;
        disp12MaxDiff = 0;
        preFilterCap = 0;
        uniquenessRatio = 0;
        speckleWindowSize = 0;
        speckleRange = 0;
        mode = StereoBinarySGBM::MODE_SGBM;
    }
    //! constructs with explicit values for every parameter
    StereoBinarySGBMParams( int _minDisparity, int _numDisparities, int _SADWindowSize,
        int _P1, int _P2, int _disp12MaxDiff, int _preFilterCap,
        int _uniquenessRatio, int _speckleWindowSize, int _speckleRange,
        int _mode )
    {
        minDisparity = _minDisparity;
        numDisparities = _numDisparities;
        SADWindowSize = _SADWindowSize;
        P1 = _P1;
        P2 = _P2;
        disp12MaxDiff = _disp12MaxDiff;
        preFilterCap = _preFilterCap;
        uniquenessRatio = _uniquenessRatio;
        speckleWindowSize = _speckleWindowSize;
        speckleRange = _speckleRange;
        mode = _mode;
    }
    int minDisparity;       //!< minimum possible disparity value
    int numDisparities;     //!< number of disparity candidates searched
    int SADWindowSize;      //!< matching block size
    int preFilterCap;       //!< truncation value for the prefiltered pixels
    int uniquenessRatio;    //!< margin (%) by which the best cost must beat the second best
    int P1;                 //!< penalty for disparity changes of 1 between neighbours
    int P2;                 //!< penalty for larger disparity changes (P2 > P1)
    int speckleWindowSize;  //!< maximum size of smooth regions considered speckles
    int speckleRange;       //!< maximum disparity variation within a connected component
    int disp12MaxDiff;      //!< maximum allowed left-right disparity difference
    int mode;               //!< algorithm variant, e.g. StereoBinarySGBM::MODE_SGBM
};
/*
For each pixel row1[x], max(-maxD, 0) <= minX <= x < maxX <= width - max(0, -minD),
and for each disparity minD<=d<maxD the function
computes the cost (cost[(x-minX)*(maxD - minD) + (d - minD)]), depending on the difference between
row1[x] and row2[x-d]. The subpixel algorithm from
"Depth Discontinuities by Pixel-to-Pixel Stereo" by Stan Birchfield and C. Tomasi
is used, hence the suffix BT.
the temporary buffer should contain width2*2 elements
*/
static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int minD, int maxD, CostType* cost,
PixType* buffer, const PixType* tab,
int tabOfs, int )
{
int x, c, width = img1.cols, cn = img1.channels();
int minX1 = std::max(-maxD, 0), maxX1 = width + std::min(minD, 0);
int minX2 = std::max(minX1 - maxD, 0), maxX2 = std::min(maxX1 - minD, width);
int D = maxD - minD, width1 = maxX1 - minX1, width2 = maxX2 - minX2;
const PixType *row1 = img1.ptr<PixType>(y), *row2 = img2.ptr<PixType>(y);
PixType *prow1 = buffer + width2*2, *prow2 = prow1 + width*cn*2;
tab += tabOfs;
for( c = 0; c < cn*2; c++ )
{
prow1[width*c] = prow1[width*c + width-1] =
prow2[width*c] = prow2[width*c + width-1] = tab[0];
}
int n1 = y > 0 ? -(int)img1.step : 0, s1 = y < img1.rows-1 ? (int)img1.step : 0;
int n2 = y > 0 ? -(int)img2.step : 0, s2 = y < img2.rows-1 ? (int)img2.step : 0;
if( cn == 1 )
{
for( x = 1; x < width-1; x++ )
{
prow1[x] = tab[(row1[x+1] - row1[x-1])*2 + row1[x+n1+1] - row1[x+n1-1] + row1[x+s1+1] - row1[x+s1-1]];
prow2[width-1-x] = tab[(row2[x+1] - row2[x-1])*2 + row2[x+n2+1] - row2[x+n2-1] + row2[x+s2+1] - row2[x+s2-1]];
prow1[x+width] = row1[x];
prow2[width-1-x+width] = row2[x];
}
}
else
{
for( x = 1; x < width-1; x++ )
{
prow1[x] = tab[(row1[x*3+3] - row1[x*3-3])*2 + row1[x*3+n1+3] - row1[x*3+n1-3] + row1[x*3+s1+3] - row1[x*3+s1-3]];
prow1[x+width] = tab[(row1[x*3+4] - row1[x*3-2])*2 + row1[x*3+n1+4] - row1[x*3+n1-2] + row1[x*3+s1+4] - row1[x*3+s1-2]];
prow1[x+width*2] = tab[(row1[x*3+5] - row1[x*3-1])*2 + row1[x*3+n1+5] - row1[x*3+n1-1] + row1[x*3+s1+5] - row1[x*3+s1-1]];
prow2[width-1-x] = tab[(row2[x*3+3] - row2[x*3-3])*2 + row2[x*3+n2+3] - row2[x*3+n2-3] + row2[x*3+s2+3] - row2[x*3+s2-3]];
prow2[width-1-x+width] = tab[(row2[x*3+4] - row2[x*3-2])*2 + row2[x*3+n2+4] - row2[x*3+n2-2] + row2[x*3+s2+4] - row2[x*3+s2-2]];
prow2[width-1-x+width*2] = tab[(row2[x*3+5] - row2[x*3-1])*2 + row2[x*3+n2+5] - row2[x*3+n2-1] + row2[x*3+s2+5] - row2[x*3+s2-1]];
prow1[x+width*3] = row1[x*3];
prow1[x+width*4] = row1[x*3+1];
prow1[x+width*5] = row1[x*3+2];
prow2[width-1-x+width*3] = row2[x*3];
prow2[width-1-x+width*4] = row2[x*3+1];
prow2[width-1-x+width*5] = row2[x*3+2];
}
}
memset( cost, 0, width1*D*sizeof(cost[0]) );
buffer -= minX2;
cost -= minX1*D + minD; // simplify the cost indices inside the loop
namespace stereo
{
typedef uchar PixType;
typedef short CostType;
typedef short DispType;
enum { NR = 16, NR2 = NR/2 };
struct StereoBinarySGBMParams
{
StereoBinarySGBMParams()
{
minDisparity = numDisparities = 0;
SADWindowSize = 0;
P1 = P2 = 0;
disp12MaxDiff = 0;
preFilterCap = 0;
uniquenessRatio = 0;
speckleWindowSize = 0;
speckleRange = 0;
mode = StereoBinarySGBM::MODE_SGBM;
}
StereoBinarySGBMParams( int _minDisparity, int _numDisparities, int _SADWindowSize,
int _P1, int _P2, int _disp12MaxDiff, int _preFilterCap,
int _uniquenessRatio, int _speckleWindowSize, int _speckleRange,
int _mode )
{
minDisparity = _minDisparity;
numDisparities = _numDisparities;
SADWindowSize = _SADWindowSize;
P1 = _P1;
P2 = _P2;
disp12MaxDiff = _disp12MaxDiff;
preFilterCap = _preFilterCap;
uniquenessRatio = _uniquenessRatio;
speckleWindowSize = _speckleWindowSize;
speckleRange = _speckleRange;
mode = _mode;
}
int minDisparity;
int numDisparities;
int SADWindowSize;
int preFilterCap;
int uniquenessRatio;
int P1;
int P2;
int speckleWindowSize;
int speckleRange;
int disp12MaxDiff;
int mode;
};
/*
For each pixel row1[x], max(-maxD, 0) <= minX <= x < maxX <= width - max(0, -minD),
and for each disparity minD<=d<maxD the function
computes the cost (cost[(x-minX)*(maxD - minD) + (d - minD)]), depending on the difference between
row1[x] and row2[x-d]. The subpixel algorithm from
"Depth Discontinuities by Pixel-to-Pixel Stereo" by Stan Birchfield and C. Tomasi
is used, hence the suffix BT.
the temporary buffer should contain width2*2 elements
*/
static void calcPixelCostBT( const Mat& img1, const Mat& img2, int y,
int minD, int maxD, CostType* cost,
PixType* buffer, const PixType* tab,
int tabOfs, int )
{
int x, c, width = img1.cols, cn = img1.channels();
int minX1 = std::max(-maxD, 0), maxX1 = width + std::min(minD, 0);
int minX2 = std::max(minX1 - maxD, 0), maxX2 = std::min(maxX1 - minD, width);
int D = maxD - minD, width1 = maxX1 - minX1, width2 = maxX2 - minX2;
const PixType *row1 = img1.ptr<PixType>(y), *row2 = img2.ptr<PixType>(y);
PixType *prow1 = buffer + width2*2, *prow2 = prow1 + width*cn*2;
tab += tabOfs;
for( c = 0; c < cn*2; c++ )
{
prow1[width*c] = prow1[width*c + width-1] =
prow2[width*c] = prow2[width*c + width-1] = tab[0];
}
int n1 = y > 0 ? -(int)img1.step : 0, s1 = y < img1.rows-1 ? (int)img1.step : 0;
int n2 = y > 0 ? -(int)img2.step : 0, s2 = y < img2.rows-1 ? (int)img2.step : 0;
if( cn == 1 )
{
for( x = 1; x < width-1; x++ )
{
prow1[x] = tab[(row1[x+1] - row1[x-1])*2 + row1[x+n1+1] - row1[x+n1-1] + row1[x+s1+1] - row1[x+s1-1]];
prow2[width-1-x] = tab[(row2[x+1] - row2[x-1])*2 + row2[x+n2+1] - row2[x+n2-1] + row2[x+s2+1] - row2[x+s2-1]];
prow1[x+width] = row1[x];
prow2[width-1-x+width] = row2[x];
}
}
else
{
for( x = 1; x < width-1; x++ )
{
prow1[x] = tab[(row1[x*3+3] - row1[x*3-3])*2 + row1[x*3+n1+3] - row1[x*3+n1-3] + row1[x*3+s1+3] - row1[x*3+s1-3]];
prow1[x+width] = tab[(row1[x*3+4] - row1[x*3-2])*2 + row1[x*3+n1+4] - row1[x*3+n1-2] + row1[x*3+s1+4] - row1[x*3+s1-2]];
prow1[x+width*2] = tab[(row1[x*3+5] - row1[x*3-1])*2 + row1[x*3+n1+5] - row1[x*3+n1-1] + row1[x*3+s1+5] - row1[x*3+s1-1]];
prow2[width-1-x] = tab[(row2[x*3+3] - row2[x*3-3])*2 + row2[x*3+n2+3] - row2[x*3+n2-3] + row2[x*3+s2+3] - row2[x*3+s2-3]];
prow2[width-1-x+width] = tab[(row2[x*3+4] - row2[x*3-2])*2 + row2[x*3+n2+4] - row2[x*3+n2-2] + row2[x*3+s2+4] - row2[x*3+s2-2]];
prow2[width-1-x+width*2] = tab[(row2[x*3+5] - row2[x*3-1])*2 + row2[x*3+n2+5] - row2[x*3+n2-1] + row2[x*3+s2+5] - row2[x*3+s2-1]];
prow1[x+width*3] = row1[x*3];
prow1[x+width*4] = row1[x*3+1];
prow1[x+width*5] = row1[x*3+2];
prow2[width-1-x+width*3] = row2[x*3];
prow2[width-1-x+width*4] = row2[x*3+1];
prow2[width-1-x+width*5] = row2[x*3+2];
}
}
memset( cost, 0, width1*D*sizeof(cost[0]) );
buffer -= minX2;
cost -= minX1*D + minD; // simplify the cost indices inside the loop
#if CV_SSE2
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
#endif
#if 1
for( c = 0; c < cn*2; c++, prow1 += width, prow2 += width )
{
int diff_scale = c < cn ? 0 : 2;
// precompute
// v0 = min(row2[x-1/2], row2[x], row2[x+1/2]) and
// v1 = max(row2[x-1/2], row2[x], row2[x+1/2]) and
for( x = minX2; x < maxX2; x++ )
{
int v = prow2[x];
int vl = x > 0 ? (v + prow2[x-1])/2 : v;
int vr = x < width-1 ? (v + prow2[x+1])/2 : v;
int v0 = std::min(vl, vr); v0 = std::min(v0, v);
int v1 = std::max(vl, vr); v1 = std::max(v1, v);
buffer[x] = (PixType)v0;
buffer[x + width2] = (PixType)v1;
}
for( x = minX1; x < maxX1; x++ )
{
int u = prow1[x];
int ul = x > 0 ? (u + prow1[x-1])/2 : u;
int ur = x < width-1 ? (u + prow1[x+1])/2 : u;
int u0 = std::min(ul, ur); u0 = std::min(u0, u);
int u1 = std::max(ul, ur); u1 = std::max(u1, u);
for( c = 0; c < cn*2; c++, prow1 += width, prow2 += width )
{
int diff_scale = c < cn ? 0 : 2;
// precompute
// v0 = min(row2[x-1/2], row2[x], row2[x+1/2]) and
// v1 = max(row2[x-1/2], row2[x], row2[x+1/2]) and
for( x = minX2; x < maxX2; x++ )
{
int v = prow2[x];
int vl = x > 0 ? (v + prow2[x-1])/2 : v;
int vr = x < width-1 ? (v + prow2[x+1])/2 : v;
int v0 = std::min(vl, vr); v0 = std::min(v0, v);
int v1 = std::max(vl, vr); v1 = std::max(v1, v);
buffer[x] = (PixType)v0;
buffer[x + width2] = (PixType)v1;
}
for( x = minX1; x < maxX1; x++ )
{
int u = prow1[x];
int ul = x > 0 ? (u + prow1[x-1])/2 : u;
int ur = x < width-1 ? (u + prow1[x+1])/2 : u;
int u0 = std::min(ul, ur); u0 = std::min(u0, u);
int u1 = std::max(ul, ur); u1 = std::max(u1, u);
#if CV_SSE2
if( useSIMD )
{
__m128i _u = _mm_set1_epi8((char)u), _u0 = _mm_set1_epi8((char)u0);
__m128i _u1 = _mm_set1_epi8((char)u1), z = _mm_setzero_si128();
__m128i ds = _mm_cvtsi32_si128(diff_scale);
for( int d = minD; d < maxD; d += 16 )
{
__m128i _v = _mm_loadu_si128((const __m128i*)(prow2 + width-x-1 + d));
__m128i _v0 = _mm_loadu_si128((const __m128i*)(buffer + width-x-1 + d));
__m128i _v1 = _mm_loadu_si128((const __m128i*)(buffer + width-x-1 + d + width2));
__m128i c0 = _mm_max_epu8(_mm_subs_epu8(_u, _v1), _mm_subs_epu8(_v0, _u));
__m128i c1 = _mm_max_epu8(_mm_subs_epu8(_v, _u1), _mm_subs_epu8(_u0, _v));
__m128i diff = _mm_min_epu8(c0, c1);
c0 = _mm_load_si128((__m128i*)(cost + x*D + d));
c1 = _mm_load_si128((__m128i*)(cost + x*D + d + 8));
_mm_store_si128((__m128i*)(cost + x*D + d), _mm_adds_epi16(c0, _mm_srl_epi16(_mm_unpacklo_epi8(diff,z), ds)));
_mm_store_si128((__m128i*)(cost + x*D + d + 8), _mm_adds_epi16(c1, _mm_srl_epi16(_mm_unpackhi_epi8(diff,z), ds)));
}
}
else
if( useSIMD )
{
__m128i _u = _mm_set1_epi8((char)u), _u0 = _mm_set1_epi8((char)u0);
__m128i _u1 = _mm_set1_epi8((char)u1), z = _mm_setzero_si128();
__m128i ds = _mm_cvtsi32_si128(diff_scale);
for( int d = minD; d < maxD; d += 16 )
{
__m128i _v = _mm_loadu_si128((const __m128i*)(prow2 + width-x-1 + d));
__m128i _v0 = _mm_loadu_si128((const __m128i*)(buffer + width-x-1 + d));
__m128i _v1 = _mm_loadu_si128((const __m128i*)(buffer + width-x-1 + d + width2));
__m128i c0 = _mm_max_epu8(_mm_subs_epu8(_u, _v1), _mm_subs_epu8(_v0, _u));
__m128i c1 = _mm_max_epu8(_mm_subs_epu8(_v, _u1), _mm_subs_epu8(_u0, _v));
__m128i diff = _mm_min_epu8(c0, c1);
c0 = _mm_load_si128((__m128i*)(cost + x*D + d));
c1 = _mm_load_si128((__m128i*)(cost + x*D + d + 8));
_mm_store_si128((__m128i*)(cost + x*D + d), _mm_adds_epi16(c0, _mm_srl_epi16(_mm_unpacklo_epi8(diff,z), ds)));
_mm_store_si128((__m128i*)(cost + x*D + d + 8), _mm_adds_epi16(c1, _mm_srl_epi16(_mm_unpackhi_epi8(diff,z), ds)));
}
}
else
#endif
{
for( int d = minD; d < maxD; d++ )
{
int v = prow2[width-x-1 + d];
int v0 = buffer[width-x-1 + d];
int v1 = buffer[width-x-1 + d + width2];
int c0 = std::max(0, u - v1); c0 = std::max(c0, v0 - u);
int c1 = std::max(0, v - u1); c1 = std::max(c1, u0 - v);
cost[x*D + d] = (CostType)(cost[x*D+d] + (std::min(c0, c1) >> diff_scale));
}
}
}
}
{
for( int d = minD; d < maxD; d++ )
{
int v = prow2[width-x-1 + d];
int v0 = buffer[width-x-1 + d];
int v1 = buffer[width-x-1 + d + width2];
int c0 = std::max(0, u - v1); c0 = std::max(c0, v0 - u);
int c1 = std::max(0, v - u1); c1 = std::max(c1, u0 - v);
cost[x*D + d] = (CostType)(cost[x*D+d] + (std::min(c0, c1) >> diff_scale));
}
}
}
}
#else
for( c = 0; c < cn*2; c++, prow1 += width, prow2 += width )
{
for( x = minX1; x < maxX1; x++ )
{
int u = prow1[x];
for( c = 0; c < cn*2; c++, prow1 += width, prow2 += width )
{
for( x = minX1; x < maxX1; x++ )
{
int u = prow1[x];
#if CV_SSE2
if( useSIMD )
{
__m128i _u = _mm_set1_epi8(u), z = _mm_setzero_si128();
for( int d = minD; d < maxD; d += 16 )
{
__m128i _v = _mm_loadu_si128((const __m128i*)(prow2 + width-1-x + d));
__m128i diff = _mm_adds_epu8(_mm_subs_epu8(_u,_v), _mm_subs_epu8(_v,_u));
__m128i c0 = _mm_load_si128((__m128i*)(cost + x*D + d));
__m128i c1 = _mm_load_si128((__m128i*)(cost + x*D + d + 8));
_mm_store_si128((__m128i*)(cost + x*D + d), _mm_adds_epi16(c0, _mm_unpacklo_epi8(diff,z)));
_mm_store_si128((__m128i*)(cost + x*D + d + 8), _mm_adds_epi16(c1, _mm_unpackhi_epi8(diff,z)));
}
}
else
if( useSIMD )
{
__m128i _u = _mm_set1_epi8(u), z = _mm_setzero_si128();
for( int d = minD; d < maxD; d += 16 )
{
__m128i _v = _mm_loadu_si128((const __m128i*)(prow2 + width-1-x + d));
__m128i diff = _mm_adds_epu8(_mm_subs_epu8(_u,_v), _mm_subs_epu8(_v,_u));
__m128i c0 = _mm_load_si128((__m128i*)(cost + x*D + d));
__m128i c1 = _mm_load_si128((__m128i*)(cost + x*D + d + 8));
_mm_store_si128((__m128i*)(cost + x*D + d), _mm_adds_epi16(c0, _mm_unpacklo_epi8(diff,z)));
_mm_store_si128((__m128i*)(cost + x*D + d + 8), _mm_adds_epi16(c1, _mm_unpackhi_epi8(diff,z)));
}
}
else
#endif
{
for( int d = minD; d < maxD; d++ )
{
int v = prow2[width-1-x + d];
cost[x*D + d] = (CostType)(cost[x*D + d] + (CostType)std::abs(u - v));
}
}
}
}
{
for( int d = minD; d < maxD; d++ )
{
int v = prow2[width-1-x + d];
cost[x*D + d] = (CostType)(cost[x*D + d] + (CostType)std::abs(u - v));
}
}
}
}
#endif
}
/*
computes disparity for "roi" in img1 w.r.t. img2 and write it to disp1buf.
that is, disp1buf(x, y)=d means that img1(x+roi.x, y+roi.y) ~ img2(x+roi.x-d, y+roi.y).
minD <= d < maxD.
disp2full is the reverse disparity map, that is:
disp2full(x+roi.x,y+roi.y)=d means that img2(x+roi.x, y+roi.y) ~ img1(x+roi.x+d, y+roi.y)
note that disp1buf will have the same size as the roi and
disp2full will have the same size as img1 (or img2).
On exit disp2buf is not the final disparity, it is an intermediate result that becomes
final after all the tiles are processed.
the disparity in disp1buf is written with sub-pixel accuracy
(4 fractional bits, see StereoSGBM::DISP_SCALE),
using quadratic interpolation, while the disparity in disp2buf
is written as is, without interpolation.
disp2cost also has the same size as img1 (or img2).
It contains the minimum current cost, used to find the best disparity, corresponding to the minimal cost.
*/
// NOTE(review): empty duplicate of computeDisparityBinarySGBM, which is defined
// with the identical signature further below — almost certainly merge/diff
// residue from the commit. Two definitions of the same static function in one
// translation unit will not compile; this stub should be removed once confirmed
// against the upstream file.
static void computeDisparityBinarySGBM( const Mat& img1, const Mat& img2,
Mat& disp1, const StereoBinarySGBMParams& params,
Mat& buffer )
{
}
/*
computes disparity for "roi" in img1 w.r.t. img2 and write it to disp1buf.
that is, disp1buf(x, y)=d means that img1(x+roi.x, y+roi.y) ~ img2(x+roi.x-d, y+roi.y).
minD <= d < maxD.
disp2full is the reverse disparity map, that is:
disp2full(x+roi.x,y+roi.y)=d means that img2(x+roi.x, y+roi.y) ~ img1(x+roi.x+d, y+roi.y)
note that disp1buf will have the same size as the roi and
disp2full will have the same size as img1 (or img2).
On exit disp2buf is not the final disparity, it is an intermediate result that becomes
final after all the tiles are processed.
the disparity in disp1buf is written with sub-pixel accuracy
(4 fractional bits, see StereoSGBM::DISP_SCALE),
using quadratic interpolation, while the disparity in disp2buf
is written as is, without interpolation.
disp2cost also has the same size as img1 (or img2).
It contains the minimum current cost, used to find the best disparity, corresponding to the minimal cost.
*/
static void computeDisparityBinarySGBM( const Mat& img1, const Mat& img2,
Mat& disp1, const StereoBinarySGBMParams& params,
Mat& buffer )
{
#if CV_SSE2
static const uchar LSBTab[] =
{
0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
static const uchar LSBTab[] =
{
0, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2);
#endif
const int ALIGN = 16;
const int DISP_SHIFT = StereoMatcher::DISP_SHIFT;
const int DISP_SCALE = (1 << DISP_SHIFT);
const CostType MAX_COST = SHRT_MAX;
int minD = params.minDisparity, maxD = minD + params.numDisparities;
Size SADWindowSize;
SADWindowSize.width = SADWindowSize.height = params.SADWindowSize > 0 ? params.SADWindowSize : 5;
int ftzero = std::max(params.preFilterCap, 15) | 1;
int uniquenessRatio = params.uniquenessRatio >= 0 ? params.uniquenessRatio : 10;
int disp12MaxDiff = params.disp12MaxDiff > 0 ? params.disp12MaxDiff : 1;
int P1 = params.P1 > 0 ? params.P1 : 2, P2 = std::max(params.P2 > 0 ? params.P2 : 5, P1+1);
int k, width = disp1.cols, height = disp1.rows;
int minX1 = std::max(-maxD, 0), maxX1 = width + std::min(minD, 0);
int D = maxD - minD, width1 = maxX1 - minX1;
int INVALID_DISP = minD - 1, INVALID_DISP_SCALED = INVALID_DISP*DISP_SCALE;
int SW2 = SADWindowSize.width/2, SH2 = SADWindowSize.height/2;
bool fullDP = params.mode == StereoBinarySGBM::MODE_HH;
int npasses = fullDP ? 2 : 1;
const int TAB_OFS = 256*4, TAB_SIZE = 256 + TAB_OFS*2;
PixType clipTab[TAB_SIZE];
for( k = 0; k < TAB_SIZE; k++ )
clipTab[k] = (PixType)(std::min(std::max(k - TAB_OFS, -ftzero), ftzero) + ftzero);
if( minX1 >= maxX1 )
{
disp1 = Scalar::all(INVALID_DISP_SCALED);
return;
}
CV_Assert( D % 16 == 0 );
// NR - the number of directions. the loop on x below that computes Lr assumes that NR == 8.
// if you change NR, please, modify the loop as well.
int D2 = D+16, NRD2 = NR2*D2;
// the number of L_r(.,.) and min_k L_r(.,.) lines in the buffer:
// for 8-way dynamic programming we need the current row and
// the previous row, i.e. 2 rows in total
const int NLR = 2;
const int LrBorder = NLR - 1;
// for each possible stereo match (img1(x,y) <=> img2(x-d,y))
// we keep pixel difference cost (C) and the summary cost over NR directions (S).
// we also keep all the partial costs for the previous line L_r(x,d) and also min_k L_r(x, k)
size_t costBufSize = width1*D;
size_t CSBufSize = costBufSize*(fullDP ? height : 1);
size_t minLrSize = (width1 + LrBorder*2)*NR2, LrSize = minLrSize*D2;
int hsumBufNRows = SH2*2 + 2;
size_t totalBufSize = (LrSize + minLrSize)*NLR*sizeof(CostType) + // minLr[] and Lr[]
costBufSize*(hsumBufNRows + 1)*sizeof(CostType) + // hsumBuf, pixdiff
CSBufSize*2*sizeof(CostType) + // C, S
width*16*img1.channels()*sizeof(PixType) + // temp buffer for computing per-pixel cost
width*(sizeof(CostType) + sizeof(DispType)) + 1024; // disp2cost + disp2
if( buffer.empty() || !buffer.isContinuous() ||
buffer.cols*buffer.rows*buffer.elemSize() < totalBufSize )
buffer.create(1, (int)totalBufSize, CV_8U);
// summary cost over different (nDirs) directions
CostType* Cbuf = (CostType*)alignPtr(buffer.ptr(), ALIGN);
CostType* Sbuf = Cbuf + CSBufSize;
CostType* hsumBuf = Sbuf + CSBufSize;
CostType* pixDiff = hsumBuf + costBufSize*hsumBufNRows;
CostType* disp2cost = pixDiff + costBufSize + (LrSize + minLrSize)*NLR;
DispType* disp2ptr = (DispType*)(disp2cost + width);
PixType* tempBuf = (PixType*)(disp2ptr + width);
// add P2 to every C(x,y). it saves a few operations in the inner loops
for( k = 0; k < width1*D; k++ )
Cbuf[k] = (CostType)P2;
for( int pass = 1; pass <= npasses; pass++ )
{
int x1, y1, x2, y2, dx, dy;
if( pass == 1 )
{
y1 = 0; y2 = height; dy = 1;
x1 = 0; x2 = width1; dx = 1;
}
else
{
y1 = height-1; y2 = -1; dy = -1;
x1 = width1-1; x2 = -1; dx = -1;
}
CostType *Lr[NLR]={0}, *minLr[NLR]={0};
for( k = 0; k < NLR; k++ )
{
// shift Lr[k] and minLr[k] pointers, because we allocated them with the borders,
// and will occasionally use negative indices with the arrays
// we need to shift Lr[k] pointers by 1, to give the space for d=-1.
// however, then the alignment will be imperfect, i.e. bad for SSE,
// thus we shift the pointers by 8 (8*sizeof(short) == 16 - ideal alignment)
Lr[k] = pixDiff + costBufSize + LrSize*k + NRD2*LrBorder + 8;
memset( Lr[k] - LrBorder*NRD2 - 8, 0, LrSize*sizeof(CostType) );
minLr[k] = pixDiff + costBufSize + LrSize*NLR + minLrSize*k + NR2*LrBorder;
memset( minLr[k] - LrBorder*NR2, 0, minLrSize*sizeof(CostType) );
}
for( int y = y1; y != y2; y += dy )
{
int x, d;
DispType* disp1ptr = disp1.ptr<DispType>(y);
CostType* C = Cbuf + (!fullDP ? 0 : y*costBufSize);
CostType* S = Sbuf + (!fullDP ? 0 : y*costBufSize);
if( pass == 1 ) // compute C on the first pass, and reuse it on the second pass, if any.
{
int dy1 = y == 0 ? 0 : y + SH2, dy2 = y == 0 ? SH2 : dy1;
for( k = dy1; k <= dy2; k++ )
{
CostType* hsumAdd = hsumBuf + (std::min(k, height-1) % hsumBufNRows)*costBufSize;
if( k < height )
{
calcPixelCostBT( img1, img2, k, minD, maxD, pixDiff, tempBuf, clipTab, TAB_OFS, ftzero );
memset(hsumAdd, 0, D*sizeof(CostType));
for( x = 0; x <= SW2*D; x += D )
{
int scale = x == 0 ? SW2 + 1 : 1;
for( d = 0; d < D; d++ )
hsumAdd[d] = (CostType)(hsumAdd[d] + pixDiff[x + d]*scale);
}
if( y > 0 )
{
const CostType* hsumSub = hsumBuf + (std::max(y - SH2 - 1, 0) % hsumBufNRows)*costBufSize;
const CostType* Cprev = !fullDP || y == 0 ? C : C - costBufSize;
for( x = D; x < width1*D; x += D )
{
const CostType* pixAdd = pixDiff + std::min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + std::max(x - (SW2+1)*D, 0);
const int ALIGN = 16;
const int DISP_SHIFT = StereoMatcher::DISP_SHIFT;
const int DISP_SCALE = (1 << DISP_SHIFT);
const CostType MAX_COST = SHRT_MAX;
int minD = params.minDisparity, maxD = minD + params.numDisparities;
Size SADWindowSize;
SADWindowSize.width = SADWindowSize.height = params.SADWindowSize > 0 ? params.SADWindowSize : 5;
int ftzero = std::max(params.preFilterCap, 15) | 1;
int uniquenessRatio = params.uniquenessRatio >= 0 ? params.uniquenessRatio : 10;
int disp12MaxDiff = params.disp12MaxDiff > 0 ? params.disp12MaxDiff : 1;
int P1 = params.P1 > 0 ? params.P1 : 2, P2 = std::max(params.P2 > 0 ? params.P2 : 5, P1+1);
int k, width = disp1.cols, height = disp1.rows;
int minX1 = std::max(-maxD, 0), maxX1 = width + std::min(minD, 0);
int D = maxD - minD, width1 = maxX1 - minX1;
int INVALID_DISP = minD - 1, INVALID_DISP_SCALED = INVALID_DISP*DISP_SCALE;
int SW2 = SADWindowSize.width/2, SH2 = SADWindowSize.height/2;
bool fullDP = params.mode == StereoBinarySGBM::MODE_HH;
int npasses = fullDP ? 2 : 1;
const int TAB_OFS = 256*4, TAB_SIZE = 256 + TAB_OFS*2;
PixType clipTab[TAB_SIZE];
for( k = 0; k < TAB_SIZE; k++ )
clipTab[k] = (PixType)(std::min(std::max(k - TAB_OFS, -ftzero), ftzero) + ftzero);
if( minX1 >= maxX1 )
{
disp1 = Scalar::all(INVALID_DISP_SCALED);
return;
}
CV_Assert( D % 16 == 0 );
// NR - the number of directions. the loop on x below that computes Lr assumes that NR == 8.
// if you change NR, please, modify the loop as well.
int D2 = D+16, NRD2 = NR2*D2;
// the number of L_r(.,.) and min_k L_r(.,.) lines in the buffer:
// for 8-way dynamic programming we need the current row and
// the previous row, i.e. 2 rows in total
const int NLR = 2;
const int LrBorder = NLR - 1;
// for each possible stereo match (img1(x,y) <=> img2(x-d,y))
// we keep pixel difference cost (C) and the summary cost over NR directions (S).
// we also keep all the partial costs for the previous line L_r(x,d) and also min_k L_r(x, k)
size_t costBufSize = width1*D;
size_t CSBufSize = costBufSize*(fullDP ? height : 1);
size_t minLrSize = (width1 + LrBorder*2)*NR2, LrSize = minLrSize*D2;
int hsumBufNRows = SH2*2 + 2;
size_t totalBufSize = (LrSize + minLrSize)*NLR*sizeof(CostType) + // minLr[] and Lr[]
costBufSize*(hsumBufNRows + 1)*sizeof(CostType) + // hsumBuf, pixdiff
CSBufSize*2*sizeof(CostType) + // C, S
width*16*img1.channels()*sizeof(PixType) + // temp buffer for computing per-pixel cost
width*(sizeof(CostType) + sizeof(DispType)) + 1024; // disp2cost + disp2
if( buffer.empty() || !buffer.isContinuous() ||
buffer.cols*buffer.rows*buffer.elemSize() < totalBufSize )
buffer.create(1, (int)totalBufSize, CV_8U);
// summary cost over different (nDirs) directions
CostType* Cbuf = (CostType*)alignPtr(buffer.ptr(), ALIGN);
CostType* Sbuf = Cbuf + CSBufSize;
CostType* hsumBuf = Sbuf + CSBufSize;
CostType* pixDiff = hsumBuf + costBufSize*hsumBufNRows;
CostType* disp2cost = pixDiff + costBufSize + (LrSize + minLrSize)*NLR;
DispType* disp2ptr = (DispType*)(disp2cost + width);
PixType* tempBuf = (PixType*)(disp2ptr + width);
// add P2 to every C(x,y). it saves a few operations in the inner loops
for( k = 0; k < width1*D; k++ )
Cbuf[k] = (CostType)P2;
for( int pass = 1; pass <= npasses; pass++ )
{
int x1, y1, x2, y2, dx, dy;
if( pass == 1 )
{
y1 = 0; y2 = height; dy = 1;
x1 = 0; x2 = width1; dx = 1;
}
else
{
y1 = height-1; y2 = -1; dy = -1;
x1 = width1-1; x2 = -1; dx = -1;
}
CostType *Lr[NLR]={0}, *minLr[NLR]={0};
for( k = 0; k < NLR; k++ )
{
// shift Lr[k] and minLr[k] pointers, because we allocated them with the borders,
// and will occasionally use negative indices with the arrays
// we need to shift Lr[k] pointers by 1, to give the space for d=-1.
// however, then the alignment will be imperfect, i.e. bad for SSE,
// thus we shift the pointers by 8 (8*sizeof(short) == 16 - ideal alignment)
Lr[k] = pixDiff + costBufSize + LrSize*k + NRD2*LrBorder + 8;
memset( Lr[k] - LrBorder*NRD2 - 8, 0, LrSize*sizeof(CostType) );
minLr[k] = pixDiff + costBufSize + LrSize*NLR + minLrSize*k + NR2*LrBorder;
memset( minLr[k] - LrBorder*NR2, 0, minLrSize*sizeof(CostType) );
}
for( int y = y1; y != y2; y += dy )
{
int x, d;
DispType* disp1ptr = disp1.ptr<DispType>(y);
CostType* C = Cbuf + (!fullDP ? 0 : y*costBufSize);
CostType* S = Sbuf + (!fullDP ? 0 : y*costBufSize);
if( pass == 1 ) // compute C on the first pass, and reuse it on the second pass, if any.
{
int dy1 = y == 0 ? 0 : y + SH2, dy2 = y == 0 ? SH2 : dy1;
for( k = dy1; k <= dy2; k++ )
{
CostType* hsumAdd = hsumBuf + (std::min(k, height-1) % hsumBufNRows)*costBufSize;
if( k < height )
{
calcPixelCostBT( img1, img2, k, minD, maxD, pixDiff, tempBuf, clipTab, TAB_OFS, ftzero );
memset(hsumAdd, 0, D*sizeof(CostType));
for( x = 0; x <= SW2*D; x += D )
{
int scale = x == 0 ? SW2 + 1 : 1;
for( d = 0; d < D; d++ )
hsumAdd[d] = (CostType)(hsumAdd[d] + pixDiff[x + d]*scale);
}
if( y > 0 )
{
const CostType* hsumSub = hsumBuf + (std::max(y - SH2 - 1, 0) % hsumBufNRows)*costBufSize;
const CostType* Cprev = !fullDP || y == 0 ? C : C - costBufSize;
for( x = D; x < width1*D; x += D )
{
const CostType* pixAdd = pixDiff + std::min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + std::max(x - (SW2+1)*D, 0);
#if CV_SSE2
if( useSIMD )
{
for( d = 0; d < D; d += 8 )
{
__m128i hv = _mm_load_si128((const __m128i*)(hsumAdd + x - D + d));
__m128i Cx = _mm_load_si128((__m128i*)(Cprev + x + d));
hv = _mm_adds_epi16(_mm_subs_epi16(hv,
_mm_load_si128((const __m128i*)(pixSub + d))),
_mm_load_si128((const __m128i*)(pixAdd + d)));
Cx = _mm_adds_epi16(_mm_subs_epi16(Cx,
_mm_load_si128((const __m128i*)(hsumSub + x + d))),
hv);
_mm_store_si128((__m128i*)(hsumAdd + x + d), hv);
_mm_store_si128((__m128i*)(C + x + d), Cx);
}
}
else
if( useSIMD )
{
for( d = 0; d < D; d += 8 )
{
__m128i hv = _mm_load_si128((const __m128i*)(hsumAdd + x - D + d));
__m128i Cx = _mm_load_si128((__m128i*)(Cprev + x + d));
hv = _mm_adds_epi16(_mm_subs_epi16(hv,
_mm_load_si128((const __m128i*)(pixSub + d))),
_mm_load_si128((const __m128i*)(pixAdd + d)));
Cx = _mm_adds_epi16(_mm_subs_epi16(Cx,
_mm_load_si128((const __m128i*)(hsumSub + x + d))),
hv);
_mm_store_si128((__m128i*)(hsumAdd + x + d), hv);
_mm_store_si128((__m128i*)(C + x + d), Cx);
}
}
else
#endif
{
for( d = 0; d < D; d++ )
{
int hv = hsumAdd[x + d] = (CostType)(hsumAdd[x - D + d] + pixAdd[d] - pixSub[d]);
C[x + d] = (CostType)(Cprev[x + d] + hv - hsumSub[x + d]);
}
}
}
}
else
{
for( x = D; x < width1*D; x += D )
{
const CostType* pixAdd = pixDiff + std::min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + std::max(x - (SW2+1)*D, 0);
for( d = 0; d < D; d++ )
hsumAdd[x + d] = (CostType)(hsumAdd[x - D + d] + pixAdd[d] - pixSub[d]);
}
}
}
if( y == 0 )
{
int scale = k == 0 ? SH2 + 1 : 1;
for( x = 0; x < width1*D; x++ )
C[x] = (CostType)(C[x] + hsumAdd[x]*scale);
}
}
// also, clear the S buffer
for( k = 0; k < width1*D; k++ )
S[k] = 0;
}
// clear the left and the right borders
memset( Lr[0] - NRD2*LrBorder - 8, 0, NRD2*LrBorder*sizeof(CostType) );
memset( Lr[0] + width1*NRD2 - 8, 0, NRD2*LrBorder*sizeof(CostType) );
memset( minLr[0] - NR2*LrBorder, 0, NR2*LrBorder*sizeof(CostType) );
memset( minLr[0] + width1*NR2, 0, NR2*LrBorder*sizeof(CostType) );
/*
[formula 13 in the paper]
compute L_r(p, d) = C(p, d) +
min(L_r(p-r, d),
L_r(p-r, d-1) + P1,
L_r(p-r, d+1) + P1,
min_k L_r(p-r, k) + P2) - min_k L_r(p-r, k)
where p = (x,y), r is one of the directions.
we process all the directions at once:
0: r=(-dx, 0)
1: r=(-1, -dy)
2: r=(0, -dy)
3: r=(1, -dy)
4: r=(-2, -dy)
5: r=(-1, -dy*2)
6: r=(1, -dy*2)
7: r=(2, -dy)
*/
for( x = x1; x != x2; x += dx )
{
int xm = x*NR2, xd = xm*D2;
int delta0 = minLr[0][xm - dx*NR2] + P2, delta1 = minLr[1][xm - NR2 + 1] + P2;
int delta2 = minLr[1][xm + 2] + P2, delta3 = minLr[1][xm + NR2 + 3] + P2;
CostType* Lr_p0 = Lr[0] + xd - dx*NRD2;
CostType* Lr_p1 = Lr[1] + xd - NRD2 + D2;
CostType* Lr_p2 = Lr[1] + xd + D2*2;
CostType* Lr_p3 = Lr[1] + xd + NRD2 + D2*3;
Lr_p0[-1] = Lr_p0[D] = Lr_p1[-1] = Lr_p1[D] =
Lr_p2[-1] = Lr_p2[D] = Lr_p3[-1] = Lr_p3[D] = MAX_COST;
CostType* Lr_p = Lr[0] + xd;
const CostType* Cp = C + x*D;
CostType* Sp = S + x*D;
{
for( d = 0; d < D; d++ )
{
int hv = hsumAdd[x + d] = (CostType)(hsumAdd[x - D + d] + pixAdd[d] - pixSub[d]);
C[x + d] = (CostType)(Cprev[x + d] + hv - hsumSub[x + d]);
}
}
}
}
else
{
for( x = D; x < width1*D; x += D )
{
const CostType* pixAdd = pixDiff + std::min(x + SW2*D, (width1-1)*D);
const CostType* pixSub = pixDiff + std::max(x - (SW2+1)*D, 0);
for( d = 0; d < D; d++ )
hsumAdd[x + d] = (CostType)(hsumAdd[x - D + d] + pixAdd[d] - pixSub[d]);
}
}
}
if( y == 0 )
{
int scale = k == 0 ? SH2 + 1 : 1;
for( x = 0; x < width1*D; x++ )
C[x] = (CostType)(C[x] + hsumAdd[x]*scale);
}
}
// also, clear the S buffer
for( k = 0; k < width1*D; k++ )
S[k] = 0;
}
// clear the left and the right borders
memset( Lr[0] - NRD2*LrBorder - 8, 0, NRD2*LrBorder*sizeof(CostType) );
memset( Lr[0] + width1*NRD2 - 8, 0, NRD2*LrBorder*sizeof(CostType) );
memset( minLr[0] - NR2*LrBorder, 0, NR2*LrBorder*sizeof(CostType) );
memset( minLr[0] + width1*NR2, 0, NR2*LrBorder*sizeof(CostType) );
/*
[formula 13 in the paper]
compute L_r(p, d) = C(p, d) +
min(L_r(p-r, d),
L_r(p-r, d-1) + P1,
L_r(p-r, d+1) + P1,
min_k L_r(p-r, k) + P2) - min_k L_r(p-r, k)
where p = (x,y), r is one of the directions.
we process all the directions at once:
0: r=(-dx, 0)
1: r=(-1, -dy)
2: r=(0, -dy)
3: r=(1, -dy)
4: r=(-2, -dy)
5: r=(-1, -dy*2)
6: r=(1, -dy*2)
7: r=(2, -dy)
*/
for( x = x1; x != x2; x += dx )
{
int xm = x*NR2, xd = xm*D2;
int delta0 = minLr[0][xm - dx*NR2] + P2, delta1 = minLr[1][xm - NR2 + 1] + P2;
int delta2 = minLr[1][xm + 2] + P2, delta3 = minLr[1][xm + NR2 + 3] + P2;
CostType* Lr_p0 = Lr[0] + xd - dx*NRD2;
CostType* Lr_p1 = Lr[1] + xd - NRD2 + D2;
CostType* Lr_p2 = Lr[1] + xd + D2*2;
CostType* Lr_p3 = Lr[1] + xd + NRD2 + D2*3;
Lr_p0[-1] = Lr_p0[D] = Lr_p1[-1] = Lr_p1[D] =
Lr_p2[-1] = Lr_p2[D] = Lr_p3[-1] = Lr_p3[D] = MAX_COST;
CostType* Lr_p = Lr[0] + xd;
const CostType* Cp = C + x*D;
CostType* Sp = S + x*D;
#if CV_SSE2
if( useSIMD )
{
__m128i _P1 = _mm_set1_epi16((short)P1);
__m128i _delta0 = _mm_set1_epi16((short)delta0);
__m128i _delta1 = _mm_set1_epi16((short)delta1);
__m128i _delta2 = _mm_set1_epi16((short)delta2);
__m128i _delta3 = _mm_set1_epi16((short)delta3);
__m128i _minL0 = _mm_set1_epi16((short)MAX_COST);
for( d = 0; d < D; d += 8 )
{
__m128i Cpd = _mm_load_si128((const __m128i*)(Cp + d));
__m128i L0, L1, L2, L3;
L0 = _mm_load_si128((const __m128i*)(Lr_p0 + d));
L1 = _mm_load_si128((const __m128i*)(Lr_p1 + d));
L2 = _mm_load_si128((const __m128i*)(Lr_p2 + d));
L3 = _mm_load_si128((const __m128i*)(Lr_p3 + d));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d - 1)), _P1));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d + 1)), _P1));
L1 = _mm_min_epi16(L1, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p1 + d - 1)), _P1));
L1 = _mm_min_epi16(L1, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p1 + d + 1)), _P1));
L2 = _mm_min_epi16(L2, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p2 + d - 1)), _P1));
L2 = _mm_min_epi16(L2, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p2 + d + 1)), _P1));
L3 = _mm_min_epi16(L3, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p3 + d - 1)), _P1));
L3 = _mm_min_epi16(L3, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p3 + d + 1)), _P1));
L0 = _mm_min_epi16(L0, _delta0);
L0 = _mm_adds_epi16(_mm_subs_epi16(L0, _delta0), Cpd);
L1 = _mm_min_epi16(L1, _delta1);
L1 = _mm_adds_epi16(_mm_subs_epi16(L1, _delta1), Cpd);
L2 = _mm_min_epi16(L2, _delta2);
L2 = _mm_adds_epi16(_mm_subs_epi16(L2, _delta2), Cpd);
L3 = _mm_min_epi16(L3, _delta3);
L3 = _mm_adds_epi16(_mm_subs_epi16(L3, _delta3), Cpd);
_mm_store_si128( (__m128i*)(Lr_p + d), L0);
_mm_store_si128( (__m128i*)(Lr_p + d + D2), L1);
_mm_store_si128( (__m128i*)(Lr_p + d + D2*2), L2);
_mm_store_si128( (__m128i*)(Lr_p + d + D2*3), L3);
__m128i t0 = _mm_min_epi16(_mm_unpacklo_epi16(L0, L2), _mm_unpackhi_epi16(L0, L2));
__m128i t1 = _mm_min_epi16(_mm_unpacklo_epi16(L1, L3), _mm_unpackhi_epi16(L1, L3));
t0 = _mm_min_epi16(_mm_unpacklo_epi16(t0, t1), _mm_unpackhi_epi16(t0, t1));
_minL0 = _mm_min_epi16(_minL0, t0);
__m128i Sval = _mm_load_si128((const __m128i*)(Sp + d));
L0 = _mm_adds_epi16(L0, L1);
L2 = _mm_adds_epi16(L2, L3);
Sval = _mm_adds_epi16(Sval, L0);
Sval = _mm_adds_epi16(Sval, L2);
_mm_store_si128((__m128i*)(Sp + d), Sval);
}
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 8));
_mm_storel_epi64((__m128i*)&minLr[0][xm], _minL0);
}
else
if( useSIMD )
{
__m128i _P1 = _mm_set1_epi16((short)P1);
__m128i _delta0 = _mm_set1_epi16((short)delta0);
__m128i _delta1 = _mm_set1_epi16((short)delta1);
__m128i _delta2 = _mm_set1_epi16((short)delta2);
__m128i _delta3 = _mm_set1_epi16((short)delta3);
__m128i _minL0 = _mm_set1_epi16((short)MAX_COST);
for( d = 0; d < D; d += 8 )
{
__m128i Cpd = _mm_load_si128((const __m128i*)(Cp + d));
__m128i L0, L1, L2, L3;
L0 = _mm_load_si128((const __m128i*)(Lr_p0 + d));
L1 = _mm_load_si128((const __m128i*)(Lr_p1 + d));
L2 = _mm_load_si128((const __m128i*)(Lr_p2 + d));
L3 = _mm_load_si128((const __m128i*)(Lr_p3 + d));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d - 1)), _P1));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d + 1)), _P1));
L1 = _mm_min_epi16(L1, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p1 + d - 1)), _P1));
L1 = _mm_min_epi16(L1, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p1 + d + 1)), _P1));
L2 = _mm_min_epi16(L2, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p2 + d - 1)), _P1));
L2 = _mm_min_epi16(L2, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p2 + d + 1)), _P1));
L3 = _mm_min_epi16(L3, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p3 + d - 1)), _P1));
L3 = _mm_min_epi16(L3, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p3 + d + 1)), _P1));
L0 = _mm_min_epi16(L0, _delta0);
L0 = _mm_adds_epi16(_mm_subs_epi16(L0, _delta0), Cpd);
L1 = _mm_min_epi16(L1, _delta1);
L1 = _mm_adds_epi16(_mm_subs_epi16(L1, _delta1), Cpd);
L2 = _mm_min_epi16(L2, _delta2);
L2 = _mm_adds_epi16(_mm_subs_epi16(L2, _delta2), Cpd);
L3 = _mm_min_epi16(L3, _delta3);
L3 = _mm_adds_epi16(_mm_subs_epi16(L3, _delta3), Cpd);
_mm_store_si128( (__m128i*)(Lr_p + d), L0);
_mm_store_si128( (__m128i*)(Lr_p + d + D2), L1);
_mm_store_si128( (__m128i*)(Lr_p + d + D2*2), L2);
_mm_store_si128( (__m128i*)(Lr_p + d + D2*3), L3);
__m128i t0 = _mm_min_epi16(_mm_unpacklo_epi16(L0, L2), _mm_unpackhi_epi16(L0, L2));
__m128i t1 = _mm_min_epi16(_mm_unpacklo_epi16(L1, L3), _mm_unpackhi_epi16(L1, L3));
t0 = _mm_min_epi16(_mm_unpacklo_epi16(t0, t1), _mm_unpackhi_epi16(t0, t1));
_minL0 = _mm_min_epi16(_minL0, t0);
__m128i Sval = _mm_load_si128((const __m128i*)(Sp + d));
L0 = _mm_adds_epi16(L0, L1);
L2 = _mm_adds_epi16(L2, L3);
Sval = _mm_adds_epi16(Sval, L0);
Sval = _mm_adds_epi16(Sval, L2);
_mm_store_si128((__m128i*)(Sp + d), Sval);
}
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 8));
_mm_storel_epi64((__m128i*)&minLr[0][xm], _minL0);
}
else
#endif
{
int minL0 = MAX_COST, minL1 = MAX_COST, minL2 = MAX_COST, minL3 = MAX_COST;
for( d = 0; d < D; d++ )
{
int Cpd = Cp[d], L0, L1, L2, L3;
L0 = Cpd + std::min((int)Lr_p0[d], std::min(Lr_p0[d-1] + P1, std::min(Lr_p0[d+1] + P1, delta0))) - delta0;
L1 = Cpd + std::min((int)Lr_p1[d], std::min(Lr_p1[d-1] + P1, std::min(Lr_p1[d+1] + P1, delta1))) - delta1;
L2 = Cpd + std::min((int)Lr_p2[d], std::min(Lr_p2[d-1] + P1, std::min(Lr_p2[d+1] + P1, delta2))) - delta2;
L3 = Cpd + std::min((int)Lr_p3[d], std::min(Lr_p3[d-1] + P1, std::min(Lr_p3[d+1] + P1, delta3))) - delta3;
Lr_p[d] = (CostType)L0;
minL0 = std::min(minL0, L0);
Lr_p[d + D2] = (CostType)L1;
minL1 = std::min(minL1, L1);
Lr_p[d + D2*2] = (CostType)L2;
minL2 = std::min(minL2, L2);
Lr_p[d + D2*3] = (CostType)L3;
minL3 = std::min(minL3, L3);
Sp[d] = saturate_cast<CostType>(Sp[d] + L0 + L1 + L2 + L3);
}
minLr[0][xm] = (CostType)minL0;
minLr[0][xm+1] = (CostType)minL1;
minLr[0][xm+2] = (CostType)minL2;
minLr[0][xm+3] = (CostType)minL3;
}
}
if( pass == npasses )
{
for( x = 0; x < width; x++ )
{
disp1ptr[x] = disp2ptr[x] = (DispType)INVALID_DISP_SCALED;
disp2cost[x] = MAX_COST;
}
for( x = width1 - 1; x >= 0; x-- )
{
CostType* Sp = S + x*D;
int minS = MAX_COST, bestDisp = -1;
if( npasses == 1 )
{
int xm = x*NR2, xd = xm*D2;
int minL0 = MAX_COST;
int delta0 = minLr[0][xm + NR2] + P2;
CostType* Lr_p0 = Lr[0] + xd + NRD2;
Lr_p0[-1] = Lr_p0[D] = MAX_COST;
CostType* Lr_p = Lr[0] + xd;
const CostType* Cp = C + x*D;
{
int minL0 = MAX_COST, minL1 = MAX_COST, minL2 = MAX_COST, minL3 = MAX_COST;
for( d = 0; d < D; d++ )
{
int Cpd = Cp[d], L0, L1, L2, L3;
L0 = Cpd + std::min((int)Lr_p0[d], std::min(Lr_p0[d-1] + P1, std::min(Lr_p0[d+1] + P1, delta0))) - delta0;
L1 = Cpd + std::min((int)Lr_p1[d], std::min(Lr_p1[d-1] + P1, std::min(Lr_p1[d+1] + P1, delta1))) - delta1;
L2 = Cpd + std::min((int)Lr_p2[d], std::min(Lr_p2[d-1] + P1, std::min(Lr_p2[d+1] + P1, delta2))) - delta2;
L3 = Cpd + std::min((int)Lr_p3[d], std::min(Lr_p3[d-1] + P1, std::min(Lr_p3[d+1] + P1, delta3))) - delta3;
Lr_p[d] = (CostType)L0;
minL0 = std::min(minL0, L0);
Lr_p[d + D2] = (CostType)L1;
minL1 = std::min(minL1, L1);
Lr_p[d + D2*2] = (CostType)L2;
minL2 = std::min(minL2, L2);
Lr_p[d + D2*3] = (CostType)L3;
minL3 = std::min(minL3, L3);
Sp[d] = saturate_cast<CostType>(Sp[d] + L0 + L1 + L2 + L3);
}
minLr[0][xm] = (CostType)minL0;
minLr[0][xm+1] = (CostType)minL1;
minLr[0][xm+2] = (CostType)minL2;
minLr[0][xm+3] = (CostType)minL3;
}
}
if( pass == npasses )
{
for( x = 0; x < width; x++ )
{
disp1ptr[x] = disp2ptr[x] = (DispType)INVALID_DISP_SCALED;
disp2cost[x] = MAX_COST;
}
for( x = width1 - 1; x >= 0; x-- )
{
CostType* Sp = S + x*D;
int minS = MAX_COST, bestDisp = -1;
if( npasses == 1 )
{
int xm = x*NR2, xd = xm*D2;
int minL0 = MAX_COST;
int delta0 = minLr[0][xm + NR2] + P2;
CostType* Lr_p0 = Lr[0] + xd + NRD2;
Lr_p0[-1] = Lr_p0[D] = MAX_COST;
CostType* Lr_p = Lr[0] + xd;
const CostType* Cp = C + x*D;
#if CV_SSE2
if( useSIMD )
{
__m128i _P1 = _mm_set1_epi16((short)P1);
__m128i _delta0 = _mm_set1_epi16((short)delta0);
__m128i _minL0 = _mm_set1_epi16((short)minL0);
__m128i _minS = _mm_set1_epi16(MAX_COST), _bestDisp = _mm_set1_epi16(-1);
__m128i _d8 = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7), _8 = _mm_set1_epi16(8);
for( d = 0; d < D; d += 8 )
{
__m128i Cpd = _mm_load_si128((const __m128i*)(Cp + d)), L0;
L0 = _mm_load_si128((const __m128i*)(Lr_p0 + d));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d - 1)), _P1));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d + 1)), _P1));
L0 = _mm_min_epi16(L0, _delta0);
L0 = _mm_adds_epi16(_mm_subs_epi16(L0, _delta0), Cpd);
_mm_store_si128((__m128i*)(Lr_p + d), L0);
_minL0 = _mm_min_epi16(_minL0, L0);
L0 = _mm_adds_epi16(L0, *(__m128i*)(Sp + d));
_mm_store_si128((__m128i*)(Sp + d), L0);
__m128i mask = _mm_cmpgt_epi16(_minS, L0);
_minS = _mm_min_epi16(_minS, L0);
_bestDisp = _mm_xor_si128(_bestDisp, _mm_and_si128(_mm_xor_si128(_bestDisp,_d8), mask));
_d8 = _mm_adds_epi16(_d8, _8);
}
short CV_DECL_ALIGNED(16) bestDispBuf[8];
_mm_store_si128((__m128i*)bestDispBuf, _bestDisp);
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 8));
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 4));
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 2));
__m128i qS = _mm_min_epi16(_minS, _mm_srli_si128(_minS, 8));
qS = _mm_min_epi16(qS, _mm_srli_si128(qS, 4));
qS = _mm_min_epi16(qS, _mm_srli_si128(qS, 2));
minLr[0][xm] = (CostType)_mm_cvtsi128_si32(_minL0);
minS = (CostType)_mm_cvtsi128_si32(qS);
qS = _mm_shuffle_epi32(_mm_unpacklo_epi16(qS, qS), 0);
qS = _mm_cmpeq_epi16(_minS, qS);
int idx = _mm_movemask_epi8(_mm_packs_epi16(qS, qS)) & 255;
bestDisp = bestDispBuf[LSBTab[idx]];
}
else
if( useSIMD )
{
__m128i _P1 = _mm_set1_epi16((short)P1);
__m128i _delta0 = _mm_set1_epi16((short)delta0);
__m128i _minL0 = _mm_set1_epi16((short)minL0);
__m128i _minS = _mm_set1_epi16(MAX_COST), _bestDisp = _mm_set1_epi16(-1);
__m128i _d8 = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7), _8 = _mm_set1_epi16(8);
for( d = 0; d < D; d += 8 )
{
__m128i Cpd = _mm_load_si128((const __m128i*)(Cp + d)), L0;
L0 = _mm_load_si128((const __m128i*)(Lr_p0 + d));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d - 1)), _P1));
L0 = _mm_min_epi16(L0, _mm_adds_epi16(_mm_loadu_si128((const __m128i*)(Lr_p0 + d + 1)), _P1));
L0 = _mm_min_epi16(L0, _delta0);
L0 = _mm_adds_epi16(_mm_subs_epi16(L0, _delta0), Cpd);
_mm_store_si128((__m128i*)(Lr_p + d), L0);
_minL0 = _mm_min_epi16(_minL0, L0);
L0 = _mm_adds_epi16(L0, *(__m128i*)(Sp + d));
_mm_store_si128((__m128i*)(Sp + d), L0);
__m128i mask = _mm_cmpgt_epi16(_minS, L0);
_minS = _mm_min_epi16(_minS, L0);
_bestDisp = _mm_xor_si128(_bestDisp, _mm_and_si128(_mm_xor_si128(_bestDisp,_d8), mask));
_d8 = _mm_adds_epi16(_d8, _8);
}
short CV_DECL_ALIGNED(16) bestDispBuf[8];
_mm_store_si128((__m128i*)bestDispBuf, _bestDisp);
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 8));
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 4));
_minL0 = _mm_min_epi16(_minL0, _mm_srli_si128(_minL0, 2));
__m128i qS = _mm_min_epi16(_minS, _mm_srli_si128(_minS, 8));
qS = _mm_min_epi16(qS, _mm_srli_si128(qS, 4));
qS = _mm_min_epi16(qS, _mm_srli_si128(qS, 2));
minLr[0][xm] = (CostType)_mm_cvtsi128_si32(_minL0);
minS = (CostType)_mm_cvtsi128_si32(qS);
qS = _mm_shuffle_epi32(_mm_unpacklo_epi16(qS, qS), 0);
qS = _mm_cmpeq_epi16(_minS, qS);
int idx = _mm_movemask_epi8(_mm_packs_epi16(qS, qS)) & 255;
bestDisp = bestDispBuf[LSBTab[idx]];
}
else
#endif
{
for( d = 0; d < D; d++ )
{
int L0 = Cp[d] + std::min((int)Lr_p0[d], std::min(Lr_p0[d-1] + P1, std::min(Lr_p0[d+1] + P1, delta0))) - delta0;
Lr_p[d] = (CostType)L0;
minL0 = std::min(minL0, L0);
int Sval = Sp[d] = saturate_cast<CostType>(Sp[d] + L0);
if( Sval < minS )
{
minS = Sval;
bestDisp = d;
}
}
minLr[0][xm] = (CostType)minL0;
}
}
else
{
for( d = 0; d < D; d++ )
{
int Sval = Sp[d];
if( Sval < minS )
{
minS = Sval;
bestDisp = d;
}
}
}
for( d = 0; d < D; d++ )
{
if( Sp[d]*(100 - uniquenessRatio) < minS*100 && std::abs(bestDisp - d) > 1 )
break;
}
if( d < D )
continue;
d = bestDisp;
int _x2 = x + minX1 - d - minD;
if( disp2cost[_x2] > minS )
{
disp2cost[_x2] = (CostType)minS;
disp2ptr[_x2] = (DispType)(d + minD);
}
if( 0 < d && d < D-1 )
{
// do subpixel quadratic interpolation:
// fit parabola into (x1=d-1, y1=Sp[d-1]), (x2=d, y2=Sp[d]), (x3=d+1, y3=Sp[d+1])
// then find minimum of the parabola.
int denom2 = std::max(Sp[d-1] + Sp[d+1] - 2*Sp[d], 1);
d = d*DISP_SCALE + ((Sp[d-1] - Sp[d+1])*DISP_SCALE + denom2)/(denom2*2);
}
else
d *= DISP_SCALE;
disp1ptr[x + minX1] = (DispType)(d + minD*DISP_SCALE);
}
for( x = minX1; x < maxX1; x++ )
{
// we round the computed disparity both towards -inf and +inf and check
// if either of the corresponding disparities in disp2 is consistent.
// This is to give the computed disparity a chance to look valid if it is.
int d1 = disp1ptr[x];
if( d1 == INVALID_DISP_SCALED )
continue;
int _d = d1 >> DISP_SHIFT;
int d_ = (d1 + DISP_SCALE-1) >> DISP_SHIFT;
int _x = x - _d, x_ = x - d_;
if( 0 <= _x && _x < width && disp2ptr[_x] >= minD && std::abs(disp2ptr[_x] - _d) > disp12MaxDiff &&
0 <= x_ && x_ < width && disp2ptr[x_] >= minD && std::abs(disp2ptr[x_] - d_) > disp12MaxDiff )
disp1ptr[x] = (DispType)INVALID_DISP_SCALED;
}
}
// now shift the cyclic buffers
std::swap( Lr[0], Lr[1] );
std::swap( minLr[0], minLr[1] );
}
}
}
class StereoBinarySGBMImpl : public StereoBinarySGBM
{
public:
StereoBinarySGBMImpl()
{
params = StereoBinarySGBMParams();
}
StereoBinarySGBMImpl( int _minDisparity, int _numDisparities, int _SADWindowSize,
int _P1, int _P2, int _disp12MaxDiff, int _preFilterCap,
int _uniquenessRatio, int _speckleWindowSize, int _speckleRange,
int _mode )
{
params = StereoBinarySGBMParams( _minDisparity, _numDisparities, _SADWindowSize,
_P1, _P2, _disp12MaxDiff, _preFilterCap,
_uniquenessRatio, _speckleWindowSize, _speckleRange,
_mode );
}
void compute( InputArray leftarr, InputArray rightarr, OutputArray disparr )
{
Mat left = leftarr.getMat(), right = rightarr.getMat();
CV_Assert( left.size() == right.size() && left.type() == right.type() &&
left.depth() == CV_8U );
disparr.create( left.size(), CV_16S );
Mat disp = disparr.getMat();
computeDisparityBinarySGBM( left, right, disp, params, buffer );
medianBlur(disp, disp, 3);
if( params.speckleWindowSize > 0 )
filterSpeckles(disp, (params.minDisparity - 1)*StereoMatcher::DISP_SCALE, params.speckleWindowSize,
StereoMatcher::DISP_SCALE*params.speckleRange, buffer);
}
int getMinDisparity() const { return params.minDisparity; }
void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
int getNumDisparities() const { return params.numDisparities; }
void setNumDisparities(int numDisparities) { params.numDisparities = numDisparities; }
int getBlockSize() const { return params.SADWindowSize; }
void setBlockSize(int blockSize) { params.SADWindowSize = blockSize; }
int getSpeckleWindowSize() const { return params.speckleWindowSize; }
void setSpeckleWindowSize(int speckleWindowSize) { params.speckleWindowSize = speckleWindowSize; }
int getSpeckleRange() const { return params.speckleRange; }
void setSpeckleRange(int speckleRange) { params.speckleRange = speckleRange; }
int getDisp12MaxDiff() const { return params.disp12MaxDiff; }
void setDisp12MaxDiff(int disp12MaxDiff) { params.disp12MaxDiff = disp12MaxDiff; }
int getPreFilterCap() const { return params.preFilterCap; }
void setPreFilterCap(int preFilterCap) { params.preFilterCap = preFilterCap; }
int getUniquenessRatio() const { return params.uniquenessRatio; }
void setUniquenessRatio(int uniquenessRatio) { params.uniquenessRatio = uniquenessRatio; }
int getP1() const { return params.P1; }
void setP1(int P1) { params.P1 = P1; }
int getP2() const { return params.P2; }
void setP2(int P2) { params.P2 = P2; }
int getMode() const { return params.mode; }
void setMode(int mode) { params.mode = mode; }
void write(FileStorage& fs) const
{
fs << "name" << name_
<< "minDisparity" << params.minDisparity
<< "numDisparities" << params.numDisparities
<< "blockSize" << params.SADWindowSize
<< "speckleWindowSize" << params.speckleWindowSize
<< "speckleRange" << params.speckleRange
<< "disp12MaxDiff" << params.disp12MaxDiff
<< "preFilterCap" << params.preFilterCap
<< "uniquenessRatio" << params.uniquenessRatio
<< "P1" << params.P1
<< "P2" << params.P2
<< "mode" << params.mode;
}
void read(const FileNode& fn)
{
FileNode n = fn["name"];
CV_Assert( n.isString() && String(n) == name_ );
params.minDisparity = (int)fn["minDisparity"];
params.numDisparities = (int)fn["numDisparities"];
params.SADWindowSize = (int)fn["blockSize"];
params.speckleWindowSize = (int)fn["speckleWindowSize"];
params.speckleRange = (int)fn["speckleRange"];
params.disp12MaxDiff = (int)fn["disp12MaxDiff"];
params.preFilterCap = (int)fn["preFilterCap"];
params.uniquenessRatio = (int)fn["uniquenessRatio"];
params.P1 = (int)fn["P1"];
params.P2 = (int)fn["P2"];
params.mode = (int)fn["mode"];
}
StereoBinarySGBMParams params;
Mat buffer;
static const char* name_;
};
const char* StereoBinarySGBMImpl::name_ = "StereoMatcher.SGBM";
Ptr<StereoBinarySGBM> StereoBinarySGBM::create(int minDisparity, int numDisparities, int SADWindowSize,
int P1, int P2, int disp12MaxDiff,
int preFilterCap, int uniquenessRatio,
int speckleWindowSize, int speckleRange,
int mode)
{
return Ptr<StereoBinarySGBM>(
new StereoBinarySGBMImpl(minDisparity, numDisparities, SADWindowSize,
P1, P2, disp12MaxDiff,
preFilterCap, uniquenessRatio,
speckleWindowSize, speckleRange,
mode));
}
typedef cv::Point_<short> Point2s;
}
{
for( d = 0; d < D; d++ )
{
int L0 = Cp[d] + std::min((int)Lr_p0[d], std::min(Lr_p0[d-1] + P1, std::min(Lr_p0[d+1] + P1, delta0))) - delta0;
Lr_p[d] = (CostType)L0;
minL0 = std::min(minL0, L0);
int Sval = Sp[d] = saturate_cast<CostType>(Sp[d] + L0);
if( Sval < minS )
{
minS = Sval;
bestDisp = d;
}
}
minLr[0][xm] = (CostType)minL0;
}
}
else
{
for( d = 0; d < D; d++ )
{
int Sval = Sp[d];
if( Sval < minS )
{
minS = Sval;
bestDisp = d;
}
}
}
for( d = 0; d < D; d++ )
{
if( Sp[d]*(100 - uniquenessRatio) < minS*100 && std::abs(bestDisp - d) > 1 )
break;
}
if( d < D )
continue;
d = bestDisp;
int _x2 = x + minX1 - d - minD;
if( disp2cost[_x2] > minS )
{
disp2cost[_x2] = (CostType)minS;
disp2ptr[_x2] = (DispType)(d + minD);
}
if( 0 < d && d < D-1 )
{
// do subpixel quadratic interpolation:
// fit parabola into (x1=d-1, y1=Sp[d-1]), (x2=d, y2=Sp[d]), (x3=d+1, y3=Sp[d+1])
// then find minimum of the parabola.
int denom2 = std::max(Sp[d-1] + Sp[d+1] - 2*Sp[d], 1);
d = d*DISP_SCALE + ((Sp[d-1] - Sp[d+1])*DISP_SCALE + denom2)/(denom2*2);
}
else
d *= DISP_SCALE;
disp1ptr[x + minX1] = (DispType)(d + minD*DISP_SCALE);
}
for( x = minX1; x < maxX1; x++ )
{
// we round the computed disparity both towards -inf and +inf and check
// if either of the corresponding disparities in disp2 is consistent.
// This is to give the computed disparity a chance to look valid if it is.
int d1 = disp1ptr[x];
if( d1 == INVALID_DISP_SCALED )
continue;
int _d = d1 >> DISP_SHIFT;
int d_ = (d1 + DISP_SCALE-1) >> DISP_SHIFT;
int _x = x - _d, x_ = x - d_;
if( 0 <= _x && _x < width && disp2ptr[_x] >= minD && std::abs(disp2ptr[_x] - _d) > disp12MaxDiff &&
0 <= x_ && x_ < width && disp2ptr[x_] >= minD && std::abs(disp2ptr[x_] - d_) > disp12MaxDiff )
disp1ptr[x] = (DispType)INVALID_DISP_SCALED;
}
}
// now shift the cyclic buffers
std::swap( Lr[0], Lr[1] );
std::swap( minLr[0], minLr[1] );
}
}
}
class StereoBinarySGBMImpl : public StereoBinarySGBM
{
public:
StereoBinarySGBMImpl()
{
params = StereoBinarySGBMParams();
}
StereoBinarySGBMImpl( int _minDisparity, int _numDisparities, int _SADWindowSize,
int _P1, int _P2, int _disp12MaxDiff, int _preFilterCap,
int _uniquenessRatio, int _speckleWindowSize, int _speckleRange,
int _mode )
{
params = StereoBinarySGBMParams( _minDisparity, _numDisparities, _SADWindowSize,
_P1, _P2, _disp12MaxDiff, _preFilterCap,
_uniquenessRatio, _speckleWindowSize, _speckleRange,
_mode );
}
void compute( InputArray leftarr, InputArray rightarr, OutputArray disparr )
{
Mat left = leftarr.getMat(), right = rightarr.getMat();
CV_Assert( left.size() == right.size() && left.type() == right.type() &&
left.depth() == CV_8U );
disparr.create( left.size(), CV_16S );
Mat disp = disparr.getMat();
computeDisparityBinarySGBM( left, right, disp, params, buffer );
// medianBlur(disp, disp, 3);
if( params.speckleWindowSize > 0 )
filterSpeckles(disp, (params.minDisparity - 1)*StereoMatcher::DISP_SCALE, params.speckleWindowSize,
StereoMatcher::DISP_SCALE*params.speckleRange, buffer);
}
int getMinDisparity() const { return params.minDisparity; }
void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
int getNumDisparities() const { return params.numDisparities; }
void setNumDisparities(int numDisparities) { params.numDisparities = numDisparities; }
int getBlockSize() const { return params.SADWindowSize; }
void setBlockSize(int blockSize) { params.SADWindowSize = blockSize; }
int getSpeckleWindowSize() const { return params.speckleWindowSize; }
void setSpeckleWindowSize(int speckleWindowSize) { params.speckleWindowSize = speckleWindowSize; }
int getSpeckleRange() const { return params.speckleRange; }
void setSpeckleRange(int speckleRange) { params.speckleRange = speckleRange; }
int getDisp12MaxDiff() const { return params.disp12MaxDiff; }
void setDisp12MaxDiff(int disp12MaxDiff) { params.disp12MaxDiff = disp12MaxDiff; }
int getPreFilterCap() const { return params.preFilterCap; }
void setPreFilterCap(int preFilterCap) { params.preFilterCap = preFilterCap; }
int getUniquenessRatio() const { return params.uniquenessRatio; }
void setUniquenessRatio(int uniquenessRatio) { params.uniquenessRatio = uniquenessRatio; }
int getP1() const { return params.P1; }
void setP1(int P1) { params.P1 = P1; }
int getP2() const { return params.P2; }
void setP2(int P2) { params.P2 = P2; }
int getMode() const { return params.mode; }
void setMode(int mode) { params.mode = mode; }
void write(FileStorage& fs) const
{
fs << "name" << name_
<< "minDisparity" << params.minDisparity
<< "numDisparities" << params.numDisparities
<< "blockSize" << params.SADWindowSize
<< "speckleWindowSize" << params.speckleWindowSize
<< "speckleRange" << params.speckleRange
<< "disp12MaxDiff" << params.disp12MaxDiff
<< "preFilterCap" << params.preFilterCap
<< "uniquenessRatio" << params.uniquenessRatio
<< "P1" << params.P1
<< "P2" << params.P2
<< "mode" << params.mode;
}
void read(const FileNode& fn)
{
FileNode n = fn["name"];
CV_Assert( n.isString() && String(n) == name_ );
params.minDisparity = (int)fn["minDisparity"];
params.numDisparities = (int)fn["numDisparities"];
params.SADWindowSize = (int)fn["blockSize"];
params.speckleWindowSize = (int)fn["speckleWindowSize"];
params.speckleRange = (int)fn["speckleRange"];
params.disp12MaxDiff = (int)fn["disp12MaxDiff"];
params.preFilterCap = (int)fn["preFilterCap"];
params.uniquenessRatio = (int)fn["uniquenessRatio"];
params.P1 = (int)fn["P1"];
params.P2 = (int)fn["P2"];
params.mode = (int)fn["mode"];
}
StereoBinarySGBMParams params;
Mat buffer;
static const char* name_;
};
        // Serialization name shared by StereoBinarySGBMImpl::write()/read().
        const char* StereoBinarySGBMImpl::name_ = "StereoMatcher.SGBM";
        // Factory method: constructs a binary-descriptor SGBM matcher with the
        // supplied tuning parameters and returns it behind a smart pointer.
        Ptr<StereoBinarySGBM> StereoBinarySGBM::create(int minDisparity, int numDisparities, int SADWindowSize,
            int P1, int P2, int disp12MaxDiff,
            int preFilterCap, int uniquenessRatio,
            int speckleWindowSize, int speckleRange,
            int mode)
        {
            return Ptr<StereoBinarySGBM>(
                new StereoBinarySGBMImpl(minDisparity, numDisparities, SADWindowSize,
                    P1, P2, disp12MaxDiff,
                    preFilterCap, uniquenessRatio,
                    speckleWindowSize, speckleRange,
                    mode));
        }
        // 16-bit signed 2-D point alias.
        typedef cv::Point_<short> Point2s;
}
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment