Commit df859520 authored by Muresan Mircea Paul

Made modifications in accordance with the comments

Fixed the unused-variable warning

Fixed Windows warnings

Added 2 samples to show how to access the functionality

Fixed issues

Added the up-to-date version of sbm

Modified the samples to be warning-free
parent 54d41251
#include <iostream>
#include <sstream>
#include "opencv2/stereo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"

using namespace cv;
using namespace stereo;
using namespace std;

// In this example we load a sequence of images from disk, process them and display the result on screen.
// The descriptor used is the modified census transform.
int main(int, char**)
{
    // begin the program
    cout << " Running Main function \n";
    // declare the two input images
    Mat image1, image2;
    // -- 1. Call the constructor for StereoBinaryBM
    int ndisparities = 32;  /**< Range of disparity */
    int kernelSize = 9;     /**< Size of the block window. Must be odd */
    Ptr<StereoBinaryBM> sbm = StereoBinaryBM::create(ndisparities, kernelSize);
    // -- 2. Set parameters
    sbm->setPreFilterCap(31);
    sbm->setMinDisparity(0);
    sbm->setTextureThreshold(10);
    sbm->setUniquenessRatio(0);
    sbm->setSpeckleWindowSize(400); // speckle window size
    sbm->setSpeckleRange(200);
    sbm->setDisp12MaxDiff(0);
    sbm->setScalleFactor(4); // the scaling factor
    sbm->setBinaryKernelType(CV_MODIFIED_CENSUS_TRANSFORM); // binary descriptor kernel
    sbm->setAgregationWindowSize(9);
    sbm->setSpekleRemovalTechnique(CV_SPECKLE_REMOVAL_AVG_ALGORITHM); // speckle removal algorithm
    sbm->setUsePrefilter(false); // whether to prefilter the images before computing the descriptors
    for (int i = 0; i < 200; i++)
    {
        // left frames are named rezult<i>l.bmp, right frames rezult<i>.bmp
        string path = "D:\\WorkingSec";
        string left = "l.bmp";
        string right = ".bmp";
        std::stringstream out;
        out << i;
        std::string s = out.str();
        string finLeft = path + "\\rezult" + s + left;
        string finRight = path + "\\rezult" + s + right;
        image1 = imread(finLeft, IMREAD_GRAYSCALE);
        image2 = imread(finRight, IMREAD_GRAYSCALE);
        if (image1.empty() || image2.empty())
        {
            std::cout << " --(!) Error reading images \n";
            return -1;
        }
        // set a certain region of interest
        Rect region_of_interest = Rect(0, 20, image1.cols, (image1.rows - 20 - 110));
        Mat imgLeft = image1(region_of_interest);
        Mat imgRight = image2(region_of_interest);
        Mat imgDisparity8U = Mat(imgLeft.rows, imgLeft.cols, CV_8UC1);
        // -- 3. Calculate the disparity image
        sbm->compute(imgLeft, imgRight, imgDisparity8U);
        imshow("RealImage", image1);
        imshow("Disparity", imgDisparity8U);
        waitKey(1);
    }
    waitKey(0);
    return 0;
}
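A possible follow-up to the sample above (a minimal sketch, not part of this commit): the raw 8-bit disparity map can be rescaled to the full intensity range and written to disk for offline inspection. The output file name disparity.png is a placeholder.

    // Sketch only: rescale the disparity map for visualization and save it.
    // Assumes imgDisparity8U was filled by sbm->compute() as in the loop above.
    Mat disparityVis;
    normalize(imgDisparity8U, disparityVis, 0, 255, NORM_MINMAX, CV_8UC1);
    imwrite("disparity.png", disparityVis); // hypothetical output path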
#include <iostream>
#include "opencv2/stereo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"

using namespace cv;
using namespace stereo;
using namespace std;

// In this example we load a single stereo pair from disk, compute the disparity
// map with the mean-variation descriptor and display the result on screen.
int main(int, char**)
{
    // begin the program
    cout << " Running Main function \n";
    // declare the two input images
    Mat image1, image2;
    // -- 1. Call the constructor for StereoBinaryBM
    int ndisparities = 32;  /**< Range of disparity */
    int kernelSize = 9;     /**< Size of the block window. Must be odd */
    Ptr<StereoBinaryBM> sbm = StereoBinaryBM::create(ndisparities, kernelSize);
    // -- 2. Set parameters
    sbm->setPreFilterCap(31);
    sbm->setMinDisparity(0);
    sbm->setTextureThreshold(10);
    sbm->setUniquenessRatio(0);
    sbm->setSpeckleWindowSize(400); // speckle window size
    sbm->setSpeckleRange(200);
    sbm->setDisp12MaxDiff(0);
    sbm->setScalleFactor(4); // the scaling factor
    sbm->setBinaryKernelType(CV_MEAN_VARIATION); // binary descriptor kernel
    sbm->setAgregationWindowSize(9);
    sbm->setSpekleRemovalTechnique(CV_SPECKLE_REMOVAL_AVG_ALGORITHM); // speckle removal algorithm
    sbm->setUsePrefilter(false); // whether to prefilter the images before computing the descriptors
    // load the two images from disk
    image1 = imread("D:\\rezult0l.bmp", IMREAD_GRAYSCALE);
    image2 = imread("D:\\rezult0.bmp", IMREAD_GRAYSCALE);
    if (image1.empty() || image2.empty())
    {
        std::cout << " --(!) Error reading images \n";
        return -1;
    }
    // set a certain region of interest
    Rect region_of_interest = Rect(0, 20, image1.cols, (image1.rows - 20 - 110));
    Mat imgLeft = image1(region_of_interest);
    Mat imgRight = image2(region_of_interest);
    Mat imgDisparity8U = Mat(imgLeft.rows, imgLeft.cols, CV_8UC1);
    // -- 3. Calculate the disparity image
    sbm->compute(imgLeft, imgRight, imgDisparity8U);
    imshow("RealImage", image1);
    imshow("Disparity", imgDisparity8U);
    waitKey(0);
    return 0;
}
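To compare the two binary kernels used by these samples (CV_MODIFIED_CENSUS_TRANSFORM versus CV_MEAN_VARIATION), one option is to time the compute call with OpenCV's tick counter. This is a sketch only, assuming sbm, imgLeft, imgRight and imgDisparity8U are set up as in the samples; it is not part of the commit.

    // Sketch only: measure how long one disparity computation takes.
    int64 t0 = getTickCount();
    sbm->compute(imgLeft, imgRight, imgDisparity8U);
    int64 t1 = getTickCount();
    double seconds = (t1 - t0) / getTickFrequency();
    cout << "compute() took " << seconds << " s\n";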
@@ -58,7 +58,7 @@ namespace cv
enum {
CV_DENSE_CENSUS, CV_SPARSE_CENSUS,
CV_CS_CENSUS, CV_MODIFIED_CS_CENSUS, CV_MODIFIED_CENSUS_TRANSFORM,
CV_MEAN_VARIATION
CV_MEAN_VARIATION, CV_STAR_KERNEL
};
//!Mean Variation is a robust kernel that compares a pixel
//!not just with the center but also with the mean of the window
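For readers unfamiliar with these descriptors, the sketch below illustrates the idea behind a plain census transform (CV_DENSE_CENSUS): each pixel is encoded by comparing its neighbours with the centre value, and matching costs are then Hamming distances between such codes. This is an illustration only, not the module's implementation; the 3x3 window and the helper name censusTransform3x3 are assumptions.

    #include <opencv2/core.hpp>
    // Sketch only: 3x3 census transform. Bit k of the descriptor is 1 when the
    // k-th neighbour is darker than the centre pixel. The mean-variation kernel
    // additionally compares neighbours against the mean of the window.
    static cv::Mat censusTransform3x3(const cv::Mat &gray)
    {
        CV_Assert(gray.type() == CV_8UC1);
        cv::Mat desc = cv::Mat::zeros(gray.size(), CV_8UC1);
        for (int y = 1; y < gray.rows - 1; ++y)
        {
            for (int x = 1; x < gray.cols - 1; ++x)
            {
                uchar centre = gray.at<uchar>(y, x);
                uchar bits = 0;
                for (int dy = -1; dy <= 1; ++dy)
                    for (int dx = -1; dx <= 1; ++dx)
                    {
                        if (dx == 0 && dy == 0)
                            continue;
                        bits = (uchar)((bits << 1) | (gray.at<uchar>(y + dy, x + dx) < centre ? 1 : 0));
                    }
                desc.at<uchar>(y, x) = bits;
            }
        }
        return desc;
    }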
@@ -57,14 +57,12 @@ namespace cv
//maxDisp - represents the maximum disparity
Matching::Matching(int maxDisp, int scalling, int confidence)
{
CV_Assert(maxDisp > 10);
CV_Assert(scalling != 0);
CV_Assert(confidence >= 1);
this->scallingFactor = scalling;
//set the maximum disparity
this->maxDisparity = maxDisp;
setMaxDisparity(maxDisp);
//set scalling factor
setScallingFactor(scalling);
//set the value for the confidence
this->confidenceCheck = confidence;
setConfidence(confidence);
//generate the hamming lut in case SSE is not available
hammingLut();
}
@@ -82,7 +80,7 @@ namespace cv
void Matching::setScallingFactor(int val)
{
CV_Assert(val > 0);
scallingFactor = val;
this->scallingFactor = val;
}
//!method for getting the scalling factor
int Matching::getScallingFactor()
@@ -99,10 +97,10 @@ namespace cv
CV_Assert(kernelSize % 2 != 0);
CV_Assert(cost.rows == leftImage.rows);
CV_Assert(cost.cols / (maxDisparity + 1) == leftImage.cols);
// cost.setTo(0);
int *c = (int *)cost.data;
memset(c, 0, sizeof(c[0]) * leftImage.cols * leftImage.rows * (maxDisparity + 1));
parallel_for_(cv::Range(kernelSize / 2,leftImage.rows - kernelSize / 2), hammingDistance(leftImage,rightImage,c,maxDisparity,kernelSize / 2,hamLut));
cost.setTo(0);
//int *c = (int *)cost.data;
//memset(c, 0, sizeof(c[0]) * leftImage.cols * leftImage.rows * (maxDisparity + 1));
parallel_for_(cv::Range(kernelSize / 2,leftImage.rows - kernelSize / 2), hammingDistance(leftImage,rightImage,(int *)cost.data,maxDisparity,kernelSize / 2,hamLut));
}
//preprocessing the cost volume in order to get it ready for aggregation
void Matching::costGathering(const Mat &hammingDistanceCost, Mat &cost)
@@ -110,12 +108,12 @@ namespace cv
CV_Assert(hammingDistanceCost.rows == hammingDistanceCost.rows);
CV_Assert(hammingDistanceCost.type() == CV_32SC4);
CV_Assert(cost.type() == CV_32SC4);
//cost.setTo(0);
cost.setTo(0);
int maxDisp = maxDisparity;
int width = cost.cols / ( maxDisp + 1) - 1;
int height = cost.rows - 1;
int *c = (int *)cost.data;
memset(c, 0, sizeof(c[0]) * (width + 1) * (height + 1) * (maxDisp + 1));
//memset(c, 0, sizeof(c[0]) * (width + 1) * (height + 1) * (maxDisp + 1));
parallel_for_(cv::Range(1,height), costGatheringHorizontal(hammingDistanceCost,maxDisparity,cost));
for (int i = 1; i <= height; i++)
{
@@ -136,12 +134,13 @@ namespace cv
CV_Assert(windowSize % 2 != 0);
CV_Assert(partialSums.rows == cost.rows);
CV_Assert(partialSums.cols == cost.cols);
cost.setTo(0);
int win = windowSize / 2;
int *c = (int *)cost.data;
//int *c = (int *)cost.data;
int maxDisp = maxDisparity;
int width = cost.cols / ( maxDisp + 1) - 1;
//int width = cost.cols / ( maxDisp + 1) - 1;
int height = cost.rows - 1;
memset(c, 0, sizeof(c[0]) * width * height * (maxDisp + 1));
//memset(c, 0, sizeof(c[0]) * width * height * (maxDisp + 1));
parallel_for_(cv::Range(win + 1,height - win - 1), agregateCost(partialSums,windowSize,maxDisp,cost));
}
//!Finding the correct disparity from the cost volume, we also make a confidence check
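The two routines above follow the usual partial-sums pattern: costGathering turns the per-pixel Hamming costs into running sums, so that blockAgregation can obtain the cost summed over any aggregation window with a handful of additions and subtractions. The one-dimensional sketch below shows that idea in isolation; it is an illustration under assumed names, not the module's code.

    #include <vector>
    // Sketch only: aggregate a per-pixel cost over a window of radius `win`
    // using prefix sums, so every output value costs O(1) instead of O(window).
    static std::vector<int> aggregate1D(const std::vector<int> &cost, int win)
    {
        const int n = (int)cost.size();
        std::vector<int> prefix(n + 1, 0), out(n, 0);
        for (int i = 0; i < n; ++i)
            prefix[i + 1] = prefix[i] + cost[i];
        for (int i = win; i < n - win; ++i)
            out[i] = prefix[i + win + 1] - prefix[i - win]; // sum over [i - win, i + win]
        return out;
    }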
@@ -303,7 +302,7 @@ namespace cv
void Matching ::setConfidence(double val)
{
CV_Assert(val >= 1);
confidenceCheck = val;
this->confidenceCheck = val;
}
//getter for confidence check
double Matching ::getConfidence()
@@ -313,26 +312,27 @@ namespace cv
//!Method responsible for generating the disparity map
void Matching::dispartyMapFormation(const Mat &costVolume, Mat &mapFinal, int th)
{
uint8_t *map = mapFinal.data;
mapFinal.setTo(0);
//uint8_t *map = mapFinal.data;
int disparity = maxDisparity;
int width = costVolume.cols / ( disparity + 1) - 1;
//int width = costVolume.cols / ( disparity + 1) - 1;
int height = costVolume.rows - 1;
memset(map, 0, sizeof(map[0]) * width * height);
//memset(map, 0, sizeof(map[0]) * width * height);
parallel_for_(Range(0,height - 1), makeMap(costVolume,th,disparity,confidenceCheck,scallingFactor,mapFinal));
}
//!1x9 median filter
void Matching::Median1x9Filter(const Mat &originalMap, Mat &map)
void Matching::Median1x9Filter(const Mat &originalImage, Mat &filteredImage)
{
CV_Assert(originalMap.rows == map.rows);
CV_Assert(originalMap.cols == map.cols);
parallel_for_(Range(1,originalMap.rows - 2), Median1x9(originalMap,map));
CV_Assert(originalImage.rows == filteredImage.rows);
CV_Assert(originalImage.cols == filteredImage.cols);
parallel_for_(Range(1,originalImage.rows - 2), Median1x9(originalImage,filteredImage));
}
//!9x1 median filter
void Matching::Median9x1Filter(const Mat &originalMap, Mat &map)
void Matching::Median9x1Filter(const Mat &originalImage, Mat &filteredImage)
{
CV_Assert(originalMap.cols == map.cols);
CV_Assert(originalMap.cols == map.cols);
parallel_for_(Range(1,originalMap.cols - 2), Median9x1(originalMap,map));
CV_Assert(originalImage.cols == filteredImage.cols);
CV_Assert(originalImage.cols == filteredImage.cols);
parallel_for_(Range(1,originalImage.cols - 2), Median9x1(originalImage,filteredImage));
}
}
}
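dispartyMapFormation above picks, for every pixel, the disparity with the lowest aggregated cost and keeps it only if it passes the confidence check before scaling by scallingFactor. The sketch below shows a generic winner-take-all selection with a simple ratio test; it is an illustration similar in spirit to makeMap, not its actual body.

    #include <climits>
    // Sketch only: winner-take-all over cost[d], d = 0..maxDisp, with a
    // ratio-based confidence check between the best and second-best cost.
    static int pickDisparity(const int *cost, int maxDisp, double confidence)
    {
        int bestD = 0, bestCost = cost[0], secondCost = INT_MAX;
        for (int d = 1; d <= maxDisp; ++d)
        {
            if (cost[d] < bestCost) { secondCost = bestCost; bestCost = cost[d]; bestD = d; }
            else if (cost[d] < secondCost) { secondCost = cost[d]; }
        }
        // Reject the pixel when the best cost is not clearly better than the runner-up.
        if (secondCost != INT_MAX && bestCost * confidence > secondCost)
            return -1; // low-confidence pixel, left for later filtering
        return bestD;
    }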
@@ -81,12 +81,12 @@ namespace cv
{
private:
int *left, *right, *c;
int v,kernelSize, width, height;
int v,kernelSize, width, height,_stride;
int MASK;
int *hammLut;
public :
hammingDistance(const Mat &leftImage, const Mat &rightImage, int *cost, int maxDisp, int kerSize, int *hammingLUT):
left((int *)leftImage.data), right((int *)rightImage.data), c(cost), v(maxDisp),kernelSize(kerSize),width(leftImage.cols), height(leftImage.rows), MASK(65535), hammLut(hammingLUT){}
left((int *)leftImage.data), right((int *)rightImage.data), c(cost), v(maxDisp),kernelSize(kerSize),width(leftImage.cols), height(leftImage.rows), _stride((int)leftImage.step1()), MASK(65535), hammLut(hammingLUT){}
void operator()(const cv::Range &r) const {
for (int i = r.start; i <= r.end ; i++)
{
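The hammingDistance functor above (truncated by the diff) computes the per-pixel matching cost as the Hamming distance between the two binary descriptors, using a precomputed lookup table for the case where SSE popcount is unavailable. A minimal sketch of the same cost, illustration only, with a plain bit-clearing popcount:

    #include <cstdint>
    // Sketch only: the cost of matching two census descriptors is the number of
    // differing bits, i.e. the population count of their XOR.
    static inline int hammingCost(uint16_t leftDesc, uint16_t rightDesc)
    {
        uint16_t x = (uint16_t)(leftDesc ^ rightDesc);
        int count = 0;
        while (x)
        {
            x = (uint16_t)(x & (x - 1)); // clear the lowest set bit
            ++count;
        }
        return count;
    }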
@@ -246,16 +246,17 @@ namespace cv
class Median1x9:public ParallelLoopBody
{
private:
uint8_t *harta;
uint8_t *mapModified;
int height, width;
uint8_t *original;
uint8_t *filtered;
int height, width,_stride;
public:
Median1x9(const Mat &hartaOriginala, Mat &map)
Median1x9(const Mat &originalImage, Mat &filteredImage)
{
harta = hartaOriginala.data;
mapModified = map.data;
height = hartaOriginala.rows;
width = hartaOriginala.cols;
original = originalImage.data;
filtered = filteredImage.data;
height = originalImage.rows;
width = originalImage.cols;
_stride = (int)originalImage.step;
}
void operator()(const cv::Range &r) const{
for (int m = r.start; m <= r.end; m++)
@@ -265,7 +266,7 @@ namespace cv
int k = 0;
uint8_t window[9];
for (int i = n - 4; i <= n + 4; ++i)
window[k++] = harta[m * width + i];
window[k++] = original[m * _stride + i];
for (int j = 0; j < 5; ++j)
{
int min = j;
@@ -276,7 +277,7 @@ namespace cv
window[j] = window[min];
window[min] = temp;
}
mapModified[m * width + n] = window[4];
filtered[m * _stride + n] = window[4];
}
}
}
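The inner loop above is a partial selection sort: only five passes are needed because only the middle element, window[4], the median of the nine samples, is ever read. The equivalent one-liner with the standard library, shown as an illustration only:

    #include <algorithm>
    #include <cstdint>
    // Sketch only: median of a 9-element window via std::nth_element.
    static uint8_t medianOf9(uint8_t window[9])
    {
        std::nth_element(window, window + 4, window + 9);
        return window[4];
    }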
@@ -285,16 +286,17 @@ namespace cv
class Median9x1:public ParallelLoopBody
{
private:
uint8_t *harta;
uint8_t *mapModified;
int height, width;
uint8_t *original;
uint8_t *filtered;
int height, width, _stride;
public:
Median9x1(const Mat &hartaOriginala, Mat &map)
Median9x1(const Mat &originalImage, Mat &filteredImage)
{
harta = hartaOriginala.data;
mapModified = map.data;
height = hartaOriginala.rows;
width = hartaOriginala.cols;
original = originalImage.data;
filtered = filteredImage.data;
height = originalImage.rows;
width = originalImage.cols;
_stride = (int)originalImage.step;
}
void operator()(const Range &r) const{
for (int n = r.start; n <= r.end; ++n)
@@ -304,7 +306,7 @@ namespace cv
int k = 0;
uint8_t window[9];
for (int i = m - 4; i <= m + 4; ++i)
window[k++] = harta[i * width + n];
window[k++] = original[i * _stride + n];
for (int j = 0; j < 5; j++)
{
int min = j;
@@ -315,12 +317,12 @@ namespace cv
window[j] = window[min];
window[min] = temp;
}
mapModified[m * width + n] = window[4];
filtered[m * _stride + n] = window[4];
}
}
}
};
public:
protected:
//!method for setting the maximum disparity
void setMaxDisparity(int val);
//!method for getting the disparity
@@ -347,12 +349,13 @@ namespace cv
*th - is the LR threshold
*/
void dispartyMapFormation(const Mat &costVolume, Mat &map, int th);
void smallRegionRemoval(const Mat &input, int t, Mat &out);
public:
static void Median1x9Filter(const Mat &inputImage, Mat &outputImage);
static void Median9x1Filter(const Mat &inputImage, Mat &outputImage);
//!constructor for the matching class
//!maxDisp - represents the maximum disparity
//!a median filter that has proven to work a bit better especially when applied on disparity maps
static void Median1x9Filter(const Mat &hartaOriginala, Mat &map);
static void Median9x1Filter(const Mat &hartaOriginala, Mat &map);
void smallRegionRemoval(const Mat &harta, int t, Mat &out);
Matching(int maxDisp, int scallingFactor = 4,int confidenceCheck = 6);
Matching(void);
~Matching(void);