Commit 27609c12 authored by baisheng lai

make optimization of omnidir::calibrate more stable

parent 971b7459
......@@ -133,10 +133,10 @@ public:
@matcher feature matcher.
*/
multiCameraCalibration(int cameraType, int nCameras, const std::string& fileName, float patternWidth,
float patternHeight, int showExtration = 0, int nMiniMatches = 20, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 1e-5),
Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.005f),
Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.005f),
float patternHeight, int verbose = 0, int showExtration = 0, int nMiniMatches = 20, int flags = 0,
TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 200, 1e-7),
Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.006f),
Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.006f),
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-L1"));
/* @brief load images
......@@ -189,6 +189,7 @@ private:
int _nCamera;
int _nMiniMatches;
int _flags;
int _verbose;
double _error;
float _patternWidth, _patternHeight;
TermCriteria _criteria;
......
......@@ -251,10 +251,11 @@ namespace internal
const Size& size1, const Size& size2, OutputArray om, OutputArray T, OutputArrayOfArrays omL, OutputArrayOfArrays tL, OutputArray K1, OutputArray D1, OutputArray K2, OutputArray D2,
double &xi1, double &xi2, int flags, OutputArray idx);
void computeJacobian(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags);
void computeJacobian(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags,
double epsilon);
void computeJacobianStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags);
InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags, double epsilon);
void encodeParameters(InputArray K, InputArrayOfArrays omAll, InputArrayOfArrays tAll, InputArray distoaration, double xi, OutputArray parameters);
......
......@@ -79,9 +79,9 @@ public:
@matcher feature matcher.
*/
randomPatternCornerFinder(float patternWidth, float patternHeight,
int nminiMatch = 20, int depth = CV_32F, int showExtraction = 0,
Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.002f),
Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.002f),
int nminiMatch = 20, int depth = CV_32F, int verbose = 0, int showExtraction = 0,
Ptr<FeatureDetector> detector = AKAZE::create(AKAZE::DESCRIPTOR_MLDB, 0, 3, 0.005f),
Ptr<DescriptorExtractor> descriptor = AKAZE::create(AKAZE::DESCRIPTOR_MLDB,0, 3, 0.005f),
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-L1"));
/* @brief Load pattern image and compute features for pattern
......@@ -122,6 +122,7 @@ private:
cv::Size _patternImageSize;
int _nminiMatch;
int _depth;
int _verbose;
Ptr<FeatureDetector> _detector;
Ptr<DescriptorExtractor> _descriptor;
......@@ -141,7 +142,7 @@ private:
std::vector<DMatch>& filteredMatches12, int knn=1 );
void drawCorrespondence(const Mat& image1, const std::vector<cv::KeyPoint> keypoint1,
const Mat& image2, const std::vector<cv::KeyPoint> keypoint2, const std::vector<cv::DMatch> matchces,
const Mat& mask1, const Mat& mask2);
const Mat& mask1, const Mat& mask2, const int step);
};
/* @brief Class to generate "random" pattern image that are used for randomPatternCornerFinder
......
......@@ -7,7 +7,7 @@ using namespace cv;
const char * usage =
"\n example command line for multi-camera calibration by using random pattern \n"
" multiCamCalib -nc 5 -pw 800 -ph 600 -ct 1 -fe 0 -nm 20 multi_camera_omnidir.xml \n"
" multiCamCalib -nc 5 -pw 800 -ph 600 -ct 1 -fe 0 -nm 25 -v 0 multi_camera_omnidir.xml \n"
"\n"
" the file multi_camera_omnidir.xml is generated by imagelist_creator as \n"
" imagelist_creator multi_camera_omnidir.xml *.* \n"
......@@ -26,6 +26,7 @@ static void help()
" -ct <camera_type> # camera type, 0 for pinhole and 1 for omnidirectional \n"
" -fe # whether show feature extraction\n"
" -nm # number of minimal matches of an image \n"
" -v # whether show verbose information \n"
" input_data # text file with pattern file names and a list of photo names, the file is generated by imagelist_creator \n");
printf("\n %s", usage);
}
......@@ -37,7 +38,7 @@ int main(int argc, char** argv)
int nCamera, nMiniMatches, cameraType;
const char* outputFilename = "multi-camera-results.xml";
const char* inputFilename = 0;
int showFeatureExtraction;
int showFeatureExtraction, verbose;
if (argc < 2)
{
help();
......@@ -89,6 +90,13 @@ int main(int argc, char** argv)
return fprintf(stderr, "Invalid number of minimal matches \n"), -1;
}
}
else if ( strcmp( s, "-v" ) == 0 )
{
if (sscanf( argv[++i], "%u", &verbose) != 1 || (verbose !=1 && verbose !=0) )
{
return fprintf(stderr, "verbose is not bool value, set to 0 or 1 \n"), -1;
}
}
else if( s[0] != '-')
{
inputFilename = s;
......@@ -100,12 +108,13 @@ int main(int argc, char** argv)
}
// do multi-camera calibration
multiCameraCalibration multiCalib(cameraType, nCamera, inputFilename, patternWidth, patternHeight, showFeatureExtraction, nMiniMatches);
multiCameraCalibration multiCalib(cameraType, nCamera, inputFilename, patternWidth, patternHeight, verbose, showFeatureExtraction, nMiniMatches);
multiCalib.loadImages();
multiCalib.initialize();
multiCalib.optimizeExtrinsics();
// the above three lines can be replaced by multiCalib.run();
multiCalib.writeParameters(outputFilename);
multiCalib.writeParameters(outputFilename);
}
\ No newline at end of file
......@@ -59,6 +59,7 @@
#include "precomp.hpp"
#include "opencv2/ccalib/multiCameraCalibration.hpp"
#include "opencv2/core.hpp"
#include <string>
#include <vector>
#include <queue>
......@@ -66,7 +67,7 @@
using namespace cv;
multiCameraCalibration::multiCameraCalibration(int cameraType, int nCameras, const std::string& fileName,
float patternWidth, float patternHeight, int showExtration, int nMiniMatches, int flags, TermCriteria criteria,
float patternWidth, float patternHeight, int verbose, int showExtration, int nMiniMatches, int flags, TermCriteria criteria,
Ptr<FeatureDetector> detector, Ptr<DescriptorExtractor> descriptor,
Ptr<DescriptorMatcher> matcher)
{
......@@ -89,7 +90,7 @@ multiCameraCalibration::multiCameraCalibration(int cameraType, int nCameras, con
_detector = detector;
_descriptor = descriptor;
_matcher = matcher;
_verbose = verbose;
for (int i = 0; i < _nCamera; ++i)
{
_vertexList.push_back(vertex());
......@@ -133,7 +134,7 @@ void multiCameraCalibration::loadImages()
Ptr<DescriptorExtractor> descriptor = _descriptor;
Ptr<DescriptorMatcher> matcher = _matcher;
randomPatternCornerFinder finder(_patternWidth, _patternHeight, 10, CV_32F, this->_showExtraction, detector, descriptor, matcher);
randomPatternCornerFinder finder(_patternWidth, _patternHeight, _nMiniMatches, CV_32F, _verbose, this->_showExtraction, detector, descriptor, matcher);
Mat pattern = cv::imread(file_list[0]);
finder.loadPattern(pattern);
......@@ -170,6 +171,14 @@ void multiCameraCalibration::loadImages()
for (int imgIdx = 0; imgIdx < (int)filesEachCameraFull[camera].size(); ++imgIdx)
{
image = imread(filesEachCameraFull[camera][imgIdx], IMREAD_GRAYSCALE);
if (!image.empty() && _verbose)
{
std::cout << "open image " << filesEachCameraFull[camera][imgIdx] << " successfully" << std::endl;
}
else if (image.empty() && _verbose)
{
std::cout << "open image" << filesEachCameraFull[camera][imgIdx] << " failed" << std::endl;
}
std::vector<Mat> imgObj = finder.computeObjectImagePointsForSingle(image);
if ((int)imgObj[0].total() > _nMiniMatches)
{
......@@ -177,6 +186,10 @@ void multiCameraCalibration::loadImages()
_objectPointsForEachCamera[camera].push_back(imgObj[1]);
timestampAvailable[camera].push_back(timestampFull[camera][imgIdx]);
}
else if ((int)imgObj[0].total() <= _nMiniMatches && _verbose)
{
std::cout << "image " << filesEachCameraFull[camera][imgIdx] <<" has too few matched points "<< std::endl;
}
}
// calibrate
......@@ -246,6 +259,9 @@ void multiCameraCalibration::loadImages()
this->_edgeList.push_back(edge(cameraVertex, photoVertex, idx.at<int>(i), transform));
}
std::cout << "initialized for camera " << camera << " rms = " << rms << std::endl;
std::cout << "initialized camera matrix for camera " << camera << " is" << std::endl;
std::cout << _cameraMatrix[camera] << std::endl;
std::cout << "xi for camera " << camera << " is " << _xi[camera] << std::endl;
}
}
......@@ -310,6 +326,11 @@ void multiCameraCalibration::initialize()
{
this->_vertexList[vertexIdx].pose = transform * prePose.inv();
this->_vertexList[vertexIdx].pose.convertTo(this->_vertexList[vertexIdx].pose, CV_32F);
if (_verbose)
{
std::cout << "initial pose for camera " << vertexIdx << " is " << std::endl;
std::cout << this->_vertexList[vertexIdx].pose << std::endl;
}
}
else
{
......@@ -375,6 +396,11 @@ double multiCameraCalibration::optimizeExtrinsics()
R.copyTo(pose.colRange(0, 3).rowRange(0, 3));
Mat(tvecVertex[verIdx-1]).reshape(1, 3).copyTo(pose.rowRange(0, 3).col(3));
_vertexList[verIdx].pose = pose;
if (_verbose && verIdx < _nCamera)
{
std::cout << "final camera pose of camera " << verIdx << " is" << std::endl;
std::cout << pose << std::endl;
}
}
return error;
}
......
......@@ -843,7 +843,7 @@ void cv::omnidir::internal::initializeStereoCalibration(InputArrayOfArrays objec
/// cv::omnidir::internal::computeJacobian
void cv::omnidir::internal::computeJacobian(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints,
InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags)
InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags, double epsilon)
{
CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3);
CV_Assert(!imagePoints.empty() && imagePoints.type() == CV_64FC2);
......@@ -915,12 +915,21 @@ void cv::omnidir::internal::computeJacobian(InputArrayOfArrays objectPoints, Inp
subMatrix(JTJ, JTJ, _idx, _idx);
subMatrix(JTE, JTE, std::vector<int>(1, 1), _idx);
// in case JTJ is singular
double epsilon = 1e-10;
//SVD svd(JTJ, SVD::NO_UV);
//double cond = svd.w.at<double>(0)/svd.w.at<double>(5);
//if (cond_JTJ.needed())
//{
// cond_JTJ.create(1, 1, CV_64F);
// cond_JTJ.getMat().at<double>(0) = cond;
//}
//double epsilon = 1e-4*std::exp(cond);
JTJ_inv = Mat(JTJ+epsilon).inv();
}
void cv::omnidir::internal::computeJacobianStereo(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2,
InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags)
InputArray parameters, Mat& JTJ_inv, Mat& JTE, int flags, double epsilon)
{
CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3);
CV_Assert(!imagePoints1.empty() && imagePoints1.type() == CV_64FC2);
......@@ -1000,7 +1009,7 @@ void cv::omnidir::internal::computeJacobianStereo(InputArrayOfArrays objectPoint
JTE = J.t()*exAll;
subMatrix(JTJ, JTJ, _idx, _idx);
subMatrix(JTE, JTE, std::vector<int>(1, 1), _idx);
double epsilon = 1e-10;
JTJ_inv = Mat(JTJ+epsilon).inv();
}
......@@ -1168,7 +1177,7 @@ double cv::omnidir::calibrate(InputArray patternPoints, InputArray imagePoints,
cv::omnidir::internal::encodeParameters(_K, _omAll, _tAll, Mat::zeros(1,4,CV_64F), _xi, currentParam);
// optimization
const double alpha_smooth = 0.001;
const double alpha_smooth = 0.01;
//const double thresh_cond = 1e6;
double change = 1;
for(int iter = 0; ; ++iter)
......@@ -1179,8 +1188,8 @@ double cv::omnidir::calibrate(InputArray patternPoints, InputArray imagePoints,
break;
double alpha_smooth2 = 1 - std::pow(1 - alpha_smooth, (double)iter + 1.0);
Mat JTJ_inv, JTError;
cv::omnidir::internal::computeJacobian(_patternPoints, _imagePoints, currentParam, JTJ_inv, JTError, flags);
double epsilon = 0.01 * std::pow(0.9, (double)iter/10);
cv::omnidir::internal::computeJacobian(_patternPoints, _imagePoints, currentParam, JTJ_inv, JTError, flags, epsilon);
// Gauss-Newton
Mat G = alpha_smooth2*JTJ_inv * JTError;
......@@ -1330,8 +1339,10 @@ double cv::omnidir::stereoCalibrate(InputOutputArrayOfArrays objectPoints, Input
break;
double alpha_smooth2 = 1 - std::pow(1 - alpha_smooth, (double)iter + 1.0);
Mat JTJ_inv, JTError;
double epsilon = 0.01 * std::pow(0.9, (double)iter/10);
cv::omnidir::internal::computeJacobianStereo(_objectPointsFilt, _imagePoints1Filt, _imagePoints2Filt, currentParam, JTJ_inv, JTError, flags);
cv::omnidir::internal::computeJacobianStereo(_objectPointsFilt, _imagePoints1Filt, _imagePoints2Filt, currentParam,
JTJ_inv, JTError, flags, epsilon);
// Gauss-Newton
Mat G = alpha_smooth2*JTJ_inv * JTError;
......@@ -1835,7 +1846,7 @@ void cv::omnidir::internal::estimateUncertainties(InputArrayOfArrays objectPoint
double s = sigma_x.at<double>(0);
Mat _JTJ_inv, _JTE;
computeJacobian(objectPoints, imagePoints, parameters, _JTJ_inv, _JTE, flags);
computeJacobian(objectPoints, imagePoints, parameters, _JTJ_inv, _JTE, flags, 0.0);
sqrt(_JTJ_inv, _JTJ_inv);
errors = 3 * s * _JTJ_inv.diag();
......@@ -1923,7 +1934,7 @@ void cv::omnidir::internal::estimateUncertaintiesStereo(InputArrayOfArrays objec
double s = sigma_x.at<double>(0);
Mat _JTJ_inv, _JTE;
computeJacobianStereo(objectPoints, imagePoints1, imagePoints2, _parameters, _JTJ_inv, _JTE, flags);
computeJacobianStereo(objectPoints, imagePoints1, imagePoints2, _parameters, _JTJ_inv, _JTE, flags, 0.0);
cv::sqrt(_JTJ_inv, _JTJ_inv);
errors = 3 * s * _JTJ_inv.diag();
......
......@@ -58,7 +58,7 @@
using namespace cv;
using namespace std;
randomPatternCornerFinder::randomPatternCornerFinder(float patternWidth, float patternHeight,
int nminiMatch, int depth, int showExtraction, Ptr<FeatureDetector> detector, Ptr<DescriptorExtractor> descriptor,
int nminiMatch, int depth, int verbose, int showExtraction, Ptr<FeatureDetector> detector, Ptr<DescriptorExtractor> descriptor,
Ptr<DescriptorMatcher> matcher)
{
_patternHeight = patternHeight;
......@@ -71,6 +71,7 @@ randomPatternCornerFinder::randomPatternCornerFinder(float patternWidth, float p
_descriptor = descriptor;
_matcher = matcher;
_showExtraction = showExtraction;
_verbose = verbose;
}
//void randomPatternCornerFinder::computeObjectImagePoints2(std::vector<cv::Mat> inputImages)
......@@ -270,26 +271,26 @@ void randomPatternCornerFinder::crossCheckMatching( Ptr<DescriptorMatcher>& desc
void randomPatternCornerFinder::drawCorrespondence(const Mat& image1, const std::vector<cv::KeyPoint> keypoint1,
const Mat& image2, const std::vector<cv::KeyPoint> keypoint2, const std::vector<cv::DMatch> matchces,
const Mat& mask1, const Mat& mask2)
const Mat& mask1, const Mat& mask2, const int step)
{
Mat img_corr;
if(mask1.empty())
if(step == 1)
{
drawMatches(image1, keypoint1, image2, keypoint2, matchces, img_corr);
}
else if(!mask1.empty() && mask2.empty())
else if(step == 2)
{
std::vector<cv::DMatch> matchesFilter;
for (int i = 0; i < (int)mask1.total(); ++i)
{
if (mask1.at<uchar>(i) == 1)
if (!mask1.empty() && mask1.at<uchar>(i) == 1)
{
matchesFilter.push_back(matchces[i]);
}
}
drawMatches(image1, keypoint1, image2, keypoint2, matchesFilter, img_corr);
}
else if(!mask1.empty() && !mask2.empty())
else if(step == 3)
{
std::vector<cv::DMatch> matchesFilter;
int j = 0;
......@@ -297,7 +298,7 @@ void randomPatternCornerFinder::drawCorrespondence(const Mat& image1, const std:
{
if (mask1.at<uchar>(i) == 1)
{
if (mask2.at<uchar>(j) == 1)
if (!mask2.empty() && mask2.at<uchar>(j) == 1)
{
matchesFilter.push_back(matchces[i]);
}
......@@ -357,8 +358,8 @@ std::vector<cv::Mat> randomPatternCornerFinder::computeObjectImagePointsForSingl
cv::Mat keypointsImageLocation, keypointsPatternLocation;
crossCheckMatching(this->_matcher, descriptorImage1, this->_descriptorPattern, matchesImgtoPat1, 5);
crossCheckMatching(this->_matcher, descriptorImage2, this->_descriptorPattern, matchesImgtoPat2, 5);
crossCheckMatching(this->_matcher, descriptorImage1, this->_descriptorPattern, matchesImgtoPat1, 1);
crossCheckMatching(this->_matcher, descriptorImage2, this->_descriptorPattern, matchesImgtoPat2, 1);
if ((int)matchesImgtoPat1.size() > (int)matchesImgtoPat2.size())
{
matchesImgtoPat = matchesImgtoPat1;
......@@ -382,23 +383,37 @@ std::vector<cv::Mat> randomPatternCornerFinder::computeObjectImagePointsForSingl
if(this->_showExtraction)
{
drawCorrespondence(inputImage, keypointsImage, _patternImage, _keypointsPattern, matchesImgtoPat,
innerMask1, innerMask2);
innerMask1, innerMask2, 1);
}
if (_verbose)
{
std::cout << "number of matched points " << (int)keypointsImageLocation.total() << std::endl;
}
// outlier remove
findFundamentalMat(keypointsImageLocation, keypointsPatternLocation,
FM_RANSAC, 1, 0.995, innerMask1);
getFilteredLocation(keypointsImageLocation, keypointsPatternLocation, innerMask1);
findHomography(keypointsImageLocation, keypointsPatternLocation, RANSAC, 10*inputImage.cols/1000, innerMask2);
if (this->_showExtraction)
{
drawCorrespondence(inputImage, keypointsImage, _patternImage, _keypointsPattern, matchesImgtoPat,
innerMask1, innerMask2, 2);
}
findHomography(keypointsImageLocation, keypointsPatternLocation, RANSAC, 30*inputImage.cols/1000, innerMask2);
getFilteredLocation(keypointsImageLocation, keypointsPatternLocation, innerMask2);
if (_verbose)
{
std::cout << "number of filtered points " << (int)keypointsImageLocation.total() << std::endl;
}
// draw filtered correspondence
if (this->_showExtraction)
{
drawCorrespondence(inputImage, keypointsImage, _patternImage, _keypointsPattern, matchesImgtoPat,
innerMask1, innerMask2);
innerMask1, innerMask2, 3);
}
std::vector<Vec3d> objectPoints;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment