Commit 7ed858e3 authored by catree

Fix issue with solvePnPRansac and Nx3 1-channel input when the number of points is 5. Try to make the input shape of projectPoints and undistortPoints uniform.
parent a4333948
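For context, a minimal repro sketch of the fixed case (the point values below are illustrative, not from the commit): solvePnPRansac() called with exactly 5 correspondences laid out as Nx3/Nx2 1-channel matrices previously failed in the `model_points == npoints` shortcut, because the points were handed to solvePnP() without being reshaped to 3/2 channels.

```cpp
// Hypothetical repro sketch for the fixed case (values are illustrative).
#include <opencv2/calib3d.hpp>

int main()
{
    // 5 object points as an Nx3 1-channel matrix
    cv::Mat objectPoints = (cv::Mat_<double>(5, 3) <<
        0,   0,   0,
        1,   0,   0,
        0,   1,   0,
        1,   1,   0,
        0.5, 0.5, 0.5);
    // 5 image points as an Nx2 1-channel matrix
    cv::Mat imagePoints = (cv::Mat_<double>(5, 2) <<
        320, 240,
        420, 240,
        320, 340,
        420, 340,
        372, 288);
    cv::Matx33d K(800, 0, 320,
                  0, 800, 240,
                  0,   0,   1);

    cv::Mat rvec, tvec;
    // With npoints == model_points (5 here), solvePnPRansac() now reshapes
    // the input to 3/2 channels before delegating to solvePnP().
    bool ok = cv::solvePnPRansac(objectPoints, imagePoints, K, cv::noArray(),
                                 rvec, tvec);
    return ok ? 0 : 1;
}
```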
@@ -516,7 +516,7 @@ vector\<Point3f\> ), where N is the number of points in the view.
 @param distCoeffs Input vector of distortion coefficients
 \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
 4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
-@param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
+@param imagePoints Output array of image points, 1xN/Nx1 2-channel, or
 vector\<Point2f\> .
 @param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image
 points with respect to components of the rotation vector, translation vector, focal lengths,
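A small usage sketch of the documented output shapes (hypothetical snippet, not part of the commit): with a Mat destination, projectPoints() fills an Nx1 2-channel matrix, and a vector\<Point2f\> works as before.

```cpp
// Hypothetical sketch: output shape of projectPoints() for a Mat destination.
#include <opencv2/calib3d.hpp>
#include <vector>

void example()
{
    std::vector<cv::Point3f> obj = { {-0.1f, 0.1f, 0.0f}, {0.1f, 0.1f, 0.0f} };
    cv::Matx31d rvec = cv::Matx31d::zeros(), tvec(0.0, 0.0, 1.0);
    cv::Matx33d K = cv::Matx33d::eye();

    cv::Mat out;                                   // allocated as Nx1 CV_32FC2
    cv::projectPoints(obj, rvec, tvec, K, cv::noArray(), out);
    CV_Assert(out.rows == 2 && out.cols == 1 && out.type() == CV_32FC2);

    std::vector<cv::Point2f> outVec;               // or keep a point vector
    cv::projectPoints(obj, rvec, tvec, K, cv::noArray(), outVec);
}
```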
@@ -12,8 +12,8 @@ typedef perf::TestBaseWithParam<PointsNum_Algo_t> PointsNum_Algo;
 typedef perf::TestBaseWithParam<int> PointsNum;
 
 PERF_TEST_P(PointsNum_Algo, solvePnP,
-            testing::Combine(
-                testing::Values(5, 3*9, 7*13), //TODO: find why results on 4 points are too unstable
+            testing::Combine( //When non planar, DLT needs at least 6 points for SOLVEPNP_ITERATIVE flag
+                testing::Values(6, 3*9, 7*13), //TODO: find why results on 4 points are too unstable
                 testing::Values((int)SOLVEPNP_ITERATIVE, (int)SOLVEPNP_EPNP, (int)SOLVEPNP_UPNP, (int)SOLVEPNP_DLS)
                 )
             )
@@ -1094,6 +1094,7 @@ CV_IMPL void cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
     else
     {
         // non-planar structure. Use DLT method
+        CV_CheckGE(count, 6, "DLT algorithm needs at least 6 points for pose estimation from 3D-2D point correspondences.");
         double* L;
         double LL[12*12], LW[12], LV[12*12], sc;
         CvMat _LL = cvMat( 12, 12, CV_64F, LL );
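The effect of the new check, sketched below (a hypothetical call, not from the commit; `objectPoints5NonPlanar`, `imagePoints5` and `K` are placeholder names): with SOLVEPNP_ITERATIVE, no extrinsic guess and a non-planar structure, the pose initialization falls back to DLT, so fewer than 6 points now raise a clear error instead of producing an unstable pose.

```cpp
// Hypothetical sketch: 5 non-planar points reach the DLT branch and now
// trigger the CV_CheckGE above.
try
{
    cv::Mat rvec, tvec;
    cv::solvePnP(objectPoints5NonPlanar, imagePoints5, K, cv::noArray(),
                 rvec, tvec, false, cv::SOLVEPNP_ITERATIVE);
}
catch (const cv::Exception& e)
{
    // e.what() contains "DLT algorithm needs at least 6 points ..."
}
```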
@@ -3314,8 +3315,14 @@ void cv::projectPoints( InputArray _opoints,
 {
     Mat opoints = _opoints.getMat();
     int npoints = opoints.checkVector(3), depth = opoints.depth();
+    if (npoints < 0)
+        opoints = opoints.t();
+    npoints = opoints.checkVector(3);
     CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F));
+
+    if (opoints.cols == 3)
+        opoints = opoints.reshape(3);
 
     CvMat dpdrot, dpdt, dpdf, dpdc, dpddist;
     CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0;
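The normalization above relies on two cheap Mat operations; a standalone sketch of why they are sufficient (my illustration, not commit code):

```cpp
// checkVector(3) accepts Nx3 1-channel (returns N) but returns -1 for 3xN,
// hence the transpose; reshape(3) then reinterprets the packed rows as an
// Nx1 3-channel matrix without copying any data.
cv::Mat pts3xN(3, 4, CV_32FC1);             // 3xN layout: one coordinate per row
CV_Assert(pts3xN.checkVector(3) == -1);     // not directly usable
cv::Mat ptsNx3 = pts3xN.t();                // Nx3: one (x, y, z) point per row
CV_Assert(ptsNx3.checkVector(3) == 4);
cv::Mat ptsNx1 = ptsNx3.reshape(3);         // Nx1 3-channel view, no copy
CV_Assert(ptsNx1.rows == 4 && ptsNx1.channels() == 3);
```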
@@ -245,6 +245,9 @@ bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
 
     if( model_points == npoints )
     {
+        opoints = opoints.reshape(3);
+        ipoints = ipoints.reshape(2);
+
         bool result = solvePnP(opoints, ipoints, cameraMatrix, distCoeffs, _rvec, _tvec, useExtrinsicGuess, ransac_kernel_method);
 
         if(!result)
@@ -350,6 +353,11 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,
     CV_Assert( npoints == 3 || npoints == 4 );
     CV_Assert( flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P );
 
+    if (opoints.cols == 3)
+        opoints = opoints.reshape(3);
+    if (ipoints.cols == 2)
+        ipoints = ipoints.reshape(2);
+
     Mat cameraMatrix0 = _cameraMatrix.getMat();
     Mat distCoeffs0 = _distCoeffs.getMat();
     Mat cameraMatrix = Mat_<double>(cameraMatrix0);
@@ -745,6 +753,11 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
     CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
                && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
 
+    if (opoints.cols == 3)
+        opoints = opoints.reshape(3);
+    if (ipoints.cols == 2)
+        ipoints = ipoints.reshape(2);
+
     if( flags != SOLVEPNP_ITERATIVE )
         useExtrinsicGuess = false;
@@ -2071,6 +2071,183 @@ TEST(Calib3d_CalibrationMatrixValues_C, accuracy) { CV_CalibrationMatrixValuesTe
TEST(Calib3d_CalibrationMatrixValues_CPP, accuracy) { CV_CalibrationMatrixValuesTest_CPP test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_C, accuracy) { CV_ProjectPointsTest_C test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_CPP, regression) { CV_ProjectPointsTest_CPP test; test.safe_run(); }
TEST(Calib3d_ProjectPoints_CPP, inputShape)
{
Matx31d rvec = Matx31d::zeros();
Matx31d tvec(0, 0, 1);
Matx33d cameraMatrix = Matx33d::eye();
const float L = 0.1f;
{
//3xN 1-channel
Mat objectPoints = (Mat_<float>(3, 2) << -L, L,
L, L,
0, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.cols, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//Nx3 1-channel
Mat objectPoints = (Mat_<float>(2, 3) << -L, L, 0,
L, L, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.rows, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//1xN 3-channel
Mat objectPoints(1, 2, CV_32FC3);
objectPoints.at<Vec3f>(0,0) = Vec3f(-L, L, 0);
objectPoints.at<Vec3f>(0,1) = Vec3f(L, L, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.cols, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//Nx1 3-channel
Mat objectPoints(2, 1, CV_32FC3);
objectPoints.at<Vec3f>(0,0) = Vec3f(-L, L, 0);
objectPoints.at<Vec3f>(1,0) = Vec3f(L, L, 0);
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.rows, static_cast<int>(imagePoints.size()));
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//vector<Point3f>
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f(L, L, 0));
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
//vector<Point3d>
vector<Point3d> objectPoints;
objectPoints.push_back(Point3d(-L, L, 0));
objectPoints.push_back(Point3d(L, L, 0));
vector<Point2d> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<double>::epsilon());
}
}
TEST(Calib3d_ProjectPoints_CPP, outputShape)
{
Matx31d rvec = Matx31d::zeros();
Matx31d tvec(0, 0, 1);
Matx33d cameraMatrix = Matx33d::eye();
const float L = 0.1f;
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f( L, L, 0));
objectPoints.push_back(Point3f( L, -L, 0));
//Mat --> will be Nx1 2-channel
Mat imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(static_cast<int>(objectPoints.size()), imagePoints.rows);
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(0), -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(1), -L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f( L, L, 0));
objectPoints.push_back(Point3f( L, -L, 0));
//Nx1 2-channel
Mat imagePoints(3,1,CV_32FC2);
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(static_cast<int>(objectPoints.size()), imagePoints.rows);
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(0), -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(1,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(2,0)(1), -L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f( L, L, 0));
objectPoints.push_back(Point3f( L, -L, 0));
//1xN 2-channel
Mat imagePoints(1,3,CV_32FC2);
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(static_cast<int>(objectPoints.size()), imagePoints.cols);
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(0), -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,0)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,1)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,1)(1), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,2)(0), L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints.at<Vec2f>(0,2)(1), -L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3f> objectPoints;
objectPoints.push_back(Point3f(-L, L, 0));
objectPoints.push_back(Point3f(L, L, 0));
//vector<Point2f>
vector<Point2f> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<float>::epsilon());
}
{
vector<Point3d> objectPoints;
objectPoints.push_back(Point3d(-L, L, 0));
objectPoints.push_back(Point3d(L, L, 0));
//vector<Point2d>
vector<Point2d> imagePoints;
projectPoints(objectPoints, rvec, tvec, cameraMatrix, noArray(), imagePoints);
EXPECT_EQ(objectPoints.size(), imagePoints.size());
EXPECT_NEAR(imagePoints[0].x, -L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[0].y, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].x, L, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(imagePoints[1].y, L, std::numeric_limits<double>::epsilon());
}
}
TEST(Calib3d_StereoCalibrate_C, regression) { CV_StereoCalibrationTest_C test; test.safe_run(); }
TEST(Calib3d_StereoCalibrate_CPP, regression) { CV_StereoCalibrationTest_CPP test; test.safe_run(); }
TEST(Calib3d_StereoCalibrateCorner, regression) { CV_StereoCalibrationCornerTest test; test.safe_run(); }
@@ -938,4 +938,180 @@ TEST(Calib3d_DefaultNewCameraMatrix, accuracy) { CV_DefaultNewCameraMatrixTest t
TEST(Calib3d_UndistortPoints, accuracy) { CV_UndistortPointsTest test; test.safe_run(); }
TEST(Calib3d_InitUndistortRectifyMap, accuracy) { CV_InitUndistortRectifyMapTest test; test.safe_run(); }
TEST(Calib3d_UndistortPoints, inputShape)
{
//https://github.com/opencv/opencv/issues/14423
Matx33d cameraMatrix = Matx33d::eye();
{
//2xN 1-channel
Mat imagePoints(2, 3, CV_32FC1);
imagePoints.at<float>(0,0) = 320; imagePoints.at<float>(1,0) = 240;
imagePoints.at<float>(0,1) = 0; imagePoints.at<float>(1,1) = 240;
imagePoints.at<float>(0,2) = 320; imagePoints.at<float>(1,2) = 0;
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.cols);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<float>(0,i), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<float>(1,i), std::numeric_limits<float>::epsilon());
}
}
{
//Nx2 1-channel
Mat imagePoints(3, 2, CV_32FC1);
imagePoints.at<float>(0,0) = 320; imagePoints.at<float>(0,1) = 240;
imagePoints.at<float>(1,0) = 0; imagePoints.at<float>(1,1) = 240;
imagePoints.at<float>(2,0) = 320; imagePoints.at<float>(2,1) = 0;
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.rows);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<float>(i,0), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<float>(i,1), std::numeric_limits<float>::epsilon());
}
}
{
//1xN 2-channel
Mat imagePoints(1, 3, CV_32FC2);
imagePoints.at<Vec2f>(0,0) = Vec2f(320, 240);
imagePoints.at<Vec2f>(0,1) = Vec2f(0, 240);
imagePoints.at<Vec2f>(0,2) = Vec2f(320, 0);
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.cols);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<Vec2f>(0,i)(0), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<Vec2f>(0,i)(1), std::numeric_limits<float>::epsilon());
}
}
{
//Nx1 2-channel
Mat imagePoints(3, 1, CV_32FC2);
imagePoints.at<Vec2f>(0,0) = Vec2f(320, 240);
imagePoints.at<Vec2f>(1,0) = Vec2f(0, 240);
imagePoints.at<Vec2f>(2,0) = Vec2f(320, 0);
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(normalized.size()), imagePoints.rows);
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints.at<Vec2f>(i,0)(0), std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints.at<Vec2f>(i,0)(1), std::numeric_limits<float>::epsilon());
}
}
{
//vector<Point2f>
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(normalized.size(), imagePoints.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
//vector<Point2d>
vector<Point2d> imagePoints;
imagePoints.push_back(Point2d(320, 240));
imagePoints.push_back(Point2d(0, 240));
imagePoints.push_back(Point2d(320, 0));
vector<Point2d> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(normalized.size(), imagePoints.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<double>::epsilon());
}
}
}
TEST(Calib3d_UndistortPoints, outputShape)
{
Matx33d cameraMatrix = Matx33d::eye();
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//Mat --> will be Nx1 2-channel
Mat normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(imagePoints.size()), normalized.rows);
for (int i = 0; i < normalized.rows; i++) {
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(0), imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(1), imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//Nx1 2-channel
Mat normalized(static_cast<int>(imagePoints.size()), 1, CV_32FC2);
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(imagePoints.size()), normalized.rows);
for (int i = 0; i < normalized.rows; i++) {
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(0), imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized.at<Vec2f>(i,0)(1), imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//1xN 2-channel
Mat normalized(1, static_cast<int>(imagePoints.size()), CV_32FC2);
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(static_cast<int>(imagePoints.size()), normalized.cols);
for (int i = 0; i < normalized.cols; i++) {
EXPECT_NEAR(normalized.at<Vec2f>(0,i)(0), imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized.at<Vec2f>(0,i)(1), imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2f> imagePoints;
imagePoints.push_back(Point2f(320, 240));
imagePoints.push_back(Point2f(0, 240));
imagePoints.push_back(Point2f(320, 0));
//vector<Point2f>
vector<Point2f> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(imagePoints.size(), normalized.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<float>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<float>::epsilon());
}
}
{
vector<Point2d> imagePoints;
imagePoints.push_back(Point2d(320, 240));
imagePoints.push_back(Point2d(0, 240));
imagePoints.push_back(Point2d(320, 0));
//vector<Point2d>
vector<Point2d> normalized;
undistortPoints(imagePoints, normalized, cameraMatrix, noArray());
EXPECT_EQ(imagePoints.size(), normalized.size());
for (int i = 0; i < static_cast<int>(normalized.size()); i++) {
EXPECT_NEAR(normalized[i].x, imagePoints[i].x, std::numeric_limits<double>::epsilon());
EXPECT_NEAR(normalized[i].y, imagePoints[i].y, std::numeric_limits<double>::epsilon());
}
}
}
}} // namespace
@@ -3116,9 +3116,9 @@ point coordinates out of the normalized distorted point coordinates ("normalized
 coordinates do not depend on the camera matrix).
 
 The function can be used for both a stereo camera head or a monocular camera (when R is empty).
 
-@param src Observed point coordinates, 1xN or Nx1 2-channel (CV_32FC2 or CV_64FC2).
-@param dst Output ideal point coordinates after undistortion and reverse perspective
+@param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or
+vector\<Point2f\> ).
+@param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective
 transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates.
 @param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
 @param distCoeffs Input vector of distortion coefficients
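A usage sketch of the widened input handling (hypothetical values, referencing issue https://github.com/opencv/opencv/issues/14423):

```cpp
// Hypothetical sketch: undistortPoints() now also accepts 2xN/Nx2 1-channel
// input and produces Nx1 2-channel output for an empty Mat destination.
cv::Matx33d K(800, 0, 320,
              0, 800, 240,
              0,   0,   1);
cv::Mat pixels = (cv::Mat_<float>(3, 2) <<   // Nx2 1-channel
    320.f, 240.f,
      0.f, 240.f,
    320.f,   0.f);

cv::Mat normalized;
cv::undistortPoints(pixels, normalized, K, cv::noArray());
CV_Assert(normalized.rows == 3 && normalized.cols == 1 &&
          normalized.type() == CV_32FC2);
```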
@@ -3131,14 +3131,14 @@ of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion
 */
 CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst,
                                    InputArray cameraMatrix, InputArray distCoeffs,
-                                   InputArray R = noArray(), InputArray P = noArray());
+                                   InputArray R = noArray(), InputArray P = noArray() );
 
 /** @overload
     @note Default version of #undistortPoints does 5 iterations to compute undistorted points.
 */
 CV_EXPORTS_AS(undistortPointsIter) void undistortPoints( InputArray src, OutputArray dst,
-                         InputArray cameraMatrix, InputArray distCoeffs,
-                         InputArray R, InputArray P, TermCriteria criteria);
+                                                         InputArray cameraMatrix, InputArray distCoeffs,
+                                                         InputArray R, InputArray P, TermCriteria criteria );
 
 //! @} imgproc_transform
@@ -561,10 +561,16 @@ void cv::undistortPoints( InputArray _src, OutputArray _dst,
     Mat src = _src.getMat(), cameraMatrix = _cameraMatrix.getMat();
     Mat distCoeffs = _distCoeffs.getMat(), R = _Rmat.getMat(), P = _Pmat.getMat();
 
-    CV_Assert( src.isContinuous() && (src.depth() == CV_32F || src.depth() == CV_64F) &&
-              ((src.rows == 1 && src.channels() == 2) || src.cols*src.channels() == 2));
+    int npoints = src.checkVector(2), depth = src.depth();
+    if (npoints < 0)
+        src = src.t();
+    npoints = src.checkVector(2);
+    CV_Assert(npoints >= 0 && src.isContinuous() && (depth == CV_32F || depth == CV_64F));
 
-    _dst.create(src.size(), src.type(), -1, true);
+    if (src.cols == 2)
+        src = src.reshape(2);
+
+    _dst.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
     Mat dst = _dst.getMat();
 
     CvMat _csrc = cvMat(src), _cdst = cvMat(dst), _ccameraMatrix = cvMat(cameraMatrix);
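One behavioral note on the rewritten output allocation (my reading of the code above): `_dst.create(npoints, 1, ..., -1, true)` passes allowTransposed = true, so a caller-preallocated 1xN matrix of the right type is kept, while anything else is (re)allocated as Nx1 2-channel, which is exactly what the outputShape tests above pin down. A sketch, reusing `pixels` and `K` from the earlier snippet:

```cpp
// Sketch of the allocation rule (see the outputShape tests above).
cv::Mat dstRow(1, 3, CV_32FC2);        // matching 1xN preallocation is kept
cv::undistortPoints(pixels, dstRow, K, cv::noArray());
CV_Assert(dstRow.rows == 1 && dstRow.cols == 3);

cv::Mat dstDefault;                    // empty output becomes Nx1 2-channel
cv::undistortPoints(pixels, dstDefault, K, cv::noArray());
CV_Assert(dstDefault.rows == 3 && dstDefault.cols == 1);
```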