Unverified commit 72f35e06, authored by Alexander Alekhin and committed by GitHub

Merge pull request #16052 from alalek:issue_16040

* calib3d: use normalized input in solvePnPGeneric()

* calib3d: java regression test for solvePnPGeneric

* calib3d: python regression test for solvePnPGeneric
parent f21bde4d
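
The first change normalizes the point-set layout inside solvePnPGeneric() (issue 16040): point sets passed as plain Nx3 / Nx2 single-channel matrices, the layout NumPy arrays arrive in through the Python bindings, could previously trip assertions in the reprojection-error computation. A minimal C++ sketch of the affected call, with values mirroring the Python regression test further down (this is not OpenCV's code, it only exercises the fixed path):

    #include <opencv2/calib3d.hpp>
    #include <cstdio>
    #include <vector>

    int main()
    {
        // Plain Nx3 / Nx2 single-channel inputs, as produced by numpy arrays.
        cv::Mat objPoints = (cv::Mat_<float>(4, 3) << 0, 0, 0,
                                                      0, 1, 0,
                                                      1, 1, 0,
                                                      1, 0, 0);
        cv::Mat imgPoints = (cv::Mat_<float>(4, 2) << 700, 400,
                                                      700, 600,
                                                      900, 600,
                                                      900, 400);
        cv::Mat K = (cv::Mat_<double>(3, 3) << 712.0634, 0, 800,
                                               0, 712.540, 500,
                                               0, 0, 1);
        cv::Mat dist;                          // empty -> zero distortion
        std::vector<cv::Mat> rvecs, tvecs;
        cv::Mat reprojErr;                     // one RMSE per returned solution

        int solutions = cv::solvePnPGeneric(objPoints, imgPoints, K, dist,
                                            rvecs, tvecs, false, cv::SOLVEPNP_ITERATIVE,
                                            cv::noArray(), cv::noArray(), reprojErr);
        std::printf("solutions: %d, reprojErr rows: %d\n", solutions, reprojErr.rows);
        return 0;
    }
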
 package org.opencv.test.calib3d;
+import java.util.ArrayList;
 import org.opencv.calib3d.Calib3d;
 import org.opencv.core.CvType;
 import org.opencv.core.Mat;
@@ -639,4 +641,45 @@ public class Calib3dTest extends OpenCVTestCase {
         assertEquals((1 << 17), Calib3d.CALIB_USE_LU);
         assertEquals((1 << 22), Calib3d.CALIB_USE_EXTRINSIC_GUESS);
     }
+
+    public void testSolvePnPGeneric_regression_16040() {
+        Mat intrinsics = Mat.eye(3, 3, CvType.CV_64F);
+        intrinsics.put(0, 0, 400);
+        intrinsics.put(1, 1, 400);
+        intrinsics.put(0, 2, 640 / 2);
+        intrinsics.put(1, 2, 480 / 2);
+
+        final int minPnpPointsNum = 4;
+
+        MatOfPoint3f points3d = new MatOfPoint3f();
+        points3d.alloc(minPnpPointsNum);
+        MatOfPoint2f points2d = new MatOfPoint2f();
+        points2d.alloc(minPnpPointsNum);
+
+        for (int i = 0; i < minPnpPointsNum; i++) {
+            double x = Math.random() * 100 - 50;
+            double y = Math.random() * 100 - 50;
+            points2d.put(i, 0, x, y); //add(new Point(x, y));
+            points3d.put(i, 0, 0, y, x); // add(new Point3(0, y, x));
+        }
+
+        ArrayList<Mat> rvecs = new ArrayList<Mat>();
+        ArrayList<Mat> tvecs = new ArrayList<Mat>();
+
+        Mat rvec = new Mat();
+        Mat tvec = new Mat();
+
+        Mat reprojectionError = new Mat(2, 1, CvType.CV_64FC1);
+
+        Calib3d.solvePnPGeneric(points3d, points2d, intrinsics, new MatOfDouble(), rvecs, tvecs, false, Calib3d.SOLVEPNP_IPPE, rvec, tvec, reprojectionError);
+
+        Mat truth_rvec = new Mat(3, 1, CvType.CV_64F);
+        truth_rvec.put(0, 0, 0, Math.PI / 2, 0);
+        Mat truth_tvec = new Mat(3, 1, CvType.CV_64F);
+        truth_tvec.put(0, 0, -320, -240, 400);
+
+        assertMatEqual(truth_rvec, rvecs.get(0), 10 * EPS);
+        assertMatEqual(truth_tvec, tvecs.get(0), 1000 * EPS);
+    }
 }
#!/usr/bin/env python

# Python 2/3 compatibility
from __future__ import print_function

import numpy as np
import cv2 as cv

from tests_common import NewOpenCVTests

class solvepnp_test(NewOpenCVTests):

    def test_regression_16040(self):
        obj_points = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)
        img_points = np.array(
            [[700, 400], [700, 600], [900, 600], [900, 400]], dtype=np.float32
        )

        cameraMatrix = np.array(
            [[712.0634, 0, 800], [0, 712.540, 500], [0, 0, 1]], dtype=np.float32
        )
        distCoeffs = np.array([[0, 0, 0, 0]], dtype=np.float32)
        r = np.array([], dtype=np.float32)
        x, r, t, e = cv.solvePnPGeneric(
            obj_points, img_points, cameraMatrix, distCoeffs, reprojectionError=r
        )

    def test_regression_16040_2(self):
        obj_points = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)
        img_points = np.array(
            [[[700, 400], [700, 600], [900, 600], [900, 400]]], dtype=np.float32
        )

        cameraMatrix = np.array(
            [[712.0634, 0, 800], [0, 712.540, 500], [0, 0, 1]], dtype=np.float32
        )
        distCoeffs = np.array([[0, 0, 0, 0]], dtype=np.float32)
        r = np.array([], dtype=np.float32)
        x, r, t, e = cv.solvePnPGeneric(
            obj_points, img_points, cameraMatrix, distCoeffs, reprojectionError=r
        )


if __name__ == '__main__':
    NewOpenCVTests.bootstrap()
@@ -753,10 +753,8 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
     CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
                && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
 
-    if (opoints.cols == 3)
-        opoints = opoints.reshape(3);
-    if (ipoints.cols == 2)
-        ipoints = ipoints.reshape(2);
+    opoints = opoints.reshape(3, npoints);
+    ipoints = ipoints.reshape(2, npoints);
 
     if( flags != SOLVEPNP_ITERATIVE )
         useExtrinsicGuess = false;
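
For reference, a standalone sketch (not part of the patch) of what the reshape(3, npoints) / reshape(2, npoints) normalization above does: an Nx3 single-channel Mat and a 1xN 3-channel Mat both become an Nx1 3-channel Mat, so every downstream code path sees one canonical point layout regardless of how the caller packed the points.

    #include <opencv2/core.hpp>
    #include <cstdio>

    int main()
    {
        const int npoints = 4;
        cv::Mat a(npoints, 3, CV_32F);        // Nx3, 1 channel (typical numpy layout)
        cv::Mat b(1, npoints, CV_32FC3);      // 1xN, 3 channels (vector<Point3f> layout)

        cv::Mat an = a.reshape(3, npoints);   // -> 4x1, 3 channels
        cv::Mat bn = b.reshape(3, npoints);   // -> 4x1, 3 channels

        std::printf("a: %dx%d ch=%d   b: %dx%d ch=%d\n",
                    an.rows, an.cols, an.channels(),
                    bn.rows, bn.cols, bn.channels());
        return 0;
    }
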
@@ -796,7 +794,7 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
     else if (flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P)
     {
         vector<Mat> rvecs, tvecs;
-        solveP3P(_opoints, _ipoints, _cameraMatrix, _distCoeffs, rvecs, tvecs, flags);
+        solveP3P(opoints, ipoints, _cameraMatrix, _distCoeffs, rvecs, tvecs, flags);
         vec_rvecs.insert(vec_rvecs.end(), rvecs.begin(), rvecs.end());
         vec_tvecs.insert(vec_tvecs.end(), tvecs.begin(), tvecs.end());
     }
@@ -1017,37 +1015,37 @@ int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
                 "Type of reprojectionError must be CV_32FC1 or CV_64FC1!");
 
         Mat objectPoints, imagePoints;
-        if (_opoints.depth() == CV_32F)
+        if (opoints.depth() == CV_32F)
         {
-            _opoints.getMat().convertTo(objectPoints, CV_64F);
+            opoints.convertTo(objectPoints, CV_64F);
         }
         else
         {
-            objectPoints = _opoints.getMat();
+            objectPoints = opoints;
         }
-        if (_ipoints.depth() == CV_32F)
+        if (ipoints.depth() == CV_32F)
         {
-            _ipoints.getMat().convertTo(imagePoints, CV_64F);
+            ipoints.convertTo(imagePoints, CV_64F);
        }
         else
         {
-            imagePoints = _ipoints.getMat();
+            imagePoints = ipoints;
         }
 
         for (size_t i = 0; i < vec_rvecs.size(); i++)
         {
             vector<Point2d> projectedPoints;
             projectPoints(objectPoints, vec_rvecs[i], vec_tvecs[i], cameraMatrix, distCoeffs, projectedPoints);
-            double rmse = norm(projectedPoints, imagePoints, NORM_L2) / sqrt(2*projectedPoints.size());
+            double rmse = norm(Mat(projectedPoints, false), imagePoints, NORM_L2) / sqrt(2*projectedPoints.size());
 
             Mat err = reprojectionError.getMat();
             if (type == CV_32F)
             {
-                err.at<float>(0,static_cast<int>(i)) = static_cast<float>(rmse);
+                err.at<float>(static_cast<int>(i)) = static_cast<float>(rmse);
             }
             else
             {
-                err.at<double>(0,static_cast<int>(i)) = rmse;
+                err.at<double>(static_cast<int>(i)) = rmse;
             }
         }
     }
...
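
Two details in the reprojection-error hunk are easy to miss: cv::norm now compares a Mat header over the projected points against the reshaped image points, so both operands have the same Nx1 two-channel shape, and the error is written with a single linear index, which is what lets the Java test above pass a 2x1 CV_64FC1 matrix. For reference, a standalone sketch of the same RMSE (an illustration with a hypothetical helper name, not the patched code): the value stored per solution is the L2 norm over all 2*N pixel coordinates divided by sqrt(2*N), i.e. a per-coordinate RMS error in pixels.

    #include <opencv2/calib3d.hpp>
    #include <cmath>
    #include <vector>

    // Per-solution reprojection RMSE, mirroring the loop above.
    static double reprojectionRmse(const std::vector<cv::Point3d>& objectPoints,
                                   const std::vector<cv::Point2d>& imagePoints,
                                   const cv::Mat& rvec, const cv::Mat& tvec,
                                   const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
    {
        std::vector<cv::Point2d> projected;
        cv::projectPoints(objectPoints, rvec, tvec, cameraMatrix, distCoeffs, projected);
        // Both operands are Nx1 CV_64FC2 here, so the size/type checks in cv::norm hold.
        return cv::norm(projected, imagePoints, cv::NORM_L2)
               / std::sqrt(2.0 * static_cast<double>(projected.size()));
    }
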
@@ -1062,7 +1062,8 @@ double cv::norm( InputArray _src1, InputArray _src2, int normType, InputArray _m
 {
     CV_INSTRUMENT_REGION();
 
-    CV_Assert( _src1.sameSize(_src2) && _src1.type() == _src2.type() );
+    CV_CheckTypeEQ(_src1.type(), _src2.type(), "Input type mismatch");
+    CV_Assert(_src1.sameSize(_src2));
 
 #if defined HAVE_OPENCL || defined HAVE_IPP
     double _result = 0;
...
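
The core/ change is small but useful when debugging cases like this one: the combined assert in cv::norm is split so that a type mismatch is reported through CV_CheckTypeEQ, whose exception message spells out both element types instead of only echoing the failed expression. A minimal sketch of the difference (assumes an OpenCV build with the check macros from opencv2/core/check.hpp):

    #include <opencv2/core.hpp>
    #include <opencv2/core/check.hpp>
    #include <iostream>

    int main()
    {
        cv::Mat a(4, 1, CV_64FC2);  // e.g. projected points
        cv::Mat b(4, 1, CV_32FC1);  // deliberately mismatched element type
        try
        {
            CV_CheckTypeEQ(a.type(), b.type(), "Input type mismatch");
        }
        catch (const cv::Exception& e)
        {
            std::cout << e.what() << std::endl;  // message includes both element types
        }
        return 0;
    }
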