Commit 33cb9c5c authored by catree

Add SOLVEPNP_IPPE for planar pose estimation. Add solvePnPGeneric function that returns all the pose solutions and the reprojection errors.
parent 7bb8d89e
@@ -209,7 +209,21 @@
hal_id = {inria-00350283},
hal_version = {v1},
}
@article{Collins14,
year = {2014},
issn = {0920-5691},
journal = {International Journal of Computer Vision},
volume = {109},
number = {3},
doi = {10.1007/s11263-014-0725-5},
title = {Infinitesimal Plane-Based Pose Estimation},
url = {http://dx.doi.org/10.1007/s11263-014-0725-5},
publisher = {Springer US},
keywords = {Plane; Pose; SfM; PnP; Homography},
author = {Collins, Toby and Bartoli, Adrien},
pages = {252-286},
language = {English}
}
@article{Daniilidis98,
author = {Konstantinos Daniilidis},
title = {Hand-Eye Calibration Using Dual Quaternions},
...
@@ -231,13 +231,25 @@ enum { LMEDS = 4, //!< least-median of squares algorithm
RHO = 16 //!< RHO algorithm
};
enum SolvePnPMethod {
SOLVEPNP_ITERATIVE = 0,
SOLVEPNP_EPNP = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp
SOLVEPNP_P3P = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete
SOLVEPNP_DLS = 3, //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct
SOLVEPNP_UPNP = 4, //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive
SOLVEPNP_AP3P = 5, //!< An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17
SOLVEPNP_IPPE = 6, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n
//!< Object points must be coplanar.
SOLVEPNP_IPPE_SQUARE = 7, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n
//!< This is a special case suitable for marker pose estimation.\n
//!< 4 coplanar object points must be defined in the following order:
//!< - point 0: [-squareLength / 2, squareLength / 2, 0]
//!< - point 1: [ squareLength / 2, squareLength / 2, 0]
//!< - point 2: [ squareLength / 2, -squareLength / 2, 0]
//!< - point 3: [-squareLength / 2, -squareLength / 2, 0]
#ifndef CV_DOXYGEN
SOLVEPNP_MAX_COUNT //!< Used for count
#endif
};
enum { CALIB_CB_ADAPTIVE_THRESH = 1,
@@ -540,6 +552,17 @@ Check @ref tutorial_homography "the corresponding tutorial" for more details
*/
/** @brief Finds an object pose from 3D-2D point correspondences.
This function returns the rotation and the translation vectors that transform a 3D point expressed in the object
coordinate frame to the camera coordinate frame, using different methods:
- P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): need 4 input points to return a unique solution.
- @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar.
- @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
Number of input points must be 4. Object points must be defined in the following order:
- point 0: [-squareLength / 2, squareLength / 2, 0]
- point 1: [ squareLength / 2, squareLength / 2, 0]
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can also be passed here.
@@ -550,14 +573,14 @@ where N is the number of points. vector\<Point2f\> can also be passed here.
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
@param tvec Output translation vector.
@param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
@param flags Method for solving a PnP problem:
- **SOLVEPNP_ITERATIVE** Iterative method is based on a Levenberg-Marquardt optimization. In
this case the function finds such a pose that minimizes reprojection error, that is the sum
of squared distances between the observed projections imagePoints and the projected (using
projectPoints ) objectPoints .
@@ -567,18 +590,24 @@ In this case the function requires exactly four object and image points.
- **SOLVEPNP_AP3P** Method is based on the paper of T. Ke and S. Roumeliotis
"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
In this case the function requires exactly four object and image points.
- **SOLVEPNP_EPNP** Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (@cite lepetit2009epnp).
- **SOLVEPNP_DLS** Method is based on the paper of J. Hesch and S. Roumeliotis.
"A Direct Least-Squares (DLS) Method for PnP" (@cite hesch2011direct).
- **SOLVEPNP_UPNP** Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,
F. Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
Estimation" (@cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
assuming that both have the same value. Then the cameraMatrix is updated with the estimated
focal length.
- **SOLVEPNP_IPPE** Method is based on the paper of T. Collins and A. Bartoli.
"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method requires coplanar object points.
- **SOLVEPNP_IPPE_SQUARE** Method is based on the paper of T. Collins and A. Bartoli.
"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method is suitable for marker pose estimation.
It requires 4 coplanar object points defined in the following order:
- point 0: [-squareLength / 2, squareLength / 2, 0]
- point 1: [ squareLength / 2, squareLength / 2, 0]
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- point 3: [-squareLength / 2, -squareLength / 2, 0]
The function estimates the object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients, see the figure below
@@ -634,7 +663,7 @@ using the perspective projection model \f$ \Pi \f$ and the camera intrinsic parameters matrix \f$ \bf{A} \f$:
\end{align*}
\f]
The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\f[
@@ -695,6 +724,13 @@ a 3D point expressed in the world frame into the camera frame:
- With **SOLVEPNP_ITERATIVE** method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
global solution to converge.
- With **SOLVEPNP_IPPE**, input points must be >= 4 and object points must be coplanar.
- **SOLVEPNP_IPPE_SQUARE** is a special case suitable for marker pose estimation.
Number of input points must be 4. Object points must be defined in the following order:
- point 0: [-squareLength / 2, squareLength / 2, 0]
- point 1: [ squareLength / 2, squareLength / 2, 0]
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- point 3: [-squareLength / 2, -squareLength / 2, 0]
*/
CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
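For illustration (not part of this header), a minimal sketch of calling solvePnP with the new SOLVEPNP_IPPE_SQUARE flag; the marker size, the corner detector, and the calibration data (cameraMatrix, distCoeffs) are assumptions:

const float squareLength = 0.1f; // hypothetical 10 cm square marker
std::vector<cv::Point3f> objectPoints;
objectPoints.push_back(cv::Point3f(-squareLength / 2,  squareLength / 2, 0)); // point 0
objectPoints.push_back(cv::Point3f( squareLength / 2,  squareLength / 2, 0)); // point 1
objectPoints.push_back(cv::Point3f( squareLength / 2, -squareLength / 2, 0)); // point 2
objectPoints.push_back(cv::Point3f(-squareLength / 2, -squareLength / 2, 0)); // point 3
std::vector<cv::Point2f> imagePoints = detectMarkerCorners(); // hypothetical detector, same corner order
cv::Mat rvec, tvec;
bool ok = cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                       rvec, tvec, false, cv::SOLVEPNP_IPPE_SQUARE);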
@@ -712,10 +748,10 @@ where N is the number of points. vector\<Point2f\> can also be passed here.
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
@param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
@param tvec Output translation vector.
@param useExtrinsicGuess Parameter used for @ref SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
@param iterationsCount Number of iterations.
@@ -724,12 +760,12 @@ is the maximum allowed distance between the observed and computed point projections to consider it
an inlier.
@param confidence The probability that the algorithm produces a useful result.
@param inliers Output vector that contains indices of inliers in objectPoints and imagePoints .
@param flags Method for solving a PnP problem (see @ref solvePnP ).
The function estimates an object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients. This function finds such
a pose that minimizes reprojection error, that is, the sum of squared distances between the observed
projections imagePoints and the projected (using @ref projectPoints ) objectPoints. The use of RANSAC
makes the function resistant to outliers.
@note
@@ -749,6 +785,7 @@ CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
bool useExtrinsicGuess = false, int iterationsCount = 100,
float reprojectionError = 8.0, double confidence = 0.99,
OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE );
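A hedged usage sketch of the RANSAC variant on correspondences that may contain outliers (objectPoints, imagePoints, cameraMatrix and distCoeffs are assumed to exist):

cv::Mat rvec, tvec;
std::vector<int> inliers;
bool ok = cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                             rvec, tvec, false /*useExtrinsicGuess*/, 100 /*iterationsCount*/,
                             8.0f /*reprojectionError*/, 0.99 /*confidence*/, inliers);
// inliers now holds the indices of the correspondences consistent with the returned pose.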
/** @brief Finds an object pose from 3 3D-2D point correspondences.
@param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or
@@ -760,17 +797,20 @@ CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints,
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
@param rvecs Output rotation vectors (see @ref Rodrigues ) that, together with tvecs, bring points from
the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.
@param tvecs Output translation vectors.
@param flags Method for solving a P3P problem:
- **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
"Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
- **SOLVEPNP_AP3P** Method is based on the paper of T. Ke and S. Roumeliotis.
"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
The function estimates the object pose given 3 object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients.
@note
The solutions are sorted by reprojection errors (lowest to highest).
*/
CV_EXPORTS_W int solveP3P( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
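A sketch of retrieving all P3P candidates, under the same assumed inputs; with this change the candidates come back sorted by reprojection error:

std::vector<cv::Mat> rvecs, tvecs; // up to 4 candidate poses
int nbSolutions = cv::solveP3P(objectPoints3, imagePoints3, // exactly 3 correspondences (assumed data)
                               cameraMatrix, distCoeffs, rvecs, tvecs, cv::SOLVEPNP_AP3P);
// rvecs[0]/tvecs[0] is the candidate with the lowest reprojection error (see note above).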
@@ -789,7 +829,7 @@ where N is the number of points. vector\<Point2f\> can also be passed here.
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
@param tvec Input/Output translation vector. Input values are used as an initial solution.
@param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
@@ -817,12 +857,12 @@ where N is the number of points. vector\<Point2f\> can also be passed here.
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
@param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system. Input values are used as an initial solution.
@param tvec Input/Output translation vector. Input values are used as an initial solution.
@param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
@param VVSlambda Gain for the virtual visual servoing control law, equivalent to the \f$\alpha\f$
gain in the Damped Gauss-Newton formulation.
The function refines the object pose given at least 3 object points, their corresponding image
projections, an initial solution for the rotation and translation vector,
@@ -836,6 +876,202 @@ CV_EXPORTS_W void solvePnPRefineVVS( InputArray objectPoints, InputArray imagePoints,
TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON),
double VVSlambda = 1);
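A sketch of the refinement workflow, assuming a coarse pose and the usual inputs are already available:

cv::Mat rvec, tvec;
cv::solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec); // coarse pose
cv::solvePnPRefineLM(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec); // refine in place
// or the virtual visual servoing variant, here with the default criteria and gain:
cv::solvePnPRefineVVS(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);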
/** @brief Finds an object pose from 3D-2D point correspondences.
This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector>
couple), depending on the number of input points and the chosen method:
- P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points.
- @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions.
- @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation.
Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order:
- point 0: [-squareLength / 2, squareLength / 2, 0]
- point 1: [ squareLength / 2, squareLength / 2, 0]
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- point 3: [-squareLength / 2, -squareLength / 2, 0]
- for all the other flags, number of input points must be >= 4 and object points can be in any configuration.
Only 1 solution is returned.
@param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector\<Point3f\> can also be passed here.
@param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector\<Point2f\> can also be passed here.
@param cameraMatrix Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .
@param distCoeffs Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
@param rvecs Vector of output rotation vectors (see @ref Rodrigues ) that, together with tvecs, bring points from
the model coordinate system to the camera coordinate system.
@param tvecs Vector of output translation vectors.
@param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.
@param flags Method for solving a PnP problem:
- **SOLVEPNP_ITERATIVE** Iterative method is based on a Levenberg-Marquardt optimization. In
this case the function finds such a pose that minimizes reprojection error, that is the sum
of squared distances between the observed projections imagePoints and the projected (using
projectPoints ) objectPoints .
- **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
"Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete).
In this case the function requires exactly four object and image points.
- **SOLVEPNP_AP3P** Method is based on the paper of T. Ke and S. Roumeliotis
"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17).
In this case the function requires exactly four object and image points.
- **SOLVEPNP_EPNP** Method has been introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (@cite lepetit2009epnp).
- **SOLVEPNP_DLS** Method is based on the paper of J. Hesch and S. Roumeliotis.
"A Direct Least-Squares (DLS) Method for PnP" (@cite hesch2011direct).
- **SOLVEPNP_UPNP** Method is based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,
F. Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
Estimation" (@cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
assuming that both have the same value. Then the cameraMatrix is updated with the estimated
focal length.
- **SOLVEPNP_IPPE** Method is based on the paper of T. Collins and A. Bartoli.
"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method requires coplanar object points.
- **SOLVEPNP_IPPE_SQUARE** Method is based on the paper of T. Collins and A. Bartoli.
"Infinitesimal Plane-Based Pose Estimation" (@cite Collins14). This method is suitable for marker pose estimation.
It requires 4 coplanar object points defined in the following order:
- point 0: [-squareLength / 2, squareLength / 2, 0]
- point 1: [ squareLength / 2, squareLength / 2, 0]
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- point 3: [-squareLength / 2, -squareLength / 2, 0]
@param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is SOLVEPNP_ITERATIVE
and useExtrinsicGuess is set to true.
@param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is SOLVEPNP_ITERATIVE
and useExtrinsicGuess is set to true.
@param reprojectionError Optional vector of reprojection error, that is the RMS error
(\f$ \text{RMSE} = \sqrt{\frac{\sum_{i=1}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \f$) between the input image points
and the 3D object points projected with the estimated pose.
The function estimates the object pose given a set of object points, their corresponding image
projections, as well as the camera matrix and the distortion coefficients, see the figure below
(more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward
and the Z-axis forward).
![](pnp.jpg)
Points expressed in the world frame \f$ \bf{X}_w \f$ are projected into the image plane \f$ \left[ u, v \right] \f$
using the perspective projection model \f$ \Pi \f$ and the camera intrinsic parameters matrix \f$ \bf{A} \f$:
\f[
\begin{align*}
\begin{bmatrix}
u \\
v \\
1
\end{bmatrix} &=
\bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
u \\
v \\
1
\end{bmatrix} &=
\begin{bmatrix}
f_x & 0 & c_x \\
0 & f_y & c_y \\
0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
1 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0
\end{bmatrix}
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\f]
The estimated pose is thus the rotation (`rvec`) and the translation (`tvec`) vectors that allow transforming
a 3D point expressed in the world frame into the camera frame:
\f[
\begin{align*}
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\hspace{0.2em} ^{c}\bf{M}_w
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix} \\
\begin{bmatrix}
X_c \\
Y_c \\
Z_c \\
1
\end{bmatrix} &=
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_x \\
r_{21} & r_{22} & r_{23} & t_y \\
r_{31} & r_{32} & r_{33} & t_z \\
0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
X_{w} \\
Y_{w} \\
Z_{w} \\
1
\end{bmatrix}
\end{align*}
\f]
@note
- An example of how to use solvePnP for planar augmented reality can be found at
opencv_source_code/samples/python/plane_ar.py
- If you are using Python:
- Numpy array slices won't work as input because solvePnP requires contiguous
arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
modules/calib3d/src/solvepnp.cpp version 2.4.9)
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
which requires 2-channel information.
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- The methods **SOLVEPNP_DLS** and **SOLVEPNP_UPNP** cannot be used as the current implementations are
unstable and sometimes give completely wrong results. If you pass one of these two
flags, **SOLVEPNP_EPNP** method will be used instead.
- The minimum number of points is 4 in the general case. In the case of **SOLVEPNP_P3P** and **SOLVEPNP_AP3P**
methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- With **SOLVEPNP_ITERATIVE** method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points
are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
global solution to converge.
- With **SOLVEPNP_IPPE**, input points must be >= 4 and object points must be coplanar.
- **SOLVEPNP_IPPE_SQUARE** is a special case suitable for marker pose estimation.
Number of input points must be 4. Object points must be defined in the following order:
- point 0: [-squareLength / 2, squareLength / 2, 0]
- point 1: [ squareLength / 2, squareLength / 2, 0]
- point 2: [ squareLength / 2, -squareLength / 2, 0]
- point 3: [-squareLength / 2, -squareLength / 2, 0]
*/
CV_EXPORTS_W int solvePnPGeneric( InputArray objectPoints, InputArray imagePoints,
InputArray cameraMatrix, InputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE,
InputArray rvec = noArray(), InputArray tvec = noArray(),
OutputArray reprojectionError = noArray() );
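A sketch of the new generic entry point on an assumed planar target; it returns every candidate pose together with its RMS reprojection error:

std::vector<cv::Mat> rvecs, tvecs;
cv::Mat reprojErrors;
int nbSolutions = cv::solvePnPGeneric(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                                      rvecs, tvecs, false, cv::SOLVEPNP_IPPE,
                                      cv::noArray(), cv::noArray(), reprojErrors);
// with SOLVEPNP_IPPE, nbSolutions is 2 and the solutions are sorted by reprojection error,
// so rvecs[0]/tvecs[0] is the better of the two poses.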
/** @brief Finds an initial camera matrix from 3D-2D point correspondences.
@param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern
@@ -933,7 +1169,7 @@ CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize,
@param distCoeffs Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.
@param rvec Rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from
the model coordinate system to the camera coordinate system.
@param tvec Translation vector.
@param length Length of the painted axes in the same unit as tvec (usually in meters).
...
#include "precomp.hpp"
#include "ap3p.h" #include "ap3p.h"
#include <cmath> #include <cmath>
...@@ -154,10 +155,11 @@ ap3p::ap3p(double _fx, double _fy, double _cx, double _cy) { ...@@ -154,10 +155,11 @@ ap3p::ap3p(double _fx, double _fy, double _cx, double _cy) {
// worldPoints: The positions of the 3 feature points stored as column vectors // worldPoints: The positions of the 3 feature points stored as column vectors
// solutionsR: 4 possible solutions of rotation matrix of the world w.r.t the camera frame // solutionsR: 4 possible solutions of rotation matrix of the world w.r.t the camera frame
// solutionsT: 4 possible solutions of translation of the world origin w.r.t the camera frame // solutionsT: 4 possible solutions of translation of the world origin w.r.t the camera frame
int ap3p::computePoses(const double featureVectors[3][3], int ap3p::computePoses(const double featureVectors[3][4],
const double worldPoints[3][3], const double worldPoints[3][4],
double solutionsR[4][3][3], double solutionsR[4][3][3],
double solutionsT[4][3]) { double solutionsT[4][3],
bool p4p) {
//world point vectors
double w1[3] = {worldPoints[0][0], worldPoints[1][0], worldPoints[2][0]};
@@ -246,6 +248,13 @@ int ap3p::computePoses(const double featureVectors[3][3],
double b3p[3];
vect_scale((delta / k3b3), b3, b3p);
double X3 = worldPoints[0][3];
double Y3 = worldPoints[1][3];
double Z3 = worldPoints[2][3];
double mu3 = featureVectors[0][3];
double mv3 = featureVectors[1][3];
double reproj_errors[4];
int nb_solutions = 0;
for (int i = 0; i < 4; ++i) {
double ctheta1p = s[i];
@@ -290,9 +299,29 @@ int ap3p::computePoses(const double featureVectors[3][3],
solutionsR[nb_solutions][1][2] = R[2][1];
solutionsR[nb_solutions][2][2] = R[2][2];
if (p4p) {
double X3p = solutionsR[nb_solutions][0][0] * X3 + solutionsR[nb_solutions][0][1] * Y3 + solutionsR[nb_solutions][0][2] * Z3 + solutionsT[nb_solutions][0];
double Y3p = solutionsR[nb_solutions][1][0] * X3 + solutionsR[nb_solutions][1][1] * Y3 + solutionsR[nb_solutions][1][2] * Z3 + solutionsT[nb_solutions][1];
double Z3p = solutionsR[nb_solutions][2][0] * X3 + solutionsR[nb_solutions][2][1] * Y3 + solutionsR[nb_solutions][2][2] * Z3 + solutionsT[nb_solutions][2];
double mu3p = X3p / Z3p;
double mv3p = Y3p / Z3p;
reproj_errors[nb_solutions] = (mu3p - mu3) * (mu3p - mu3) + (mv3p - mv3) * (mv3p - mv3);
}
nb_solutions++;
}
//sort the solutions
if (p4p) {
for (int i = 1; i < nb_solutions; i++) {
for (int j = i; j > 0 && reproj_errors[j-1] > reproj_errors[j]; j--) {
std::swap(reproj_errors[j], reproj_errors[j-1]);
std::swap(solutionsR[j], solutionsR[j-1]);
std::swap(solutionsT[j], solutionsT[j-1]);
}
}
}
return nb_solutions;
}
@@ -311,9 +340,10 @@ bool ap3p::solve(cv::Mat &R, cv::Mat &tvec, const cv::Mat &opoints, const cv::Mat &ipoints) {
else
extract_points<cv::Point3d, cv::Point2f>(opoints, ipoints, points);
bool result = solve(rotation_matrix, translation,
points[0], points[1], points[2], points[3], points[4],
points[5], points[6], points[7], points[8], points[9],
points[10], points[11], points[12], points[13], points[14],
points[15], points[16], points[17], points[18], points[19]);
cv::Mat(3, 1, CV_64F, translation).copyTo(tvec);
cv::Mat(3, 3, CV_64F, rotation_matrix).copyTo(R);
@@ -335,10 +365,13 @@ int ap3p::solve(std::vector<cv::Mat> &Rs, std::vector<cv::Mat> &tvecs, const cv::Mat &opoints, const cv::Mat &ipoints) {
else
extract_points<cv::Point3d, cv::Point2f>(opoints, ipoints, points);
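// checkVector(3, depth) returns the number of points when opoints is laid out as 3-channel
// data of that depth (and -1 otherwise), so exactly 4 object points selects the P4P path.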
const bool p4p = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F)) == 4;
int solutions = solve(rotation_matrix, translation,
points[0], points[1], points[2], points[3], points[4],
points[5], points[6], points[7], points[8], points[9],
points[10], points[11], points[12], points[13], points[14],
points[15], points[16], points[17], points[18], points[19],
p4p);
for (int i = 0; i < solutions; i++) {
cv::Mat R, tvec;
@@ -353,42 +386,33 @@ int ap3p::solve(std::vector<cv::Mat> &Rs, std::vector<cv::Mat> &tvecs, const cv::Mat &opoints, const cv::Mat &ipoints) {
}
bool
ap3p::solve(double R[3][3], double t[3],
double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1,
double mu2, double mv2, double X2, double Y2, double Z2,
double mu3, double mv3, double X3, double Y3, double Z3) {
double Rs[4][3][3], ts[4][3];
const bool p4p = true;
int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2, mu3, mv3, X3, Y3, Z3, p4p);
if (n == 0)
return false;
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++)
R[i][j] = Rs[0][i][j];
t[i] = ts[0][i];
}
return true;
}
int ap3p::solve(double R[4][3][3], double t[4][3],
double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1,
double mu2, double mv2, double X2, double Y2, double Z2,
double mu3, double mv3, double X3, double Y3, double Z3,
bool p4p) {
double mk0, mk1, mk2;
double norm;
@@ -413,13 +437,17 @@ int ap3p::solve(double R[4][3][3], double t[4][3], double mu0, double mv0, double X0, double Y0, double Z0, double mu1,
mu2 *= mk2;
mv2 *= mk2;
mu3 = inv_fx * mu3 - cx_fx;
mv3 = inv_fy * mv3 - cy_fy;
double mk3 = 1; //not used

double featureVectors[3][4] = {{mu0, mu1, mu2, mu3},
{mv0, mv1, mv2, mv3},
{mk0, mk1, mk2, mk3}};
double worldPoints[3][4] = {{X0, X1, X2, X3},
{Y0, Y1, Y2, Y3},
{Z0, Z1, Z2, Z3}};
return computePoses(featureVectors, worldPoints, R, t, p4p);
}
}
#ifndef P3P_P3P_H
#define P3P_P3P_H
#include <opencv2/core.hpp>
namespace cv {
class ap3p {
@@ -18,7 +18,7 @@ private:
void extract_points(const cv::Mat &opoints, const cv::Mat &ipoints, std::vector<double> &points) {
points.clear();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
points.resize(5*4); //resize vector to fit for p4p case
for (int i = 0; i < npoints; i++) {
points[i * 5] = ipoints.at<IpointType>(i).x * fx + cx;
points[i * 5 + 1] = ipoints.at<IpointType>(i).y * fy + cy;
@@ -26,6 +26,12 @@ private:
points[i * 5 + 3] = opoints.at<OpointType>(i).y;
points[i * 5 + 4] = opoints.at<OpointType>(i).z;
}
//Fill vectors with unused values for p3p case
for (int i = npoints; i < 4; i++) {
for (int j = 0; j < 5; j++) {
points[i * 5 + j] = 0;
}
}
}
void init_inverse_parameters();
@@ -45,7 +51,9 @@ public:
int solve(double R[4][3][3], double t[4][3],
double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1,
double mu2, double mv2, double X2, double Y2, double Z2,
double mu3, double mv3, double X3, double Y3, double Z3,
bool p4p);
bool solve(double R[3][3], double t[3],
double mu0, double mv0, double X0, double Y0, double Z0,
@@ -59,8 +67,8 @@ public:
// worldPoints: Positions of the 3 feature points stored as column vectors
// solutionsR: 4 possible solutions of rotation matrix of the world w.r.t the camera frame
// solutionsT: 4 possible solutions of translation of the world origin w.r.t the camera frame
int computePoses(const double featureVectors[3][4], const double worldPoints[3][4], double solutionsR[4][3][3],
double solutionsT[4][3], bool p4p);
};
}
...
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// This file is based on a file issued with the following license:
/*============================================================================
Copyright 2017 Toby Collins
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "precomp.hpp"
#include "ippe.hpp"
namespace cv {
namespace IPPE {
PoseSolver::PoseSolver() : IPPE_SMALL(1e-3)
{
}
void PoseSolver::solveGeneric(InputArray _objectPoints, InputArray _imagePoints, OutputArray _rvec1, OutputArray _tvec1,
float& err1, OutputArray _rvec2, OutputArray _tvec2, float& err2)
{
Mat normalizedImagePoints;
if (_imagePoints.getMat().type() == CV_32FC2)
{
_imagePoints.getMat().convertTo(normalizedImagePoints, CV_64F);
}
else
{
normalizedImagePoints = _imagePoints.getMat();
}
//solve:
Mat Ma, Mb;
solveGeneric(_objectPoints, normalizedImagePoints, Ma, Mb);
//the two poses computed by IPPE (sorted):
Mat M1, M2;
//sort poses by reprojection error:
sortPosesByReprojError(_objectPoints, normalizedImagePoints, Ma, Mb, M1, M2, err1, err2);
//fill outputs
rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);
M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
}
void PoseSolver::solveGeneric(InputArray _objectPoints, InputArray _normalizedInputPoints,
OutputArray _Ma, OutputArray _Mb)
{
//argument checking:
size_t n = static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols()); //number of points
int objType = _objectPoints.type();
int type_input = _normalizedInputPoints.type();
CV_CheckType(objType, objType == CV_32FC3 || objType == CV_64FC3,
"Type of _objectPoints must be CV_32FC3 or CV_64FC3" );
CV_CheckType(type_input, type_input == CV_32FC2 || type_input == CV_64FC2,
"Type of _normalizedInputPoints must be CV_32FC3 or CV_64FC3" );
CV_Assert(_objectPoints.rows() == 1 || _objectPoints.cols() == 1);
CV_Assert(_objectPoints.rows() >= 4 || _objectPoints.cols() >= 4);
CV_Assert(_normalizedInputPoints.rows() == 1 || _normalizedInputPoints.cols() == 1);
CV_Assert(static_cast<size_t>(_normalizedInputPoints.rows() * _normalizedInputPoints.cols()) == n);
Mat normalizedInputPoints;
if (type_input == CV_32FC2)
{
_normalizedInputPoints.getMat().convertTo(normalizedInputPoints, CV_64F);
}
else
{
normalizedInputPoints = _normalizedInputPoints.getMat();
}
Mat objectInputPoints;
if (objType == CV_32FC3)
{
_objectPoints.getMat().convertTo(objectInputPoints, CV_64F);
}
else
{
objectInputPoints = _objectPoints.getMat();
}
Mat canonicalObjPoints;
Mat MmodelPoints2Canonical;
//transform object points to the canonical position (zero centred and on the plane z=0):
makeCanonicalObjectPoints(objectInputPoints, canonicalObjPoints, MmodelPoints2Canonical);
//compute the homography mapping the model's points to normalizedInputPoints
Matx33d H;
HomographyHO::homographyHO(canonicalObjPoints, normalizedInputPoints, H);
//now solve
Mat MaCanon, MbCanon;
solveCanonicalForm(canonicalObjPoints, normalizedInputPoints, H, MaCanon, MbCanon);
//transform computed poses to account for canonical transform:
Mat Ma = MaCanon * MmodelPoints2Canonical;
Mat Mb = MbCanon * MmodelPoints2Canonical;
//output poses:
Ma.copyTo(_Ma);
Mb.copyTo(_Mb);
}
void PoseSolver::solveCanonicalForm(InputArray _canonicalObjPoints, InputArray _normalizedInputPoints, const Matx33d& H,
OutputArray _Ma, OutputArray _Mb)
{
_Ma.create(4, 4, CV_64FC1);
_Mb.create(4, 4, CV_64FC1);
Mat Ma = _Ma.getMat();
Mat Mb = _Mb.getMat();
//initialise poses:
Ma.setTo(0);
Ma.at<double>(3, 3) = 1;
Mb.setTo(0);
Mb.at<double>(3, 3) = 1;
//Compute the Jacobian J of the homography at (0,0):
double j00 = H(0, 0) - H(2, 0) * H(0, 2);
double j01 = H(0, 1) - H(2, 1) * H(0, 2);
double j10 = H(1, 0) - H(2, 0) * H(1, 2);
double j11 = H(1, 1) - H(2, 1) * H(1, 2);
//Compute the transformation of (0,0) into the image:
double v0 = H(0, 2);
double v1 = H(1, 2);
//compute the two rotation solutions:
Mat Ra = Ma.colRange(0, 3).rowRange(0, 3);
Mat Rb = Mb.colRange(0, 3).rowRange(0, 3);
computeRotations(j00, j01, j10, j11, v0, v1, Ra, Rb);
//for each rotation solution, compute the corresponding translation solution:
Mat ta = Ma.colRange(3, 4).rowRange(0, 3);
Mat tb = Mb.colRange(3, 4).rowRange(0, 3);
computeTranslation(_canonicalObjPoints, _normalizedInputPoints, Ra, ta);
computeTranslation(_canonicalObjPoints, _normalizedInputPoints, Rb, tb);
}
void PoseSolver::solveSquare(InputArray _objectPoints, InputArray _imagePoints, OutputArray _rvec1, OutputArray _tvec1,
float& err1, OutputArray _rvec2, OutputArray _tvec2, float& err2)
{
//allocate outputs:
_rvec1.create(3, 1, CV_64FC1);
_tvec1.create(3, 1, CV_64FC1);
_rvec2.create(3, 1, CV_64FC1);
_tvec2.create(3, 1, CV_64FC1);
Mat objectPoints2D;
//generate the object points:
objectPoints2D.create(1, 4, CV_64FC2);
Mat objectPoints = _objectPoints.getMat();
double squareLength;
if (objectPoints.depth() == CV_32F)
{
objectPoints2D.ptr<Vec2d>(0)[0] = Vec2d(objectPoints.ptr<Vec3f>(0)[0](0), objectPoints.ptr<Vec3f>(0)[0](1));
objectPoints2D.ptr<Vec2d>(0)[1] = Vec2d(objectPoints.ptr<Vec3f>(0)[1](0), objectPoints.ptr<Vec3f>(0)[1](1));
objectPoints2D.ptr<Vec2d>(0)[2] = Vec2d(objectPoints.ptr<Vec3f>(0)[2](0), objectPoints.ptr<Vec3f>(0)[2](1));
objectPoints2D.ptr<Vec2d>(0)[3] = Vec2d(objectPoints.ptr<Vec3f>(0)[3](0), objectPoints.ptr<Vec3f>(0)[3](1));
squareLength = sqrt( (objectPoints.ptr<Vec3f>(0)[1](0) - objectPoints.ptr<Vec3f>(0)[0](0))*
(objectPoints.ptr<Vec3f>(0)[1](0) - objectPoints.ptr<Vec3f>(0)[0](0)) +
(objectPoints.ptr<Vec3f>(0)[1](1) - objectPoints.ptr<Vec3f>(0)[0](1))*
(objectPoints.ptr<Vec3f>(0)[1](1) - objectPoints.ptr<Vec3f>(0)[0](1)) );
}
else
{
objectPoints2D.ptr<Vec2d>(0)[0] = Vec2d(objectPoints.ptr<Vec3d>(0)[0](0), objectPoints.ptr<Vec3d>(0)[0](1));
objectPoints2D.ptr<Vec2d>(0)[1] = Vec2d(objectPoints.ptr<Vec3d>(0)[1](0), objectPoints.ptr<Vec3d>(0)[1](1));
objectPoints2D.ptr<Vec2d>(0)[2] = Vec2d(objectPoints.ptr<Vec3d>(0)[2](0), objectPoints.ptr<Vec3d>(0)[2](1));
objectPoints2D.ptr<Vec2d>(0)[3] = Vec2d(objectPoints.ptr<Vec3d>(0)[3](0), objectPoints.ptr<Vec3d>(0)[3](1));
squareLength = sqrt( (objectPoints.ptr<Vec3d>(0)[1](0) - objectPoints.ptr<Vec3d>(0)[0](0))*
(objectPoints.ptr<Vec3d>(0)[1](0) - objectPoints.ptr<Vec3d>(0)[0](0)) +
(objectPoints.ptr<Vec3d>(0)[1](1) - objectPoints.ptr<Vec3d>(0)[0](1))*
(objectPoints.ptr<Vec3d>(0)[1](1) - objectPoints.ptr<Vec3d>(0)[0](1)) );
}
Mat H; //homography from canonical object points to normalized pixels
Mat normalizedInputPoints;
if (_imagePoints.getMat().type() == CV_32FC2)
{
_imagePoints.getMat().convertTo(normalizedInputPoints, CV_64F);
}
else
{
normalizedInputPoints = _imagePoints.getMat();
}
//compute H
homographyFromSquarePoints(normalizedInputPoints, squareLength / 2.0, H);
//now solve
Mat Ma, Mb;
solveCanonicalForm(objectPoints2D, normalizedInputPoints, H, Ma, Mb);
//sort poses according to reprojection error:
Mat M1, M2;
sortPosesByReprojError(_objectPoints, normalizedInputPoints, Ma, Mb, M1, M2, err1, err2);
//fill outputs
rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);
M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
}
void PoseSolver::generateSquareObjectCorners3D(double squareLength, OutputArray _objectPoints)
{
_objectPoints.create(1, 4, CV_64FC3);
Mat objectPoints = _objectPoints.getMat();
objectPoints.ptr<Vec3d>(0)[0] = Vec3d(-squareLength / 2.0, squareLength / 2.0, 0.0);
objectPoints.ptr<Vec3d>(0)[1] = Vec3d(squareLength / 2.0, squareLength / 2.0, 0.0);
objectPoints.ptr<Vec3d>(0)[2] = Vec3d(squareLength / 2.0, -squareLength / 2.0, 0.0);
objectPoints.ptr<Vec3d>(0)[3] = Vec3d(-squareLength / 2.0, -squareLength / 2.0, 0.0);
}
void PoseSolver::generateSquareObjectCorners2D(double squareLength, OutputArray _objectPoints)
{
_objectPoints.create(1, 4, CV_64FC2);
Mat objectPoints = _objectPoints.getMat();
objectPoints.ptr<Vec2d>(0)[0] = Vec2d(-squareLength / 2.0, squareLength / 2.0);
objectPoints.ptr<Vec2d>(0)[1] = Vec2d(squareLength / 2.0, squareLength / 2.0);
objectPoints.ptr<Vec2d>(0)[2] = Vec2d(squareLength / 2.0, -squareLength / 2.0);
objectPoints.ptr<Vec2d>(0)[3] = Vec2d(-squareLength / 2.0, -squareLength / 2.0);
}
double PoseSolver::meanSceneDepth(InputArray _objectPoints, InputArray _rvec, InputArray _tvec)
{
CV_CheckType(_objectPoints.type(), _objectPoints.type() == CV_64FC3,
"Type of _objectPoints must be CV_64FC3" );
size_t n = static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols());
Mat R;
Mat q;
Rodrigues(_rvec, R);
double zBar = 0;
for (size_t i = 0; i < n; i++)
{
Mat p(_objectPoints.getMat().at<Point3d>(static_cast<int>(i)));
q = R * p + _tvec.getMat();
double z;
if (q.depth() == CV_64F)
{
z = q.at<double>(2);
}
else
{
z = static_cast<double>(q.at<float>(2));
}
zBar += z;
}
return zBar / static_cast<double>(n);
}
void PoseSolver::rot2vec(InputArray _R, OutputArray _r)
{
CV_CheckType(_R.type(), _R.type() == CV_64FC1,
"Type of _R must be CV_64FC1" );
CV_Assert(_R.rows() == 3);
CV_Assert(_R.cols() == 3);
_r.create(3, 1, CV_64FC1);
Mat R = _R.getMat();
Mat rvec = _r.getMat();
double trace = R.at<double>(0, 0) + R.at<double>(1, 1) + R.at<double>(2, 2);
double w_norm = acos((trace - 1.0) / 2.0);
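//for a rotation by angle theta, trace(R) = 1 + 2*cos(theta), so w_norm is the rotation angle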
double eps = std::numeric_limits<float>::epsilon();
if (w_norm < eps) //rotation is the identity
{
rvec.setTo(0);
}
else
{
double d = 1 / (2 * sin(w_norm)) * w_norm; //well defined here since w_norm >= eps
double c0 = R.at<double>(2, 1) - R.at<double>(1, 2);
double c1 = R.at<double>(0, 2) - R.at<double>(2, 0);
double c2 = R.at<double>(1, 0) - R.at<double>(0, 1);
rvec.at<double>(0) = d * c0;
rvec.at<double>(1) = d * c1;
rvec.at<double>(2) = d * c2;
}
}
void PoseSolver::computeTranslation(InputArray _objectPoints, InputArray _normalizedImgPoints, InputArray _R, OutputArray _t)
{
//This is solved by building the linear system At = b, where t corresponds to the (unknown) translation.
//This is then inverted with the associated normal equations to give t = inv(transpose(A)*A)*transpose(A)*b
//For efficiency we only store the coefficients of (transpose(A)*A) and (transpose(A)*b)
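//Each correspondence with rotated planar point (rx, ry, rz) and normalized image point (u, v)
//gives (rx + tx)/(rz + tz) = u and (ry + ty)/(rz + tz) = v, i.e. the two rows [1, 0, -u] and
//[0, 1, -v] of A with right-hand sides (u*rz - rx) and (v*rz - ry). Below, a2 = -u and b2 = -v.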
CV_CheckType(_objectPoints.type(), _objectPoints.type() == CV_64FC2,
"Type of _objectPoints must be CV_64FC2" );
CV_CheckType(_normalizedImgPoints.type(), _normalizedImgPoints.type() == CV_64FC2,
"Type of _normalizedImgPoints must be CV_64FC2" );
CV_CheckType(_R.type(), _R.type() == CV_64FC1,
"Type of _R must be CV_64FC1" );
CV_Assert(_R.rows() == 3 && _R.cols() == 3);
CV_Assert(_objectPoints.rows() == 1 || _objectPoints.cols() == 1);
CV_Assert(_normalizedImgPoints.rows() == 1 || _normalizedImgPoints.cols() == 1);
size_t n = static_cast<size_t>(_normalizedImgPoints.rows() * _normalizedImgPoints.cols());
CV_Assert(n == static_cast<size_t>(_objectPoints.rows() * _objectPoints.cols()));
Mat objectPoints = _objectPoints.getMat();
Mat imgPoints = _normalizedImgPoints.getMat();
_t.create(3, 1, CV_64FC1);
Mat R = _R.getMat();
//coefficients of (transpose(A)*A)
double ATA00 = static_cast<double>(n);
double ATA02 = 0;
double ATA11 = static_cast<double>(n);
double ATA12 = 0;
double ATA20 = 0;
double ATA21 = 0;
double ATA22 = 0;
//coefficients of (transpose(A)*b)
double ATb0 = 0;
double ATb1 = 0;
double ATb2 = 0;
//now loop through each point and increment the coefficients:
for (int i = 0; i < static_cast<int>(n); i++)
{
const Vec2d& objPt = objectPoints.at<Vec2d>(i);
double rx = R.at<double>(0, 0) * objPt(0) + R.at<double>(0, 1) * objPt(1);
double ry = R.at<double>(1, 0) * objPt(0) + R.at<double>(1, 1) * objPt(1);
double rz = R.at<double>(2, 0) * objPt(0) + R.at<double>(2, 1) * objPt(1);
const Vec2d& imgPt = imgPoints.at<Vec2d>(i);
double a2 = -imgPt(0);
double b2 = -imgPt(1);
ATA02 = ATA02 + a2;
ATA12 = ATA12 + b2;
ATA20 = ATA20 + a2;
ATA21 = ATA21 + b2;
ATA22 = ATA22 + a2 * a2 + b2 * b2;
double bx = -a2 * rz - rx;
double by = -b2 * rz - ry;
ATb0 = ATb0 + bx;
ATb1 = ATb1 + by;
ATb2 = ATb2 + a2 * bx + b2 * by;
}
double detAInv = 1.0 / (ATA00 * ATA11 * ATA22 - ATA00 * ATA12 * ATA21 - ATA02 * ATA11 * ATA20);
//S is the adjugate of (transpose(A)*A), so inv(transpose(A)*A) = S / det(transpose(A)*A)
//construct S:
double S00 = ATA11 * ATA22 - ATA12 * ATA21;
double S01 = ATA02 * ATA21;
double S02 = -ATA02 * ATA11;
double S10 = ATA12 * ATA20;
double S11 = ATA00 * ATA22 - ATA02 * ATA20;
double S12 = -ATA00 * ATA12;
double S20 = -ATA11 * ATA20;
double S21 = -ATA00 * ATA21;
double S22 = ATA00 * ATA11;
//solve t:
Mat t = _t.getMat();
t.at<double>(0) = detAInv * (S00 * ATb0 + S01 * ATb1 + S02 * ATb2);
t.at<double>(1) = detAInv * (S10 * ATb0 + S11 * ATb1 + S12 * ATb2);
t.at<double>(2) = detAInv * (S20 * ATb0 + S21 * ATb1 + S22 * ATb2);
}
void PoseSolver::computeRotations(double j00, double j01, double j10, double j11, double p, double q, OutputArray _R1, OutputArray _R2)
{
//This is fairly optimized code, which makes it hard to follow. The MATLAB code is certainly easier to read.
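//In outline: a rotation Rv aligning v = (p, q, 1) with the z axis is computed first; B is the
//2x2 system formed from Rv and (p, q); A = inv(B) * J; gamma is the largest singular value of A;
//Rtilde = A / gamma is the upper-left 2x2 block of the sought rotation; and the two sign choices
//of its out-of-plane components (+/- b0, +/- b1) yield the two rotation solutions R1 and R2.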
_R1.create(3, 3, CV_64FC1);
_R2.create(3, 3, CV_64FC1);
Matx33d Rv;
Matx31d v(p, q, 1);
rotateVec2ZAxis(v,Rv);
Rv = Rv.t();
//setup the 2x2 SVD decomposition:
double rv00 = Rv(0,0);
double rv01 = Rv(0,1);
double rv02 = Rv(0,2);
double rv10 = Rv(1,0);
double rv11 = Rv(1,1);
double rv12 = Rv(1,2);
double rv20 = Rv(2,0);
double rv21 = Rv(2,1);
double rv22 = Rv(2,2);
double b00 = rv00 - p * rv20;
double b01 = rv01 - p * rv21;
double b10 = rv10 - q * rv20;
double b11 = rv11 - q * rv21;
double dtinv = 1.0 / ((b00 * b11 - b01 * b10));
double binv00 = dtinv * b11;
double binv01 = -dtinv * b01;
double binv10 = -dtinv * b10;
double binv11 = dtinv * b00;
double a00 = binv00 * j00 + binv01 * j10;
double a01 = binv00 * j01 + binv01 * j11;
double a10 = binv10 * j00 + binv11 * j10;
double a11 = binv10 * j01 + binv11 * j11;
//compute the largest singular value of A:
double ata00 = a00 * a00 + a01 * a01;
double ata01 = a00 * a10 + a01 * a11;
double ata11 = a10 * a10 + a11 * a11;
double gamma2 = 0.5 * (ata00 + ata11 + sqrt((ata00 - ata11) * (ata00 - ata11) + 4.0 * ata01 * ata01));
if (gamma2 < 0)
CV_Error(Error::StsNoConv, "gamma2 is negative.");
double gamma = sqrt(gamma2);
if (std::fabs(gamma) < std::numeric_limits<float>::epsilon())
CV_Error(Error::StsNoConv, "gamma is zero.");
//reconstruct the full rotation matrices:
double rtilde00 = a00 / gamma;
double rtilde01 = a01 / gamma;
double rtilde10 = a10 / gamma;
double rtilde11 = a11 / gamma;
double rtilde00_2 = rtilde00 * rtilde00;
double rtilde01_2 = rtilde01 * rtilde01;
double rtilde10_2 = rtilde10 * rtilde10;
double rtilde11_2 = rtilde11 * rtilde11;
//clamp to zero to guard against tiny negative arguments caused by rounding
double b0 = sqrt(std::max(0.0, -rtilde00_2 - rtilde10_2 + 1));
double b1 = sqrt(std::max(0.0, -rtilde01_2 - rtilde11_2 + 1));
double sp = (-rtilde00 * rtilde01 - rtilde10 * rtilde11);
if (sp < 0)
{
b1 = -b1;
}
//store results:
Mat R1 = _R1.getMat();
Mat R2 = _R2.getMat();
R1.at<double>(0, 0) = (rtilde00)*rv00 + (rtilde10)*rv01 + (b0)*rv02;
R1.at<double>(0, 1) = (rtilde01)*rv00 + (rtilde11)*rv01 + (b1)*rv02;
R1.at<double>(0, 2) = (b1 * rtilde10 - b0 * rtilde11) * rv00 + (b0 * rtilde01 - b1 * rtilde00) * rv01 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv02;
R1.at<double>(1, 0) = (rtilde00)*rv10 + (rtilde10)*rv11 + (b0)*rv12;
R1.at<double>(1, 1) = (rtilde01)*rv10 + (rtilde11)*rv11 + (b1)*rv12;
R1.at<double>(1, 2) = (b1 * rtilde10 - b0 * rtilde11) * rv10 + (b0 * rtilde01 - b1 * rtilde00) * rv11 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv12;
R1.at<double>(2, 0) = (rtilde00)*rv20 + (rtilde10)*rv21 + (b0)*rv22;
R1.at<double>(2, 1) = (rtilde01)*rv20 + (rtilde11)*rv21 + (b1)*rv22;
R1.at<double>(2, 2) = (b1 * rtilde10 - b0 * rtilde11) * rv20 + (b0 * rtilde01 - b1 * rtilde00) * rv21 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv22;
R2.at<double>(0, 0) = (rtilde00)*rv00 + (rtilde10)*rv01 + (-b0) * rv02;
R2.at<double>(0, 1) = (rtilde01)*rv00 + (rtilde11)*rv01 + (-b1) * rv02;
R2.at<double>(0, 2) = (b0 * rtilde11 - b1 * rtilde10) * rv00 + (b1 * rtilde00 - b0 * rtilde01) * rv01 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv02;
R2.at<double>(1, 0) = (rtilde00)*rv10 + (rtilde10)*rv11 + (-b0) * rv12;
R2.at<double>(1, 1) = (rtilde01)*rv10 + (rtilde11)*rv11 + (-b1) * rv12;
R2.at<double>(1, 2) = (b0 * rtilde11 - b1 * rtilde10) * rv10 + (b1 * rtilde00 - b0 * rtilde01) * rv11 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv12;
R2.at<double>(2, 0) = (rtilde00)*rv20 + (rtilde10)*rv21 + (-b0) * rv22;
R2.at<double>(2, 1) = (rtilde01)*rv20 + (rtilde11)*rv21 + (-b1) * rv22;
R2.at<double>(2, 2) = (b0 * rtilde11 - b1 * rtilde10) * rv20 + (b1 * rtilde00 - b0 * rtilde01) * rv21 + (rtilde00 * rtilde11 - rtilde01 * rtilde10) * rv22;
}
void PoseSolver::homographyFromSquarePoints(InputArray _targetPoints, double halfLength, OutputArray H_)
{
CV_CheckType(_targetPoints.type(), _targetPoints.type() == CV_32FC2 || _targetPoints.type() == CV_64FC2,
"Type of _targetPoints must be CV_32FC2 or CV_64FC2" );
Mat pts = _targetPoints.getMat();
double p1x, p1y;
double p2x, p2y;
double p3x, p3y;
double p4x, p4y;
if (_targetPoints.type() == CV_32FC2)
{
p1x = -pts.at<Vec2f>(0)(0);
p1y = -pts.at<Vec2f>(0)(1);
p2x = -pts.at<Vec2f>(1)(0);
p2y = -pts.at<Vec2f>(1)(1);
p3x = -pts.at<Vec2f>(2)(0);
p3y = -pts.at<Vec2f>(2)(1);
p4x = -pts.at<Vec2f>(3)(0);
p4y = -pts.at<Vec2f>(3)(1);
}
else
{
p1x = -pts.at<Vec2d>(0)(0);
p1y = -pts.at<Vec2d>(0)(1);
p2x = -pts.at<Vec2d>(1)(0);
p2y = -pts.at<Vec2d>(1)(1);
p3x = -pts.at<Vec2d>(2)(0);
p3y = -pts.at<Vec2d>(2)(1);
p4x = -pts.at<Vec2d>(3)(0);
p4y = -pts.at<Vec2d>(3)(1);
}
//analytic solution:
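//(the DLT system for the four canonical corner correspondences admits a closed form; h22 is fixed to 1)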
double det = (halfLength * (p1x * p2y - p2x * p1y - p1x * p4y + p2x * p3y - p3x * p2y + p4x * p1y + p3x * p4y - p4x * p3y));
if (abs(det) < 1e-9)
CV_Error(Error::StsNoConv, "Determinant is zero!");
double detsInv = -1 / det;
Matx33d H;
H(0, 0) = detsInv * (p1x * p3x * p2y - p2x * p3x * p1y - p1x * p4x * p2y + p2x * p4x * p1y - p1x * p3x * p4y + p1x * p4x * p3y + p2x * p3x * p4y - p2x * p4x * p3y);
H(0, 1) = detsInv * (p1x * p2x * p3y - p1x * p3x * p2y - p1x * p2x * p4y + p2x * p4x * p1y + p1x * p3x * p4y - p3x * p4x * p1y - p2x * p4x * p3y + p3x * p4x * p2y);
H(0, 2) = detsInv * halfLength * (p1x * p2x * p3y - p2x * p3x * p1y - p1x * p2x * p4y + p1x * p4x * p2y - p1x * p4x * p3y + p3x * p4x * p1y + p2x * p3x * p4y - p3x * p4x * p2y);
H(1, 0) = detsInv * (p1x * p2y * p3y - p2x * p1y * p3y - p1x * p2y * p4y + p2x * p1y * p4y - p3x * p1y * p4y + p4x * p1y * p3y + p3x * p2y * p4y - p4x * p2y * p3y);
H(1, 1) = detsInv * (p2x * p1y * p3y - p3x * p1y * p2y - p1x * p2y * p4y + p4x * p1y * p2y + p1x * p3y * p4y - p4x * p1y * p3y - p2x * p3y * p4y + p3x * p2y * p4y);
H(1, 2) = detsInv * halfLength * (p1x * p2y * p3y - p3x * p1y * p2y - p2x * p1y * p4y + p4x * p1y * p2y - p1x * p3y * p4y + p3x * p1y * p4y + p2x * p3y * p4y - p4x * p2y * p3y);
H(2, 0) = -detsInv * (p1x * p3y - p3x * p1y - p1x * p4y - p2x * p3y + p3x * p2y + p4x * p1y + p2x * p4y - p4x * p2y);
H(2, 1) = detsInv * (p1x * p2y - p2x * p1y - p1x * p3y + p3x * p1y + p2x * p4y - p4x * p2y - p3x * p4y + p4x * p3y);
H(2, 2) = 1.0;
Mat(H, false).copyTo(H_);
}
void PoseSolver::makeCanonicalObjectPoints(InputArray _objectPoints, OutputArray _canonicalObjPoints, OutputArray _MmodelPoints2Canonical)
{
int objType = _objectPoints.type();
CV_CheckType(objType, objType == CV_32FC3 || objType == CV_64FC3,
"Type of _objectPoints must be CV_32FC3 or CV_64FC3" );
int n = _objectPoints.rows() * _objectPoints.cols();
_canonicalObjPoints.create(1, n, CV_64FC2);
Mat objectPoints = _objectPoints.getMat();
Mat canonicalObjPoints = _canonicalObjPoints.getMat();
Mat UZero(3, n, CV_64FC1);
double xBar = 0;
double yBar = 0;
double zBar = 0;
bool isOnZPlane = true;
for (int i = 0; i < n; i++)
{
double x, y, z;
if (objType == CV_32FC3)
{
x = static_cast<double>(objectPoints.at<Vec3f>(i)[0]);
y = static_cast<double>(objectPoints.at<Vec3f>(i)[1]);
z = static_cast<double>(objectPoints.at<Vec3f>(i)[2]);
}
else
{
x = objectPoints.at<Vec3d>(i)[0];
y = objectPoints.at<Vec3d>(i)[1];
z = objectPoints.at<Vec3d>(i)[2];
}
if (abs(z) > IPPE_SMALL)
{
isOnZPlane = false;
}
xBar += x;
yBar += y;
zBar += z;
UZero.at<double>(0, i) = x;
UZero.at<double>(1, i) = y;
UZero.at<double>(2, i) = z;
}
xBar = xBar / static_cast<double>(n);
yBar = yBar / static_cast<double>(n);
zBar = zBar / static_cast<double>(n);
for (int i = 0; i < n; i++)
{
UZero.at<double>(0, i) -= xBar;
UZero.at<double>(1, i) -= yBar;
UZero.at<double>(2, i) -= zBar;
}
Matx44d MCenter = Matx44d::eye();
MCenter(0, 3) = -xBar;
MCenter(1, 3) = -yBar;
MCenter(2, 3) = -zBar;
if (isOnZPlane)
{
//MmodelPoints2Canonical is given by MCenter
Mat(MCenter, false).copyTo(_MmodelPoints2Canonical);
for (int i = 0; i < n; i++)
{
canonicalObjPoints.at<Vec2d>(i)[0] = UZero.at<double>(0, i);
canonicalObjPoints.at<Vec2d>(i)[1] = UZero.at<double>(1, i);
}
}
else
{
Mat UZeroAligned(3, n, CV_64FC1);
Matx33d R; //rotation that rotates objectPoints to the plane z=0
if (!computeObjextSpaceR3Pts(objectPoints,R))
{
//we could not compute R, probably because there is a duplicate point in {objectPoints(0),objectPoints(1),objectPoints(2)}.
//So we compute it with the SVD (which is slower):
computeObjextSpaceRSvD(UZero,R);
}
UZeroAligned = R * UZero;
for (int i = 0; i < n; i++)
{
canonicalObjPoints.at<Vec2d>(i)[0] = UZeroAligned.at<double>(0, i);
canonicalObjPoints.at<Vec2d>(i)[1] = UZeroAligned.at<double>(1, i);
if (abs(UZeroAligned.at<double>(2, i)) > IPPE_SMALL)
CV_Error(Error::StsNoConv, "Cannot transform object points to the plane z=0!");
}
Matx44d MRot = Matx44d::zeros();
MRot(3, 3) = 1;
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
MRot(i,j) = R(i,j);
}
}
Matx44d Mb = MRot * MCenter;
Mat(Mb, false).copyTo(_MmodelPoints2Canonical);
}
}
void PoseSolver::evalReprojError(InputArray _objectPoints, InputArray _imagePoints, InputArray _M, float& err)
{
Mat projectedPoints;
Mat imagePoints = _imagePoints.getMat();
Mat r;
rot2vec(_M.getMat().colRange(0, 3).rowRange(0, 3), r);
Mat K = Mat::eye(3, 3, CV_64FC1);
Mat dist;
projectPoints(_objectPoints, r, _M.getMat().colRange(3, 4).rowRange(0, 3), K, dist, projectedPoints);
err = 0;
int n = _objectPoints.rows() * _objectPoints.cols();
float dx, dy;
const int projPtsDepth = projectedPoints.depth();
for (int i = 0; i < n; i++)
{
if (projPtsDepth == CV_32F)
{
dx = projectedPoints.at<Vec2f>(i)[0] - static_cast<float>(imagePoints.at<Vec2d>(i)[0]);
dy = projectedPoints.at<Vec2f>(i)[1] - static_cast<float>(imagePoints.at<Vec2d>(i)[1]);
}
else
{
dx = static_cast<float>(projectedPoints.at<Vec2d>(i)[0] - imagePoints.at<Vec2d>(i)[0]);
dy = static_cast<float>(projectedPoints.at<Vec2d>(i)[1] - imagePoints.at<Vec2d>(i)[1]);
}
err += dx * dx + dy * dy;
}
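//RMS over all 2n residual components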
err = sqrt(err / (2.0f * n));
}
void PoseSolver::sortPosesByReprojError(InputArray _objectPoints, InputArray _imagePoints, InputArray _Ma, InputArray _Mb,
OutputArray _M1, OutputArray _M2, float& err1, float& err2)
{
float erra, errb;
evalReprojError(_objectPoints, _imagePoints, _Ma, erra);
evalReprojError(_objectPoints, _imagePoints, _Mb, errb);
if (erra < errb)
{
err1 = erra;
_Ma.copyTo(_M1);
err2 = errb;
_Mb.copyTo(_M2);
}
else
{
err1 = errb;
_Mb.copyTo(_M1);
err2 = erra;
_Ma.copyTo(_M2);
}
}
void PoseSolver::rotateVec2ZAxis(const Matx31d& a, Matx33d& Ra)
{
double ax = a(0);
double ay = a(1);
double az = a(2);
double nrm = sqrt(ax*ax + ay*ay + az*az);
ax = ax/nrm;
ay = ay/nrm;
az = az/nrm;
double c = az;
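//c is the cosine of the angle between a and the z axis; the general branch below is
//Rodrigues' rotation formula about the axis a x z, while the special case handles
//a pointing (nearly) along -z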
if (abs(1.0+c) < std::numeric_limits<float>::epsilon())
{
Ra = Matx33d::zeros();
//a points along -z: rotate by pi about the x axis (Ra must be a proper rotation, det(Ra) = +1)
Ra(0,0) = 1.0;
Ra(1,1) = -1.0;
Ra(2,2) = -1.0;
}
else
{
double d = 1.0/(1.0+c);
double ax2 = ax*ax;
double ay2 = ay*ay;
double axay = ax*ay;
Ra(0,0) = -ax2*d + 1.0;
Ra(0,1) = -axay*d;
Ra(0,2) = -ax;
Ra(1,0) = -axay*d;
Ra(1,1) = -ay2*d + 1.0;
Ra(1,2) = -ay;
Ra(2,0) = ax;
Ra(2,1) = ay;
Ra(2,2) = 1.0 - (ax2 + ay2)*d;
}
}
bool PoseSolver::computeObjextSpaceR3Pts(InputArray _objectPoints, Matx33d& R)
{
bool ret; //return argument
double p1x,p1y,p1z;
double p2x,p2y,p2z;
double p3x,p3y,p3z;
Mat objectPoints = _objectPoints.getMat();
if (objectPoints.type() == CV_32FC3)
{
p1x = objectPoints.at<Vec3f>(0)[0];
p1y = objectPoints.at<Vec3f>(0)[1];
p1z = objectPoints.at<Vec3f>(0)[2];
p2x = objectPoints.at<Vec3f>(1)[0];
p2y = objectPoints.at<Vec3f>(1)[1];
p2z = objectPoints.at<Vec3f>(1)[2];
p3x = objectPoints.at<Vec3f>(2)[0];
p3y = objectPoints.at<Vec3f>(2)[1];
p3z = objectPoints.at<Vec3f>(2)[2];
}
else
{
p1x = objectPoints.at<Vec3d>(0)[0];
p1y = objectPoints.at<Vec3d>(0)[1];
p1z = objectPoints.at<Vec3d>(0)[2];
p2x = objectPoints.at<Vec3d>(1)[0];
p2y = objectPoints.at<Vec3d>(1)[1];
p2z = objectPoints.at<Vec3d>(1)[2];
p3x = objectPoints.at<Vec3d>(2)[0];
p3y = objectPoints.at<Vec3d>(2)[1];
p3z = objectPoints.at<Vec3d>(2)[2];
}
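//plane normal n = (p1 - p2) x (p1 - p3); if the three points are (nearly) collinear
//its norm vanishes and the caller falls back to the SVD method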
double nx = (p1y - p2y)*(p1z - p3z) - (p1y - p3y)*(p1z - p2z);
double ny = (p1x - p3x)*(p1z - p2z) - (p1x - p2x)*(p1z - p3z);
double nz = (p1x - p2x)*(p1y - p3y) - (p1x - p3x)*(p1y - p2y);
double nrm = sqrt(nx*nx+ ny*ny + nz*nz);
if (nrm > IPPE_SMALL)
{
nx = nx/nrm;
ny = ny/nrm;
nz = nz/nrm;
Matx31d v(nx, ny, nz);
rotateVec2ZAxis(v,R);
ret = true;
}
else
{
ret = false;
}
return ret;
}
void PoseSolver::computeObjextSpaceRSvD(InputArray _objectPointsZeroMean, OutputArray _R)
{
_R.create(3, 3, CV_64FC1);
Mat R = _R.getMat();
//we could not compute R with the first three points, so let's use the SVD
SVD s;
Mat W, U, VT;
s.compute(_objectPointsZeroMean.getMat() * _objectPointsZeroMean.getMat().t(), W, U, VT);
double s3 = W.at<double>(2);
double s2 = W.at<double>(1);
//check if points are coplanar:
CV_Assert(s3 / s2 < IPPE_SMALL);
R = U.t();
if (determinant(R) < 0)
{
//this ensures R is a rotation matrix and not a general unitary matrix:
R.at<double>(2, 0) = -R.at<double>(2, 0);
R.at<double>(2, 1) = -R.at<double>(2, 1);
R.at<double>(2, 2) = -R.at<double>(2, 2);
}
}
} //namespace IPPE
namespace HomographyHO {
void normalizeDataIsotropic(InputArray _Data, OutputArray _DataN, OutputArray _T, OutputArray _Ti)
{
Mat Data = _Data.getMat();
int numPoints = Data.rows * Data.cols;
CV_Assert(Data.rows == 1 || Data.cols == 1);
CV_Assert(Data.channels() == 2 || Data.channels() == 3);
CV_Assert(numPoints >= 4);
int dataType = _Data.type();
CV_CheckType(dataType, dataType == CV_32FC2 || dataType == CV_32FC3 || dataType == CV_64FC2 || dataType == CV_64FC3,
"Type of _Data must be one of CV_32FC2, CV_32FC3, CV_64FC2, CV_64FC3");
_DataN.create(2, numPoints, CV_64FC1);
_T.create(3, 3, CV_64FC1);
_Ti.create(3, 3, CV_64FC1);
Mat DataN = _DataN.getMat();
Mat T = _T.getMat();
Mat Ti = _Ti.getMat();
_T.setTo(0);
_Ti.setTo(0);
int numChannels = Data.channels();
double xm = 0;
double ym = 0;
for (int i = 0; i < numPoints; i++)
{
if (numChannels == 2)
{
if (dataType == CV_32FC2)
{
xm = xm + Data.at<Vec2f>(i)[0];
ym = ym + Data.at<Vec2f>(i)[1];
}
else
{
xm = xm + Data.at<Vec2d>(i)[0];
ym = ym + Data.at<Vec2d>(i)[1];
}
}
else
{
if (dataType == CV_32FC3)
{
xm = xm + Data.at<Vec3f>(i)[0];
ym = ym + Data.at<Vec3f>(i)[1];
}
else
{
xm = xm + Data.at<Vec3d>(i)[0];
ym = ym + Data.at<Vec3d>(i)[1];
}
}
}
xm = xm / static_cast<double>(numPoints);
ym = ym / static_cast<double>(numPoints);
double kappa = 0;
double xh, yh;
for (int i = 0; i < numPoints; i++)
{
if (numChannels == 2)
{
if (dataType == CV_32FC2)
{
xh = Data.at<Vec2f>(i)[0] - xm;
yh = Data.at<Vec2f>(i)[1] - ym;
}
else
{
xh = Data.at<Vec2d>(i)[0] - xm;
yh = Data.at<Vec2d>(i)[1] - ym;
}
}
else
{
if (dataType == CV_32FC3)
{
xh = Data.at<Vec3f>(i)[0] - xm;
yh = Data.at<Vec3f>(i)[1] - ym;
}
else
{
xh = Data.at<Vec3d>(i)[0] - xm;
yh = Data.at<Vec3d>(i)[1] - ym;
}
}
DataN.at<double>(0, i) = xh;
DataN.at<double>(1, i) = yh;
kappa = kappa + xh * xh + yh * yh;
}
double beta = sqrt(2 * numPoints / kappa);
DataN = DataN * beta;
T.at<double>(0, 0) = 1.0 / beta;
T.at<double>(1, 1) = 1.0 / beta;
T.at<double>(0, 2) = xm;
T.at<double>(1, 2) = ym;
T.at<double>(2, 2) = 1;
Ti.at<double>(0, 0) = beta;
Ti.at<double>(1, 1) = beta;
Ti.at<double>(0, 2) = -beta * xm;
Ti.at<double>(1, 2) = -beta * ym;
Ti.at<double>(2, 2) = 1;
}
void homographyHO(InputArray _srcPoints, InputArray _targPoints, Matx33d& H)
{
Mat DataA, DataB, TA, TAi, TB, TBi;
HomographyHO::normalizeDataIsotropic(_srcPoints, DataA, TA, TAi);
HomographyHO::normalizeDataIsotropic(_targPoints, DataB, TB, TBi);
int n = DataA.cols;
CV_Assert(n == DataB.cols);
Mat C1(1, n, CV_64FC1);
Mat C2(1, n, CV_64FC1);
Mat C3(1, n, CV_64FC1);
Mat C4(1, n, CV_64FC1);
double mC1 = 0, mC2 = 0, mC3 = 0, mC4 = 0;
for (int i = 0; i < n; i++)
{
C1.at<double>(0, i) = -DataB.at<double>(0, i) * DataA.at<double>(0, i);
C2.at<double>(0, i) = -DataB.at<double>(0, i) * DataA.at<double>(1, i);
C3.at<double>(0, i) = -DataB.at<double>(1, i) * DataA.at<double>(0, i);
C4.at<double>(0, i) = -DataB.at<double>(1, i) * DataA.at<double>(1, i);
mC1 += C1.at<double>(0, i);
mC2 += C2.at<double>(0, i);
mC3 += C3.at<double>(0, i);
mC4 += C4.at<double>(0, i);
}
mC1 /= n;
mC2 /= n;
mC3 /= n;
mC4 /= n;
Mat Mx(n, 3, CV_64FC1);
Mat My(n, 3, CV_64FC1);
for (int i = 0; i < n; i++)
{
Mx.at<double>(i, 0) = C1.at<double>(0, i) - mC1;
Mx.at<double>(i, 1) = C2.at<double>(0, i) - mC2;
Mx.at<double>(i, 2) = -DataB.at<double>(0, i);
My.at<double>(i, 0) = C3.at<double>(0, i) - mC3;
My.at<double>(i, 1) = C4.at<double>(0, i) - mC4;
My.at<double>(i, 2) = -DataB.at<double>(1, i);
}
Mat DataAT, DataADataAT;
transpose(DataA, DataAT);
DataADataAT = DataA * DataAT;
double dt = DataADataAT.at<double>(0, 0) * DataADataAT.at<double>(1, 1) - DataADataAT.at<double>(0, 1) * DataADataAT.at<double>(1, 0);
Mat DataADataATi(2, 2, CV_64FC1);
DataADataATi.at<double>(0, 0) = DataADataAT.at<double>(1, 1) / dt;
DataADataATi.at<double>(0, 1) = -DataADataAT.at<double>(0, 1) / dt;
DataADataATi.at<double>(1, 0) = -DataADataAT.at<double>(1, 0) / dt;
DataADataATi.at<double>(1, 1) = DataADataAT.at<double>(0, 0) / dt;
Mat Pp = DataADataATi * DataA;
Mat Bx = Pp * Mx;
Mat By = Pp * My;
Mat Ex = DataAT * Bx;
Mat Ey = DataAT * By;
Mat D(2 * n, 3, CV_64FC1);
for (int i = 0; i < n; i++)
{
D.at<double>(i, 0) = Mx.at<double>(i, 0) - Ex.at<double>(i, 0);
D.at<double>(i, 1) = Mx.at<double>(i, 1) - Ex.at<double>(i, 1);
D.at<double>(i, 2) = Mx.at<double>(i, 2) - Ex.at<double>(i, 2);
D.at<double>(i + n, 0) = My.at<double>(i, 0) - Ey.at<double>(i, 0);
D.at<double>(i + n, 1) = My.at<double>(i, 1) - Ey.at<double>(i, 1);
D.at<double>(i + n, 2) = My.at<double>(i, 2) - Ey.at<double>(i, 2);
}
Mat DT, DDT;
transpose(D, DT);
DDT = DT * D;
Mat S, U;
eigen(DDT, S, U);
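//cv::eigen returns eigenvalues in descending order with eigenvectors as rows,
//so row 2 of U is the eigenvector of D^T * D with the smallest eigenvalue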
Mat h789(3, 1, CV_64FC1);
h789.at<double>(0, 0) = U.at<double>(2, 0);
h789.at<double>(1, 0) = U.at<double>(2, 1);
h789.at<double>(2, 0) = U.at<double>(2, 2);
Mat h12 = -Bx * h789;
Mat h45 = -By * h789;
double h3 = -(mC1 * h789.at<double>(0, 0) + mC2 * h789.at<double>(1, 0));
double h6 = -(mC3 * h789.at<double>(0, 0) + mC4 * h789.at<double>(1, 0));
H(0, 0) = h12.at<double>(0, 0);
H(0, 1) = h12.at<double>(1, 0);
H(0, 2) = h3;
H(1, 0) = h45.at<double>(0, 0);
H(1, 1) = h45.at<double>(1, 0);
H(1, 2) = h6;
H(2, 0) = h789.at<double>(0, 0);
H(2, 1) = h789.at<double>(1, 0);
H(2, 2) = h789.at<double>(2, 0);
H = Mat(TB * H * TAi);
double h22_inv = 1 / H(2, 2);
H = H * h22_inv;
}
}
} //namespace cv
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// This file is based on file issued with the following license:
/*============================================================================
Copyright 2017 Toby Collins
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef OPENCV_CALIB3D_IPPE_HPP
#define OPENCV_CALIB3D_IPPE_HPP
#include <opencv2/core.hpp>
namespace cv {
namespace IPPE {
class PoseSolver {
public:
/**
* @brief PoseSolver constructor
*/
PoseSolver();
/**
* @brief Finds the two possible poses of a planar object given a set of correspondences, together with their respective reprojection errors.
* The poses are sorted so that the first one has the lowest reprojection error.
* @param objectPoints Array of 4 or more coplanar object points defined in object coordinates.
* 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param imagePoints Array of corresponding image points, 1xN/Nx1 2-channel. Points are in normalized pixel coordinates.
* @param rvec1 First rotation solution (3x1 rotation vector)
* @param tvec1 First translation solution (3x1 vector)
* @param reprojErr1 Reprojection error of first solution
* @param rvec2 Second rotation solution (3x1 rotation vector)
* @param tvec2 Second translation solution (3x1 vector)
* @param reprojErr2 Reprojection error of second solution
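*
* A minimal usage sketch (the point values are illustrative and assumed to be filled in by the caller):
* @code
* std::vector<cv::Point3d> objectPts;      // 4 or more coplanar points
* std::vector<cv::Point2d> normalizedPts;  // matching points, undistorted and normalized
* cv::Mat rvec1, tvec1, rvec2, tvec2;
* float err1, err2;
* cv::IPPE::PoseSolver solver;
* solver.solveGeneric(objectPts, normalizedPts, rvec1, tvec1, err1, rvec2, tvec2, err2);
* @endcode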
*/
void solveGeneric(InputArray objectPoints, InputArray imagePoints, OutputArray rvec1, OutputArray tvec1,
float& reprojErr1, OutputArray rvec2, OutputArray tvec2, float& reprojErr2);
/**
* @brief Finds the two possible poses of a square planar object and their respective reprojection errors using IPPE.
* The poses are sorted so that the first one is the one with the lowest reprojection error.
*
* @param objectPoints Array of 4 coplanar object points defined in the following object coordinates:
* - point 0: [-squareLength / 2.0, squareLength / 2.0, 0]
* - point 1: [squareLength / 2.0, squareLength / 2.0, 0]
* - point 2: [squareLength / 2.0, -squareLength / 2.0, 0]
* - point 3: [-squareLength / 2.0, -squareLength / 2.0, 0]
* 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param imagePoints Array of corresponding image points, 1xN/Nx1 2-channel. Points are in normalized pixel coordinates.
* @param rvec1 First rotation solution (3x1 rotation vector)
* @param tvec1 First translation solution (3x1 vector)
* @param reprojErr1 Reprojection error of first solution
* @param rvec2 Second rotation solution (3x1 rotation vector)
* @param tvec2 Second translation solution (3x1 vector)
* @param reprojErr2 Reprojection error of second solution
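*
* A usage sketch with the corner ordering assumed above (the squareLength value is illustrative):
* @code
* double squareLength = 0.1;
* std::vector<cv::Point3d> objectPts = {
*     {-squareLength / 2,  squareLength / 2, 0}, { squareLength / 2,  squareLength / 2, 0},
*     { squareLength / 2, -squareLength / 2, 0}, {-squareLength / 2, -squareLength / 2, 0}};
* std::vector<cv::Point2d> normalizedPts;  // the 4 detected corners, undistorted and normalized
* cv::Mat rvec1, tvec1, rvec2, tvec2;
* float err1, err2;
* cv::IPPE::PoseSolver solver;
* solver.solveSquare(objectPts, normalizedPts, rvec1, tvec1, err1, rvec2, tvec2, err2);
* @endcode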
*/
void solveSquare(InputArray objectPoints, InputArray imagePoints, OutputArray rvec1, OutputArray tvec1,
float& reprojErr1, OutputArray rvec2, OutputArray tvec2, float& reprojErr2);
private:
/**
* @brief Finds the two possible poses of a planar object given a set of correspondences in normalized pixel coordinates.
* These poses are **NOT** sorted on reprojection error. Note that the returned poses are object-to-camera transforms, and not camera-to-object transforms.
* @param objectPoints Array of 4 or more coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double).
* @param normalizedImagePoints Array of corresponding image points in normalized pixel coordinates, 1xN/Nx1 2-channel (float or double).
* @param Ma First pose solution (unsorted)
* @param Mb Second pose solution (unsorted)
*/
void solveGeneric(InputArray objectPoints, InputArray normalizedImagePoints, OutputArray Ma, OutputArray Mb);
/**
* @brief Finds the two possible poses of a planar object in its canonical position, given a set of correspondences in normalized pixel coordinates.
* These poses are **NOT** sorted on reprojection error. Note that the returned poses are object-to-camera transforms, and not camera-to-object transforms.
* @param canonicalObjPoints Array of 4 or more coplanar object points in canonical coordinates (zero mean, on the plane z=0). 1xN/Nx1 2-channel (double) where N is the number of points
* @param normalizedInputPoints Array of corresponding image points in normalized pixel coordinates, 1xN/Nx1 2-channel (double) where N is the number of points
* @param H Homography mapping canonicalObjPoints to normalizedInputPoints.
* @param Ma First pose solution (4x4 1-channel, unsorted)
* @param Mb Second pose solution (4x4 1-channel, unsorted)
*/
void solveCanonicalForm(InputArray canonicalObjPoints, InputArray normalizedInputPoints, const Matx33d& H,
OutputArray Ma, OutputArray Mb);
/**
* @brief Computes the translation solution for a given rotation solution
* @param objectPoints Array of corresponding object points in canonical coordinates, 1xN/Nx1 2-channel (double) where N is the number of points
* @param normalizedImgPoints Array of corresponding image points (undistorted and normalized), 1xN/Nx1 2-channel (double) where N is the number of points
* @param R Rotation solution (3x3 rotation matrix)
* @param t Translation solution (3x1 translation vector)
*/
void computeTranslation(InputArray objectPoints, InputArray normalizedImgPoints, InputArray R, OutputArray t);
/**
* @brief Computes the two rotation solutions from the Jacobian of a homography matrix H at a point (ux,uy) on the object plane.
* For highest accuracy the Jacobian should be computed at the centroid of the point correspondences (see the IPPE paper for the explanation of this).
* For a point (ux,uy) on the object plane, suppose the homography H maps (ux,uy) to a point (p,q) in the image (in normalized pixel coordinates).
* The Jacobian matrix [J00, J01; J10,J11] is the Jacobian of the mapping evaluated at (ux,uy).
* @param j00 Homography Jacobian coefficient at (ux,uy)
* @param j01 Homography Jacobian coefficient at (ux,uy)
* @param j10 Homography Jacobian coefficient at (ux,uy)
* @param j11 Homography Jacobian coefficient at (ux,uy)
* @param p The x coordinate of point (ux,uy) mapped into the image (undistorted and normalized position)
* @param q The y coordinate of point (ux,uy) mapped into the image (undistorted and normalized position)
* @param _R1 First rotation solution (3x3 rotation matrix)
* @param _R2 Second rotation solution (3x3 rotation matrix)
*/
void computeRotations(double j00, double j01, double j10, double j11, double p, double q, OutputArray _R1, OutputArray _R2);
/**
* @brief Closed-form solution for the homography mapping with four corner correspondences of a square (it maps source points to target points).
* The source points are the four corners of a zero-centred square defined by:
* - point 0: [-squareLength / 2.0, squareLength / 2.0]
* - point 1: [squareLength / 2.0, squareLength / 2.0]
* - point 2: [squareLength / 2.0, -squareLength / 2.0]
* - point 3: [-squareLength / 2.0, -squareLength / 2.0]
*
* @param targetPoints Array of four corresponding target points, 1x4/4x1 2-channel. Note that the points should be ordered to correspond with points 0, 1, 2 and 3.
* @param halfLength The square's half length (i.e. squareLength/2.0)
* @param H Homography mapping the source points to the target points, 3x3 single channel
*/
void homographyFromSquarePoints(InputArray targetPoints, double halfLength, OutputArray H);
/**
* @brief Fast conversion from a rotation matrix to a rotation vector using Rodrigues' formula
* @param R Input rotation matrix, 3x3 1-channel (double)
* @param r Output rotation vector, 3x1/1x3 1-channel (double)
*/
void rot2vec(InputArray R, OutputArray r);
/**
* @brief Takes a set of planar object points and transforms them to 'canonical' object coordinates, i.e. zero mean and lying on the plane z=0
* @param objectPoints Array of 4 or more coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param canonicalObjectPoints Object points in canonical coordinates 1xN/Nx1 2-channel (double)
* @param MobjectPoints2Canonical Transform matrix mapping objectPoints to canonicalObjectPoints: 4x4 1-channel (double)
*/
void makeCanonicalObjectPoints(InputArray objectPoints, OutputArray canonicalObjectPoints, OutputArray MobjectPoints2Canonical);
/**
* @brief Evaluates the Root Mean Squared (RMS) reprojection error of a pose solution.
* @param objectPoints Array of 4 or more coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param imagePoints Array of corresponding image points, 1xN/Nx1 2-channel. Points must be in normalized pixel coordinates (an identity camera matrix is assumed when reprojecting).
* @param M Pose matrix from 3D object to camera coordinates: 4x4 1-channel (double)
* @param err RMS reprojection error
*/
void evalReprojError(InputArray objectPoints, InputArray imagePoints, InputArray M, float& err);
/**
* @brief Sorts two pose solutions according to their RMS reprojection error (lowest first).
* @param objectPoints Array of 4 or more coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param imagePoints Array of corresponding image points, 1xN/Nx1 2-channel. Points must be in normalized pixel coordinates (an identity camera matrix is assumed when reprojecting).
* @param Ma Pose matrix 1: 4x4 1-channel
* @param Mb Pose matrix 2: 4x4 1-channel
* @param M1 Member of {Ma, Mb} with the lowest RMS reprojection error. Performs a deep copy.
* @param M2 Member of {Ma, Mb} with the highest RMS reprojection error. Performs a deep copy.
* @param err1 RMS reprojection error of M1
* @param err2 RMS reprojection error of M2
*/
void sortPosesByReprojError(InputArray objectPoints, InputArray imagePoints, InputArray Ma, InputArray Mb, OutputArray M1, OutputArray M2, float& err1, float& err2);
/**
* @brief Finds the rotation Ra that rotates a vector a to the z axis (0,0,1)
* @param a Vector: 3x1 mat (double)
* @param Ra Rotation: 3x3 mat (double)
*/
void rotateVec2ZAxis(const Matx31d& a, Matx33d& Ra);
/**
* @brief Computes the rotation _R that rotates the object points to the plane z=0. This uses the cross-product method with the first three object points.
* @param objectPoints Array of N>=3 coplanar object points defined in object coordinates. 1xN/Nx1 3-channel (float or double) where N is the number of points
* @param R Rotation Mat: 3x3 (double)
* @return Success (true) or failure (false)
*/
bool computeObjextSpaceR3Pts(InputArray objectPoints, Matx33d& R);
/**
* @brief Computes the rotation R that rotates the object points to the plane z=0, using the SVD of the zero-meaned object points. Used as a fallback when the cross-product method fails.
* @param objectPointsZeroMean Zero-meaned coplanar object points: 3xN matrix (double) where N>=3
* @param R Rotation Mat: 3x3 (double)
*/
void computeObjextSpaceRSvD(InputArray objectPointsZeroMean, OutputArray R);
/**
* @brief Generates the 4 object points of a square planar object
* @param squareLength The square's length (which is also its width) in object coordinate units (e.g. millimeters, meters, etc.)
* @param objectPoints Set of 4 object points (1x4 3-channel double)
*/
void generateSquareObjectCorners3D(double squareLength, OutputArray objectPoints);
/**
* @brief Generates the 4 object points of a square planar object, without including the z-component (which is z=0 for all points).
* @param squareLength The square's length (which is also its width) in object coordinate units (e.g. millimeters, meters, etc.)
* @param objectPoints Set of 4 object points (1x4 2-channel double)
*/
void generateSquareObjectCorners2D(double squareLength, OutputArray objectPoints);
/**
* @brief Computes the average depth of an object given its pose in camera coordinates
* @param objectPoints Object points defined in 3D object space
* @param rvec Rotation component of the pose
* @param tvec Translation component of the pose
* @return Average depth of the object in camera coordinates
*/
double meanSceneDepth(InputArray objectPoints, InputArray rvec, InputArray tvec);
//! a small constant used to test 'small' values close to zero.
double IPPE_SMALL;
};
} //namespace IPPE
namespace HomographyHO {
/**
* @brief Computes the best-fitting homography matrix from source to target points using Harker and O'Leary's method:
* Harker, M., O'Leary, P., Computation of Homographies, Proceedings of the British Machine Vision Conference 2005, Oxford, England.
* This is not the author's implementation.
* @param srcPoints Array of source points: 1xN/Nx1 2-channel (float or double) where N is the number of points
* @param targPoints Array of target points: 1xN/Nx1 2-channel (float or double)
* @param H Homography from source to target: 3x3 1-channel (double)
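*
* A usage sketch (the correspondences are assumed to be provided by the caller):
* @code
* std::vector<cv::Point2d> src, dst; // 4 or more correspondences
* cv::Matx33d H;
* HomographyHO::homographyHO(src, dst, H); // H maps src onto dst
* @endcode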
*/
void homographyHO(InputArray srcPoints, InputArray targPoints, Matx33d& H);
/**
* @brief Performs data normalization before homography estimation. For details see Hartley, R., Zisserman, A., Multiple View Geometry in Computer Vision,
* Cambridge University Press, Cambridge, 2001
* @param Data Array of source data points: 1xN/Nx1 2-channel (float or double) where N is the number of points
* @param DataN Normalized data points: 2xN 1-channel (double) where N is the number of points
* @param T Homogeneous transform from source to normalized: 3x3 1-channel (double)
* @param Ti Homogeneous transform from normalized to source: 3x3 1-channel (double)
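* @note The points are translated so that their centroid is at the origin and scaled so that
* their RMS distance from it is sqrt(2), as recommended by Hartley and Zisserman.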
*/
void normalizeDataIsotropic(InputArray Data, OutputArray DataN, OutputArray T, OutputArray Ti);
}
} //namespace cv
#endif
...@@ -49,8 +49,10 @@ bool p3p::solve(cv::Mat& R, cv::Mat& tvec, const cv::Mat& opoints, const cv::Mat ...@@ -49,8 +49,10 @@ bool p3p::solve(cv::Mat& R, cv::Mat& tvec, const cv::Mat& opoints, const cv::Mat
else else
extract_points<cv::Point3d,cv::Point2f>(opoints, ipoints, points); extract_points<cv::Point3d,cv::Point2f>(opoints, ipoints, points);
bool result = solve(rotation_matrix, translation, points[0], points[1], points[2], points[3], points[4], points[5], bool result = solve(rotation_matrix, translation,
points[6], points[7], points[8], points[9], points[10], points[11], points[12], points[13], points[14], points[0], points[1], points[2], points[3], points[4],
points[5], points[6], points[7], points[8], points[9],
points[10], points[11], points[12], points[13], points[14],
points[15], points[16], points[17], points[18], points[19]); points[15], points[16], points[17], points[18], points[19]);
cv::Mat(3, 1, CV_64F, translation).copyTo(tvec); cv::Mat(3, 1, CV_64F, translation).copyTo(tvec);
cv::Mat(3, 3, CV_64F, rotation_matrix).copyTo(R); cv::Mat(3, 3, CV_64F, rotation_matrix).copyTo(R);
...@@ -75,10 +77,13 @@ int p3p::solve(std::vector<cv::Mat>& Rs, std::vector<cv::Mat>& tvecs, const cv:: ...@@ -75,10 +77,13 @@ int p3p::solve(std::vector<cv::Mat>& Rs, std::vector<cv::Mat>& tvecs, const cv::
else else
extract_points<cv::Point3d,cv::Point2f>(opoints, ipoints, points); extract_points<cv::Point3d,cv::Point2f>(opoints, ipoints, points);
const bool p4p = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F)) == 4;
int solutions = solve(rotation_matrix, translation, int solutions = solve(rotation_matrix, translation,
points[0], points[1], points[2], points[3], points[4], points[0], points[1], points[2], points[3], points[4],
points[5], points[6], points[7], points[8], points[9], points[5], points[6], points[7], points[8], points[9],
points[10], points[11], points[12], points[13], points[14]); points[10], points[11], points[12], points[13], points[14],
points[15], points[16], points[17], points[18], points[19],
p4p);
for (int i = 0; i < solutions; i++) { for (int i = 0; i < solutions; i++) {
cv::Mat R, tvec; cv::Mat R, tvec;
...@@ -100,30 +105,16 @@ bool p3p::solve(double R[3][3], double t[3], ...@@ -100,30 +105,16 @@ bool p3p::solve(double R[3][3], double t[3],
{ {
double Rs[4][3][3], ts[4][3]; double Rs[4][3][3], ts[4][3];
int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2); const bool p4p = true;
int n = solve(Rs, ts, mu0, mv0, X0, Y0, Z0, mu1, mv1, X1, Y1, Z1, mu2, mv2, X2, Y2, Z2, mu3, mv3, X3, Y3, Z3, p4p);
if (n == 0) if (n == 0)
return false; return false;
int ns = 0;
double min_reproj = 0;
for(int i = 0; i < n; i++) {
double X3p = Rs[i][0][0] * X3 + Rs[i][0][1] * Y3 + Rs[i][0][2] * Z3 + ts[i][0];
double Y3p = Rs[i][1][0] * X3 + Rs[i][1][1] * Y3 + Rs[i][1][2] * Z3 + ts[i][1];
double Z3p = Rs[i][2][0] * X3 + Rs[i][2][1] * Y3 + Rs[i][2][2] * Z3 + ts[i][2];
double mu3p = cx + fx * X3p / Z3p;
double mv3p = cy + fy * Y3p / Z3p;
double reproj = (mu3p - mu3) * (mu3p - mu3) + (mv3p - mv3) * (mv3p - mv3);
if (i == 0 || min_reproj > reproj) {
ns = i;
min_reproj = reproj;
}
}
for(int i = 0; i < 3; i++) { for(int i = 0; i < 3; i++) {
for(int j = 0; j < 3; j++) for(int j = 0; j < 3; j++)
R[i][j] = Rs[ns][i][j]; R[i][j] = Rs[0][i][j];
t[i] = ts[ns][i]; t[i] = ts[0][i];
} }
return true; return true;
...@@ -132,7 +123,9 @@ bool p3p::solve(double R[3][3], double t[3], ...@@ -132,7 +123,9 @@ bool p3p::solve(double R[3][3], double t[3],
int p3p::solve(double R[4][3][3], double t[4][3], int p3p::solve(double R[4][3][3], double t[4][3],
double mu0, double mv0, double X0, double Y0, double Z0, double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1, double mu1, double mv1, double X1, double Y1, double Z1,
double mu2, double mv2, double X2, double Y2, double Z2) double mu2, double mv2, double X2, double Y2, double Z2,
double mu3, double mv3, double X3, double Y3, double Z3,
bool p4p)
{ {
double mk0, mk1, mk2; double mk0, mk1, mk2;
double norm; double norm;
...@@ -152,6 +145,9 @@ int p3p::solve(double R[4][3][3], double t[4][3], ...@@ -152,6 +145,9 @@ int p3p::solve(double R[4][3][3], double t[4][3],
norm = sqrt(mu2 * mu2 + mv2 * mv2 + 1); norm = sqrt(mu2 * mu2 + mv2 * mv2 + 1);
mk2 = 1. / norm; mu2 *= mk2; mv2 *= mk2; mk2 = 1. / norm; mu2 *= mk2; mv2 *= mk2;
mu3 = inv_fx * mu3 - cx_fx;
mv3 = inv_fy * mv3 - cy_fy;
double distances[3]; double distances[3];
distances[0] = sqrt( (X1 - X2) * (X1 - X2) + (Y1 - Y2) * (Y1 - Y2) + (Z1 - Z2) * (Z1 - Z2) ); distances[0] = sqrt( (X1 - X2) * (X1 - X2) + (Y1 - Y2) * (Y1 - Y2) + (Z1 - Z2) * (Z1 - Z2) );
distances[1] = sqrt( (X0 - X2) * (X0 - X2) + (Y0 - Y2) * (Y0 - Y2) + (Z0 - Z2) * (Z0 - Z2) ); distances[1] = sqrt( (X0 - X2) * (X0 - X2) + (Y0 - Y2) * (Y0 - Y2) + (Z0 - Z2) * (Z0 - Z2) );
...@@ -167,6 +163,7 @@ int p3p::solve(double R[4][3][3], double t[4][3], ...@@ -167,6 +163,7 @@ int p3p::solve(double R[4][3][3], double t[4][3],
int n = solve_for_lengths(lengths, distances, cosines); int n = solve_for_lengths(lengths, distances, cosines);
int nb_solutions = 0; int nb_solutions = 0;
double reproj_errors[4];
for(int i = 0; i < n; i++) { for(int i = 0; i < n; i++) {
double M_orig[3][3]; double M_orig[3][3];
...@@ -185,9 +182,29 @@ int p3p::solve(double R[4][3][3], double t[4][3], ...@@ -185,9 +182,29 @@ int p3p::solve(double R[4][3][3], double t[4][3],
if (!align(M_orig, X0, Y0, Z0, X1, Y1, Z1, X2, Y2, Z2, R[nb_solutions], t[nb_solutions])) if (!align(M_orig, X0, Y0, Z0, X1, Y1, Z1, X2, Y2, Z2, R[nb_solutions], t[nb_solutions]))
continue; continue;
if (p4p) {
double X3p = R[nb_solutions][0][0] * X3 + R[nb_solutions][0][1] * Y3 + R[nb_solutions][0][2] * Z3 + t[nb_solutions][0];
double Y3p = R[nb_solutions][1][0] * X3 + R[nb_solutions][1][1] * Y3 + R[nb_solutions][1][2] * Z3 + t[nb_solutions][1];
double Z3p = R[nb_solutions][2][0] * X3 + R[nb_solutions][2][1] * Y3 + R[nb_solutions][2][2] * Z3 + t[nb_solutions][2];
double mu3p = X3p / Z3p;
double mv3p = Y3p / Z3p;
reproj_errors[nb_solutions] = (mu3p - mu3) * (mu3p - mu3) + (mv3p - mv3) * (mv3p - mv3);
}
nb_solutions++; nb_solutions++;
} }
if (p4p) {
//sort the solutions
for (int i = 1; i < nb_solutions; i++) {
for (int j = i; j > 0 && reproj_errors[j-1] > reproj_errors[j]; j--) {
std::swap(reproj_errors[j], reproj_errors[j-1]);
std::swap(R[j], R[j-1]);
std::swap(t[j], t[j-1]);
}
}
}
return nb_solutions; return nb_solutions;
} }
......
...@@ -15,7 +15,9 @@ class p3p ...@@ -15,7 +15,9 @@ class p3p
int solve(double R[4][3][3], double t[4][3], int solve(double R[4][3][3], double t[4][3],
double mu0, double mv0, double X0, double Y0, double Z0, double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1, double mu1, double mv1, double X1, double Y1, double Z1,
double mu2, double mv2, double X2, double Y2, double Z2); double mu2, double mv2, double X2, double Y2, double Z2,
double mu3, double mv3, double X3, double Y3, double Z3,
bool p4p);
bool solve(double R[3][3], double t[3], bool solve(double R[3][3], double t[3],
double mu0, double mv0, double X0, double Y0, double Z0, double mu0, double mv0, double X0, double Y0, double Z0,
double mu1, double mv1, double X1, double Y1, double Z1, double mu1, double mv1, double X1, double Y1, double Z1,
...@@ -36,7 +38,7 @@ class p3p ...@@ -36,7 +38,7 @@ class p3p
{ {
points.clear(); points.clear();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F)); int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
points.resize(5*npoints); points.resize(5*4); //resize vector to fit for p4p case
for(int i = 0; i < npoints; i++) for(int i = 0; i < npoints; i++)
{ {
points[i*5] = ipoints.at<IpointType>(i).x*fx + cx; points[i*5] = ipoints.at<IpointType>(i).x*fx + cx;
...@@ -45,6 +47,12 @@ class p3p ...@@ -45,6 +47,12 @@ class p3p
points[i*5+3] = opoints.at<OpointType>(i).y; points[i*5+3] = opoints.at<OpointType>(i).y;
points[i*5+4] = opoints.at<OpointType>(i).z; points[i*5+4] = opoints.at<OpointType>(i).z;
} }
//Fill vectors with unused values for p3p case
for (int i = npoints; i < 4; i++) {
for (int j = 0; j < 5; j++) {
points[i * 5 + j] = 0;
}
}
} }
void init_inverse_parameters(); void init_inverse_parameters();
int solve_for_lengths(double lengths[4][3], double distances[3], double cosines[3]); int solve_for_lengths(double lengths[4][3], double distances[3], double cosines[3]);
......
...@@ -46,12 +46,44 @@ ...@@ -46,12 +46,44 @@
#include "epnp.h" #include "epnp.h"
#include "p3p.h" #include "p3p.h"
#include "ap3p.h" #include "ap3p.h"
#include "ippe.hpp"
#include "opencv2/calib3d/calib3d_c.h" #include "opencv2/calib3d/calib3d_c.h"
#include <iostream>
namespace cv namespace cv
{ {
#if defined _DEBUG || defined CV_STATIC_ANALYSIS
static bool isPlanarObjectPoints(InputArray _objectPoints, double threshold)
{
CV_CheckType(_objectPoints.type(), _objectPoints.type() == CV_32FC3 || _objectPoints.type() == CV_64FC3,
"Type of _objectPoints must be CV_32FC3 or CV_64FC3");
Mat objectPoints;
if (_objectPoints.type() == CV_32FC3)
{
_objectPoints.getMat().convertTo(objectPoints, CV_64F);
}
else
{
objectPoints = _objectPoints.getMat();
}
Scalar meanValues = mean(objectPoints);
int nbPts = objectPoints.checkVector(3, CV_64F);
Mat objectPointsCentred = objectPoints - meanValues;
objectPointsCentred = objectPointsCentred.reshape(1, nbPts);
Mat w, u, vt;
Mat MM = objectPointsCentred.t() * objectPointsCentred;
SVDecomp(MM, w, u, vt);
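//planar iff the smallest singular value of the 3x3 scatter matrix is negligible
//compared with the middle one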
return (w.at<double>(2) < w.at<double>(1) * threshold);
}
static bool approxEqual(double a, double b, double eps)
{
return std::fabs(a-b) < eps;
}
#endif
void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs, void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs,
InputArray rvec, InputArray tvec, float length, int thickness) InputArray rvec, InputArray tvec, float length, int thickness)
{ {
...@@ -80,120 +112,24 @@ void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray d ...@@ -80,120 +112,24 @@ void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray d
line(image, imagePoints[0], imagePoints[3], Scalar(255, 0, 0), thickness); line(image, imagePoints[0], imagePoints[3], Scalar(255, 0, 0), thickness);
} }
bool solvePnP( InputArray _opoints, InputArray _ipoints, bool solvePnP( InputArray opoints, InputArray ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs, InputArray cameraMatrix, InputArray distCoeffs,
OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, int flags ) OutputArray rvec, OutputArray tvec, bool useExtrinsicGuess, int flags )
{ {
CV_INSTRUMENT_REGION(); CV_INSTRUMENT_REGION();
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat(); vector<Mat> rvecs, tvecs;
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F)); int solutions = solvePnPGeneric(opoints, ipoints, cameraMatrix, distCoeffs, rvecs, tvecs, useExtrinsicGuess, (SolvePnPMethod)flags, rvec, tvec);
CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
&& npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
Mat rvec, tvec;
if( flags != SOLVEPNP_ITERATIVE )
useExtrinsicGuess = false;
if( useExtrinsicGuess )
{
int rtype = _rvec.type(), ttype = _tvec.type();
Size rsize = _rvec.size(), tsize = _tvec.size();
CV_Assert( (rtype == CV_32F || rtype == CV_64F) &&
(ttype == CV_32F || ttype == CV_64F) );
CV_Assert( (rsize == Size(1, 3) || rsize == Size(3, 1)) &&
(tsize == Size(1, 3) || tsize == Size(3, 1)) );
}
else
{
int mtype = CV_64F;
// use CV_32F if all PnP inputs are CV_32F and outputs are empty
if (_ipoints.depth() == _cameraMatrix.depth() && _ipoints.depth() == _opoints.depth() &&
_rvec.empty() && _tvec.empty())
mtype = _opoints.depth();
_rvec.create(3, 1, mtype);
_tvec.create(3, 1, mtype);
}
rvec = _rvec.getMat();
tvec = _tvec.getMat();
Mat cameraMatrix0 = _cameraMatrix.getMat();
Mat distCoeffs0 = _distCoeffs.getMat();
Mat cameraMatrix = Mat_<double>(cameraMatrix0);
Mat distCoeffs = Mat_<double>(distCoeffs0);
bool result = false;
if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
{
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
epnp PnP(cameraMatrix, opoints, undistortedPoints);
Mat R;
PnP.compute_pose(R, tvec);
Rodrigues(R, rvec);
result = true;
}
else if (flags == SOLVEPNP_P3P)
{
CV_Assert( npoints == 4);
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
p3p P3Psolver(cameraMatrix);
Mat R;
result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
if (result)
Rodrigues(R, rvec);
}
else if (flags == SOLVEPNP_AP3P)
{
CV_Assert( npoints == 4);
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
ap3p P3Psolver(cameraMatrix);
Mat R; if (solutions > 0)
result = P3Psolver.solve(R, tvec, opoints, undistortedPoints);
if (result)
Rodrigues(R, rvec);
}
else if (flags == SOLVEPNP_ITERATIVE)
{
CvMat c_objectPoints = cvMat(opoints), c_imagePoints = cvMat(ipoints);
CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);
cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
(c_distCoeffs.rows && c_distCoeffs.cols) ? &c_distCoeffs : 0,
&c_rvec, &c_tvec, useExtrinsicGuess );
result = true;
}
/*else if (flags == SOLVEPNP_DLS)
{ {
Mat undistortedPoints; int rdepth = rvec.empty() ? CV_64F : rvec.depth();
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs); int tdepth = tvec.empty() ? CV_64F : tvec.depth();
rvecs[0].convertTo(rvec, rdepth);
dls PnP(opoints, undistortedPoints); tvecs[0].convertTo(tvec, tdepth);
Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat();
bool result = PnP.compute_pose(R, tvec);
if (result)
Rodrigues(R, rvec);
return result;
} }
else if (flags == SOLVEPNP_UPNP)
{
upnp PnP(cameraMatrix, opoints, ipoints);
Mat R, rvec = _rvec.getMat(), tvec = _tvec.getMat(); return solutions > 0;
PnP.compute_pose(R, tvec);
Rodrigues(R, rvec);
return true;
}*/
else
CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_EPNP or SOLVEPNP_DLS");
return result;
} }
class PnPRansacCallback CV_FINAL : public PointSetRegistrator::Callback class PnPRansacCallback CV_FINAL : public PointSetRegistrator::Callback
...@@ -410,7 +346,8 @@ int solveP3P( InputArray _opoints, InputArray _ipoints, ...@@ -410,7 +346,8 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat(); Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F)); int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
CV_Assert( npoints == 3 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) ); CV_Assert( npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
CV_Assert( npoints == 3 || npoints == 4 );
CV_Assert( flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P ); CV_Assert( flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P );
Mat cameraMatrix0 = _cameraMatrix.getMat(); Mat cameraMatrix0 = _cameraMatrix.getMat();
...@@ -420,7 +357,7 @@ int solveP3P( InputArray _opoints, InputArray _ipoints, ...@@ -420,7 +357,7 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,
Mat undistortedPoints; Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs); undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
std::vector<Mat> Rs, ts; std::vector<Mat> Rs, ts, rvecs;
int solutions = 0; int solutions = 0;
if (flags == SOLVEPNP_P3P) if (flags == SOLVEPNP_P3P)
...@@ -438,19 +375,91 @@ int solveP3P( InputArray _opoints, InputArray _ipoints, ...@@ -438,19 +375,91 @@ int solveP3P( InputArray _opoints, InputArray _ipoints,
return 0; return 0;
} }
if (_rvecs.needed()) { Mat objPts, imgPts;
_rvecs.create(solutions, 1, CV_64F); opoints.convertTo(objPts, CV_64F);
} ipoints.convertTo(imgPts, CV_64F);
if (imgPts.cols > 1)
if (_tvecs.needed()) { {
_tvecs.create(solutions, 1, CV_64F); imgPts = imgPts.reshape(1);
imgPts = imgPts.t();
} }
else
imgPts = imgPts.reshape(1, 2*imgPts.rows);
for (int i = 0; i < solutions; i++) { vector<double> reproj_errors(solutions);
for (size_t i = 0; i < reproj_errors.size(); i++)
{
Mat rvec; Mat rvec;
Rodrigues(Rs[i], rvec); Rodrigues(Rs[i], rvec);
_tvecs.getMatRef(i) = ts[i]; rvecs.push_back(rvec);
_rvecs.getMatRef(i) = rvec;
Mat projPts;
projectPoints(objPts, rvec, ts[i], _cameraMatrix, _distCoeffs, projPts);
projPts = projPts.reshape(1, 2*projPts.rows);
Mat err = imgPts - projPts;
err = err.t() * err;
reproj_errors[i] = err.at<double>(0,0);
}
//sort the solutions
for (int i = 1; i < solutions; i++)
{
for (int j = i; j > 0 && reproj_errors[j-1] > reproj_errors[j]; j--)
{
std::swap(reproj_errors[j], reproj_errors[j-1]);
std::swap(rvecs[j], rvecs[j-1]);
std::swap(ts[j], ts[j-1]);
}
}
int depthRot = _rvecs.fixedType() ? _rvecs.depth() : CV_64F;
int depthTrans = _tvecs.fixedType() ? _tvecs.depth() : CV_64F;
_rvecs.create(solutions, 1, CV_MAKETYPE(depthRot, _rvecs.fixedType() && _rvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
_tvecs.create(solutions, 1, CV_MAKETYPE(depthTrans, _tvecs.fixedType() && _tvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
for (int i = 0; i < solutions; i++)
{
Mat rvec0, tvec0;
if (depthRot == CV_64F)
rvec0 = rvecs[i];
else
rvecs[i].convertTo(rvec0, depthRot);
if (depthTrans == CV_64F)
tvec0 = ts[i];
else
ts[i].convertTo(tvec0, depthTrans);
if (_rvecs.fixedType() && _rvecs.kind() == _InputArray::STD_VECTOR)
{
Mat rref = _rvecs.getMat_();
if (_rvecs.depth() == CV_32F)
rref.at<Vec3f>(0,i) = Vec3f(rvec0.at<float>(0,0), rvec0.at<float>(1,0), rvec0.at<float>(2,0));
else
rref.at<Vec3d>(0,i) = Vec3d(rvec0.at<double>(0,0), rvec0.at<double>(1,0), rvec0.at<double>(2,0));
}
else
{
_rvecs.getMatRef(i) = rvec0;
}
if (_tvecs.fixedType() && _tvecs.kind() == _InputArray::STD_VECTOR)
{
Mat tref = _tvecs.getMat_();
if (_tvecs.depth() == CV_32F)
tref.at<Vec3f>(0,i) = Vec3f(tvec0.at<float>(0,0), tvec0.at<float>(1,0), tvec0.at<float>(2,0));
else
tref.at<Vec3d>(0,i) = Vec3d(tvec0.at<double>(0,0), tvec0.at<double>(1,0), tvec0.at<double>(2,0));
}
else
{
_tvecs.getMatRef(i) = tvec0;
}
} }
return solutions; return solutions;
...@@ -723,4 +732,314 @@ void solvePnPRefineVVS(InputArray _objectPoints, InputArray _imagePoints, ...@@ -723,4 +732,314 @@ void solvePnPRefineVVS(InputArray _objectPoints, InputArray _imagePoints,
solvePnPRefine(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, _rvec, _tvec, SOLVEPNP_REFINE_VVS, _criteria, _VVSlambda); solvePnPRefine(_objectPoints, _imagePoints, _cameraMatrix, _distCoeffs, _rvec, _tvec, SOLVEPNP_REFINE_VVS, _criteria, _VVSlambda);
} }
int solvePnPGeneric( InputArray _opoints, InputArray _ipoints,
InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
bool useExtrinsicGuess, SolvePnPMethod flags,
InputArray _rvec, InputArray _tvec,
OutputArray reprojectionError) {
CV_INSTRUMENT_REGION();
Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();
int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
CV_Assert( ( (npoints >= 4) || (npoints == 3 && flags == SOLVEPNP_ITERATIVE && useExtrinsicGuess) )
&& npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );
if( flags != SOLVEPNP_ITERATIVE )
useExtrinsicGuess = false;
if (useExtrinsicGuess)
CV_Assert( !_rvec.empty() && !_tvec.empty() );
if( useExtrinsicGuess )
{
int rtype = _rvec.type(), ttype = _tvec.type();
Size rsize = _rvec.size(), tsize = _tvec.size();
CV_Assert( (rtype == CV_32FC1 || rtype == CV_64FC1) &&
(ttype == CV_32FC1 || ttype == CV_64FC1) );
CV_Assert( (rsize == Size(1, 3) || rsize == Size(3, 1)) &&
(tsize == Size(1, 3) || tsize == Size(3, 1)) );
}
Mat cameraMatrix0 = _cameraMatrix.getMat();
Mat distCoeffs0 = _distCoeffs.getMat();
Mat cameraMatrix = Mat_<double>(cameraMatrix0);
Mat distCoeffs = Mat_<double>(distCoeffs0);
vector<Mat> vec_rvecs, vec_tvecs;
if (flags == SOLVEPNP_EPNP || flags == SOLVEPNP_DLS || flags == SOLVEPNP_UPNP)
{
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
epnp PnP(cameraMatrix, opoints, undistortedPoints);
Mat rvec, tvec, R;
PnP.compute_pose(R, tvec);
Rodrigues(R, rvec);
vec_rvecs.push_back(rvec);
vec_tvecs.push_back(tvec);
}
else if (flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P)
{
vector<Mat> rvecs, tvecs;
solveP3P(_opoints, _ipoints, _cameraMatrix, _distCoeffs, rvecs, tvecs, flags);
vec_rvecs.insert(vec_rvecs.end(), rvecs.begin(), rvecs.end());
vec_tvecs.insert(vec_tvecs.end(), tvecs.begin(), tvecs.end());
}
else if (flags == SOLVEPNP_ITERATIVE)
{
Mat rvec, tvec;
if (useExtrinsicGuess)
{
rvec = _rvec.getMat();
tvec = _tvec.getMat();
}
else
{
rvec.create(3, 1, CV_64FC1);
tvec.create(3, 1, CV_64FC1);
}
CvMat c_objectPoints = cvMat(opoints), c_imagePoints = cvMat(ipoints);
CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);
cvFindExtrinsicCameraParams2(&c_objectPoints, &c_imagePoints, &c_cameraMatrix,
(c_distCoeffs.rows && c_distCoeffs.cols) ? &c_distCoeffs : 0,
&c_rvec, &c_tvec, useExtrinsicGuess );
vec_rvecs.push_back(rvec);
vec_tvecs.push_back(tvec);
}
else if (flags == SOLVEPNP_IPPE)
{
CV_DbgAssert(isPlanarObjectPoints(opoints, 1e-3));
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
IPPE::PoseSolver poseSolver;
Mat rvec1, tvec1, rvec2, tvec2;
float reprojErr1, reprojErr2;
try
{
poseSolver.solveGeneric(opoints, undistortedPoints, rvec1, tvec1, reprojErr1, rvec2, tvec2, reprojErr2);
if (reprojErr1 < reprojErr2)
{
vec_rvecs.push_back(rvec1);
vec_tvecs.push_back(tvec1);
vec_rvecs.push_back(rvec2);
vec_tvecs.push_back(tvec2);
}
else
{
vec_rvecs.push_back(rvec2);
vec_tvecs.push_back(tvec2);
vec_rvecs.push_back(rvec1);
vec_tvecs.push_back(tvec1);
}
}
catch (...) { }
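        //solveGeneric() may throw on a degenerate point configuration; in that case no
        //pose is appended and the function returns fewer (possibly zero) solutions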
}
else if (flags == SOLVEPNP_IPPE_SQUARE)
{
CV_Assert(npoints == 4);
#if defined _DEBUG || defined CV_STATIC_ANALYSIS
double Xs[4][3];
if (opoints.depth() == CV_32F)
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 3; j++)
{
Xs[i][j] = opoints.ptr<Vec3f>(0)[i](j);
}
}
}
else
{
for (int i = 0; i < 4; i++)
{
for (int j = 0; j < 3; j++)
{
Xs[i][j] = opoints.ptr<Vec3d>(0)[i](j);
}
}
}
const double equalThreshold = 1e-9;
//Z must be zero
for (int i = 0; i < 4; i++)
{
CV_DbgCheck(Xs[i][2], approxEqual(Xs[i][2], 0, equalThreshold), "Z object point coordinate must be zero!");
}
//Y0 == Y1 && Y2 == Y3
CV_DbgCheck(Xs[0][1], approxEqual(Xs[0][1], Xs[1][1], equalThreshold), "Object points must be: Y0 == Y1!");
CV_DbgCheck(Xs[2][1], approxEqual(Xs[2][1], Xs[3][1], equalThreshold), "Object points must be: Y2 == Y3!");
//X0 == X3 && X1 == X2
CV_DbgCheck(Xs[0][0], approxEqual(Xs[0][0], Xs[3][0], equalThreshold), "Object points must be: X0 == X3!");
CV_DbgCheck(Xs[1][0], approxEqual(Xs[1][0], Xs[2][0], equalThreshold), "Object points must be: X1 == X2!");
//X1 == Y1 && X3 == Y3
CV_DbgCheck(Xs[1][0], approxEqual(Xs[1][0], Xs[1][1], equalThreshold), "Object points must be: X1 == Y1!");
CV_DbgCheck(Xs[3][0], approxEqual(Xs[3][0], Xs[3][1], equalThreshold), "Object points must be: X3 == Y3!");
#endif
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
IPPE::PoseSolver poseSolver;
Mat rvec1, tvec1, rvec2, tvec2;
float reprojErr1, reprojErr2;
try
{
poseSolver.solveSquare(opoints, undistortedPoints, rvec1, tvec1, reprojErr1, rvec2, tvec2, reprojErr2);
if (reprojErr1 < reprojErr2)
{
vec_rvecs.push_back(rvec1);
vec_tvecs.push_back(tvec1);
vec_rvecs.push_back(rvec2);
vec_tvecs.push_back(tvec2);
}
else
{
vec_rvecs.push_back(rvec2);
vec_tvecs.push_back(tvec2);
vec_rvecs.push_back(rvec1);
vec_tvecs.push_back(tvec1);
}
} catch (...) { }
}
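    //Caller-side sketch (illustrative only; 'squareLength', 'imageCorners', 'K' and
    //'dist' are hypothetical names): SOLVEPNP_IPPE_SQUARE expects exactly the corner
    //ordering validated above, i.e.
    //    std::vector<Point3d> tagPts;
    //    tagPts.push_back(Point3d(-squareLength/2,  squareLength/2, 0));
    //    tagPts.push_back(Point3d( squareLength/2,  squareLength/2, 0));
    //    tagPts.push_back(Point3d( squareLength/2, -squareLength/2, 0));
    //    tagPts.push_back(Point3d(-squareLength/2, -squareLength/2, 0));
    //    solvePnP(tagPts, imageCorners, K, dist, rvec, tvec, false, SOLVEPNP_IPPE_SQUARE);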
/*else if (flags == SOLVEPNP_DLS)
{
Mat undistortedPoints;
undistortPoints(ipoints, undistortedPoints, cameraMatrix, distCoeffs);
dls PnP(opoints, undistortedPoints);
Mat rvec, tvec, R;
bool result = PnP.compute_pose(R, tvec);
if (result)
{
Rodrigues(R, rvec);
vec_rvecs.push_back(rvec);
vec_tvecs.push_back(tvec);
}
}
else if (flags == SOLVEPNP_UPNP)
{
upnp PnP(cameraMatrix, opoints, ipoints);
Mat rvec, tvec, R;
PnP.compute_pose(R, tvec);
Rodrigues(R, rvec);
vec_rvecs.push_back(rvec);
vec_tvecs.push_back(tvec);
}*/
else
        CV_Error(CV_StsBadArg, "The flags argument must be one of SOLVEPNP_ITERATIVE, SOLVEPNP_P3P, SOLVEPNP_AP3P, SOLVEPNP_EPNP, SOLVEPNP_DLS, SOLVEPNP_UPNP, SOLVEPNP_IPPE or SOLVEPNP_IPPE_SQUARE");
CV_Assert(vec_rvecs.size() == vec_tvecs.size());
int solutions = static_cast<int>(vec_rvecs.size());
int depthRot = _rvecs.fixedType() ? _rvecs.depth() : CV_64F;
int depthTrans = _tvecs.fixedType() ? _tvecs.depth() : CV_64F;
_rvecs.create(solutions, 1, CV_MAKETYPE(depthRot, _rvecs.fixedType() && _rvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
_tvecs.create(solutions, 1, CV_MAKETYPE(depthTrans, _tvecs.fixedType() && _tvecs.kind() == _InputArray::STD_VECTOR ? 3 : 1));
for (int i = 0; i < solutions; i++)
{
Mat rvec0, tvec0;
if (depthRot == CV_64F)
rvec0 = vec_rvecs[i];
else
vec_rvecs[i].convertTo(rvec0, depthRot);
if (depthTrans == CV_64F)
tvec0 = vec_tvecs[i];
else
vec_tvecs[i].convertTo(tvec0, depthTrans);
if (_rvecs.fixedType() && _rvecs.kind() == _InputArray::STD_VECTOR)
{
Mat rref = _rvecs.getMat_();
if (_rvecs.depth() == CV_32F)
rref.at<Vec3f>(0,i) = Vec3f(rvec0.at<float>(0,0), rvec0.at<float>(1,0), rvec0.at<float>(2,0));
else
rref.at<Vec3d>(0,i) = Vec3d(rvec0.at<double>(0,0), rvec0.at<double>(1,0), rvec0.at<double>(2,0));
}
else
{
_rvecs.getMatRef(i) = rvec0;
}
if (_tvecs.fixedType() && _tvecs.kind() == _InputArray::STD_VECTOR)
{
Mat tref = _tvecs.getMat_();
if (_tvecs.depth() == CV_32F)
tref.at<Vec3f>(0,i) = Vec3f(tvec0.at<float>(0,0), tvec0.at<float>(1,0), tvec0.at<float>(2,0));
else
tref.at<Vec3d>(0,i) = Vec3d(tvec0.at<double>(0,0), tvec0.at<double>(1,0), tvec0.at<double>(2,0));
}
else
{
_tvecs.getMatRef(i) = tvec0;
}
}
if (reprojectionError.needed())
{
int type = reprojectionError.type();
reprojectionError.create(solutions, 1, type);
CV_CheckType(reprojectionError.type(), type == CV_32FC1 || type == CV_64FC1,
"Type of reprojectionError must be CV_32FC1 or CV_64FC1!");
Mat objectPoints, imagePoints;
if (_opoints.depth() == CV_32F)
{
_opoints.getMat().convertTo(objectPoints, CV_64F);
}
else
{
objectPoints = _opoints.getMat();
}
if (_ipoints.depth() == CV_32F)
{
_ipoints.getMat().convertTo(imagePoints, CV_64F);
}
else
{
imagePoints = _ipoints.getMat();
}
for (size_t i = 0; i < vec_rvecs.size(); i++)
{
vector<Point2d> projectedPoints;
projectPoints(objectPoints, vec_rvecs[i], vec_tvecs[i], cameraMatrix, distCoeffs, projectedPoints);
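            //RMS reprojection error: norm(..., NORM_L2) returns
            //sqrt(sum_i ||proj_i - img_i||^2), and each of the N points contributes an
            //x and a y residual, hence the normalization by sqrt(2*N)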
double rmse = norm(projectedPoints, imagePoints, NORM_L2) / sqrt(2*projectedPoints.size());
Mat err = reprojectionError.getMat();
if (type == CV_32F)
{
err.at<float>(0,static_cast<int>(i)) = static_cast<float>(rmse);
}
else
{
err.at<double>(0,static_cast<int>(i)) = rmse;
}
}
}
return solutions;
}
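//Minimal usage sketch for the function above (illustrative; 'objPts', 'imgPts', 'K'
//and 'dist' are hypothetical, calibrated inputs). The reprojection errors let the
//caller rank the returned candidate poses:
//    std::vector<Mat> rvecs, tvecs;
//    std::vector<double> reprojErrs;
//    int n = solvePnPGeneric(objPts, imgPts, K, dist, rvecs, tvecs,
//                            false, SOLVEPNP_IPPE, noArray(), noArray(), reprojErrs);
//    //for the IPPE variants the two candidate poses are pushed best-first, so
//    //rvecs[0]/tvecs[0] has the smaller reprojection error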
}
...@@ -44,10 +44,161 @@
namespace opencv_test { namespace {
//Statistics Helpers
struct ErrorInfo
{
ErrorInfo(double errT, double errR) : errorTrans(errT), errorRot(errR)
{
}
bool operator<(const ErrorInfo& e) const
{
return sqrt(errorTrans*errorTrans + errorRot*errorRot) <
sqrt(e.errorTrans*e.errorTrans + e.errorRot*e.errorRot);
}
double errorTrans;
double errorRot;
};
//Try to find the translation and rotation thresholds to achieve a predefined percentage of success.
//Since a success is defined by error_trans < trans_thresh && error_rot < rot_thresh
//this just gives an idea of the values to use
static void findThreshold(const std::vector<double>& v_trans, const std::vector<double>& v_rot, double percentage,
double& transThresh, double& rotThresh)
{
if (v_trans.empty() || v_rot.empty() || v_trans.size() != v_rot.size())
{
transThresh = -1;
rotThresh = -1;
return;
}
std::vector<ErrorInfo> error_info;
error_info.reserve(v_trans.size());
for (size_t i = 0; i < v_trans.size(); i++)
{
error_info.push_back(ErrorInfo(v_trans[i], v_rot[i]));
}
std::sort(error_info.begin(), error_info.end());
    size_t idx = std::min(static_cast<size_t>(error_info.size() * percentage), error_info.size() - 1);
transThresh = error_info[idx].errorTrans;
rotThresh = error_info[idx].errorRot;
}
static double getMax(const std::vector<double>& v)
{
return *std::max_element(v.begin(), v.end());
}
static double getMean(const std::vector<double>& v)
{
if (v.empty())
{
return 0.0;
}
double sum = std::accumulate(v.begin(), v.end(), 0.0);
return sum / v.size();
}
static double getMedian(const std::vector<double>& v)
{
if (v.empty())
{
return 0.0;
}
std::vector<double> v_copy = v;
size_t size = v_copy.size();
size_t n = size / 2;
std::nth_element(v_copy.begin(), v_copy.begin() + n, v_copy.end());
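    //std::nth_element only partially sorts: after the call, v_copy[n] holds the value
    //that a full sort would place at index n (average O(N) instead of O(N log N))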
double val_n = v_copy[n];
if (size % 2 == 1)
{
return val_n;
} else
{
std::nth_element(v_copy.begin(), v_copy.begin() + n - 1, v_copy.end());
return 0.5 * (val_n + v_copy[n - 1]);
}
}
static void generatePose(const vector<Point3d>& points, Mat& rvec, Mat& tvec, RNG& rng, int nbTrials=10)
{
const double minVal = 1.0e-3;
const double maxVal = 1.0;
rvec.create(3, 1, CV_64FC1);
tvec.create(3, 1, CV_64FC1);
bool validPose = false;
for (int trial = 0; trial < nbTrials && !validPose; trial++)
{
for (int i = 0; i < 3; i++)
{
rvec.at<double>(i,0) = rng.uniform(minVal, maxVal);
tvec.at<double>(i,0) = (i == 2) ? rng.uniform(minVal*10, maxVal) : rng.uniform(-maxVal, maxVal);
}
Mat R;
cv::Rodrigues(rvec, R);
bool positiveDepth = true;
for (size_t i = 0; i < points.size() && positiveDepth; i++)
{
Matx31d objPts(points[i].x, points[i].y, points[i].z);
Mat camPts = R*objPts + tvec;
if (camPts.at<double>(2,0) <= 0)
{
positiveDepth = false;
}
}
validPose = positiveDepth;
}
}
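//generatePose draws candidate poses by rejection sampling: a pose is accepted only if
//every object point gets a strictly positive depth in the camera frame, retrying up to
//nbTrials times (the last candidate is kept if none passes)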
static void generatePose(const vector<Point3f>& points, Mat& rvec, Mat& tvec, RNG& rng, int nbTrials=10)
{
vector<Point3d> points_double(points.size());
for (size_t i = 0; i < points.size(); i++)
{
points_double[i] = Point3d(points[i].x, points[i].y, points[i].z);
}
generatePose(points_double, rvec, tvec, rng, nbTrials);
}
static std::string printMethod(int method)
{
switch (method) {
case 0:
return "SOLVEPNP_ITERATIVE";
case 1:
return "SOLVEPNP_EPNP";
case 2:
return "SOLVEPNP_P3P";
case 3:
return "SOLVEPNP_DLS (remaped to SOLVEPNP_EPNP)";
case 4:
return "SOLVEPNP_UPNP (remaped to SOLVEPNP_EPNP)";
case 5:
return "SOLVEPNP_AP3P";
case 6:
return "SOLVEPNP_IPPE";
case 7:
return "SOLVEPNP_IPPE_SQUARE";
default:
return "Unknown value";
}
}
class CV_solvePnPRansac_Test : public cvtest::BaseTest
{
public:
    CV_solvePnPRansac_Test(bool planar_=false, bool planarTag_=false) : planar(planar_), planarTag(planarTag_)
    {
        eps[SOLVEPNP_ITERATIVE] = 1.0e-2;
        eps[SOLVEPNP_EPNP] = 1.0e-2;
...@@ -64,7 +215,7 @@ protected:
                              Point3f pmin = Point3f(-1, -1, 5),
                              Point3f pmax = Point3f(1, 1, 10))
    {
        RNG& rng = theRNG(); // fix the seed to use "fixed" input 3D points
        for (size_t i = 0; i < points.size(); i++)
        {
...@@ -75,6 +226,44 @@
        }
    }
void generatePlanarPointCloud(vector<Point3f>& points,
Point2f pmin = Point2f(-1, -1),
Point2f pmax = Point2f(1, 1))
{
RNG& rng = theRNG(); // fix the seed to use "fixed" input 3D points
if (planarTag)
{
const float squareLength_2 = rng.uniform(0.01f, pmax.x) / 2;
points.clear();
points.push_back(Point3f(-squareLength_2, squareLength_2, 0));
points.push_back(Point3f(squareLength_2, squareLength_2, 0));
points.push_back(Point3f(squareLength_2, -squareLength_2, 0));
points.push_back(Point3f(-squareLength_2, -squareLength_2, 0));
}
else
{
Mat rvec_double, tvec_double;
generatePose(points, rvec_double, tvec_double, rng);
Mat rvec, tvec, R;
rvec_double.convertTo(rvec, CV_32F);
tvec_double.convertTo(tvec, CV_32F);
cv::Rodrigues(rvec, R);
for (size_t i = 0; i < points.size(); i++)
{
float x = rng.uniform(pmin.x, pmax.x);
float y = rng.uniform(pmin.y, pmax.y);
float z = 0;
Matx31f pt(x, y, z);
Mat pt_trans = R * pt + tvec;
points[i] = Point3f(pt_trans.at<float>(0,0), pt_trans.at<float>(1,0), pt_trans.at<float>(2,0));
}
}
}
    void generateCameraMatrix(Mat& cameraMatrix, RNG& rng)
    {
        const double fcMinVal = 1e-3;
...@@ -95,32 +284,34 @@
            distCoeffs.at<double>(i,0) = rng.uniform(0.0, 1.0e-6);
    }

    virtual bool runTest(RNG& rng, int mode, int method, const vector<Point3f>& points, double& errorTrans, double& errorRot)
    {
        if ((!planar && method == SOLVEPNP_IPPE) || method == SOLVEPNP_IPPE_SQUARE)
        {
            return true;
        }

        Mat rvec, tvec;
        vector<int> inliers;
        Mat trueRvec, trueTvec;
        Mat intrinsics, distCoeffs;
        generateCameraMatrix(intrinsics, rng);
        //UPnP is mapped to EPnP
        //Uncomment this when UPnP is fixed
        //if (method == SOLVEPNP_UPNP)
        //{
        //    intrinsics.at<double>(1,1) = intrinsics.at<double>(0,0);
        //}
        if (mode == 0)
        {
            distCoeffs = Mat::zeros(4, 1, CV_64FC1);
        }
        else
        {
            generateDistCoeffs(distCoeffs, rng);
        }

        generatePose(points, trueRvec, trueTvec, rng);

        vector<Point2f> projectedPoints;
        projectedPoints.resize(points.size());
...@@ -138,11 +329,9 @@
        bool isTestSuccess = inliers.size() >= points.size()*0.95;
        double rvecDiff = cvtest::norm(rvec, trueRvec, NORM_L2), tvecDiff = cvtest::norm(tvec, trueTvec, NORM_L2);
        isTestSuccess = isTestSuccess && rvecDiff < eps[method] && tvecDiff < eps[method];
        errorTrans = tvecDiff;
        errorRot = rvecDiff;

        return isTestSuccess;
    }
...@@ -152,68 +341,184 @@
        ts->set_failed_test_info(cvtest::TS::OK);

        vector<Point3f> points, points_dls;
        points.resize(static_cast<size_t>(pointsCount));

        if (planar || planarTag)
        {
            generatePlanarPointCloud(points);
        }
        else
        {
            generate3DPointCloud(points);
        }

        RNG& rng = ts->get_rng();

        for (int mode = 0; mode < 2; mode++)
        {
            for (int method = 0; method < SOLVEPNP_MAX_COUNT; method++)
            {
                //To get the same input for each method
                RNG rngCopy = rng;
                std::vector<double> vec_errorTrans, vec_errorRot;
                vec_errorTrans.reserve(static_cast<size_t>(totalTestsCount));
                vec_errorRot.reserve(static_cast<size_t>(totalTestsCount));

                int successfulTestsCount = 0;
                for (int testIndex = 0; testIndex < totalTestsCount; testIndex++)
                {
                    double errorTrans = 0, errorRot = 0;
                    if (runTest(rngCopy, mode, method, points, errorTrans, errorRot))
                    {
                        successfulTestsCount++;
                    }
                    vec_errorTrans.push_back(errorTrans);
                    vec_errorRot.push_back(errorRot);
                }
double maxErrorTrans = getMax(vec_errorTrans);
double maxErrorRot = getMax(vec_errorRot);
double meanErrorTrans = getMean(vec_errorTrans);
double meanErrorRot = getMean(vec_errorRot);
double medianErrorTrans = getMedian(vec_errorTrans);
double medianErrorRot = getMedian(vec_errorRot);
                if (successfulTestsCount < 0.7*totalTestsCount)
                {
                    ts->printf(cvtest::TS::LOG, "Invalid accuracy for %s, failed %d tests from %d, %s, "
                                                "maxErrT: %f, maxErrR: %f, "
                                                "meanErrT: %f, meanErrR: %f, "
                                                "medErrT: %f, medErrR: %f\n",
                               printMethod(method).c_str(), totalTestsCount - successfulTestsCount, totalTestsCount, printMode(mode).c_str(),
                               maxErrorTrans, maxErrorRot, meanErrorTrans, meanErrorRot, medianErrorTrans, medianErrorRot);
                    ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
                }

                cout << "mode: " << printMode(mode) << ", method: " << printMethod(method) << " -> "
                     << ((double)successfulTestsCount / totalTestsCount) * 100 << "%"
                     << " (maxErrT: " << maxErrorTrans << ", maxErrR: " << maxErrorRot
                     << ", meanErrT: " << meanErrorTrans << ", meanErrR: " << meanErrorRot
                     << ", medErrT: " << medianErrorTrans << ", medErrR: " << medianErrorRot << ")" << endl;

                double transThres, rotThresh;
                findThreshold(vec_errorTrans, vec_errorRot, 0.7, transThres, rotThresh);
                cout << "approximate translation threshold for 0.7: " << transThres
                     << ", approximate rotation threshold for 0.7: " << rotThresh << endl;
            }
cout << endl;
}
}
    std::string printMode(int mode)
    {
        switch (mode) {
        case 0:
            return "no distortion";
        case 1:
        default:
            return "distortion";
        }
    }
    double eps[SOLVEPNP_MAX_COUNT];
    int totalTestsCount;
    int pointsCount;
    bool planar;
    bool planarTag;
};

class CV_solvePnP_Test : public CV_solvePnPRansac_Test
{
public:
    CV_solvePnP_Test(bool planar_=false, bool planarTag_=false) : CV_solvePnPRansac_Test(planar_, planarTag_)
    {
        eps[SOLVEPNP_ITERATIVE] = 1.0e-6;
        eps[SOLVEPNP_EPNP] = 1.0e-6;
        eps[SOLVEPNP_P3P] = 2.0e-4;
        eps[SOLVEPNP_AP3P] = 1.0e-4;
        eps[SOLVEPNP_DLS] = 1.0e-6; //DLS is remapped to EPnP, so we use the same threshold
        eps[SOLVEPNP_UPNP] = 1.0e-6; //UPnP is remapped to EPnP, so we use the same threshold
        eps[SOLVEPNP_IPPE] = 1.0e-6;
        eps[SOLVEPNP_IPPE_SQUARE] = 1.0e-6;

        totalTestsCount = 1000;
if (planar || planarTag)
{
if (planarTag)
{
pointsCount = 4;
}
else
{
pointsCount = 30;
}
}
else
{
pointsCount = 500;
}
    }
    ~CV_solvePnP_Test() {}
protected:
    virtual bool runTest(RNG& rng, int mode, int method, const vector<Point3f>& points, double& errorTrans, double& errorRot)
    {
        if ((!planar && (method == SOLVEPNP_IPPE || method == SOLVEPNP_IPPE_SQUARE)) ||
(!planarTag && method == SOLVEPNP_IPPE_SQUARE))
{
errorTrans = -1;
errorRot = -1;
//SOLVEPNP_IPPE and SOLVEPNP_IPPE_SQUARE need planar object
return true;
}
//Tune thresholds...
double epsilon_trans[SOLVEPNP_MAX_COUNT];
memcpy(epsilon_trans, eps, SOLVEPNP_MAX_COUNT * sizeof(*epsilon_trans));
double epsilon_rot[SOLVEPNP_MAX_COUNT];
memcpy(epsilon_rot, eps, SOLVEPNP_MAX_COUNT * sizeof(*epsilon_rot));
if (planar)
{
if (mode == 0)
{
epsilon_trans[SOLVEPNP_EPNP] = 5.0e-3;
epsilon_trans[SOLVEPNP_DLS] = 5.0e-3;
epsilon_trans[SOLVEPNP_UPNP] = 5.0e-3;
epsilon_rot[SOLVEPNP_EPNP] = 5.0e-3;
epsilon_rot[SOLVEPNP_DLS] = 5.0e-3;
epsilon_rot[SOLVEPNP_UPNP] = 5.0e-3;
}
else
{
epsilon_trans[SOLVEPNP_ITERATIVE] = 1e-4;
epsilon_trans[SOLVEPNP_EPNP] = 5e-3;
epsilon_trans[SOLVEPNP_DLS] = 5e-3;
epsilon_trans[SOLVEPNP_UPNP] = 5e-3;
epsilon_trans[SOLVEPNP_P3P] = 1e-4;
epsilon_trans[SOLVEPNP_AP3P] = 1e-4;
epsilon_trans[SOLVEPNP_IPPE] = 1e-4;
epsilon_trans[SOLVEPNP_IPPE_SQUARE] = 1e-4;
epsilon_rot[SOLVEPNP_ITERATIVE] = 1e-4;
epsilon_rot[SOLVEPNP_EPNP] = 5e-3;
epsilon_rot[SOLVEPNP_DLS] = 5e-3;
epsilon_rot[SOLVEPNP_UPNP] = 5e-3;
epsilon_rot[SOLVEPNP_P3P] = 1e-4;
epsilon_rot[SOLVEPNP_AP3P] = 1e-4;
epsilon_rot[SOLVEPNP_IPPE] = 1e-4;
epsilon_rot[SOLVEPNP_IPPE_SQUARE] = 1e-4;
}
}
        Mat trueRvec, trueTvec;
        Mat intrinsics, distCoeffs;
        generateCameraMatrix(intrinsics, rng);
        //UPnP is mapped to EPnP
        //Uncomment this when UPnP is fixed
        //if (method == SOLVEPNP_UPNP)
        //{
        //    intrinsics.at<double>(1,1) = intrinsics.at<double>(0,0);
        //}
        if (mode == 0)
        {
            distCoeffs = Mat::zeros(4, 1, CV_64FC1);
...@@ -222,7 +527,8 @@
        {
            generateDistCoeffs(distCoeffs, rng);
        }

        generatePose(points, trueRvec, trueTvec, rng);

        std::vector<Point3f> opoints;
        switch(method)
...@@ -231,9 +537,18 @@
        case SOLVEPNP_AP3P:
            opoints = std::vector<Point3f>(points.begin(), points.begin()+4);
            break;
        //UPnP is mapped to EPnP
        //Uncomment this when UPnP is fixed
        //case SOLVEPNP_UPNP:
        //    if (points.size() > 50)
        //    {
        //        opoints = std::vector<Point3f>(points.begin(), points.begin()+50);
        //    }
        //    else
        //    {
        //        opoints = points;
        //    }
        //    break;
        default:
            opoints = points;
            break;
...@@ -243,20 +558,19 @@
        projectedPoints.resize(opoints.size());
        projectPoints(opoints, trueRvec, trueTvec, intrinsics, distCoeffs, projectedPoints);

        Mat rvec, tvec;
        bool isEstimateSuccess = solvePnP(opoints, projectedPoints, intrinsics, distCoeffs, rvec, tvec, false, method);

        if (!isEstimateSuccess)
        {
            return false;
        }

        double rvecDiff = cvtest::norm(rvec, trueRvec, NORM_L2), tvecDiff = cvtest::norm(tvec, trueTvec, NORM_L2);
        bool isTestSuccess = rvecDiff < epsilon_rot[method] && tvecDiff < epsilon_trans[method];

        errorTrans = tvecDiff;
        errorRot = rvecDiff;

        return isTestSuccess;
    }
...@@ -264,7 +578,7 @@
class CV_solveP3P_Test : public CV_solvePnPRansac_Test
{
public:
    CV_solveP3P_Test()
    {
        eps[SOLVEPNP_P3P] = 2.0e-4;
...@@ -273,18 +587,22 @@
    }

    ~CV_solveP3P_Test() {}
protected:
    virtual bool runTest(RNG& rng, int mode, int method, const vector<Point3f>& points, double& errorTrans, double& errorRot)
    {
        std::vector<Mat> rvecs, tvecs;
        Mat trueRvec, trueTvec;
        Mat intrinsics, distCoeffs;
        generateCameraMatrix(intrinsics, rng);
        if (mode == 0)
        {
            distCoeffs = Mat::zeros(4, 1, CV_64FC1);
        }
        else
        {
            generateDistCoeffs(distCoeffs, rng);
        }

        generatePose(points, trueRvec, trueTvec, rng);

        std::vector<Point3f> opoints;
        opoints = std::vector<Point3f>(points.begin(), points.begin()+3);
...@@ -295,19 +613,19 @@
        int num_of_solutions = solveP3P(opoints, projectedPoints, intrinsics, distCoeffs, rvecs, tvecs, method);
        if (num_of_solutions != (int) rvecs.size() || num_of_solutions != (int) tvecs.size() || num_of_solutions == 0)
        {
            return false;
        }

        bool isTestSuccess = false;
        for (size_t i = 0; i < rvecs.size() && !isTestSuccess; i++) {
            double rvecDiff = cvtest::norm(rvecs[i], trueRvec, NORM_L2);
            double tvecDiff = cvtest::norm(tvecs[i], trueTvec, NORM_L2);
            isTestSuccess = rvecDiff < eps[method] && tvecDiff < eps[method];

            errorTrans = std::min(errorTrans, tvecDiff);
            errorRot = std::min(errorRot, rvecDiff);
        }

        return isTestSuccess;
    }
...@@ -317,7 +635,7 @@
        ts->set_failed_test_info(cvtest::TS::OK);

        vector<Point3f> points;
        points.resize(static_cast<size_t>(pointsCount));
        generate3DPointCloud(points);

        const int methodsCount = 2;
...@@ -326,24 +644,52 @@
        for (int mode = 0; mode < 2; mode++)
        {
            //To get the same input for each method
            RNG rngCopy = rng;
            for (int method = 0; method < methodsCount; method++)
            {
                std::vector<double> vec_errorTrans, vec_errorRot;
                vec_errorTrans.reserve(static_cast<size_t>(totalTestsCount));
                vec_errorRot.reserve(static_cast<size_t>(totalTestsCount));

                int successfulTestsCount = 0;
                for (int testIndex = 0; testIndex < totalTestsCount; testIndex++)
                {
                    double errorTrans = 0, errorRot = 0;
                    if (runTest(rngCopy, mode, methods[method], points, errorTrans, errorRot))
                    {
                        successfulTestsCount++;
                    }
vec_errorTrans.push_back(errorTrans);
vec_errorRot.push_back(errorRot);
}
double maxErrorTrans = getMax(vec_errorTrans);
double maxErrorRot = getMax(vec_errorRot);
double meanErrorTrans = getMean(vec_errorTrans);
double meanErrorRot = getMean(vec_errorRot);
double medianErrorTrans = getMedian(vec_errorTrans);
double medianErrorRot = getMedian(vec_errorRot);
                if (successfulTestsCount < 0.7*totalTestsCount)
                {
                    ts->printf(cvtest::TS::LOG, "Invalid accuracy for %s, failed %d tests from %d, %s, "
                                                "maxErrT: %f, maxErrR: %f, "
                                                "meanErrT: %f, meanErrR: %f, "
                                                "medErrT: %f, medErrR: %f\n",
                               printMethod(methods[method]).c_str(), totalTestsCount - successfulTestsCount, totalTestsCount, printMode(mode).c_str(),
                               maxErrorTrans, maxErrorRot, meanErrorTrans, meanErrorRot, medianErrorTrans, medianErrorRot);
                    ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
                }

                cout << "mode: " << printMode(mode) << ", method: " << printMethod(methods[method]) << " -> "
                     << ((double)successfulTestsCount / totalTestsCount) * 100 << "%"
                     << " (maxErrT: " << maxErrorTrans << ", maxErrR: " << maxErrorRot
                     << ", meanErrT: " << meanErrorTrans << ", meanErrR: " << meanErrorRot
                     << ", medErrT: " << medianErrorTrans << ", medErrR: " << medianErrorRot << ")" << endl;

                double transThres, rotThresh;
                findThreshold(vec_errorTrans, vec_errorRot, 0.7, transThres, rotThresh);
                cout << "approximate translation threshold for 0.7: " << transThres
                     << ", approximate rotation threshold for 0.7: " << rotThresh << endl;
            }
        }
    }
...@@ -353,6 +699,8 @@
TEST(Calib3d_SolveP3P, accuracy) { CV_solveP3P_Test test; test.safe_run();}
TEST(Calib3d_SolvePnPRansac, accuracy) { CV_solvePnPRansac_Test test; test.safe_run(); }
TEST(Calib3d_SolvePnP, accuracy) { CV_solvePnP_Test test; test.safe_run(); }
TEST(Calib3d_SolvePnP, accuracy_planar) { CV_solvePnP_Test test(true); test.safe_run(); }
TEST(Calib3d_SolvePnP, accuracy_planar_tag) { CV_solvePnP_Test test(true, true); test.safe_run(); }
TEST(Calib3d_SolvePnPRansac, concurrency)
{
...@@ -367,6 +715,7 @@
    camera_mat.at<float>(1, 0) = 0.f;
    camera_mat.at<float>(2, 0) = 0.f;
    camera_mat.at<float>(2, 1) = 0.f;
    camera_mat.at<float>(2, 2) = 1.f;

    Mat dist_coef(1, 8, CV_32F, cv::Scalar::all(0));
...@@ -455,7 +804,7 @@ TEST(Calib3d_SolvePnPRansac, input_type)
    EXPECT_LE(cvtest::norm(t1, t4, NORM_INF), 1e-6);
}

TEST(Calib3d_SolvePnPRansac, double_support)
{
    Matx33d intrinsics(5.4794130238156129e+002, 0., 2.9835545700043139e+002, 0.,
                       5.4817724002728005e+002, 2.3062194051986233e+002, 0., 0., 1.);
...@@ -466,15 +815,15 @@
    for (int i = 0; i < 10 ; i+=2)
    {
        points3d.push_back(cv::Point3d(5+i, 3, 2));
        points3dF.push_back(cv::Point3f(static_cast<float>(5+i), 3, 2));
        points3d.push_back(cv::Point3d(5+i, 3+i, 2+i));
        points3dF.push_back(cv::Point3f(static_cast<float>(5+i), static_cast<float>(3+i), static_cast<float>(2+i)));
        points2d.push_back(cv::Point2d(0, i));
        points2dF.push_back(cv::Point2f(0, static_cast<float>(i)));
        points2d.push_back(cv::Point2d(-i, i));
        points2dF.push_back(cv::Point2f(static_cast<float>(-i), static_cast<float>(i)));
    }
    Mat R, t, RF, tF;
    vector<int> inliers;

    solvePnPRansac(points3dF, points2dF, intrinsics, cv::Mat(), RF, tF, true, 100, 8.f, 0.999, inliers, cv::SOLVEPNP_P3P);
...@@ -484,6 +833,367 @@
    EXPECT_LE(cvtest::norm(t, Mat_<double>(tF), NORM_INF), 1e-3);
}
TEST(Calib3d_SolvePnP, input_type)
{
Matx33d intrinsics(5.4794130238156129e+002, 0., 2.9835545700043139e+002, 0.,
5.4817724002728005e+002, 2.3062194051986233e+002, 0., 0., 1.);
vector<Point3d> points3d_;
vector<Point3f> points3dF_;
//Cube
const float l = -0.1f;
//Front face
points3d_.push_back(Point3d(-l, -l, -l));
points3dF_.push_back(Point3f(-l, -l, -l));
points3d_.push_back(Point3d(l, -l, -l));
points3dF_.push_back(Point3f(l, -l, -l));
points3d_.push_back(Point3d(l, l, -l));
points3dF_.push_back(Point3f(l, l, -l));
points3d_.push_back(Point3d(-l, l, -l));
points3dF_.push_back(Point3f(-l, l, -l));
//Back face
points3d_.push_back(Point3d(-l, -l, l));
points3dF_.push_back(Point3f(-l, -l, l));
points3d_.push_back(Point3d(l, -l, l));
points3dF_.push_back(Point3f(l, -l, l));
points3d_.push_back(Point3d(l, l, l));
points3dF_.push_back(Point3f(l, l, l));
points3d_.push_back(Point3d(-l, l, l));
points3dF_.push_back(Point3f(-l, l, l));
Mat trueRvec = (Mat_<double>(3,1) << 0.1, -0.25, 0.467);
Mat trueTvec = (Mat_<double>(3,1) << -0.21, 0.12, 0.746);
for (int method = 0; method < SOLVEPNP_MAX_COUNT; method++)
{
vector<Point3d> points3d;
vector<Point2d> points2d;
vector<Point3f> points3dF;
vector<Point2f> points2dF;
if (method == SOLVEPNP_IPPE || method == SOLVEPNP_IPPE_SQUARE)
{
const float tagSize_2 = 0.05f / 2;
points3d.push_back(Point3d(-tagSize_2, tagSize_2, 0));
points3d.push_back(Point3d( tagSize_2, tagSize_2, 0));
points3d.push_back(Point3d( tagSize_2, -tagSize_2, 0));
points3d.push_back(Point3d(-tagSize_2, -tagSize_2, 0));
points3dF.push_back(Point3f(-tagSize_2, tagSize_2, 0));
points3dF.push_back(Point3f( tagSize_2, tagSize_2, 0));
points3dF.push_back(Point3f( tagSize_2, -tagSize_2, 0));
points3dF.push_back(Point3f(-tagSize_2, -tagSize_2, 0));
}
else if (method == SOLVEPNP_P3P || method == SOLVEPNP_AP3P)
{
points3d = vector<Point3d>(points3d_.begin(), points3d_.begin()+4);
points3dF = vector<Point3f>(points3dF_.begin(), points3dF_.begin()+4);
}
else
{
points3d = points3d_;
points3dF = points3dF_;
}
projectPoints(points3d, trueRvec, trueTvec, intrinsics, noArray(), points2d);
projectPoints(points3dF, trueRvec, trueTvec, intrinsics, noArray(), points2dF);
//solvePnP
{
Mat R, t, RF, tF;
solvePnP(points3dF, points2dF, Matx33f(intrinsics), Mat(), RF, tF, false, method);
solvePnP(points3d, points2d, intrinsics, Mat(), R, t, false, method);
//By default rvec and tvec must be returned in double precision
EXPECT_EQ(RF.type(), tF.type());
EXPECT_EQ(RF.type(), CV_64FC1);
EXPECT_EQ(R.type(), t.type());
EXPECT_EQ(R.type(), CV_64FC1);
EXPECT_LE(cvtest::norm(R, RF, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t, tF, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, RF, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, tF, NORM_INF), 1e-3);
}
{
Mat R1, t1, R2, t2;
solvePnP(points3dF, points2d, intrinsics, Mat(), R1, t1, false, method);
solvePnP(points3d, points2dF, intrinsics, Mat(), R2, t2, false, method);
//By default rvec and tvec must be returned in double precision
EXPECT_EQ(R1.type(), t1.type());
EXPECT_EQ(R1.type(), CV_64FC1);
EXPECT_EQ(R2.type(), t2.type());
EXPECT_EQ(R2.type(), CV_64FC1);
EXPECT_LE(cvtest::norm(R1, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t1, t2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t2, NORM_INF), 1e-3);
}
{
Mat R1(3,1,CV_32FC1), t1(3,1,CV_64FC1);
Mat R2(3,1,CV_64FC1), t2(3,1,CV_32FC1);
solvePnP(points3dF, points2d, intrinsics, Mat(), R1, t1, false, method);
solvePnP(points3d, points2dF, intrinsics, Mat(), R2, t2, false, method);
//If not null, rvec and tvec must be returned in the same precision
EXPECT_EQ(R1.type(), CV_32FC1);
EXPECT_EQ(t1.type(), CV_64FC1);
EXPECT_EQ(R2.type(), CV_64FC1);
EXPECT_EQ(t2.type(), CV_32FC1);
EXPECT_LE(cvtest::norm(Mat_<double>(R1), R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t1, Mat_<double>(t2), NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, Mat_<double>(R1), NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, Mat_<double>(t2), NORM_INF), 1e-3);
}
{
Matx31f R1, t2;
Matx31d R2, t1;
solvePnP(points3dF, points2d, intrinsics, Mat(), R1, t1, false, method);
solvePnP(points3d, points2dF, intrinsics, Mat(), R2, t2, false, method);
Matx31d R1d(R1(0), R1(1), R1(2));
Matx31d t2d(t2(0), t2(1), t2(2));
EXPECT_LE(cvtest::norm(R1d, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t1, t2d, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R1d, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t2d, NORM_INF), 1e-3);
}
//solvePnPGeneric
{
vector<Mat> Rs, ts, RFs, tFs;
int res1 = solvePnPGeneric(points3dF, points2dF, Matx33f(intrinsics), Mat(), RFs, tFs, false, (SolvePnPMethod)method);
int res2 = solvePnPGeneric(points3d, points2d, intrinsics, Mat(), Rs, ts, false, (SolvePnPMethod)method);
EXPECT_GT(res1, 0);
EXPECT_GT(res2, 0);
Mat R = Rs.front(), t = ts.front(), RF = RFs.front(), tF = tFs.front();
//By default rvecs and tvecs must be returned in double precision
EXPECT_EQ(RF.type(), tF.type());
EXPECT_EQ(RF.type(), CV_64FC1);
EXPECT_EQ(R.type(), t.type());
EXPECT_EQ(R.type(), CV_64FC1);
EXPECT_LE(cvtest::norm(R, RF, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t, tF, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, RF, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, tF, NORM_INF), 1e-3);
}
{
vector<Mat> R1s, t1s, R2s, t2s;
int res1 = solvePnPGeneric(points3dF, points2d, intrinsics, Mat(), R1s, t1s, false, (SolvePnPMethod)method);
int res2 = solvePnPGeneric(points3d, points2dF, intrinsics, Mat(), R2s, t2s, false, (SolvePnPMethod)method);
EXPECT_GT(res1, 0);
EXPECT_GT(res2, 0);
Mat R1 = R1s.front(), t1 = t1s.front(), R2 = R2s.front(), t2 = t2s.front();
//By default rvecs and tvecs must be returned in double precision
EXPECT_EQ(R1.type(), t1.type());
EXPECT_EQ(R1.type(), CV_64FC1);
EXPECT_EQ(R2.type(), t2.type());
EXPECT_EQ(R2.type(), CV_64FC1);
EXPECT_LE(cvtest::norm(R1, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t1, t2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t2, NORM_INF), 1e-3);
}
{
vector<Mat_<float> > R1s, t2s;
vector<Mat_<double> > R2s, t1s;
int res1 = solvePnPGeneric(points3dF, points2d, intrinsics, Mat(), R1s, t1s, false, (SolvePnPMethod)method);
int res2 = solvePnPGeneric(points3d, points2dF, intrinsics, Mat(), R2s, t2s, false, (SolvePnPMethod)method);
EXPECT_GT(res1, 0);
EXPECT_GT(res2, 0);
Mat R1 = R1s.front(), t1 = t1s.front();
Mat R2 = R2s.front(), t2 = t2s.front();
//If not null, rvecs and tvecs must be returned in the same precision
EXPECT_EQ(R1.type(), CV_32FC1);
EXPECT_EQ(t1.type(), CV_64FC1);
EXPECT_EQ(R2.type(), CV_64FC1);
EXPECT_EQ(t2.type(), CV_32FC1);
EXPECT_LE(cvtest::norm(Mat_<double>(R1), R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t1, Mat_<double>(t2), NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, Mat_<double>(R1), NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, Mat_<double>(t2), NORM_INF), 1e-3);
}
{
vector<Matx31f> R1s, t2s;
vector<Matx31d> R2s, t1s;
int res1 = solvePnPGeneric(points3dF, points2d, intrinsics, Mat(), R1s, t1s, false, (SolvePnPMethod)method);
int res2 = solvePnPGeneric(points3d, points2dF, intrinsics, Mat(), R2s, t2s, false, (SolvePnPMethod)method);
EXPECT_GT(res1, 0);
EXPECT_GT(res2, 0);
Matx31f R1 = R1s.front(), t2 = t2s.front();
Matx31d R2 = R2s.front(), t1 = t1s.front();
Matx31d R1d(R1(0), R1(1), R1(2)), t2d(t2(0), t2(1), t2(2));
EXPECT_LE(cvtest::norm(R1d, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t1, t2d, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R1d, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t2d, NORM_INF), 1e-3);
}
if (method == SOLVEPNP_P3P || method == SOLVEPNP_AP3P)
{
//solveP3P
{
vector<Mat> Rs, ts, RFs, tFs;
int res1 = solveP3P(points3dF, points2dF, Matx33f(intrinsics), Mat(), RFs, tFs, (SolvePnPMethod)method);
int res2 = solveP3P(points3d, points2d, intrinsics, Mat(), Rs, ts, (SolvePnPMethod)method);
EXPECT_GT(res1, 0);
EXPECT_GT(res2, 0);
Mat R = Rs.front(), t = ts.front(), RF = RFs.front(), tF = tFs.front();
//By default rvecs and tvecs must be returned in double precision
EXPECT_EQ(RF.type(), tF.type());
EXPECT_EQ(RF.type(), CV_64FC1);
EXPECT_EQ(R.type(), t.type());
EXPECT_EQ(R.type(), CV_64FC1);
EXPECT_LE(cvtest::norm(R, RF, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t, tF, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, RF, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, tF, NORM_INF), 1e-3);
}
{
vector<Mat> R1s, t1s, R2s, t2s;
int res1 = solveP3P(points3dF, points2d, intrinsics, Mat(), R1s, t1s, (SolvePnPMethod)method);
int res2 = solveP3P(points3d, points2dF, intrinsics, Mat(), R2s, t2s, (SolvePnPMethod)method);
EXPECT_GT(res1, 0);
EXPECT_GT(res2, 0);
Mat R1 = R1s.front(), t1 = t1s.front(), R2 = R2s.front(), t2 = t2s.front();
//By default rvecs and tvecs must be returned in double precision
EXPECT_EQ(R1.type(), t1.type());
EXPECT_EQ(R1.type(), CV_64FC1);
EXPECT_EQ(R2.type(), t2.type());
EXPECT_EQ(R2.type(), CV_64FC1);
EXPECT_LE(cvtest::norm(R1, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t1, t2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t2, NORM_INF), 1e-3);
}
{
vector<Mat_<float> > R1s, t2s;
vector<Mat_<double> > R2s, t1s;
int res1 = solveP3P(points3dF, points2d, intrinsics, Mat(), R1s, t1s, (SolvePnPMethod)method);
int res2 = solveP3P(points3d, points2dF, intrinsics, Mat(), R2s, t2s, (SolvePnPMethod)method);
EXPECT_GT(res1, 0);
EXPECT_GT(res2, 0);
Mat R1 = R1s.front(), t1 = t1s.front();
Mat R2 = R2s.front(), t2 = t2s.front();
//If not null, rvecs and tvecs must be returned in the same precision
EXPECT_EQ(R1.type(), CV_32FC1);
EXPECT_EQ(t1.type(), CV_64FC1);
EXPECT_EQ(R2.type(), CV_64FC1);
EXPECT_EQ(t2.type(), CV_32FC1);
EXPECT_LE(cvtest::norm(Mat_<double>(R1), R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t1, Mat_<double>(t2), NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, Mat_<double>(R1), NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, Mat_<double>(t2), NORM_INF), 1e-3);
}
{
vector<Matx31f> R1s, t2s;
vector<Matx31d> R2s, t1s;
int res1 = solveP3P(points3dF, points2d, intrinsics, Mat(), R1s, t1s, (SolvePnPMethod)method);
int res2 = solveP3P(points3d, points2dF, intrinsics, Mat(), R2s, t2s, (SolvePnPMethod)method);
EXPECT_GT(res1, 0);
EXPECT_GT(res2, 0);
Matx31f R1 = R1s.front(), t2 = t2s.front();
Matx31d R2 = R2s.front(), t1 = t1s.front();
Matx31d R1d(R1(0), R1(1), R1(2)), t2d(t2(0), t2(1), t2(2));
EXPECT_LE(cvtest::norm(R1d, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(t1, t2d, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R1d, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t1, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueRvec, R2, NORM_INF), 1e-3);
EXPECT_LE(cvtest::norm(trueTvec, t2d, NORM_INF), 1e-3);
}
}
}
}
TEST(Calib3d_SolvePnP, translation)
{
    Mat cameraIntrinsic = Mat::eye(3,3, CV_32FC1);
...@@ -548,13 +1258,16 @@ TEST(Calib3d_SolvePnP, iterativeInitialGuess3pts)
        solvePnP(p3d, p2d, intrinsics, noArray(), rvec_est, tvec_est, true, SOLVEPNP_ITERATIVE);

        cout << "rvec_ground_truth: " << rvec_ground_truth.t() << std::endl;
        cout << "rvec_est: " << rvec_est.t() << std::endl;
        cout << "tvec_ground_truth: " << tvec_ground_truth.t() << std::endl;
        cout << "tvec_est: " << tvec_est.t() << std::endl;

        EXPECT_LE(cvtest::norm(rvec_ground_truth, rvec_est, NORM_INF), 1e-6);
        EXPECT_LE(cvtest::norm(tvec_ground_truth, tvec_est, NORM_INF), 1e-6);
        EXPECT_EQ(rvec_est.type(), CV_64FC1);
        EXPECT_EQ(tvec_est.type(), CV_64FC1);
    }

    {
...@@ -579,13 +1292,230 @@
        solvePnP(p3d, p2d, intrinsics, noArray(), rvec_est, tvec_est, true, SOLVEPNP_ITERATIVE);

        cout << "rvec_ground_truth: " << rvec_ground_truth.t() << std::endl;
        cout << "rvec_est: " << rvec_est.t() << std::endl;
        cout << "tvec_ground_truth: " << tvec_ground_truth.t() << std::endl;
        cout << "tvec_est: " << tvec_est.t() << std::endl;
EXPECT_LE(cvtest::norm(rvec_ground_truth, rvec_est, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(tvec_ground_truth, tvec_est, NORM_INF), 1e-6);
EXPECT_EQ(rvec_est.type(), CV_32FC1);
EXPECT_EQ(tvec_est.type(), CV_32FC1);
}
}
TEST(Calib3d_SolvePnP, iterativeInitialGuess)
{
{
Matx33d intrinsics(605.4, 0.0, 317.35,
0.0, 601.2, 242.63,
0.0, 0.0, 1.0);
double L = 0.1;
vector<Point3d> p3d;
p3d.push_back(Point3d(-L, -L, 0.0));
p3d.push_back(Point3d(L, -L, 0.0));
p3d.push_back(Point3d(L, L, 0.0));
p3d.push_back(Point3d(-L, L, L/2));
p3d.push_back(Point3d(0, 0, -L/2));
Mat rvec_ground_truth = (Mat_<double>(3,1) << 0.3, -0.2, 0.75);
Mat tvec_ground_truth = (Mat_<double>(3,1) << 0.15, -0.2, 1.5);
vector<Point2d> p2d;
projectPoints(p3d, rvec_ground_truth, tvec_ground_truth, intrinsics, noArray(), p2d);
Mat rvec_est = (Mat_<double>(3,1) << 0.1, -0.1, 0.1);
Mat tvec_est = (Mat_<double>(3,1) << 0.0, -0.5, 1.0);
solvePnP(p3d, p2d, intrinsics, noArray(), rvec_est, tvec_est, true, SOLVEPNP_ITERATIVE);
cout << "rvec_ground_truth: " << rvec_ground_truth.t() << std::endl;
cout << "rvec_est: " << rvec_est.t() << std::endl;
cout << "tvec_ground_truth: " << tvec_ground_truth.t() << std::endl;
cout << "tvec_est: " << tvec_est.t() << std::endl;
EXPECT_LE(cvtest::norm(rvec_ground_truth, rvec_est, NORM_INF), 1e-6);
EXPECT_LE(cvtest::norm(tvec_ground_truth, tvec_est, NORM_INF), 1e-6);
EXPECT_EQ(rvec_est.type(), CV_64FC1);
EXPECT_EQ(tvec_est.type(), CV_64FC1);
}
{
Matx33f intrinsics(605.4f, 0.0f, 317.35f,
0.0f, 601.2f, 242.63f,
0.0f, 0.0f, 1.0f);
float L = 0.1f;
vector<Point3f> p3d;
p3d.push_back(Point3f(-L, -L, 0.0f));
p3d.push_back(Point3f(L, -L, 0.0f));
p3d.push_back(Point3f(L, L, 0.0f));
p3d.push_back(Point3f(-L, L, L/2));
p3d.push_back(Point3f(0, 0, -L/2));
Mat rvec_ground_truth = (Mat_<float>(3,1) << -0.75f, 0.4f, 0.34f);
Mat tvec_ground_truth = (Mat_<float>(3,1) << -0.15f, 0.35f, 1.58f);
vector<Point2f> p2d;
projectPoints(p3d, rvec_ground_truth, tvec_ground_truth, intrinsics, noArray(), p2d);
Mat rvec_est = (Mat_<float>(3,1) << -0.1f, 0.1f, 0.1f);
Mat tvec_est = (Mat_<float>(3,1) << 0.0f, 0.0f, 1.0f);
solvePnP(p3d, p2d, intrinsics, noArray(), rvec_est, tvec_est, true, SOLVEPNP_ITERATIVE);
cout << "rvec_ground_truth: " << rvec_ground_truth.t() << std::endl;
cout << "rvec_est: " << rvec_est.t() << std::endl;
cout << "tvec_ground_truth: " << tvec_ground_truth.t() << std::endl;
cout << "tvec_est: " << tvec_est.t() << std::endl;
        EXPECT_LE(cvtest::norm(rvec_ground_truth, rvec_est, NORM_INF), 1e-6);
        EXPECT_LE(cvtest::norm(tvec_ground_truth, tvec_est, NORM_INF), 1e-6);
EXPECT_EQ(rvec_est.type(), CV_32FC1);
EXPECT_EQ(tvec_est.type(), CV_32FC1);
}
}
TEST(Calib3d_SolvePnP, generic)
{
{
Matx33d intrinsics(605.4, 0.0, 317.35,
0.0, 601.2, 242.63,
0.0, 0.0, 1.0);
double L = 0.1;
vector<Point3d> p3d_;
p3d_.push_back(Point3d(-L, L, 0));
p3d_.push_back(Point3d(L, L, 0));
p3d_.push_back(Point3d(L, -L, 0));
p3d_.push_back(Point3d(-L, -L, 0));
p3d_.push_back(Point3d(-L, L, L/2));
p3d_.push_back(Point3d(0, 0, -L/2));
const int ntests = 10;
for (int numTest = 0; numTest < ntests; numTest++)
{
Mat rvec_ground_truth;
Mat tvec_ground_truth;
generatePose(p3d_, rvec_ground_truth, tvec_ground_truth, theRNG());
vector<Point2d> p2d_;
projectPoints(p3d_, rvec_ground_truth, tvec_ground_truth, intrinsics, noArray(), p2d_);
for (int method = 0; method < SOLVEPNP_MAX_COUNT; method++)
{
vector<Mat> rvecs_est;
vector<Mat> tvecs_est;
vector<Point3d> p3d;
vector<Point2d> p2d;
if (method == SOLVEPNP_P3P || method == SOLVEPNP_AP3P ||
method == SOLVEPNP_IPPE || method == SOLVEPNP_IPPE_SQUARE)
{
p3d = vector<Point3d>(p3d_.begin(), p3d_.begin()+4);
p2d = vector<Point2d>(p2d_.begin(), p2d_.begin()+4);
}
else
{
p3d = p3d_;
p2d = p2d_;
}
vector<double> reprojectionErrors;
solvePnPGeneric(p3d, p2d, intrinsics, noArray(), rvecs_est, tvecs_est, false, (SolvePnPMethod)method,
noArray(), noArray(), reprojectionErrors);
EXPECT_TRUE(!rvecs_est.empty());
EXPECT_TRUE(rvecs_est.size() == tvecs_est.size() && tvecs_est.size() == reprojectionErrors.size());
for (size_t i = 0; i < reprojectionErrors.size()-1; i++)
{
EXPECT_GE(reprojectionErrors[i+1], reprojectionErrors[i]);
}
bool isTestSuccess = false;
for (size_t i = 0; i < rvecs_est.size() && !isTestSuccess; i++) {
double rvecDiff = cvtest::norm(rvecs_est[i], rvec_ground_truth, NORM_L2);
double tvecDiff = cvtest::norm(tvecs_est[i], tvec_ground_truth, NORM_L2);
const double threshold = method == SOLVEPNP_P3P ? 1e-2 : 1e-4;
isTestSuccess = rvecDiff < threshold && tvecDiff < threshold;
}
EXPECT_TRUE(isTestSuccess);
}
}
}
{
Matx33f intrinsics(605.4f, 0.0f, 317.35f,
0.0f, 601.2f, 242.63f,
0.0f, 0.0f, 1.0f);
float L = 0.1f;
vector<Point3f> p3f_;
p3f_.push_back(Point3f(-L, L, 0));
p3f_.push_back(Point3f(L, L, 0));
p3f_.push_back(Point3f(L, -L, 0));
p3f_.push_back(Point3f(-L, -L, 0));
p3f_.push_back(Point3f(-L, L, L/2));
p3f_.push_back(Point3f(0, 0, -L/2));
const int ntests = 10;
for (int numTest = 0; numTest < ntests; numTest++)
{
Mat rvec_ground_truth;
Mat tvec_ground_truth;
generatePose(p3f_, rvec_ground_truth, tvec_ground_truth, theRNG());
vector<Point2f> p2f_;
projectPoints(p3f_, rvec_ground_truth, tvec_ground_truth, intrinsics, noArray(), p2f_);
for (int method = 0; method < SOLVEPNP_MAX_COUNT; method++)
{
vector<Mat> rvecs_est;
vector<Mat> tvecs_est;
vector<Point3f> p3f;
vector<Point2f> p2f;
if (method == SOLVEPNP_P3P || method == SOLVEPNP_AP3P ||
method == SOLVEPNP_IPPE || method == SOLVEPNP_IPPE_SQUARE)
{
p3f = vector<Point3f>(p3f_.begin(), p3f_.begin()+4);
p2f = vector<Point2f>(p2f_.begin(), p2f_.begin()+4);
}
else
{
p3f = p3f_;
p2f = p2f_;
}
vector<double> reprojectionErrors;
solvePnPGeneric(p3f, p2f, intrinsics, noArray(), rvecs_est, tvecs_est, false, (SolvePnPMethod)method,
noArray(), noArray(), reprojectionErrors);
EXPECT_TRUE(!rvecs_est.empty());
EXPECT_TRUE(rvecs_est.size() == tvecs_est.size() && tvecs_est.size() == reprojectionErrors.size());
for (size_t i = 0; i < reprojectionErrors.size()-1; i++)
{
EXPECT_GE(reprojectionErrors[i+1], reprojectionErrors[i]);
}
bool isTestSuccess = false;
for (size_t i = 0; i < rvecs_est.size() && !isTestSuccess; i++) {
double rvecDiff = cvtest::norm(rvecs_est[i], rvec_ground_truth, NORM_L2);
double tvecDiff = cvtest::norm(tvecs_est[i], tvec_ground_truth, NORM_L2);
const double threshold = method == SOLVEPNP_P3P ? 1e-2 : 1e-4;
isTestSuccess = rvecDiff < threshold && tvecDiff < threshold;
}
EXPECT_TRUE(isTestSuccess);
}
}
} }
}} // namespace