Commit a1460ef3 authored by edgarriba

adding python bindings

parent 64204eac
......@@ -69,6 +69,7 @@ ocv_add_module(sfm
opencv_calib3d
opencv_features2d
opencv_xfeatures2d
WRAP python
)
......
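
The WRAP python entry above is what instructs OpenCV's binding generator to emit Python wrappers for this module. A minimal sketch of how the result might be checked after building (assuming opencv_contrib was built with Python support and the module surfaces as cv2.sfm, which is an assumption here, not something this diff guarantees):

    # Sketch: verify that the sfm bindings were generated (assumes a build of
    # opencv + opencv_contrib with Python enabled; cv2.sfm is the expected
    # namespace but is an assumption, not verified against this build).
    import cv2

    print(hasattr(cv2, "sfm"))                                       # True if the module was wrapped
    print([name for name in dir(cv2.sfm) if "Fundamental" in name])  # list some wrapped functions
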
......@@ -54,7 +54,7 @@ namespace sfm
forming an approximately symmetric circular cloud of points of radius 1 about the origin.\n
Reference: @cite HartleyZ00 4.4.4 pag.109
*/
CV_EXPORTS
CV_EXPORTS_W
void
preconditionerFromPoints( InputArray points,
OutputArray T );
......@@ -67,7 +67,7 @@ preconditionerFromPoints( InputArray points,
bringing the centroid to the origin with an average centroid \f$(1,1,1)^T\f$.\n
Reference: @cite HartleyZ00 4.4.4 pag.107.
*/
CV_EXPORTS
CV_EXPORTS_W
void
isotropicPreconditionerFromPoints( InputArray points,
OutputArray T );
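
With CV_EXPORTS_W in place, the two preconditioning helpers above become callable from Python, the OutputArray turning into a return value. A hedged sketch (the cv2.sfm namespace and the 2xN float64 point layout are assumptions):

    # Sketch only: points are passed as a 2xN float64 matrix, the
    # column-per-point convention used by this module's C++ side.
    import cv2
    import numpy as np

    points = np.random.rand(2, 10)                             # 10 synthetic 2d points
    T = cv2.sfm.preconditionerFromPoints(points)               # 3x3 normalizing transform
    T_iso = cv2.sfm.isotropicPreconditionerFromPoints(points)  # isotropic variant
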
......@@ -77,7 +77,7 @@ isotropicPreconditionerFromPoints( InputArray points,
@param T Input 3x3 transformation matrix such that \f$x = T*X\f$, where \f$X\f$ are the points to transform and \f$x\f$ the transformed points.
@param transformed_points Output vector of N-dimensional transformed points.
*/
CV_EXPORTS
CV_EXPORTS_W
void
applyTransformationToPoints( InputArray points,
InputArray T,
......@@ -92,7 +92,7 @@ applyTransformationToPoints( InputArray points,
This operation is an essential step before applying the DLT algorithm so that the result can be considered optimal.\n
Reference: @cite HartleyZ00 4.4.4 pag.109
*/
CV_EXPORTS
CV_EXPORTS_W
void
normalizePoints( InputArray points,
OutputArray normalized_points,
......@@ -107,7 +107,7 @@ normalizePoints( InputArray points,
This operation is an essential step before applying the DLT algorithm so that the result can be considered optimal.\n
Reference: @cite HartleyZ00 4.4.4 pag.107.
*/
CV_EXPORTS
CV_EXPORTS_W
void
normalizeIsotropicPoints( InputArray points,
OutputArray normalized_points,
......
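
The remaining conditioning functions pair the preconditioner with its application, so from Python both outputs would come back as a tuple. Continuing the sketch above with the same imports and points array:

    # Sketch: normalizePoints / normalizeIsotropicPoints return the normalized
    # points together with the 3x3 transform T that produced them.
    normalized, T = cv2.sfm.normalizePoints(points)
    normalized_iso, T_iso = cv2.sfm.normalizeIsotropicPoints(points)
    # applyTransformationToPoints reproduces the first output from the second.
    normalized_again = cv2.sfm.applyTransformationToPoints(points, T)
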
......@@ -53,7 +53,7 @@ namespace sfm
@param P1 Output 3x4 one possible projection matrix.
@param P2 Output 3x4 another possible projection matrix.
*/
CV_EXPORTS
CV_EXPORTS_W
void
projectionsFromFundamental( InputArray F,
OutputArray P1,
......@@ -64,7 +64,7 @@ projectionsFromFundamental( InputArray F,
@param P2 Input 3x4 second projection matrix.
@param F Output 3x3 fundamental matrix.
*/
CV_EXPORTS
CV_EXPORTS_W
void
fundamentalFromProjections( InputArray P1,
InputArray P2,
......@@ -78,7 +78,7 @@ fundamentalFromProjections( InputArray P1,
Uses the normalized 8-point fundamental matrix solver.
Reference: @cite HartleyZ00 11.2 pag.281 (x1 = x, x2 = x')
*/
CV_EXPORTS
CV_EXPORTS_W
void
normalizedEightPointSolver( InputArray x1,
InputArray x2,
......@@ -96,7 +96,7 @@ normalizedEightPointSolver( InputArray x1,
of the second one assuming the first one to be at the origin.
If T1 and T2 are the camera motions, the computed relative motion is \f$T = T_2 T_1^{-1}\f$
*/
CV_EXPORTS
CV_EXPORTS_W
void
relativeCameraMotion( InputArray R1,
InputArray t1,
......@@ -112,7 +112,7 @@ relativeCameraMotion( InputArray R1,
Reference: @cite HartleyZ00 9.6 pag 259 (Result 9.19)
*/
CV_EXPORTS
CV_EXPORTS_W
void
motionFromEssential( InputArray E,
OutputArrayOfArrays Rs,
......@@ -131,7 +131,7 @@ motionFromEssential( InputArray E,
Reference: See @cite HartleyZ00 9.6 pag 259 (9.6.3 Geometrical interpretation of the 4 solutions).
*/
CV_EXPORTS
CV_EXPORTS_W
int motionFromEssentialChooseSolution( InputArrayOfArrays Rs,
InputArrayOfArrays ts,
InputArray K1,
......@@ -147,7 +147,7 @@ int motionFromEssentialChooseSolution( InputArrayOfArrays Rs,
Reference: @cite HartleyZ00 9.6 pag 257 (formula 9.12) or http://ai.stanford.edu/~birch/projective/node20.html
*/
CV_EXPORTS
CV_EXPORTS_W
void
fundamentalFromEssential( InputArray E,
InputArray K1,
......@@ -162,7 +162,7 @@ fundamentalFromEssential( InputArray E,
Reference: @cite HartleyZ00 9.6 pag 257 (formula 9.12)
*/
CV_EXPORTS
CV_EXPORTS_W
void
essentialFromFundamental( InputArray F,
InputArray K1,
......@@ -178,7 +178,7 @@ essentialFromFundamental( InputArray F,
Reference: @cite HartleyZ00 9.6 pag 257 (formula 9.12)
*/
CV_EXPORTS
CV_EXPORTS_W
void
essentialFromRt( InputArray R1,
InputArray t1,
......@@ -192,7 +192,7 @@ essentialFromRt( InputArray R1,
By default divides the fundamental matrix by its L2 norm.
*/
CV_EXPORTS
CV_EXPORTS_W
void
normalizeFundamental( InputArray F,
OutputArray F_normalized );
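
Taken together, the functions wrapped in this header cover the usual two-view chain: estimate F from correspondences, upgrade it to E with the calibration matrices, decompose E into the four candidate motions, and keep the one that places the points in front of both cameras. A hedged Python sketch of that chain with synthetic data (the cv2.sfm names and the 2xN layout are assumptions; real correspondences would replace the random arrays):

    # Sketch of the two-view pipeline exposed by this header (synthetic data).
    x1 = np.random.rand(2, 20)
    x2 = np.random.rand(2, 20)
    K1 = K2 = np.eye(3)

    F = cv2.sfm.normalizedEightPointSolver(x1, x2)      # normalized 8-point solver
    F = cv2.sfm.normalizeFundamental(F)                 # divide F by its L2 norm
    E = cv2.sfm.essentialFromFundamental(F, K1, K2)     # E = K2^T * F * K1
    Rs, ts = cv2.sfm.motionFromEssential(E)             # four candidate (R, t) pairs
    best = cv2.sfm.motionFromEssentialChooseSolution(Rs, ts, K1, x1[:, :1], K2, x2[:, :1])
    R, t = Rs[best], ts[best]                           # solution giving positive depth
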
......@@ -207,7 +207,7 @@ normalizeFundamental( InputArray F,
Find the best transformation such that xp=projection*(s*R*x+t) (same as pose estimation, EPnP).
The routines below are only for the orthographic case for now.
*/
CV_EXPORTS
CV_EXPORTS_W
void
computeOrientation( InputArrayOfArrays x1,
InputArrayOfArrays x2,
......
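
The motion-composition helpers go the other way: from two absolute poses they produce the relative motion, and from that motion (or directly from E) the corresponding fundamental matrix. Sketch under the same assumptions, with placeholder poses:

    # Sketch: composing poses and mapping E back to F (placeholder poses).
    R1, t1 = np.eye(3), np.zeros((3, 1))
    R2, t2 = np.eye(3), np.array([[1.0], [0.0], [0.0]])

    R, t = cv2.sfm.relativeCameraMotion(R1, t1, R2, t2)  # T = T2 * T1^-1
    E = cv2.sfm.essentialFromRt(R1, t1, R2, t2)          # E from the two poses
    F = cv2.sfm.fundamentalFromEssential(E, K1, K2)      # F = K2^-T * E * K1^-1
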
......@@ -55,7 +55,7 @@ namespace sfm
It computes the result in the same way @ref reduce would, but using a \a Variance function.
*/
CV_EXPORTS
CV_EXPORTS_W
void
meanAndVarianceAlongRows( InputArray A,
OutputArray mean,
......@@ -66,7 +66,7 @@ meanAndVarianceAlongRows( InputArray A,
Reference: @cite HartleyZ00, p581, equation (A4.5).
*/
CV_EXPORTS
CV_EXPORTS_W
Mat
skew( InputArray x );
......
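
Both numeric helpers are small enough that a one-line call shows the intent; skew() builds the 3x3 cross-product matrix [x]_x. Sketch, same assumptions as above:

    # Sketch: row-wise statistics and the skew-symmetric cross-product matrix.
    A = np.random.rand(3, 100)
    mean, variance = cv2.sfm.meanAndVarianceAlongRows(A)
    S = cv2.sfm.skew(np.array([1.0, 2.0, 3.0]))          # S @ v == np.cross([1, 2, 3], v)
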
......@@ -50,7 +50,7 @@ namespace sfm
@param src Input vector of N-dimensional points.
@param dst Output vector of N-1-dimensional points.
*/
CV_EXPORTS
CV_EXPORTS_W
void
homogeneousToEuclidean(InputArray src, OutputArray dst);
......@@ -58,7 +58,7 @@ homogeneousToEuclidean(InputArray src, OutputArray dst);
@param src Input vector of N-dimensional points.
@param dst Output vector of N+1-dimensional points.
*/
CV_EXPORTS
CV_EXPORTS_W
void
euclideanToHomogeneous(InputArray src, OutputArray dst);
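
The two conversions are inverses of each other up to scale; from Python the destination array is simply returned. Sketch:

    # Sketch: 3xN euclidean points <-> 4xN homogeneous points (column-per-point
    # layout assumed, matching the rest of the module).
    pts3d = np.random.rand(3, 5)
    hom = cv2.sfm.euclideanToHomogeneous(pts3d)          # appends a row of ones
    euc = cv2.sfm.homogeneousToEuclidean(hom)            # divides by the last row
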
......@@ -71,7 +71,7 @@ euclideanToHomogeneous(InputArray src, OutputArray dst);
This function estimates the projection matrix by solving the following equation: \f$P = K * [R|t]\f$
*/
CV_EXPORTS
CV_EXPORTS_W
void
projectionFromKRt(InputArray K, InputArray R, InputArray t, OutputArray P);
......@@ -83,7 +83,7 @@ projectionFromKRt(InputArray K, InputArray R, InputArray t, OutputArray P);
Reference: @cite HartleyZ00 A4.1.1 pag.579
*/
CV_EXPORTS
CV_EXPORTS_W
void
KRtFromProjection( InputArray P, OutputArray K, OutputArray R, OutputArray t );
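
projectionFromKRt and KRtFromProjection form a round trip: compose P from K, R, t, then recover the factors again via the decomposition referenced above. Sketch:

    # Sketch: compose and decompose a 3x4 projection matrix.
    K = np.array([[800.0, 0.0, 320.0],
                  [0.0, 800.0, 240.0],
                  [0.0, 0.0, 1.0]])
    R = np.eye(3)
    t = np.zeros((3, 1))

    P = cv2.sfm.projectionFromKRt(K, R, t)               # P = K * [R|t]
    K_out, R_out, t_out = cv2.sfm.KRtFromProjection(P)   # recover the factors
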
......@@ -92,7 +92,7 @@ KRtFromProjection( InputArray P, OutputArray K, OutputArray R, OutputArray t );
@param t Input 3x1 translation vector.
@param X Input 3x1 or 4x1 vector with the 3d point.
*/
CV_EXPORTS
CV_EXPORTS_W
double
depth( InputArray R, InputArray t, InputArray X);
......
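
depth() returns a plain double, so in Python it would simply return a float; a positive value means the point lies in front of the camera. Sketch, reusing the pose from the previous snippet:

    # Sketch: signed depth of a 3d point with respect to a camera pose.
    X = np.array([0.0, 0.0, 5.0])                        # 3-vector, float64
    d = cv2.sfm.depth(R, t, X)                           # > 0: X is in front of the camera
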
......@@ -69,7 +69,7 @@ namespace sfm
@note
- Tracks must be as precise as possible. It does not handle outliers and is very sensitive to them.
*/
CV_EXPORTS
/* CV_EXPORTS_W */ // error: ‘reconstruct’ is not a member of ‘cv::sfm’
void
reconstruct(InputArrayOfArrays points2d, OutputArray Ps, OutputArray points3d, InputOutputArray K,
bool is_projective = false);
......@@ -88,7 +88,7 @@ reconstruct(InputArrayOfArrays points2d, OutputArray Ps, OutputArray points3d, I
- Tracks must be as precise as possible. It does not handle outliers and is very sensitive to them.
- To see a working example for camera motion reconstruction, check the following tutorial: @ref tutorial_sfm_trajectory_estimation.
*/
CV_EXPORTS
/* CV_EXPORTS_W */ // error: ‘reconstruct’ is not a member of ‘cv::sfm’
void
reconstruct(InputArrayOfArrays points2d, OutputArray Rs, OutputArray Ts, InputOutputArray K,
OutputArray points3d, bool is_projective = false);
......@@ -106,7 +106,7 @@ reconstruct(InputArrayOfArrays points2d, OutputArray Rs, OutputArray Ts, InputOu
- The images must be ordered as if they were an image sequence. Additionally, each frame should be as close as possible to the previous and the next one.
- For now, DAISY features are used to compute the 2d point tracks, and it only works for 3-4 images.
*/
CV_EXPORTS
/* CV_EXPORTS_W */ // error: ‘vector_string’ was not declared in this scope
void
reconstruct(const std::vector<std::string> images, OutputArray Ps, OutputArray points3d,
InputOutputArray K, bool is_projective = false);
......@@ -126,7 +126,7 @@ reconstruct(const std::vector<std::string> images, OutputArray Ps, OutputArray p
- For now, DAISY features are used to compute the 2d point tracks, and it only works for 3-4 images.
- To see a working example for scene reconstruction, check the following tutorial: @ref tutorial_sfm_scene_reconstruction.
*/
CV_EXPORTS
/* CV_EXPORTS_W */ // error: ‘vector_string’ was not declared in this scope
void
reconstruct(const std::vector<std::string> images, OutputArray Rs, OutputArray Ts,
InputOutputArray K, OutputArray points3d, bool is_projective = false);
......
......@@ -62,7 +62,7 @@ namespace sfm
The fundamental solver relies on the 8-point solution. Returns the best error (in pixels) associated with the solution F.
*/
CV_EXPORTS
CV_EXPORTS_W
double
fundamentalFromCorrespondences8PointRobust( InputArray x1,
InputArray x2,
......@@ -85,7 +85,7 @@ fundamentalFromCorrespondences8PointRobust( InputArray x1,
The fundamental solver relies on the 7-point solution. Returns the best error (in pixels) associated with the solution F.
*/
CV_EXPORTS
CV_EXPORTS_W
double
fundamentalFromCorrespondences7PointRobust( InputArray x1,
InputArray x2,
......
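
Unlike the plain 8-point solver, the robust variants also report an error and an inlier set. The diff truncates the parameter lists, so the max_error threshold and the output order in the sketch below are assumptions about the full signatures:

    # Sketch (assumed signature): RANSAC-style fundamental matrix estimation.
    x1 = np.random.rand(2, 30)
    x2 = np.random.rand(2, 30)
    err8, F8, inliers8 = cv2.sfm.fundamentalFromCorrespondences8PointRobust(x1, x2, 1.0)
    err7, F7, inliers7 = cv2.sfm.fundamentalFromCorrespondences7PointRobust(x1, x2, 1.0)
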
......@@ -68,7 +68,10 @@ enum {
If the camera model is SFM_DISTORTION_MODEL_DIVISION, only _polynomial_k1 and _polynomial_k2
need to be provided; they will be assigned as the division distortion parameters.
*/
typedef struct libmv_CameraIntrinsicsOptions {
class CV_EXPORTS_W_SIMPLE libmv_CameraIntrinsicsOptions
{
public:
CV_WRAP
libmv_CameraIntrinsicsOptions(const int _distortion_model=0,
const double _focal_length=0,
const double _principal_point_x=0,
......@@ -87,8 +90,8 @@ typedef struct libmv_CameraIntrinsicsOptions {
polynomial_k1(_polynomial_k1),
polynomial_k2(_polynomial_k2),
polynomial_k3(_polynomial_k3),
division_k1(0),
division_k2(0)
division_k1(_polynomial_p1),
division_k2(_polynomial_p2)
{
if ( _distortion_model == SFM_DISTORTION_MODEL_DIVISION )
{
......@@ -98,18 +101,18 @@ typedef struct libmv_CameraIntrinsicsOptions {
}
// Common settings of all distortion models.
int distortion_model;
int image_width, image_height;
double focal_length;
double principal_point_x, principal_point_y;
CV_PROP_RW int distortion_model;
CV_PROP_RW int image_width, image_height;
CV_PROP_RW double focal_length;
CV_PROP_RW double principal_point_x, principal_point_y;
// Radial distortion model.
double polynomial_k1, polynomial_k2, polynomial_k3;
double polynomial_p1, polynomial_p2;
CV_PROP_RW double polynomial_k1, polynomial_k2, polynomial_k3;
CV_PROP_RW double polynomial_p1, polynomial_p2;
// Division distortion model.
double division_k1, division_k2;
} libmv_CameraIntrinsicsOptions;
CV_PROP_RW double division_k1, division_k2;
};
/** @brief All internal camera parameters that libmv is able to refine.
......@@ -128,7 +131,10 @@ enum { SFM_REFINE_FOCAL_LENGTH = (1 << 0), // libmv::BUNDLE_FOCAL_LENGT
@param _select_keyframes allows the initial keyframes to be selected automatically. If 1, autoselection is enabled; if 0, it is disabled.
@param _verbosity_level verbosity level for Glog logging. If -1, logs are disabled; otherwise the given integer is used as the log level.
*/
typedef struct libmv_ReconstructionOptions {
class CV_EXPORTS_W_SIMPLE libmv_ReconstructionOptions
{
public:
CV_WRAP
libmv_ReconstructionOptions(const int _keyframe1=1,
const int _keyframe2=2,
const int _refine_intrinsics=1,
......@@ -138,21 +144,25 @@ typedef struct libmv_ReconstructionOptions {
refine_intrinsics(_refine_intrinsics),
select_keyframes(_select_keyframes),
verbosity_level(_verbosity_level) {}
int keyframe1, keyframe2;
int refine_intrinsics;
int select_keyframes;
int verbosity_level;
} libmv_ReconstructionOptions;
CV_PROP_RW int keyframe1, keyframe2;
CV_PROP_RW int refine_intrinsics;
CV_PROP_RW int select_keyframes;
CV_PROP_RW int verbosity_level;
};
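
Because both option structs are now wrapped with CV_EXPORTS_W_SIMPLE and their fields with CV_PROP_RW, Python code should be able to construct them and read or write every field directly. Sketch (the exact Python-side class names emitted by the generator are an assumption):

    # Sketch: constructing and tweaking the wrapped option structs.
    intrinsics = cv2.sfm.libmv_CameraIntrinsicsOptions()
    intrinsics.focal_length = 1200.0
    intrinsics.principal_point_x = 640.0
    intrinsics.principal_point_y = 360.0

    recon_opts = cv2.sfm.libmv_ReconstructionOptions()
    recon_opts.keyframe1, recon_opts.keyframe2 = 1, 2
    recon_opts.verbosity_level = -1                      # disable Glog output
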
/** @brief Base class BaseSFM declares a common API that would be used in a typical scene reconstruction scenario
*/
class CV_EXPORTS BaseSFM
class CV_EXPORTS_W BaseSFM
{
public:
virtual ~BaseSFM() {};
CV_WRAP
virtual void run(InputArrayOfArrays points2d) = 0;
CV_WRAP
virtual void run(InputArrayOfArrays points2d, InputOutputArray K, OutputArray Rs,
OutputArray Ts, OutputArray points3d) = 0;
......@@ -160,21 +170,23 @@ public:
virtual void run(const std::vector <std::string> &images, InputOutputArray K, OutputArray Rs,
OutputArray Ts, OutputArray points3d) = 0;
virtual double getError() const = 0;
virtual void getPoints(OutputArray points3d) = 0;
virtual cv::Mat getIntrinsics() const = 0;
virtual void getCameras(OutputArray Rs, OutputArray Ts) = 0;
CV_WRAP virtual double getError() const = 0;
CV_WRAP virtual void getPoints(OutputArray points3d) = 0;
CV_WRAP virtual cv::Mat getIntrinsics() const = 0;
CV_WRAP virtual void getCameras(OutputArray Rs, OutputArray Ts) = 0;
CV_WRAP
virtual void
setReconstructionOptions(const libmv_ReconstructionOptions &libmv_reconstruction_options) = 0;
CV_WRAP
virtual void
setCameraIntrinsicOptions(const libmv_CameraIntrinsicsOptions &libmv_camera_intrinsics_options) = 0;
};
/** @brief SFMLibmvEuclideanReconstruction class provides an interface to the Libmv Structure From Motion pipeline.
*/
class CV_EXPORTS SFMLibmvEuclideanReconstruction : public BaseSFM
class CV_EXPORTS_W SFMLibmvEuclideanReconstruction : public BaseSFM
{
public:
/** @brief Calls the pipeline in order to perform Euclidean reconstruction.
......@@ -183,6 +195,7 @@ public:
@note
- Tracks must be as precise as possible. It does not handle outliers and is very sensitive to them.
*/
CV_WRAP
virtual void run(InputArrayOfArrays points2d) = 0;
/** @brief Calls the pipeline in order to perform Euclidean reconstruction.
......@@ -195,6 +208,7 @@ public:
@note
- Tracks must be as precise as possible. It does not handle outliers and is very sensitive to them.
*/
CV_WRAP
virtual void run(InputArrayOfArrays points2d, InputOutputArray K, OutputArray Rs,
OutputArray Ts, OutputArray points3d) = 0;
......@@ -205,6 +219,7 @@ public:
- The images must be ordered as if they were an image sequence. Additionally, each frame should be as close as possible to the previous and the next one.
- For now, DAISY features are used to compute the 2d point tracks, and it only works for 3-4 images.
*/
/* CV_WRAP */ // error: ‘vector_string’ was not declared in this scope
virtual void run(const std::vector <std::string> &images) = 0;
/** @brief Calls the pipeline in order to perform Euclidean reconstruction.
......@@ -218,32 +233,38 @@ public:
- The images must be ordered as if they were an image sequence. Additionally, each frame should be as close as possible to the previous and the next one.
- For now, DAISY features are used to compute the 2d point tracks, and it only works for 3-4 images.
*/
/* CV_WRAP */ // error: ‘vector_string’ was not declared in this scope
virtual void run(const std::vector <std::string> &images, InputOutputArray K, OutputArray Rs,
OutputArray Ts, OutputArray points3d) = 0;
/** @brief Returns the computed reprojection error.
*/
CV_WRAP
virtual double getError() const = 0;
/** @brief Returns the estimated 3d points.
@param points3d Output array with estimated 3d points.
*/
CV_WRAP
virtual void getPoints(OutputArray points3d) = 0;
/** @brief Returns the refined camera calibration matrix.
*/
CV_WRAP
virtual cv::Mat getIntrinsics() const = 0;
/** @brief Returns the estimated camera extrinsic parameters.
@param Rs Output vector of 3x3 rotations of the camera.
@param Ts Output vector of 3x1 translations of the camera.
*/
CV_WRAP
virtual void getCameras(OutputArray Rs, OutputArray Ts) = 0;
/** @brief Setter method for reconstruction options.
@param libmv_reconstruction_options struct with reconstruction options such as initial keyframes,
automatic keyframe selection, parameters to refine and the verbosity level.
*/
CV_WRAP
virtual void
setReconstructionOptions(const libmv_ReconstructionOptions &libmv_reconstruction_options) = 0;
......@@ -251,14 +272,16 @@ public:
@param libmv_camera_intrinsics_options struct with camera intrinsic options such as camera model and
the internal camera parameters.
*/
CV_WRAP
virtual void
setCameraIntrinsicOptions(const libmv_CameraIntrinsicsOptions &libmv_camera_intrinsics_options) = 0;
/** @brief Creates an instance of the SFMLibmvEuclideanReconstruction class. Initializes Libmv. */
/* CV_WRAP */ // error: ‘SFMLibmvEuclideanReruction’ was not declared in this scope
static Ptr<SFMLibmvEuclideanReconstruction>
create(const libmv_CameraIntrinsicsOptions &camera_instrinsic_options=libmv_CameraIntrinsicsOptions(),
const libmv_ReconstructionOptions &reconstruction_options=libmv_ReconstructionOptions());
};
create(const libmv_CameraIntrinsicsOptions &camera_instrinsic_options=libmv_CameraIntrinsicsOptions(),
const libmv_ReconstructionOptions &reconstruction_options=libmv_ReconstructionOptions());
};
//! @} sfm
......
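
BaseSFM and SFMLibmvEuclideanReconstruction now carry CV_WRAP on most methods, but the static create() factory remains unwrapped in this commit (see the commented-out CV_WRAP above), so there is not yet a pure-Python way to obtain an instance. The sketch below shows what the run/getter calls would look like once a factory is exposed; the create call is hypothetical and the option objects are reused from the earlier sketch:

    # Hypothetical sketch: the factory is NOT wrapped by this commit, so the
    # first line is an assumption about a future binding, not working code.
    pipeline = cv2.sfm.SFMLibmvEuclideanReconstruction_create()
    pipeline.setCameraIntrinsicOptions(intrinsics)
    pipeline.setReconstructionOptions(recon_opts)

    tracks = [np.random.rand(2, 50) for _ in range(3)]   # one 2xN track array per view
    pipeline.run(tracks)
    points3d = pipeline.getPoints()
    Rs, Ts = pipeline.getCameras()
    print("reprojection error:", pipeline.getError())
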
......@@ -54,7 +54,7 @@ namespace sfm
Triangulates the 3d position of 2d correspondences between several images.
Reference: Internally it uses the DLT method @cite HartleyZ00 12.2 pag.312
*/
CV_EXPORTS
CV_EXPORTS_W
void
triangulatePoints(InputArrayOfArrays points2d, InputArrayOfArrays projection_matrices,
OutputArray points3d);
......
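
Finally, triangulation takes one 2xN point set and one 3x4 projection matrix per view and returns the reconstructed 3d points. Sketch, same assumptions as the earlier snippets:

    # Sketch: DLT triangulation across two views (synthetic inputs).
    P1 = np.hstack([np.eye(3), np.zeros((3, 1))])
    P2 = np.hstack([np.eye(3), np.array([[1.0], [0.0], [0.0]])])
    views = [np.random.rand(2, 20), np.random.rand(2, 20)]
    points3d = cv2.sfm.triangulatePoints(views, [P1, P2])
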