Commit e5763fae authored by Hamdi Sahloul

A pythonic interface for surface_matching module

parent 75fcfa60
@@ -149,7 +149,7 @@ public:
    *
    * \details It is assumed that the model is registered on the scene. The scene remains static while the model transforms. The output poses transform the model onto the scene. Because of the point-to-plane minimization, the scene is expected to have normals available (Nx6).
    */
-  int registerModelToScene(const Mat& srcPC, const Mat& dstPC, std::vector<Pose3DPtr>& poses);
+  CV_WRAP int registerModelToScene(const Mat& srcPC, const Mat& dstPC, CV_IN_OUT std::vector<Pose3DPtr>& poses);
 
 private:
   float m_tolerance;
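Because the pose vector is tagged CV_IN_OUT, the generated Python binding returns the refined poses in the result tuple rather than mutating the list argument. A minimal sketch of the resulting call shape (file names are placeholders; both clouds must be Nx6, i.e. points plus normals):

import cv2 as cv

# Placeholder files; loadPLYSimple(path, 1) also loads per-point normals,
# which point-to-plane ICP requires on the scene side.
model = cv.ppf_match_3d.loadPLYSimple("model.ply", 1)
scene = cv.ppf_match_3d.loadPLYSimple("scene.ply", 1)

detector = cv.ppf_match_3d_PPF3DDetector(0.025)
detector.trainModel(model)
poses = detector.match(scene)

icp = cv.ppf_match_3d_ICP(100)  # at most 100 ICP iterations per pose
# CV_IN_OUT: the status code and the refined poses come back as a tuple
retval, refined = icp.registerModelToScene(model, scene, poses)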
@@ -67,10 +67,10 @@ typedef Ptr<PoseCluster3D> PoseCluster3DPtr;
  * various helper methods to work with poses
  *
  */
-class CV_EXPORTS Pose3D
+class CV_EXPORTS_W Pose3D
 {
 public:
-  Pose3D()
+  CV_WRAP Pose3D()
   {
     alpha=0;
     modelIndex=0;
@@ -80,7 +80,7 @@ public:
     pose = Matx44d::all(0);
   }
 
-  Pose3D(double Alpha, size_t ModelIndex=0, size_t NumVotes=0)
+  CV_WRAP Pose3D(double Alpha, size_t ModelIndex=0, size_t NumVotes=0)
   {
     alpha = Alpha;
     modelIndex = ModelIndex;
@@ -94,24 +94,24 @@ public:
    * \brief Updates the pose with the new one
    * \param [in] NewPose New pose to overwrite
    */
-  void updatePose(Matx44d& NewPose);
+  CV_WRAP void updatePose(Matx44d& NewPose);
 
   /**
    * \brief Updates the pose with the new one
    */
-  void updatePose(Matx33d& NewR, Vec3d& NewT);
+  CV_WRAP void updatePose(Matx33d& NewR, Vec3d& NewT);
 
   /**
    * \brief Updates the pose with the new one, but this time using quaternions to represent rotation
    */
-  void updatePoseQuat(Vec4d& Q, Vec3d& NewT);
+  CV_WRAP void updatePoseQuat(Vec4d& Q, Vec3d& NewT);
 
   /**
    * \brief Left multiplies the existing pose in order to update the transformation
    * \param [in] IncrementalPose New pose to apply
    */
-  void appendPose(Matx44d& IncrementalPose);
-  void printPose();
+  CV_WRAP void appendPose(Matx44d& IncrementalPose);
+  CV_WRAP void printPose();
 
   Pose3DPtr clone();
@@ -122,12 +122,12 @@ public:
   virtual ~Pose3D() {}
 
-  double alpha, residual;
-  size_t modelIndex, numVotes;
-  Matx44d pose;
-  double angle;
-  Vec3d t;
-  Vec4d q;
+  CV_PROP double alpha, residual;
+  CV_PROP size_t modelIndex, numVotes;
+  CV_PROP Matx44d pose;
+  CV_PROP double angle;
+  CV_PROP Vec3d t;
+  CV_PROP Vec4d q;
 };
 
 /**
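With CV_EXPORTS_W on the class and CV_PROP on its members, every Pose3D that crosses into Python exposes these fields as plain attributes. A small sketch, assuming 'poses' is the list returned by match() or registerModelToScene() as in the ICP example above:

p = poses[0]  # one Pose3D from a previous match (assumed context)
print(p.modelIndex, p.numVotes, p.residual)  # CV_PROP fields, read directly
print(p.pose)       # Matx44d arrives in Python as a 4x4 NumPy array
print(p.q, p.t)     # quaternion and translation of the same transform
p.printPose()       # the CV_WRAP'd methods are callable as well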
@@ -101,7 +101,7 @@ public:
   /**
    * \brief Empty constructor. Sets default arguments
    */
-  PPF3DDetector();
+  CV_WRAP PPF3DDetector();
 
   /**
    * Constructor with arguments
@@ -109,7 +109,7 @@ public:
    * @param [in] relativeDistanceStep The discretization distance of the point pair distance relative to the model's diameter. This value has a direct impact on the hashtable. Small values lead to an overly fine discretization, and thus to ambiguity in the bins of the hashtable. Values that are too large lead to no discrimination over the feature vectors, so that different point pair features are assigned to the same bin. This argument defaults to the value of RelativeSamplingStep. For noisy scenes, the value can be increased to improve the robustness of the matching against noisy points.
    * @param [in] numAngles Sets the discretization of the point pair orientation as the number of subdivisions of the angle. This value is the equivalent of RelativeDistanceStep for the orientations. Increasing the value increases the precision of the matching but decreases the robustness against incorrect normal directions, and vice versa. For very noisy scenes where the normal directions cannot be computed accurately, the value can be set to 25 or 20.
    */
-  PPF3DDetector(const double relativeSamplingStep, const double relativeDistanceStep=0.05, const double numAngles=30);
+  CV_WRAP PPF3DDetector(const double relativeSamplingStep, const double relativeDistanceStep=0.05, const double numAngles=30);
 
   virtual ~PPF3DDetector();
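These defaults carry over to Python, so only relativeSamplingStep is mandatory. A brief sketch of the discretization trade-off described above (the values are illustrative only):

import cv2 as cv

# Defaults apply: relativeDistanceStep=0.05, numAngles=30
detector = cv.ppf_match_3d_PPF3DDetector(0.025)

# For very noisy scenes: coarser angle bins tolerate bad normals better
noisy_detector = cv.ppf_match_3d_PPF3DDetector(0.025, 0.05, 25)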
@@ -128,7 +128,7 @@ public:
    *
    * \details Uses the parameters set in the constructor to downsample and learn a new model. Once the model is learned, the instance is ready for calling match().
    */
-  void trainModel(const Mat& Model);
+  CV_WRAP void trainModel(const Mat& Model);
 
   /**
    * \brief Matches a trained model across a provided scene.
@@ -138,7 +138,7 @@ public:
    * @param [in] relativeSceneSampleStep The ratio of scene points to be used for the matching after sampling with relativeSceneDistance. For example, if this value is set to 1.0/5.0, every 5th point from the scene is used for pose estimation. This parameter allows an easy trade-off between speed and accuracy of the matching. Increasing the value leads to fewer points being used, and in turn to a faster but less accurate pose computation. Decreasing the value has the inverse effect.
    * @param [in] relativeSceneDistance Sets the distance threshold relative to the diameter of the model. This parameter is equivalent to relativeSamplingStep in the training stage, and acts like a prior sampling alongside the relativeSceneSampleStep parameter.
    */
-  void match(const Mat& scene, std::vector<Pose3DPtr> &results, const double relativeSceneSampleStep=1.0/5.0, const double relativeSceneDistance=0.03);
+  CV_WRAP void match(const Mat& scene, CV_OUT std::vector<Pose3DPtr> &results, const double relativeSceneSampleStep=1.0/5.0, const double relativeSceneDistance=0.03);
 
   void read(const FileNode& fn);
   void write(FileStorage& fs) const;
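On the Python side the CV_OUT results vector disappears from the argument list, leaving only the two sampling knobs. Continuing with the detector and scene from the sketches above, the speed/accuracy trade-off looks like:

# Every 5th sampled scene point (the defaults): slower but more accurate
results = detector.match(scene, 1.0/5.0, 0.03)

# Every 40th point: much faster, coarser poses (the sample below does this)
results = detector.match(scene, 1.0/40.0, 0.05)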
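To make those std::vector<Pose3DPtr> parameters round-trip as ordinary Python lists, the commit also registers a converter with the bindings generator: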
#ifdef HAVE_OPENCV_SURFACE_MATCHING
// Teach the generated bindings to convert between a Python list and
// std::vector<Pose3DPtr>, delegating to the generic vector converters.
template<> struct pyopencvVecConverter<ppf_match_3d::Pose3DPtr>
{
    static bool to(PyObject* obj, std::vector<ppf_match_3d::Pose3DPtr>& value, const ArgInfo info)
    {
        return pyopencv_to_generic_vec(obj, value, info);
    }
    static PyObject* from(const std::vector<ppf_match_3d::Pose3DPtr>& value)
    {
        return pyopencv_from_generic_vec(value);
    }
};

typedef std::vector<ppf_match_3d::Pose3DPtr> vector_Pose3DPtr;
#endif
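Finally, a Python sample exercises the new interface end to end: load a model and a scene, train the detector, match, then refine the best candidates with ICP: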
import cv2 as cv

N = 2  # number of coarse poses to refine with ICP

modelname = "parasaurolophus_6700"
scenename = "rs1_normals"

detector = cv.ppf_match_3d_PPF3DDetector(0.025, 0.05)

print('Loading model...')
pc = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % modelname, 1)  # 1: load normals too

print('Training...')
detector.trainModel(pc)

print('Loading scene...')
pcTest = cv.ppf_match_3d.loadPLYSimple("data/%s.ply" % scenename, 1)

print('Matching...')
results = detector.match(pcTest, 1.0/40.0, 0.05)

print('Performing ICP...')
icp = cv.ppf_match_3d_ICP(100)
_, results = icp.registerModelToScene(pc, pcTest, results[:N])

print("Poses: ")
for i, result in enumerate(results):
    #result.printPose()
    print("\n-- Pose to Model Index %d: NumVotes = %d, Residual = %f\n%s\n"
          % (result.modelIndex, result.numVotes, result.residual, result.pose))
    if i == 0:
        # Write the model cloud transformed by the best pose for inspection
        pct = cv.ppf_match_3d.transformPCPose(pc, result.pose)
        cv.ppf_match_3d.writePLY(pct, "%sPCTrans.ply" % modelname)
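For the best pose (i == 0), the script writes the transformed model cloud to parasaurolophus_6700PCTrans.ply, so the alignment can be inspected in any PLY viewer alongside the original scene.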