Commit 995b6963 authored by Bence Magyar's avatar Bence Magyar

Cosmetic changes

parent d1193e31
......@@ -42,8 +42,8 @@
/**
* @file icp.hpp
*
* @brief Implementation of ICP (Iterative Closest Point) Algorithm
* @author Tolga Birdal
*/
#ifndef __OPENCV_SURFACE_MATCHING_ICP_HPP__
......@@ -66,93 +66,93 @@ namespace ppf_match_3d
* You will find that my emphasis is on the performance, while retaining the accuracy.
* This implementation is based on Tolga Birdal's MATLAB implementation in here:
* http://www.mathworks.com/matlabcentral/fileexchange/47152-icp-registration-using-efficient-variants-and-multi-resolution-scheme
* The main contributions come from:
* 1. Picky ICP:
* http://www5.informatik.uni-erlangen.de/Forschung/Publikationen/2003/Zinsser03-ARI.pdf
* 2. Efficient variants of the ICP Algorithm:
* http://docs.happycoders.org/orgadoc/graphics/imaging/fasticp_paper.pdf
* 3. Geometrically Stable Sampling for the ICP Algorithm: https://graphics.stanford.edu/papers/stabicp/stabicp.pdf
* 4. Multi-resolution registration:
* http://www.cvl.iis.u-tokyo.ac.jp/~oishi/Papers/Alignment/Jost_MultiResolutionICP_3DIM03.pdf
* 5. Linearization of Point-to-Plane metric by Kok Lim Low:
* https://www.comp.nus.edu.sg/~lowkl/publications/lowk_point-to-plane_icp_techrep.pdf
*/
class CV_EXPORTS ICP
{
public:
enum ICP_SAMPLING_TYPE
{
ICP_SAMPLING_TYPE_UNIFORM,
ICP_SAMPLING_TYPE_GELFAND
};
ICP()
{
m_tolerence = 0.005f;
m_rejectionScale = 2.5f;
m_maxItereations = 250;
m_numLevels = 6;
m_sampleType = ICP_SAMPLING_TYPE_UNIFORM;
m_numNeighborsCorr = 1;
}
virtual ~ICP() { }
/**
* \brief ICP constructor with default arguments.
* @param [in] iterations Maximum number of ICP iterations to run.
* @param [in] tolerence Controls the accuracy of registration at each iteration of ICP.
* @param [in] rejectionScale Robust outlier rejection is applied for robustness. This value actually corresponds to the standard deviation coefficient. Points with rejectionScale * \sigma are ignored during registration.
* @param [in] numLevels Number of pyramid levels to proceed. Deep pyramids increase speed but decrease accuracy. Too coarse pyramids might have computational overhead on top of the inaccurate registration. This parameter should be chosen to optimize a balance. Typical values range from 4 to 10.
* @param [in] sampleType Currently this parameter is ignored and only uniform sampling is applied. Leave it as 0.
* @param [in] numMaxCorr Currently this parameter is ignored and only PickyICP is applied. Leave it as 1.
* \return
*
* \details Constructor
*/
ICP(const int iterations, const float tolerence=0.05, const float rejectionScale=2.5, const int numLevels=6, const ICP_SAMPLING_TYPE sampleType = ICP_SAMPLING_TYPE_UNIFORM, const int numMaxCorr=1)
{
m_tolerence = tolerence;
m_numNeighborsCorr = numMaxCorr;
m_rejectionScale = rejectionScale;
m_maxItereations = iterations;
m_numLevels = numLevels;
m_sampleType = sampleType;
}
/**
* \brief Perform registration
*
* @param [in] srcPC The input point cloud for the model. Expected to have the normals (Nx6). Currently,
* CV_32F is the only supported data type.
* @param [in] dstPC The input point cloud for the scene. It is assumed that the model is registered on the scene. Scene remains static. Expected to have the normals (Nx6). Currently, CV_32F is the only supported data type.
* @param [out] residual The output registration error.
* \return On successful termination, the function returns 0.
*
* \details It is assumed that the model is registered on the scene. Scene remains static, while the model transforms. The output poses transform the models onto the scene. Because of the point to plane minimization, the scene is expected to have the normals available. Expected to have the normals (Nx6).
*/
int registerModelToScene(const Mat& srcPC, const Mat& dstPC, double& residual, double pose[16]);
/**
* \brief Perform registration with multiple initial poses
*
* @param [in] srcPC The input point cloud for the model. Expected to have the normals (Nx6). Currently,
* CV_32F is the only supported data type.
* @param [in] dstPC The input point cloud for the scene. Currently, CV_32F is the only supported data type.
* @param [out] poses List output of poses. For more detailed information check out Pose3D.
* \return On successful termination, the function returns 0.
*
* \details It is assumed that the model is registered on the scene. Scene remains static, while the model transforms. The output poses transform the models onto the scene. Because of the point to plane minimization, the scene is expected to have the normals available. Expected to have the normals (Nx6).
*/
int registerModelToScene(const Mat& srcPC, const Mat& dstPC, std::vector<Pose3D*>& poses);
private:
float m_tolerence;
int m_maxItereations;
float m_rejectionScale;
int m_numNeighborsCorr;
int m_numLevels;
int m_sampleType;
};
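/**
 * Typical use of the ICP refinement above (a minimal sketch; the input clouds
 * srcPC/dstPC and the chosen parameter values are illustrative, not part of
 * this header):
 *
 * // srcPC and dstPC are CV_32F matrices of size Nx6 (x, y, z, nx, ny, nz)
 * ppf_match_3d::ICP icp(100, 0.005f, 2.5f, 8);
 * double residual = 0;
 * double pose[16];
 * if (icp.registerModelToScene(srcPC, dstPC, residual, pose) == 0)
 * {
 *   // pose now holds the 4x4 row-major transform mapping the model onto the scene
 * }
 */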
......@@ -57,68 +57,67 @@ namespace ppf_match_3d
*/
class CV_EXPORTS Pose3D
{
public:
Pose3D()
{
alpha=0;
modelIndex=0;
numVotes=0;
residual = 0;
for (int i=0; i<16; i++)
Pose[i]=0;
}
Pose3D(double Alpha, unsigned int ModelIndex=0, unsigned int NumVotes=0)
{
alpha = Alpha;
modelIndex = ModelIndex;
numVotes = NumVotes;
residual=0;
for (int i=0; i<16; i++)
Pose[i]=0;
}
/**
* \brief Updates the pose with the new one
* \param [in] NewPose New pose to overwrite
*/
void updatePose(double NewPose[16]);
/**
* \brief Updates the pose with the new one
* \param [in] NewPose New pose to overwrite
*/
void updatePose(double NewR[9], double NewT[3]);
/**
* \brief Updates the pose with the new one, but this time using quaternions to represent rotation
* \param [in] NewPose New pose to overwrite
*/
void updatePoseQuat(double Q[4], double NewT[3]);
/**
* \brief Left multiplies the existing pose in order to update the transformation
* \param [in] IncrementalPose New pose to apply
*/
void appendPose(double IncrementalPose[16]);
void printPose();
Pose3D* clone();
int writePose(FILE* f);
int readPose(FILE* f);
int writePose(const std::string& FileName);
int readPose(const std::string& FileName);
virtual ~Pose3D() {}
double alpha, residual;
unsigned int modelIndex;
unsigned int numVotes;
double Pose[16], angle, t[3], q[4];
};
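/**
 * Pose composition sketch (illustrative; follows the documentation above,
 * which states that appendPose left-multiplies the stored transform):
 *
 * ppf_match_3d::Pose3D pose(0.0);
 * double initial[16], refinement[16];
 * // ... fill both with 4x4 row-major transforms ...
 * pose.updatePose(initial);
 * pose.appendPose(refinement); // stored pose becomes refinement * initial
 */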
/**
......@@ -129,53 +128,48 @@ class CV_EXPORTS Pose3D
*/
class CV_EXPORTS PoseCluster3D
{
public:
PoseCluster3D()
{
numVotes=0;
id=0;
}
PoseCluster3D(Pose3D* newPose)
{
poseList.clear();
poseList.push_back(newPose);
numVotes=newPose->numVotes;
id=0;
}
PoseCluster3D(Pose3D* newPose, int newId)
{
poseList.push_back(newPose);
this->numVotes = newPose->numVotes;
this->id = newId;
}
virtual ~PoseCluster3D()
{}
/**
* \brief Adds a new pose to the cluster. The pose should be "close" to the mean poses
* in order to preserve the consistency
* \param [in] newPose Pose to add to the cluster
*/
void addPose(Pose3D* newPose);
int writePoseCluster(FILE* f);
int readPoseCluster(FILE* f);
int writePoseCluster(const std::string& FileName);
int readPoseCluster(const std::string& FileName);
std::vector < Pose3D* > poseList;
int numVotes;
int id;
};
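/**
 * Clustering sketch (illustrative; detectedPose and similarPose stand for
 * Pose3D* hypotheses produced elsewhere, e.g. by PPF3DDetector::match):
 *
 * ppf_match_3d::PoseCluster3D cluster(detectedPose, 0);
 * cluster.addPose(similarPose); // only poses close to the cluster mean should be added
 * // cluster.poseList now holds both hypotheses
 */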
} // namespace ppf_match_3d
} // namespace cv
......@@ -36,6 +36,7 @@
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
/**
** ppf_match_3d : Interfaces for matching 3d surfaces in 3d scenes. This module implements the algorithm from Bertram Drost and Slobodan Ilic.
** Use: Read a 3D model, load a 3D scene and match the model to the scene
......@@ -69,9 +70,9 @@ namespace ppf_match_3d
#define T_PPF_LENGTH 5
/**
* @struct THash
* @brief Struct, holding a node in the hashtable
*/
typedef struct THash
{
int id;
......@@ -79,97 +80,97 @@ typedef struct THash
} THash;
/**
* @class PPF3DDetector
* @brief Class, allowing the loading and matching of 3D models.
* Typical Use:
*
* // Train a model
* ppf_match_3d::PPF3DDetector detector(0.05, 0.05);
* detector.trainModel(pc);
* // Search the model in a given scene
* vector < Pose3D* > results;
* detector.match(pcTest, results, 1.0/5.0,0.05);
*
*/
class CV_EXPORTS PPF3DDetector
{
public:
/**
* \brief Empty constructor. Sets default arguments
*/
PPF3DDetector();
/**
* Constructor with arguments
* @param [in] relativeSamplingStep Sampling distance relative to the object's diameter. Models are first sampled uniformly in order to improve efficiency. Decreasing this value leads to a denser model, and a more accurate pose estimation but the larger the model, the slower the training. Increasing the value leads to a less accurate pose computation but a smaller model and faster model generation and matching. Beware of the memory consumption when using small values.
* @param [in] relativeDistanceStep The discretization distance of the point pair distance relative to the model's diameter. This value has a direct impact on the hashtable. Using small values would lead to too fine a discretization, and thus ambiguity in the bins of the hashtable. Too large values would lead to no discrimination over the feature vectors and different point pair features would be assigned to the same bin. This argument defaults to the value of RelativeSamplingStep. For noisy scenes, the value can be increased to improve the robustness of the matching against noisy points.
* @param [in] numAngles Set the discretization of the point pair orientation as the number of subdivisions of the angle. This value is the equivalent of RelativeDistanceStep for the orientations. Increasing the value increases the precision of the matching but decreases the robustness against incorrect normal directions. Decreasing the value decreases the precision of the matching but increases the robustness against incorrect normal directions. For very noisy scenes where the normal directions can not be computed accurately, the value can be set to 25 or 20.
*/
PPF3DDetector(const double relativeSamplingStep, const double relativeDistanceStep=0.05, const double numAngles=30);
virtual ~PPF3DDetector();
/**
* Set the parameters for the search
* @param [in] numPoses The maximum number of poses to return
* @param [in] positionThreshold Position threshold controlling the similarity of translations. Depends on the units of calibration/model.
* @param [in] rotationThreshold Rotation threshold controlling the similarity of rotations. This parameter can be perceived as a threshold over the difference of angles.
* @param [in] minMatchScore Not used at the moment
* @param [in] useWeightedClustering The algorithm by default clusters the poses without weighting. A non-zero value would indicate that the pose clustering should take into account the number of votes as the weights and perform a weighted averaging instead of a simple one.
*/
void setSearchParams(const int numPoses=5, const double positionThreshold=-1, const double rotationThreshold=-1, const double minMatchScore=0.5, const bool useWeightedClustering=false);
/**
* \brief Trains a new model.
*
* @param [in] Model The input point cloud with normals (Nx6)
*
* \details Uses the parameters set in the constructor to downsample and learn a new model. When the model is learnt, the instance gets ready for calling "match".
*/
void trainModel(const Mat& Model);
/**
* \brief Matches a trained model across a provided scene.
*
* @param [in] scene Point cloud for the scene
* @param [out] results List of output poses
* @param [in] relativeSceneSampleStep The ratio of scene points to be used for the matching after sampling with relativeSceneDistance. For example, if this value is set to 1.0/5.0, every 5th point from the scene is used for pose estimation. This parameter allows an easy trade-off between speed and accuracy of the matching. Increasing the value leads to less points being used and in turn to a faster but less accurate pose computation. Decreasing the value has the inverse effect.
* @param [in] relativeSceneDistance Set the distance threshold relative to the diameter of the model. This parameter is equivalent to relativeSamplingStep in the training stage. This parameter acts like a prior sampling with the relativeSceneSampleStep parameter.
*/
void match(const Mat& scene, std::vector<Pose3D*>& results, const double relativeSceneSampleStep=1.0/5.0, const double relativeSceneDistance=0.03);
void read(const FileNode& fn);
void write(FileStorage& fs) const;
protected:
double maxDist, angle_step, angleStepRadians, distance_step;
double samplingStepRelative, angleStepRelative, distanceStepRelative;
Mat inputPC, sampledPC, PPF;
int num_ref_points, sampled_step, ppf_step;
hashtable_int* hash_table;
THash* hash_nodes;
int NumPoses;
double PositionThreshold, RotationThreshold, MinMatchScore;
bool UseWeightedAvg;
float sampleStepSearch;
int SceneSampleStep;
void clearTrainingModels();
private:
void computePPFFeatures(const double p1[4], const double n1[4],
const double p2[4], const double n2[4],
double f[4]);
bool matchPose(const Pose3D& sourcePose, const Pose3D& targetPose);
int clusterPoses(Pose3D** poseList, int numPoses, std::vector<Pose3D*>& finalPoses);
bool trained;
};
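/**
 * End-to-end sketch of the intended pipeline (illustrative; reuses the values
 * from the "Typical Use" snippet above, with pcModel/pcScene as Nx6 CV_32F
 * clouds carrying normals):
 *
 * ppf_match_3d::PPF3DDetector detector(0.05, 0.05);
 * detector.trainModel(pcModel);
 * std::vector<ppf_match_3d::Pose3D*> results;
 * detector.match(pcScene, results, 1.0/5.0, 0.05);
 * // refine the detected poses with ICP (see icp.hpp)
 * ppf_match_3d::ICP icp(100, 0.005f, 2.5f, 8);
 * icp.registerModelToScene(pcModel, pcScene, results);
 */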
} // namespace ppf_match_3d
......@@ -53,33 +53,33 @@ typedef unsigned int KeyType;
typedef struct hashnode_i
{
KeyType key;
void *data;
struct hashnode_i *next;
} hashnode_i ;
typedef struct HSHTBL_i
{
size_t size;
struct hashnode_i **nodes;
size_t (*hashfunc)(unsigned int);
} hashtable_int;
inline static unsigned int next_power_of_two(unsigned int value)
{
/* Round up to the next highest power of 2 */
/* from http://www-graphics.stanford.edu/~seander/bithacks.html */
--value;
value |= value >> 1;
value |= value >> 2;
value |= value >> 4;
value |= value >> 8;
value |= value >> 16;
++value;
return value;
}
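/* Worked examples of the bit trick above (added for clarity):
   next_power_of_two(1)   == 1
   next_power_of_two(100) == 128  (99 becomes 0b1111111 after the shifts, then +1)
   next_power_of_two(128) == 128  (values that are already powers of two are unchanged) */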
hashtable_int *hashtableCreate(size_t size, size_t (*hashfunc)(unsigned int));
......@@ -47,24 +47,24 @@ using namespace std;
using namespace cv;
using namespace ppf_match_3d;
static void help(const string& errorMessage)
{
cout << "Program init error : "<< errorMessage << endl;
cout << "\nUsage : ppf_matching [input model file] [input scene file]"<< endl;
cout << "\nPlease start again with new parameters"<< endl;
}
int main(int argc, char** argv)
{
// welcome message
cout << "****************************************************" << endl;
cout << "* Surface Matching demonstration : demonstrates the use of surface matching"
" using point pair features." << endl;
cout << "* The sample loads a model and a scene, where the model lies in a different"
" pose than the training.\n* It then trains the model and searches for it in the"
" input scene. The detected poses are further refined by ICP\n* and printed to the "
" standard output." << endl;
cout << "****************************************************" << endl;
if (argc < 3)
{
......@@ -73,15 +73,15 @@ int main(int argc, char** argv)
}
#if (defined __x86_64__ || defined _M_X64)
cout << "Running on 64 bits" << endl;
#else
cout << "Running on 32 bits" << endl;
#endif
#ifdef _OPENMP
cout << "Running with OpenMP" << endl;
#else
cout << "Running without OpenMP and without TBB" << endl;
#endif
string modelFileName = (string)argv[1];
......@@ -57,190 +57,190 @@ const float EPS = 1.192092896e-07F; /* smallest such that 1.0+FLT_EPSILON
static inline double TNorm3(const double v[])
{
return (sqrt(v[0]*v[0]+v[1]*v[1]+v[2]*v[2]));
}
static inline void TNormalize3(double v[])
{
double normTemp=TNorm3(v);
if (normTemp>0)
{
v[0]/=normTemp;
v[1]/=normTemp;
v[2]/=normTemp;
}
}
static inline double TDot3(const double a[3], const double b[3])
{
return ((a[0])*(b[0])+(a[1])*(b[1])+(a[2])*(b[2]));
}
static inline void TCross(const double a[], const double b[], double c[])
{
c[0] = (a[1])*(b[2])-(a[2])*(b[1]);
c[1] = (a[2])*(b[0])-(a[0])*(b[2]);
c[2] = (a[0])*(b[1])-(a[1])*(b[0]);
}
static inline double TAngle3(const double a[3], const double b[3])
{
double c[3];
TCross(a,b,c);
return (atan2(TNorm3(c), TDot3(a, b)));
}
static inline void matrixProduct33(double *A, double *B, double *R)
{
R[0] = A[0] * B[0] + A[1] * B[3] + A[2] * B[6];
R[1] = A[0] * B[1] + A[1] * B[4] + A[2] * B[7];
R[2] = A[0] * B[2] + A[1] * B[5] + A[2] * B[8];
R[3] = A[3] * B[0] + A[4] * B[3] + A[5] * B[6];
R[4] = A[3] * B[1] + A[4] * B[4] + A[5] * B[7];
R[5] = A[3] * B[2] + A[4] * B[5] + A[5] * B[8];
R[6] = A[6] * B[0] + A[7] * B[3] + A[8] * B[6];
R[7] = A[6] * B[1] + A[7] * B[4] + A[8] * B[7];
R[8] = A[6] * B[2] + A[7] * B[5] + A[8] * B[8];
}
// A is a vector
static inline void matrixProduct133(double *A, double *B, double *R)
{
R[0] = A[0] * B[0] + A[1] * B[3] + A[2] * B[6];
R[1] = A[0] * B[1] + A[1] * B[4] + A[2] * B[7];
R[2] = A[0] * B[2] + A[1] * B[5] + A[2] * B[8];
}
static inline void matrixProduct331(const double A[9], const double b[3], double r[3])
{
r[0] = A[0] * b[0] + A[1] * b[1] + A[2] * b[2];
r[1] = A[3] * b[0] + A[4] * b[1] + A[5] * b[2];
r[2] = A[6] * b[0] + A[7] * b[1] + A[8] * b[2];
}
static inline void matrixTranspose33(double *A, double *At)
{
At[0] = A[0];
At[4] = A[4];
At[8] = A[8];
At[1] = A[3];
At[2] = A[6];
At[3] = A[1];
At[5] = A[7];
At[6] = A[2];
At[7] = A[5];
}
static inline void matrixProduct44(const double A[16], const double B[16], double R[16])
{
R[0] = A[0] * B[0] + A[1] * B[4] + A[2] * B[8] + A[3] * B[12];
R[1] = A[0] * B[1] + A[1] * B[5] + A[2] * B[9] + A[3] * B[13];
R[2] = A[0] * B[2] + A[1] * B[6] + A[2] * B[10] + A[3] * B[14];
R[3] = A[0] * B[3] + A[1] * B[7] + A[2] * B[11] + A[3] * B[15];
R[4] = A[4] * B[0] + A[5] * B[4] + A[6] * B[8] + A[7] * B[12];
R[5] = A[4] * B[1] + A[5] * B[5] + A[6] * B[9] + A[7] * B[13];
R[6] = A[4] * B[2] + A[5] * B[6] + A[6] * B[10] + A[7] * B[14];
R[7] = A[4] * B[3] + A[5] * B[7] + A[6] * B[11] + A[7] * B[15];
R[8] = A[8] * B[0] + A[9] * B[4] + A[10] * B[8] + A[11] * B[12];
R[9] = A[8] * B[1] + A[9] * B[5] + A[10] * B[9] + A[11] * B[13];
R[10] = A[8] * B[2] + A[9] * B[6] + A[10] * B[10] + A[11] * B[14];
R[11] = A[8] * B[3] + A[9] * B[7] + A[10] * B[11] + A[11] * B[15];
R[12] = A[12] * B[0] + A[13] * B[4] + A[14] * B[8] + A[15] * B[12];
R[13] = A[12] * B[1] + A[13] * B[5] + A[14] * B[9] + A[15] * B[13];
R[14] = A[12] * B[2] + A[13] * B[6] + A[14] * B[10] + A[15] * B[14];
R[15] = A[12] * B[3] + A[13] * B[7] + A[14] * B[11] + A[15] * B[15];
}
static inline void matrixProduct441(const double A[16], const double B[4], double R[4])
{
R[0] = A[0] * B[0] + A[1] * B[1] + A[2] * B[2] + A[3] * B[3];
R[1] = A[4] * B[0] + A[5] * B[1] + A[6] * B[2] + A[7] * B[3];
R[2] = A[8] * B[0] + A[9] * B[1] + A[10] * B[2] + A[11] * B[3];
R[3] = A[12] * B[0] + A[13] * B[1] + A[14] * B[2] + A[15] * B[3];
}
static inline void matrixPrint(double *A, int m, int n)
{
int i, j;
for (i = 0; i < m; i++)
{
printf(" ");
for (j = 0; j < n; j++)
{
printf(" %0.6f ", A[i * n + j]);
}
printf("\n");
}
}
static inline void matrixIdentity(int n, double *A)
{
int i;
for (i = 0; i < n*n; i++)
{
A[i] = 0.0;
}
for (i = 0; i < n; i++)
{
A[i * n + i] = 1.0;
}
}
static inline void rtToPose(const double R[9], const double t[3], double Pose[16])
{
Pose[0]=R[0];
Pose[1]=R[1];
Pose[2]=R[2];
Pose[4]=R[3];
Pose[5]=R[4];
Pose[6]=R[5];
Pose[8]=R[6];
Pose[9]=R[7];
Pose[10]=R[8];
Pose[3]=t[0];
Pose[7]=t[1];
Pose[11]=t[2];
Pose[15] = 1;
}
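/*
 * Layout used by rtToPose/poseToRT/poseToR (row-major 4x4; indices into Pose[16]):
 *
 *   | R[0] R[1] R[2] t[0] |   | Pose[0]  Pose[1]  Pose[2]  Pose[3]  |
 *   | R[3] R[4] R[5] t[1] | = | Pose[4]  Pose[5]  Pose[6]  Pose[7]  |
 *   | R[6] R[7] R[8] t[2] |   | Pose[8]  Pose[9]  Pose[10] Pose[11] |
 *   |  0    0    0    1   |   | Pose[12] Pose[13] Pose[14] Pose[15] |
 *
 * Note that rtToPose only writes Pose[15] in the last row, so Pose[12..14]
 * should already be zero (e.g. initialized via matrixIdentity).
 */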
static inline void poseToRT(const double Pose[16], double R[9], double t[3])
{
R[0] = Pose[0];
R[1] = Pose[1];
R[2] = Pose[2];
R[3] = Pose[4];
R[4] = Pose[5];
R[5] = Pose[6];
R[6] = Pose[8];
R[7] = Pose[9];
R[8] = Pose[10];
t[0]=Pose[3];
t[1]=Pose[7];
t[2]=Pose[11];
}
static inline void poseToR(const double Pose[16], double R[9])
{
R[0] = Pose[0];
R[1] = Pose[1];
R[2] = Pose[2];
R[3] = Pose[4];
R[4] = Pose[5];
R[5] = Pose[6];
R[6] = Pose[8];
R[7] = Pose[9];
R[8] = Pose[10];
}
/**
......@@ -248,28 +248,28 @@ static inline void poseToR(const double Pose[16], double R[9])
*/
static inline void aaToRyz(double angle, const double r[3], double row2[3], double row3[3])
{
const double sinA=sin(angle);
const double cosA=cos(angle);
const double cos1A=(1-cosA);
row2[0] = 0.f;
row2[1] = cosA;
row2[2] = 0.f;
row3[0] = 0.f;
row3[1] = 0.f;
row3[2] = cosA;
row2[0] += r[2] * sinA;
row2[2] += -r[0] * sinA;
row3[0] += -r[1] * sinA;
row3[1] += r[0] * sinA;
row2[0] += r[1] * r[0] * cos1A;
row2[1] += r[1] * r[1] * cos1A;
row2[2] += r[1] * r[2] * cos1A;
row3[0] += r[2] * r[0] * cos1A;
row3[1] += r[2] * r[1] * cos1A;
row3[2] += r[2] * r[2] * cos1A;
}
/**
......@@ -277,39 +277,39 @@ static inline void aaToRyz(double angle, const double r[3], double row2[3], doub
*/
static inline void aaToR(double angle, const double r[3], double R[9])
{
const double sinA=sin(angle);
const double cosA=cos(angle);
const double cos1A=(1-cosA);
double *row1 = &R[0];
double *row2 = &R[3];
double *row3 = &R[6];
row1[0] = cosA;
row1[1] = 0.0f;
row1[2] = 0.f;
row2[0] = 0.f;
row2[1] = cosA;
row2[2] = 0.f;
row3[0] = 0.f;
row3[1] = 0.f;
row3[2] = cosA;
row1[1] += -r[2] * sinA;
row1[2] += r[1] * sinA;
row2[0] += r[2] * sinA;
row2[2] += -r[0] * sinA;
row3[0] += -r[1] * sinA;
row3[1] += r[0] * sinA;
row1[0] += r[0] * r[0] * cos1A;
row1[1] += r[0] * r[1] * cos1A;
row1[2] += r[0] * r[2] * cos1A;
row2[0] += r[1] * r[0] * cos1A;
row2[1] += r[1] * r[1] * cos1A;
row2[2] += r[1] * r[2] * cos1A;
row3[0] += r[2] * r[0] * cos1A;
row3[1] += r[2] * r[1] * cos1A;
row3[2] += r[2] * r[2] * cos1A;
}
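/*
 * aaToRyz and aaToR above implement the Rodrigues rotation formula for a unit
 * axis r and angle theta:
 *
 *   R = cos(theta) * I + sin(theta) * [r]_x + (1 - cos(theta)) * r * r^T
 *
 * where [r]_x is the skew-symmetric cross-product matrix of r. aaToRyz only
 * evaluates the second and third rows of R.
 */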
/**
......@@ -317,50 +317,50 @@ static inline void aaToR(double angle, const double r[3], double R[9])
*/
static inline void getUnitXRotation(double angle, double R[9])
{
const double sinA=sin(angle);
const double cosA=cos(angle);
double *row1 = &R[0];
double *row2 = &R[3];
double *row3 = &R[6];
row1[0] = 1;
row1[1] = 0.0f;
row1[2] = 0.f;
row2[0] = 0.f;
row2[1] = cosA;
row2[2] = -sinA;
row3[0] = 0.f;
row3[1] = sinA;
row3[2] = cosA;
}
/**
* \brief Compute a transformation in order to rotate around X direction
*/
static inline void getUnitXRotation_44(double angle, double T[16])
{
const double sinA=sin(angle);
const double cosA=cos(angle);
double *row1 = &T[0];
double *row2 = &T[4];
double *row3 = &T[8];
row1[0] = 1;
row1[1] = 0.0f;
row1[2] = 0.f;
row2[0] = 0.f;
row2[1] = cosA;
row2[2] = -sinA;
row3[0] = 0.f;
row3[1] = sinA;
row3[2] = cosA;
row1[3]=0;
row2[3]=0;
row3[3]=0;
T[3]=0;
T[7]=0;
T[11]=0;
T[15] = 1;
}
/**
......@@ -368,34 +368,34 @@ static inline void getUnitXRotation_44(double angle, double T[16])
*/
static inline void computeTransformRTyz(const double p1[4], const double n1[4], double row2[3], double row3[3], double t[3])
{
// dot product with x axis
double angle=acos( n1[0] );
// cross product with x axis
double axis[3]={0, n1[2], -n1[1]};
double axisNorm;
// we try to project on the ground plane but it's already parallel
if (n1[1]==0 && n1[2]==0)
{
axis[1]=1;
axis[2]=0;
}
else
{
axisNorm=sqrt(axis[2]*axis[2]+axis[1]*axis[1]);
if (axisNorm>EPS)
{
axis[1]/=axisNorm;
axis[2]/=axisNorm;
}
}
aaToRyz(angle, axis, row2, row3);
t[1] = row2[0] * (-p1[0]) + row2[1] * (-p1[1]) + row2[2] * (-p1[2]);
t[2] = row3[0] * (-p1[0]) + row3[1] * (-p1[1]) + row3[2] * (-p1[2]);
}
/**
......@@ -403,44 +403,44 @@ static inline void computeTransformRTyz(const double p1[4], const double n1[4],
*/
static inline void computeTransformRT(const double p1[4], const double n1[4], double R[9], double t[3])
{
// dot product with x axis
double angle=acos( n1[0] );
// cross product with x axis
double axis[3]={0, n1[2], -n1[1]};
double axisNorm;
double *row1, *row2, *row3;
// we try to project on the ground plane but it's already parallel
if (n1[1]==0 && n1[2]==0)
{
axis[1]=1;
axis[2]=0;
}
else
{
axisNorm=sqrt(axis[2]*axis[2]+axis[1]*axis[1]);
if (axisNorm>EPS)
{
axis[1]/=axisNorm;
axis[2]/=axisNorm;
}
}
aaToR(angle, axis, R);
row1 = &R[0];
row2 = &R[3];
row3 = &R[6];
t[0] = row1[0] * (-p1[0]) + row1[1] * (-p1[1]) + row1[2] * (-p1[2]);
t[1] = row2[0] * (-p1[0]) + row2[1] * (-p1[1]) + row2[2] * (-p1[2]);
t[2] = row3[0] * (-p1[0]) + row3[1] * (-p1[1]) + row3[2] * (-p1[2]);
}
/**
* \brief Flip a normal to the viewing direction
*
* \param [in] point Scene point
* \param [in] vp_x X component of view direction
* \param [in] vp_y Y component of view direction
......@@ -451,27 +451,27 @@ static inline void computeTransformRT(const double p1[4], const double n1[4], do
*/
static inline void flipNormalViewpoint(const float* point, double vp_x, double vp_y, double vp_z, double *nx, double *ny, double *nz)
{
double cos_theta;
// See if we need to flip any plane normals
vp_x -= (double)point[0];
vp_y -= (double)point[1];
vp_z -= (double)point[2];
// Dot product between the (viewpoint - point) and the plane normal
cos_theta = (vp_x * (*nx) + vp_y * (*ny) + vp_z * (*nz));
// Flip the plane normal
if (cos_theta < 0)
{
(*nx) *= -1;
(*ny) *= -1;
(*nz) *= -1;
}
}
/**
* \brief Flip a normal to the viewing direction
*
* \param [in] point Scene point
* \param [in] vp_x X component of view direction
* \param [in] vp_y Y component of view direction
......@@ -482,275 +482,275 @@ static inline void flipNormalViewpoint(const float* point, double vp_x, double v
*/
static inline void flipNormalViewpoint_32f(const float* point, float vp_x, float vp_y, float vp_z, float *nx, float *ny, float *nz)
{
float cos_theta;
// See if we need to flip any plane normals
vp_x -= (float)point[0];
vp_y -= (float)point[1];
vp_z -= (float)point[2];
// Dot product between the (viewpoint - point) and the plane normal
cos_theta = (vp_x * (*nx) + vp_y * (*ny) + vp_z * (*nz));
// Flip the plane normal
if (cos_theta < 0)
{
(*nx) *= -1;
(*ny) *= -1;
(*nz) *= -1;
}
}
/**
* \brief Convert a rotation matrix to axis angle representation
*
* \param [in] R Rotation matrix
* \param [out] axis Axis vector
* \param [out] angle Angle in radians
*/
static inline void dcmToAA(double *R, double *axis, double *angle)
{
double d1 = R[7] - R[5];
double d2 = R[2] - R[6];
double d3 = R[3] - R[1];
double norm = sqrt(d1 * d1 + d2 * d2 + d3 * d3);
double x = (R[7] - R[5]) / norm;
double y = (R[2] - R[6]) / norm;
double z = (R[3] - R[1]) / norm;
*angle = acos((R[0] + R[4] + R[8] - 1.0) * 0.5);
axis[0] = x;
axis[1] = y;
axis[2] = z;
}
/**
* \brief Convert axis angle representation to rotation matrix
*
* \param [in] axis Axis Vector
* \param [in] angle Angle (In radians)
* \param [out] R 3x3 Rotation matrix
*/
static inline void aaToDCM(double *axis, double angle, double *R)
{
double ident[9]={1,0,0,0,1,0,0,0,1};
double n[9] = { 0.0, -axis[2], axis[1],
axis[2], 0.0, -axis[0],
-axis[1], axis[0], 0.0
};
double nsq[9];
double c, s;
int i;
//c = 1-cos(angle);
c = cos(angle);
s = sin(angle);
matrixProduct33(n, n, nsq);
for (i = 0; i < 9; i++)
{
const double sni = n[i]*s;
const double cnsqi = nsq[i]*(c);
R[i]=ident[i]+sni+cnsqi;
}
// The code below is the matrix-based implementation of the above
// double nsq[9], sn[9], cnsq[9], tmp[9];
//matrix_scale(3, 3, n, s, sn);
//matrix_scale(3, 3, nsq, (1 - c), cnsq);
//matrix_sum(3, 3, 3, 3, ident, sn, tmp);
//matrix_sum(3, 3, 3, 3, tmp, cnsq, R);
}
/**
* \brief Convert a direction cosine matrix (rotation matrix) to a quaternion
*
* \param [in] R Rotation Matrix
* \param [out] q Quaternion
*/
static inline void dcmToQuat(double *R, double *q)
{
double n4; // the norm of quaternion multiplied by 4
double tr = R[0] + R[4] + R[8]; // trace of matrix
double factor;
if (tr > 0.0)
{
q[1] = R[5] - R[7];
q[2] = R[6] - R[2];
q[3] = R[1] - R[3];
q[0] = tr + 1.0;
n4 = q[0];
}
else if ((R[0] > R[4]) && (R[0] > R[8]))
{
q[1] = 1.0 + R[0] - R[4] - R[8];
q[2] = R[3] + R[1];
q[3] = R[6] + R[2];
q[0] = R[5] - R[7];
n4 = q[1];
}
else if (R[4] > R[8])
{
q[1] = R[3] + R[1];
q[2] = 1.0 + R[4] - R[0] - R[8];
q[3] = R[7] + R[5];
q[0] = R[6] - R[2];
n4 = q[2];
}
else
{
q[1] = R[6] + R[2];
q[2] = R[7] + R[5];
q[3] = 1.0 + R[8] - R[0] - R[4];
q[0] = R[1] - R[3];
n4 = q[3];
}
factor = 0.5 / sqrt(n4);
q[0] *= factor;
q[1] *= factor;
q[2] *= factor;
q[3] *= factor;
}
/**
* \brief Convert a quaternion to a direction cosine matrix (rotation matrix)
*
* \param [in] q Quaternion (w is at first element)
* \param [out] R Rotation Matrix
*
*/
static inline void quatToDCM(double *q, double *R)
{
double sqw = q[0] * q[0];
double sqx = q[1] * q[1];
double sqy = q[2] * q[2];
double sqz = q[3] * q[3];
double tmp1, tmp2;
R[0] = sqx - sqy - sqz + sqw; // since sqw + sqx + sqy + sqz = 1
R[4] = -sqx + sqy - sqz + sqw;
R[8] = -sqx - sqy + sqz + sqw;
tmp1 = q[1] * q[2];
tmp2 = q[3] * q[0];
R[1] = 2.0 * (tmp1 + tmp2);
R[3] = 2.0 * (tmp1 - tmp2);
tmp1 = q[1] * q[3];
tmp2 = q[2] * q[0];
R[2] = 2.0 * (tmp1 - tmp2);
R[6] = 2.0 * (tmp1 + tmp2);
tmp1 = q[2] * q[3];
tmp2 = q[1] * q[0];
R[5] = 2.0 * (tmp1 + tmp2);
R[7] = 2.0 * (tmp1 - tmp2);
}
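/*
 * Quaternion convention for dcmToQuat/quatToDCM: q = (w, x, y, z) with the
 * scalar part first (q[0] = w) and unit norm assumed. Round-trip sketch
 * (illustrative):
 *
 *   double R[9], q[4], R2[9];
 *   getUnitXRotation(0.3, R);
 *   dcmToQuat(R, q);
 *   quatToDCM(q, R2); // R2 matches R up to numerical precision
 */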
/**
* @brief Analytical solution to find the eigenvector corresponding to the smallest
* eigenvalue of a 3x3 matrix. As this implements the analytical solution, it's not
* really the most robust way. Whenever possible, this implementation can be replaced
* via a robust numerical scheme.
* @param [in] C The matrix
* @param [out] A The eigenvector corresponding to the lowest eigenvalue
* @author Tolga Birdal
*/
static inline void eigenLowest33(const double C[3][3], double A[3])
{
const double a = C[0][0];
const double b = C[0][1];
const double c = C[0][2];
const double d = C[1][1];
const double e = C[1][2];
const double f = C[2][2];
const double t2 = c*c;
const double t3 = e*e;
const double t4 = b*t2;
const double t5 = c*d*e;
const double t34 = b*t3;
const double t35 = a*c*e;
const double t6 = t4+t5-t34-t35;
const double t7 = 1.0/t6;
const double t8 = a+d+f;
const double t9 = b*b;
const double t23 = a*d;
const double t24 = a*f;
const double t25 = d*f;
const double t10 = t2+t3+t9-t23-t24-t25;
const double t11 = t8*t10*(1.0/6.0);
const double t12 = t8*t8;
const double t20 = t8*t12*(1.0/2.7E1);
const double t21 = b*c*e;
const double t22 = a*d*f*(1.0/2.0);
const double t26 = a*t3*(1.0/2.0);
const double t27 = d*t2*(1.0/2.0);
const double t28 = f*t9*(1.0/2.0);
const double t14 = t9*(1.0/3.0);
const double t15 = t2*(1.0/3.0);
const double t16 = t3*(1.0/3.0);
const double t17 = t12*(1.0/9.0);
const double t30 = a*d*(1.0/3.0);
const double t31 = a*f*(1.0/3.0);
const double t32 = d*f*(1.0/3.0);
const double t18 = t14+t15+t16+t17-t30-t31-t32;
const double t19 = t18*t18;
const double t36 = a*(1.0/3.0);
const double t37 = d*(1.0/3.0);
const double t38 = f*(1.0/3.0);
const double t39 = t8*(t2+t3+t9-t23-t24-t25)*(1.0/6.0);
const double t41 = t18*t19;
const double t43 = e*t2;
const double t60 = b*c*f;
const double t61 = d*e*f;
const double t44 = t43-t60-t61+e*t3;
const double t45 = t7*t44;
const double t57 = sqrt(3.0);
const double t51 = b*c;
const double t52 = d*e;
const double t53 = e*f;
const double t54 = t51+t52+t53;
const double t62 = t11+t20+t21+t22-t26-t27-t28;
const double t63 = t11+t20+t21+t22-t26-t27-t28;
const double t64 = t11+t20+t21+t22-t26-t27-t28;
const double t65 = t11+t20+t21+t22-t26-t27-t28;
const double t66 = t36+t37+t38-t18*1.0/pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t62*t62),1.0/3.0)*(1.0/2.0)+t57*(t18*1.0/pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t64*t64),1.0/3.0)-pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t65*t65),1.0/3.0))*5.0E-1*sqrt(-1.0)-pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t63*t63),1.0/3.0)*(1.0/2.0);
const double t67 = t11+t20+t21+t22-t26-t27-t28;
const double t68 = t11+t20+t21+t22-t26-t27-t28;
const double t69 = t11+t20+t21+t22-t26-t27-t28;
const double t70 = t11+t20+t21+t22-t26-t27-t28;
const double t76 = c*t3;
const double t91 = a*c*f;
const double t92 = b*e*f;
const double t77 = t76-t91-t92+c*t2;
const double t83 = a*c;
const double t84 = b*e;
const double t85 = c*f;
const double t86 = t83+t84+t85;
const double t93 = t11+t20+t21+t22-t26-t27-t28;
const double t94 = t11+t20+t21+t22-t26-t27-t28;
const double t95 = t11+t20+t21+t22-t26-t27-t28;
const double t96 = t11+t20+t21+t22-t26-t27-t28;
const double t97 = t36+t37+t38-t18*1.0/pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t93*t93),1.0/3.0)*(1.0/2.0)+t57*(t18*1.0/pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t95*t95),1.0/3.0)-pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t96*t96),1.0/3.0))*5.0E-1*sqrt(-1.0)-pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t94*t94),1.0/3.0)*(1.0/2.0);
const double t98 = t11+t20+t21+t22-t26-t27-t28;
const double t99 = t11+t20+t21+t22-t26-t27-t28;
const double t100 = t11+t20+t21+t22-t26-t27-t28;
const double t101 = t11+t20+t21+t22-t26-t27-t28;
A[0] = t45-e*t7*(t66*t66)+t7*t54*(t36+t37+t38-t18*1.0/pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t67*t67),1.0/3.0)*(1.0/2.0)+t57*(t18*1.0/pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t69*t69),1.0/3.0)-pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t70*t70),1.0/3.0))*5.0E-1*sqrt(-1.0)-pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t68*t68),1.0/3.0)*(1.0/2.0));
A[1] = -t7*t77+c*t7*(t97*t97)-t7*t86*(t36+t37+t38-t18*1.0/pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t98*t98),1.0/3.0)*(1.0/2.0)+t57*(t18*1.0/pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t100*t100),1.0/3.0)-pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t101*t101),1.0/3.0))*5.0E-1*sqrt(-1.0)-pow(t20+t21+t22-t26-t27-t28+t39+sqrt(-t41+t99*t99),1.0/3.0)*(1.0/2.0));
A[2] = 1.0;
}
} // namespace ppf_match_3d
......
......@@ -8,7 +8,7 @@
FORCE_INLINE unsigned int getblock ( const unsigned int * p, int i )
{
return p[i];
}
//----------
......@@ -18,125 +18,125 @@ FORCE_INLINE unsigned int getblock ( const unsigned int * p, int i )
FORCE_INLINE unsigned int fmix32 ( unsigned int h )
{
h ^= h >> 16;
h *= 0x85ebca6b;
h ^= h >> 13;
h *= 0xc2b2ae35;
h ^= h >> 16;
return h;
}
//-----------------------------------------------------------------------------
FORCE_INLINE void bmix32 ( unsigned int & h1, unsigned int & k1, unsigned int & c1, unsigned int & c2 )
{
k1 *= c1;
k1 = ROTL32(k1,11);
k1 *= c2;
h1 ^= k1;
h1 = h1*3+0x52dce729;
c1 = c1*5+0x7b7d159c;
c2 = c2*5+0x6bce6396;
}
//-----------------------------------------------------------------------------
FORCE_INLINE void bmix32 ( unsigned int & h1, unsigned int & h2, unsigned int & k1, unsigned int & k2, unsigned int & c1, unsigned int & c2 )
{
k1 *= c1;
k1 = ROTL32(k1,11);
k1 *= c2;
h1 ^= k1;
h1 += h2;
h2 = ROTL32(h2,17);
k2 *= c2;
k2 = ROTL32(k2,11);
k2 *= c1;
h2 ^= k2;
h2 += h1;
h1 = h1*3+0x52dce729;
h2 = h2*3+0x38495ab5;
c1 = c1*5+0x7b7d159c;
c2 = c2*5+0x6bce6396;
}
//----------
FORCE_INLINE void hashMurmurx64 ( const void * key, const int len, const unsigned int seed, void * out )
{
const unsigned char * data = (const unsigned char*)key;
const int nblocks = len / 8;
unsigned int h1 = 0x8de1c3ac ^ seed;
unsigned int h2 = 0xbab98226 ^ seed;
unsigned int c1 = 0x95543787;
unsigned int c2 = 0x2ad7eb25;
//----------
// body
const unsigned int * blocks = (const unsigned int *)(data + nblocks*8);
for (int i = -nblocks; i; i++)
{
unsigned int k1 = getblock(blocks,i*2+0);
unsigned int k2 = getblock(blocks,i*2+1);
bmix32(h1,h2,k1,k2,c1,c2);
}
//----------
// tail
const unsigned char * tail = (const unsigned char*)(data + nblocks*8);
unsigned int k1 = 0;
unsigned int k2 = 0;
switch (len & 7)
{
case 7:
k2 ^= tail[6] << 16;
case 6:
k2 ^= tail[5] << 8;
case 5:
k2 ^= tail[4] << 0;
case 4:
k1 ^= tail[3] << 24;
case 3:
k1 ^= tail[2] << 16;
case 2:
k1 ^= tail[1] << 8;
case 1:
k1 ^= tail[0] << 0;
bmix32(h1,h2,k1,k2,c1,c2);
};
//----------
// finalization
h2 ^= len;
h1 += h2;
h2 += h1;
h1 = fmix32(h1);
h2 = fmix32(h2);
h1 += h2;
h2 += h1;
((unsigned int*)out)[0] = h1;
((unsigned int*)out)[1] = h2;
}
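// Added note: this is the x64-oriented MurmurHash variant shipped with the module; it
// emits its digest as two 32-bit words written into 'out'. A minimal usage sketch
// (the seed value 42 is arbitrary and only for illustration):
static inline unsigned long long exampleHashKey(unsigned int key)
{
    unsigned int out[2];
    hashMurmurx64(&key, (int)sizeof(key), 42, out);
    return (((unsigned long long)out[1]) << 32) | out[0];
}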
#endif
......@@ -46,43 +46,43 @@ namespace ppf_match_3d
{
static void subtractColumns(Mat srcPC, double mean[3])
{
int height = srcPC.rows;
for (int i=0; i<height; i++)
{
float *row = (float*)(&srcPC.data[i*srcPC.step]);
{
row[0]-=(float)mean[0];
row[1]-=(float)mean[1];
row[2]-=(float)mean[2];
}
}
}
// as in PCA
static void computeMeanCols(Mat srcPC, double mean[3])
{
int height = srcPC.rows;
double mean1=0, mean2 = 0, mean3 = 0;
for (int i=0; i<height; i++)
{
const float *row = (float*)(&srcPC.data[i*srcPC.step]);
{
mean1 += (double)row[0];
mean2 += (double)row[1];
mean3 += (double)row[2];
}
}
mean1/=(double)height;
mean2/=(double)height;
mean3/=(double)height;
mean[0] = mean1;
mean[1] = mean2;
mean[2] = mean3;
}
......@@ -95,185 +95,185 @@ static void computeMeanCols(Mat srcPC, double mean[3])
// compute the average distance to the origin
static double computeDistToOrigin(Mat srcPC)
{
int height = srcPC.rows;
double dist = 0;
for (int i=0; i<height; i++)
{
const float *row = (float*)(&srcPC.data[i*srcPC.step]);
dist += sqrt(row[0]*row[0]+row[1]*row[1]+row[2]*row[2]);
}
return dist;
}
// From Numerical Recipes: finds the median of an array
static float medianF(float arr[], int n)
{
int low, high ;
int median;
int middle, ll, hh;
low = 0 ;
high = n-1 ;
median = (low + high) >>1;
for (;;)
{
if (high <= low) /* One element only */
return arr[median] ;
if (high == low + 1)
{
/* Two elements only */
if (arr[low] > arr[high])
std::swap(arr[low], arr[high]) ;
return arr[median] ;
}
/* Find median of low, middle and high items; swap into position low */
middle = (low + high) >>1;
if (arr[middle] > arr[high])
std::swap(arr[middle], arr[high]) ;
if (arr[low] > arr[high])
std::swap(arr[low], arr[high]) ;
if (arr[middle] > arr[low])
std::swap(arr[middle], arr[low]) ;
/* Swap low item (now in position middle) into position (low+1) */
std::swap(arr[middle], arr[low+1]) ;
/* Nibble from each end towards middle, swapping items when stuck */
ll = low + 1;
hh = high;
for (;;)
{
do
ll++;
while (arr[low] > arr[ll]) ;
do
hh--;
while (arr[hh] > arr[low]) ;
if (hh < ll)
break;
std::swap(arr[ll], arr[hh]) ;
}
/* Swap middle item (in position low) back into correct position */
std::swap(arr[low], arr[hh]) ;
/* Re-set active partition */
if (hh <= median)
low = ll;
if (hh >= median)
high = hh - 1;
}
}
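// Usage sketch (illustrative only): medianF() partially reorders the buffer handed to
// it, so callers are expected to pass a scratch copy, as getRejectionThreshold() below
// does via memcpy.
static inline float exampleMedian()
{
    float vals[5] = {3.f, 1.f, 4.f, 1.f, 5.f};
    return medianF(vals, 5);   // 3.f: the middle element of the sorted values
}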
static float getRejectionThreshold(float* r, int m, float outlierScale)
{
float* t=(float*)calloc(m, sizeof(float));
int i=0;
float s=0, medR, threshold;
memcpy(t, r, m*sizeof(float));
medR=medianF(t, m);
for (i=0; i<m; i++)
t[i] = (float)fabs((double)r[i]-(double)medR);
s = 1.48257968f * medianF(t, m);
threshold = (outlierScale*s+medR);
free(t);
return threshold;
}
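// Added note: the constant 1.48257968 is approximately 1/Phi^-1(3/4), which rescales
// the median absolute deviation (MAD) of the residuals into a consistent estimate of
// their standard deviation under a Gaussian model, so the returned cut-off is roughly
// medR + outlierScale * sigma. Illustrative sketch (values chosen only for the example):
static inline float exampleRejectionThreshold()
{
    float residuals[6] = {0.9f, 1.0f, 1.1f, 1.0f, 0.95f, 10.0f};   // one gross outlier
    return getRejectionThreshold(residuals, 6, 2.5f);              // far below 10.0f
}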
// Kok Lim Low's linearization
static void minimizePointToPlaneMetric(Mat Src, Mat Dst, Mat& X)
{
//Mat sub = Dst - Src;
Mat A = Mat(Src.rows, 6, CV_64F);
Mat b = Mat(Src.rows, 1, CV_64F);
#if defined _OPENMP
#pragma omp parallel for
#endif
for (int i=0; i<Src.rows; i++)
{
const double *srcPt = (double*)&Src.data[i*Src.step];
const double *dstPt = (double*)&Dst.data[i*Dst.step];
const double *normals = &dstPt[3];
double *bVal = (double*)&b.data[i*b.step];
double *aRow = (double*)&A.data[i*A.step];
const double sub[3]={dstPt[0]-srcPt[0], dstPt[1]-srcPt[1], dstPt[2]-srcPt[2]};
*bVal = TDot3(sub, normals);
TCross(srcPt, normals, aRow);
aRow[3] = normals[0];
aRow[4] = normals[1];
aRow[5] = normals[2];
}
cv::solve(A, b, X, DECOMP_SVD);
}
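// Added note: the function above implements Kok Lim Low's linearization of the
// point-to-plane error. For a correspondence (p, q) with destination normal n, the
// residual ((R*p + t - q) . n) is linearized with a small-angle rotation
// R ~ I + [r]_x, r = (alpha, beta, gamma), giving one row of the system A x = b:
//   A_i = [ p x n , n ]   (1x6),   b_i = (q - p) . n,
// and the 6-vector x = [alpha beta gamma tx ty tz] is recovered in the least-squares
// sense via cv::solve(..., DECOMP_SVD).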
static void getTransformMat(Mat X, double Pose[16])
{
Mat DCM;
double *r1, *r2, *r3;
double* x = (double*)X.data;
const double sx = sin(x[0]);
const double cx = cos(x[0]);
const double sy = sin(x[1]);
const double cy = cos(x[1]);
const double sz = sin(x[2]);
const double cz = cos(x[2]);
Mat R1 = Mat::eye(3,3, CV_64F);
Mat R2 = Mat::eye(3,3, CV_64F);
Mat R3 = Mat::eye(3,3, CV_64F);
r1= (double*)R1.data;
r2= (double*)R2.data;
r3= (double*)R3.data;
r1[4]= cx;
r1[5]= -sx;
r1[7]= sx;
r1[8]= cx;
r2[0]= cy;
r2[2]= sy;
r2[6]= -sy;
r2[8]= cy;
r3[0]= cz;
r3[1]= -sz;
r3[3]= sz;
r3[4]= cz;
DCM = R1*(R2*R3);
Pose[0] = DCM.at<double>(0,0);
Pose[1] = DCM.at<double>(0,1);
Pose[2] = DCM.at<double>(0,2);
Pose[4] = DCM.at<double>(1,0);
Pose[5] = DCM.at<double>(1,1);
Pose[6] = DCM.at<double>(1,2);
Pose[8] = DCM.at<double>(2,0);
Pose[9] = DCM.at<double>(2,1);
Pose[10] = DCM.at<double>(2,2);
Pose[3]=x[3];
Pose[7]=x[4];
Pose[11]=x[5];
Pose[15]=1;
}
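// Added note: X holds the 6-vector [alpha beta gamma tx ty tz] produced by
// minimizePointToPlaneMetric(). The rotation is assembled as R = Rx(alpha) * (Ry(beta)
// * Rz(gamma)) and written row-major into the upper-left 3x3 of the 4x4 Pose, with the
// translation placed in Pose[3], Pose[7] and Pose[11]. Only Pose[15] of the last row is
// written here, so Pose is expected to be pre-initialized (registerModelToScene()
// passes an identity-initialized PoseX).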
/* Fast way to look up the duplicates
......@@ -282,277 +282,277 @@ make sure that the max element in array will not exceed maxElement
*/
static hashtable_int* getHashtable(int* data, size_t length, int numMaxElement)
{
hashtable_int* hashtable = hashtableCreate(static_cast<size_t>(numMaxElement*2), 0);
for (size_t i = 0; i < length; i++)
{
const KeyType key = (KeyType)data[i];
hashtableInsertHashed(hashtable, key+1, reinterpret_cast<void*>(i+1));
}
return hashtable;
}
// source point clouds are assumed to contain their normals
int ICP::registerModelToScene(const Mat& srcPC, const Mat& dstPC, double& Residual, double Pose[16])
{
int n = srcPC.rows;
//double PoseInit[16];
bool UseRobustReject = m_rejectionScale>0;
Mat srcTemp = srcPC.clone();
Mat dstTemp = dstPC.clone();
double meanSrc[3], meanDst[3];
computeMeanCols(srcTemp, meanSrc);
computeMeanCols(dstTemp, meanDst);
double meanAvg[3]={0.5*(meanSrc[0]+meanDst[0]), 0.5*(meanSrc[1]+meanDst[1]), 0.5*(meanSrc[2]+meanDst[2])};
subtractColumns(srcTemp, meanAvg);
subtractColumns(dstTemp, meanAvg);
double distSrc = computeDistToOrigin(srcTemp);
double distDst = computeDistToOrigin(dstTemp);
double scale = (double)n / ((distSrc + distDst)*0.5);
//srcTemp(cv::Range(0, srcTemp.rows), cv::Range(0,3)) = (srcTemp(cv::Range(0, srcTemp.rows), cv::Range(0,3)) ) * scale;
//dstTemp(cv::Range(0, dstTemp.rows), cv::Range(0,3)) = (dstTemp(cv::Range(0, dstTemp.rows), cv::Range(0,3)) ) * scale;
srcTemp(cv::Range(0, srcTemp.rows), cv::Range(0,3)) *= scale;
dstTemp(cv::Range(0, dstTemp.rows), cv::Range(0,3)) *= scale;
Mat srcPC0 = srcTemp;
Mat dstPC0 = dstTemp;
// initialize pose
matrixIdentity(4, Pose);
void* flann = indexPCFlann(dstPC0);
Mat M = Mat::eye(4,4,CV_64F);
double tempResidual = 0;
// walk the pyramid
for (int level = m_numLevels-1; level >=0; level--)
{
const double impact = 2;
double div = pow((double)impact, (double)level);
//double div2 = div*div;
const int numSamples = cvRound((double)(n/(div)));
const double TolP = m_tolerence*(double)(level+1)*(level+1);
const int MaxIterationsPyr = cvRound((double)m_maxItereations/(level+1));
// Obtain the sampled point clouds for this level: Also rotates the normals
Mat srcPCT = transformPCPose(srcPC0, Pose);
const int sampleStep = cvRound((double)n/(double)numSamples);
std::vector<int> srcSampleInd;
/*
Note by Tolga Birdal
Downsample the model point clouds. If more optimization is required,
one could also downsample the scene points, but I think this might
decrease the accuracy. That's why I won't be implementing it at this
moment.
Also note that you have to compute a KD-tree for each level.
*/
srcPCT = samplePCUniformInd(srcPCT, sampleStep, srcSampleInd);
double fval_old=9999999999;
double fval_perc=0;
double fval_min=9999999999;
Mat Src_Moved = srcPCT.clone();
int i=0;
size_t numElSrc = (size_t)Src_Moved.rows;
int sizesResult[2] = {(int)numElSrc, 1};
float* distances = new float[numElSrc];
int* indices = new int[numElSrc];
Mat Indices(2, sizesResult, CV_32S, indices, 0);
Mat Distances(2, sizesResult, CV_32F, distances, 0);
// use robust weighting for outlier treatment
int* indicesModel = new int[numElSrc];
int* indicesScene = new int[numElSrc];
int* newI = new int[numElSrc];
int* newJ = new int[numElSrc];
double PoseX[16]={0};
matrixIdentity(4, PoseX);
while ( (!(fval_perc<(1+TolP) && fval_perc>(1-TolP))) && i<MaxIterationsPyr)
{
size_t di=0, selInd = 0;
queryPCFlann(flann, Src_Moved, Indices, Distances);
for (di=0; di<numElSrc; di++)
{
newI[di] = (int)di;
newJ[di] = indices[di];
}
if (UseRobustReject)
{
int numInliers = 0;
float threshold = getRejectionThreshold(distances, Distances.rows, m_rejectionScale);
Mat acceptInd = Distances<threshold;
uchar *accPtr = (uchar*)acceptInd.data;
for (int l=0; l<acceptInd.rows; l++)
{
if (accPtr[l])
{
newI[numInliers] = l;
newJ[numInliers] = indices[l];
numInliers++;
}
}
numElSrc=numInliers;
}
// Step 2: Picky ICP
// Among the resulting corresponding pairs, if more than one scene point p_i
// is assigned to the same model point m_j, then select p_i that corresponds
// to the minimum distance
hashtable_int* duplicateTable = getHashtable(newJ, numElSrc, dstPC0.rows);
for (di=0; di<duplicateTable->size; di++)
{
hashnode_i *node = duplicateTable->nodes[di];
if (node)
{
// select the first node
int idx = reinterpret_cast<long>(node->data)-1, dn=0;
int dup = (int)node->key-1;
int minIdxD = idx;
float minDist = distances[idx];
while ( node )
{
idx = reinterpret_cast<long>(node->data)-1;
if (distances[idx] < minDist)
{
minDist = distances[idx];
minIdxD = idx;
}
node = node->next;
dn++;
}
indicesModel[ selInd ] = newI[ minIdxD ];
indicesScene[ selInd ] = dup ;
selInd++;
}
}
hashtableDestroy(duplicateTable);
if (selInd)
{
Mat Src_Match = Mat((int)selInd, srcPCT.cols, CV_64F);
Mat Dst_Match = Mat((int)selInd, srcPCT.cols, CV_64F);
for (di=0; di<selInd; di++)
{
const int indModel = indicesModel[di];
const int indScene = indicesScene[di];
const float *srcPt = (float*)&srcPCT.data[indModel*srcPCT.step];
const float *dstPt = (float*)&dstPC0.data[indScene*dstPC0.step];
double *srcMatchPt = (double*)&Src_Match.data[di*Src_Match.step];
double *dstMatchPt = (double*)&Dst_Match.data[di*Dst_Match.step];
int ci=0;
for (ci=0; ci<srcPCT.cols; ci++)
{
srcMatchPt[ci] = (double)srcPt[ci];
dstMatchPt[ci] = (double)dstPt[ci];
}
}
Mat X;
minimizePointToPlaneMetric(Src_Match, Dst_Match, X);
getTransformMat(X, PoseX);
Src_Moved = transformPCPose(srcPCT, PoseX);
double fval = cv::norm(Src_Match, Dst_Match)/(double)(Src_Moved.rows);
// Calculate change in error between iterations
fval_perc=fval/fval_old;
// Store error value
fval_old=fval;
if (fval < fval_min)
fval_min = fval;
}
else
break;
i++;
}
double TempPose[16];
matrixProduct44(PoseX, Pose, TempPose);
// no need to copy the last 4 rows
for (int c=0; c<12; c++)
Pose[c] = TempPose[c];
Residual = tempResidual;
delete[] newI;
delete[] newJ;
delete[] indicesModel;
delete[] indicesScene;
delete[] distances;
delete[] indices;
tempResidual = fval_min;
}
// Pose(1:3, 4) = Pose(1:3, 4)./scale;
Pose[3] = Pose[3]/scale + meanAvg[0];
Pose[7] = Pose[7]/scale + meanAvg[1];
Pose[11] = Pose[11]/scale + meanAvg[2];
// In MATLAB this would be : Pose(1:3, 4) = Pose(1:3, 4)./scale + meanAvg' - Pose(1:3, 1:3)*meanAvg';
double Rpose[9], Cpose[3];
poseToR(Pose, Rpose);
matrixProduct331(Rpose, meanAvg, Cpose);
Pose[3] -= Cpose[0];
Pose[7] -= Cpose[1];
Pose[11] -= Cpose[2];
Residual = tempResidual;
destroyFlann(flann);
flann = 0;
return 0;
}
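// Usage sketch (illustrative only; file names are hypothetical): direct model-to-scene
// registration. Both clouds are assumed to be Nx6 CV_32F matrices (xyz followed by unit
// normals), e.g. as produced by loadPLYSimple(..., 1) from the module's helpers.
static void exampleRegisterModelToScene()
{
    Mat model = loadPLYSimple("model.ply", 1);
    Mat scene = loadPLYSimple("scene.ply", 1);

    ICP icp(100, 0.005f, 2.5f, 6);   // iterations, tolerance, rejection scale, pyramid levels
    double pose[16];
    double residual = 0;
    icp.registerModelToScene(model, scene, residual, pose);
}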
// source point clouds are assumed to contain their normals
int ICP::registerModelToScene(const Mat& srcPC, const Mat& dstPC, std::vector<Pose3D*>& poses)
{
for (size_t i=0; i<poses.size(); i++)
{
double PoseICP[16]={0};
Mat srcTemp = transformPCPose(srcPC, poses[i]->Pose);
registerModelToScene(srcTemp, dstPC, poses[i]->residual, PoseICP);
poses[i]->appendPose(PoseICP);
}
return 0;
}
} // namespace ppf_match_3d
......
......@@ -47,346 +47,346 @@ namespace ppf_match_3d
void Pose3D::updatePose(double NewPose[16])
{
double R[9];
for (int i=0; i<16; i++)
Pose[i]=NewPose[i];
R[0] = Pose[0];
R[1] = Pose[1];
R[2] = Pose[2];
R[3] = Pose[4];
R[4] = Pose[5];
R[5] = Pose[6];
R[6] = Pose[8];
R[7] = Pose[9];
R[8] = Pose[10];
t[0]=Pose[3];
t[1]=Pose[7];
t[2]=Pose[11];
// compute the angle
const double trace = R[0] + R[4] + R[8];
if (fabs(trace - 3) <= EPS)
{
angle = 0;
}
else
if (fabs(trace + 1) <= EPS)
{
angle = M_PI;
}
else
{
angle = ( acos((trace - 1)/2) );
}
// compute the quaternion
dcmToQuat(R, q);
}
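// Added note: the angle is recovered from the trace of the rotation matrix via
// trace(R) = 1 + 2*cos(theta), i.e. theta = acos((trace - 1)/2); the two special
// branches guard the numerically degenerate cases theta = 0 (trace near 3) and
// theta = pi (trace near -1).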
void Pose3D::updatePose(double NewR[9], double NewT[3])
{
Pose[0]=NewR[0];
Pose[1]=NewR[1];
Pose[2]=NewR[2];
Pose[3]=NewT[0];
Pose[4]=NewR[3];
Pose[5]=NewR[4];
Pose[6]=NewR[5];
Pose[7]=NewT[1];
Pose[8]=NewR[6];
Pose[9]=NewR[7];
Pose[10]=NewR[8];
Pose[11]=NewT[2];
Pose[12]=0;
Pose[13]=0;
Pose[14]=0;
Pose[15]=1;
// compute the angle
const double trace = NewR[0] + NewR[4] + NewR[8];
if (fabs(trace - 3) <= EPS)
{
angle = 0;
}
else
if (fabs(trace + 1) <= EPS)
{
angle = M_PI;
}
else
{
angle = ( acos((trace - 1)/2) );
}
// compute the quaternion
dcmToQuat(NewR, q);
}
void Pose3D::updatePoseQuat(double Q[4], double NewT[3])
{
double NewR[9];
quatToDCM(Q, NewR);
q[0]=Q[0];
q[1]=Q[1];
q[2]=Q[2];
q[3]=Q[3];
Pose[0]=NewR[0];
Pose[1]=NewR[1];
Pose[2]=NewR[2];
Pose[3]=NewT[0];
Pose[4]=NewR[3];
Pose[5]=NewR[4];
Pose[6]=NewR[5];
Pose[7]=NewT[1];
Pose[8]=NewR[6];
Pose[9]=NewR[7];
Pose[10]=NewR[8];
Pose[11]=NewT[2];
Pose[12]=0;
Pose[13]=0;
Pose[14]=0;
Pose[15]=1;
// compute the angle
const double trace = NewR[0] + NewR[4] + NewR[8];
if (fabs(trace - 3) <= EPS)
{
angle = 0;
}
else
{
if (fabs(trace + 1) <= EPS)
{
angle = M_PI;
}
else
{
angle = ( acos((trace - 1)/2) );
}
}
}
void Pose3D::appendPose(double IncrementalPose[16])
{
double R[9], PoseFull[16]={0};
matrixProduct44(IncrementalPose, this->Pose, PoseFull);
R[0] = PoseFull[0];
R[1] = PoseFull[1];
R[2] = PoseFull[2];
R[3] = PoseFull[4];
R[4] = PoseFull[5];
R[5] = PoseFull[6];
R[6] = PoseFull[8];
R[7] = PoseFull[9];
R[8] = PoseFull[10];
t[0]=PoseFull[3];
t[1]=PoseFull[7];
t[2]=PoseFull[11];
// compute the angle
const double trace = R[0] + R[4] + R[8];
if (fabs(trace - 3) <= EPS)
{
angle = 0;
}
else
if (fabs(trace + 1) <= EPS)
{
angle = M_PI;
}
else
{
angle = ( acos((trace - 1)/2) );
}
// compute the quaternion
dcmToQuat(R, q);
for (int i=0; i<16; i++)
Pose[i]=PoseFull[i];
}
Pose3D* Pose3D::clone()
{
Pose3D* pose = new Pose3D(alpha, modelIndex, numVotes);
for (int i=0; i<16; i++)
pose->Pose[i]= Pose[i];
pose->q[0]=q[0];
pose->q[1]=q[1];
pose->q[2]=q[2];
pose->q[3]=q[3];
pose->t[0]=t[0];
pose->t[1]=t[1];
pose->t[2]=t[2];
pose->angle=angle;
return pose;
}
void Pose3D::printPose()
{
printf("\n-- Pose to Model Index %d: NumVotes = %d, Residual = %f\n", this->modelIndex, this->numVotes, this->residual);
for (int j=0; j<4; j++)
{
for (int k=0; k<4; k++)
{
printf("%f ", this->Pose[j*4+k]);
}
printf("\n");
}
printf("\n");
}
int Pose3D::writePose(FILE* f)
{
int POSE_MAGIC = 7673;
fwrite(&POSE_MAGIC, sizeof(int), 1, f);
fwrite(&angle, sizeof(double), 1, f);
fwrite(&numVotes, sizeof(int), 1, f);
fwrite(&modelIndex, sizeof(int), 1, f);
fwrite(Pose, sizeof(double)*16, 1, f);
fwrite(t, sizeof(double)*3, 1, f);
fwrite(q, sizeof(double)*4, 1, f);
fwrite(&residual, sizeof(double), 1, f);
return 0;
}
int Pose3D::readPose(FILE* f)
{
int POSE_MAGIC = 7673, magic;
size_t status = fread(&magic, sizeof(int), 1, f);
if (status && magic == POSE_MAGIC)
{
status = fread(&angle, sizeof(double), 1, f);
status = fread(&numVotes, sizeof(int), 1, f);
status = fread(&modelIndex, sizeof(int), 1, f);
status = fread(Pose, sizeof(double)*16, 1, f);
status = fread(t, sizeof(double)*3, 1, f);
status = fread(q, sizeof(double)*4, 1, f);
status = fread(&residual, sizeof(double), 1, f);
return 0;
}
return -1;
}
int Pose3D::writePose(const std::string& FileName)
{
FILE* f = fopen(FileName.c_str(), "wb");
if (!f)
return -1;
int status = writePose(f);
fclose(f);
return status;
}
int Pose3D::readPose(const std::string& FileName)
{
FILE* f = fopen(FileName.c_str(), "rb");
if (!f)
return -1;
int status = readPose(f);
fclose(f);
return status;
}
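// Usage sketch (illustrative only; the file name is hypothetical): round-tripping a
// pose through the binary format implemented above.
static void examplePoseIO()
{
    double somePose[16] = {1,0,0,0, 0,1,0,0, 0,0,1,0, 0,0,0,1};

    Pose3D pose;
    pose.updatePose(somePose);
    pose.writePose("pose.bin");

    Pose3D restored;
    restored.readPose("pose.bin");
}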
void PoseCluster3D::addPose(Pose3D* newPose)
{
poseList.push_back(newPose);
this->numVotes += newPose->numVotes;
};
int PoseCluster3D::writePoseCluster(FILE* f)
{
int POSE_CLUSTER_MAGIC_IO = 8462597;
fwrite(&POSE_CLUSTER_MAGIC_IO, sizeof(int), 1, f);
fwrite(&id, sizeof(int), 1, f);
fwrite(&numVotes, sizeof(int), 1, f);
int numPoses = (int)poseList.size();
fwrite(&numPoses, sizeof(int), 1, f);
for (int i=0; i<numPoses; i++)
poseList[i]->writePose(f);
return 0;
}
int PoseCluster3D::readPoseCluster(FILE* f)
{
// The magic values are only used to check the files
int POSE_CLUSTER_MAGIC_IO = 8462597;
int magic=0, numPoses=0;
size_t status;
status = fread(&magic, sizeof(int), 1, f);
if (!status || magic!=POSE_CLUSTER_MAGIC_IO)
return -1;
status = fread(&id, sizeof(int), 1, f);
status = fread(&numVotes, sizeof(int), 1, f);
status = fread(&numPoses, sizeof(int), 1, f);
fclose(f);
poseList.clear();
poseList.resize(numPoses);
for (size_t i=0; i<poseList.size(); i++)
{
poseList[i] = new Pose3D();
poseList[i]->readPose(f);
}
return 0;
}
int PoseCluster3D::writePoseCluster(const std::string& FileName)
{
FILE* f = fopen(FileName.c_str(), "wb");
if (!f)
return -1;
int status = writePoseCluster(f);
fclose(f);
return status;
}
int PoseCluster3D::readPoseCluster(const std::string& FileName)
{
FILE* f = fopen(FileName.c_str(), "rb");
if (!f)
return -1;
int status = readPoseCluster(f);
fclose(f);
return status;
}
} // namespace ppf_match_3d
......
......@@ -57,160 +57,160 @@ void meanCovLocalPCInd(const float* pc, const int* Indices, const int ws, const
Mat loadPLYSimple(const char* fileName, int withNormals)
{
Mat cloud;
int numVertices=0;
std::ifstream ifs(fileName);
if (!ifs.is_open())
{
printf("Cannot open file...\n");
return Mat();
}
std::string str;
while (str.substr(0, 10) !="end_header")
{
std::string entry = str.substr(0, 14);
if (entry == "element vertex")
{
numVertices = atoi(str.substr(15, str.size()-15).c_str());
}
std::getline(ifs, str);
}
if (withNormals)
cloud=Mat(numVertices, 6, CV_32FC1);
else
cloud=Mat(numVertices, 3, CV_32FC1);
for (int i = 0; i < numVertices; i++)
{
float* data = (float*)(&cloud.data[i*cloud.step[0]]);
if (withNormals)
{
ifs >> data[0] >> data[1] >> data[2] >> data[3] >> data[4] >> data[5];
// normalize to unit norm
double norm = sqrt(data[3]*data[3] + data[4]*data[4] + data[5]*data[5]);
if (norm>0.00001)
{
data[3]/=(float)norm;
data[4]/=(float)norm;
data[5]/=(float)norm;
}
}
else
{
ifs >> data[0] >> data[1] >> data[2];
}
}
//cloud *= 5.0f;
return cloud;
}
void writePLY(Mat PC, const char* FileName)
{
std::ofstream outFile( FileName );
if ( !outFile )
{
//cerr << "Error opening output file: " << FileName << "!" << endl;
printf("Error opening output file: %s!\n", FileName);
exit( 1 );
}
////
// Header
////
const int pointNum = ( int ) PC.rows;
const int vertNum = ( int ) PC.cols;
outFile << "ply" << std::endl;
outFile << "format ascii 1.0" << std::endl;
outFile << "element vertex " << pointNum << std::endl;
outFile << "property float x" << std::endl;
outFile << "property float y" << std::endl;
outFile << "property float z" << std::endl;
if (vertNum==6)
{
outFile << "property float nx" << std::endl;
outFile << "property float ny" << std::endl;
outFile << "property float nz" << std::endl;
}
outFile << "end_header" << std::endl;
////
// Points
////
for ( int pi = 0; pi < pointNum; ++pi )
{
const float* point = (float*)(&PC.data[ pi*PC.step ]);
outFile << point[0] << " "<<point[1]<<" "<<point[2];
if (vertNum==6)
{
outFile<<" " << point[3] << " "<<point[4]<<" "<<point[5];
}
outFile << std::endl;
}
return;
}
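// Usage sketch (illustrative only; file names are hypothetical): loading an ASCII PLY
// with normals and writing it back out with the two helpers above.
static void examplePlyRoundTrip()
{
    Mat cloud = loadPLYSimple("input.ply", 1);   // Nx6: xyz followed by unit normals
    writePLY(cloud, "copy.ply");
}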
Mat samplePCUniform(Mat PC, int sampleStep)
{
int numRows = PC.rows/sampleStep;
Mat sampledPC = Mat(numRows, PC.cols, PC.type());
int c=0;
for (int i=0; i<PC.rows && c<numRows; i+=sampleStep)
{
PC.row(i).copyTo(sampledPC.row(c++));
}
return sampledPC;
}
Mat samplePCUniformInd(Mat PC, int sampleStep, std::vector<int> &indices)
{
int numRows = cvRound((double)PC.rows/(double)sampleStep);
indices.resize(numRows);
Mat sampledPC = Mat(numRows, PC.cols, PC.type());
int c=0;
for (int i=0; i<PC.rows && c<numRows; i+=sampleStep)
{
indices[c] = i;
PC.row(i).copyTo(sampledPC.row(c++));
}
return sampledPC;
}
void* indexPCFlann(Mat pc)
{
Mat dest_32f;
pc.colRange(0,3).copyTo(dest_32f);
return new FlannIndex(dest_32f, cvflann::KDTreeSingleIndexParams(8));
}
void destroyFlann(void* flannIndex)
{
delete ((FlannIndex*)flannIndex);
}
// For speed purposes this function assumes that PC, Indices and Distances are created with continuous structures
void queryPCFlann(void* flannIndex, Mat& pc, Mat& indices, Mat& distances)
{
Mat obj_32f;
pc.colRange(0,3).copyTo(obj_32f);
((FlannIndex*)flannIndex)->knnSearch(obj_32f, indices, distances, 1, cvflann::SearchParams(32) );
}
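// Usage sketch (illustrative only): nearest-neighbour lookup between two clouds with
// the three helpers above; indices and distances receive one row per query point.
static void exampleFlannQuery(Mat& scene, Mat& queries)
{
    void* index = indexPCFlann(scene);

    Mat indices(queries.rows, 1, CV_32S);
    Mat distances(queries.rows, 1, CV_32F);
    queryPCFlann(index, queries, indices, distances);

    destroyFlann(index);
}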
// uses a volume instead of an octree
......@@ -218,359 +218,358 @@ void queryPCFlann(void* flannIndex, Mat& pc, Mat& indices, Mat& distances)
// This is much faster than sample_pc_octree
Mat samplePCByQuantization(Mat pc, float xrange[2], float yrange[2], float zrange[2], float sampleStep, int weightByCenter)
{
std::vector< std::vector<int> > map;
int numSamplesDim = (int)(1.0/sampleStep);
float xr = xrange[1] - xrange[0];
float yr = yrange[1] - yrange[0];
float zr = zrange[1] - zrange[0];
int numPoints = 0;
map.resize((numSamplesDim+1)*(numSamplesDim+1)*(numSamplesDim+1));
// OpenMP might seem like a good idea, but it didn't speed this up for me
//#pragma omp parallel for
for (int i=0; i<pc.rows; i++)
{
const float* point = (float*)(&pc.data[i * pc.step]);
// quantize a point
const int xCell =(int) ((float)numSamplesDim*(point[0]-xrange[0])/xr);
const int yCell =(int) ((float)numSamplesDim*(point[1]-yrange[0])/yr);
const int zCell =(int) ((float)numSamplesDim*(point[2]-zrange[0])/zr);
const int index = xCell*numSamplesDim*numSamplesDim+yCell*numSamplesDim+zCell;
/*#pragma omp critical
{*/
map[index].push_back(i);
// }
}
for (unsigned int i=0; i<map.size(); i++)
{
numPoints += (map[i].size()>0);
}
Mat pcSampled = Mat(numPoints, pc.cols, CV_32F);
int c = 0;
for (unsigned int i=0; i<map.size(); i++)
{
double px=0, py=0, pz=0;
double nx=0, ny=0, nz=0;
std::vector<int> curCell = map[i];
int cn = (int)curCell.size();
if (cn>0)
{
if (weightByCenter)
{
int xCell, yCell, zCell;
double xc, yc, zc;
double weightSum = 0 ;
zCell = i % numSamplesDim;
yCell = ((i-zCell)/numSamplesDim) % numSamplesDim;
xCell = ((i-zCell-yCell*numSamplesDim)/(numSamplesDim*numSamplesDim));
xc = ((double)xCell+0.5) * (double)xr/numSamplesDim + (double)xrange[0];
yc = ((double)yCell+0.5) * (double)yr/numSamplesDim + (double)yrange[0];
zc = ((double)zCell+0.5) * (double)zr/numSamplesDim + (double)zrange[0];
for (int j=0; j<cn; j++)
{
const int ptInd = curCell[j];
float* point = (float*)(&pc.data[ptInd * pc.step]);
const double dx = point[0]-xc;
const double dy = point[1]-yc;
const double dz = point[2]-zc;
const double d = sqrt(dx*dx+dy*dy+dz*dz);
double w = 0;
if (d>EPS)
{
// it is possible to use different weighting schemes.
// inverse weighting was just good for me
// exp( - (distance/h)**2 )
//const double w = exp(-d*d);
w = 1.0/d;
}
//float weights[3]={1,1,1};
px += w*(double)point[0];
py += w*(double)point[1];
pz += w*(double)point[2];
nx += w*(double)point[3];
ny += w*(double)point[4];
nz += w*(double)point[5];
weightSum+=w;
}
px/=(double)weightSum;
py/=(double)weightSum;
pz/=(double)weightSum;
nx/=(double)weightSum;
ny/=(double)weightSum;
nz/=(double)weightSum;
}
else
{
for (int j=0; j<cn; j++)
{
const int ptInd = curCell[j];
float* point = (float*)(&pc.data[ptInd * pc.step]);
px += (double)point[0];
py += (double)point[1];
pz += (double)point[2];
nx += (double)point[3];
ny += (double)point[4];
nz += (double)point[5];
}
px/=(double)cn;
py/=(double)cn;
pz/=(double)cn;
nx/=(double)cn;
ny/=(double)cn;
nz/=(double)cn;
}
float *pcData = (float*)(&pcSampled.data[c*pcSampled.step[0]]);
pcData[0]=(float)px;
pcData[1]=(float)py;
pcData[2]=(float)pz;
// normalize the normals
double norm = sqrt(nx*nx+ny*ny+nz*nz);
if (norm>EPS)
{
pcData[3]=(float)(nx/norm);
pcData[4]=(float)(ny/norm);
pcData[5]=(float)(nz/norm);
}
//#pragma omp atomic
c++;
curCell.clear();
}
}
map.clear();
return pcSampled;
}
void shuffle(int *array, size_t n)
{
size_t i;
for (i = 0; i < n - 1; i++)
{
size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
int t = array[j];
array[j] = array[i];
array[i] = t;
}
}
// compute the standard bounding box
void computeBboxStd(Mat pc, float xRange[2], float yRange[2], float zRange[2])
{
Mat pcPts = pc.colRange(0, 3);
int num = pcPts.rows;
float* points = (float*)pcPts.data;
xRange[0] = points[0];
xRange[1] = points[0];
yRange[0] = points[1];
yRange[1] = points[1];
zRange[0] = points[2];
zRange[1] = points[2];
for ( int ind = 0; ind < num; ind++ )
{
const float* row = (float*)(pcPts.data + (ind * pcPts.step));
const float x = row[0];
const float y = row[1];
const float z = row[2];
if (x<xRange[0])
xRange[0]=x;
if (x>xRange[1])
xRange[1]=x;
if (y<yRange[0])
yRange[0]=y;
if (y>yRange[1])
yRange[1]=y;
if (z<zRange[0])
zRange[0]=z;
if (z>zRange[1])
zRange[1]=z;
}
}
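// Usage sketch (illustrative only): voxel-style downsampling of a cloud with normals,
// averaging the points that fall into the same cell; sampleStep=0.05f corresponds to a
// cell size of 5% of the bounding-box extent along each axis.
static Mat exampleQuantizedDownsample(Mat pc)
{
    float xRange[2], yRange[2], zRange[2];
    computeBboxStd(pc, xRange, yRange, zRange);
    return samplePCByQuantization(pc, xRange, yRange, zRange, 0.05f, 0);
}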
Mat normalizePCCoeff(Mat pc, float scale, float* Cx, float* Cy, float* Cz, float* MinVal, float* MaxVal)
{
double minVal=0, maxVal=0;
Mat x,y,z, pcn;
pc.col(0).copyTo(x);
pc.col(1).copyTo(y);
pc.col(2).copyTo(z);
float cx = (float) cv::mean(x).val[0];
float cy = (float) cv::mean(y).val[0];
float cz = (float) cv::mean(z).val[0];
cv::minMaxIdx(pc, &minVal, &maxVal);
x=x-cx;
y=y-cy;
z=z-cz;
pcn.create(pc.rows, 3, CV_32FC1);
x.copyTo(pcn.col(0));
y.copyTo(pcn.col(1));
z.copyTo(pcn.col(2));
cv::minMaxIdx(pcn, &minVal, &maxVal);
pcn=(float)scale*(pcn)/((float)maxVal-(float)minVal);
*MinVal=(float)minVal;
*MaxVal=(float)maxVal;
*Cx=(float)cx;
*Cy=(float)cy;
*Cz=(float)cz;
return pcn;
}
Mat transPCCoeff(Mat pc, float scale, float Cx, float Cy, float Cz, float MinVal, float MaxVal)
{
Mat x,y,z, pcn;
pc.col(0).copyTo(x);
pc.col(1).copyTo(y);
pc.col(2).copyTo(z);
x=x-Cx;
y=y-Cy;
z=z-Cz;
pcn.create(pc.rows, 3, CV_32FC1);
x.copyTo(pcn.col(0));
y.copyTo(pcn.col(1));
z.copyTo(pcn.col(2));
pcn=(float)scale*(pcn)/((float)MaxVal-(float)MinVal);
return pcn;
}
Mat transformPCPose(Mat pc, double Pose[16])
{
Mat pct = Mat(pc.rows, pc.cols, CV_32F);
double R[9], t[3];
poseToRT(Pose, R, t);
#if defined _OPENMP
#pragma omp parallel for
#endif
for (int i=0; i<pc.rows; i++)
{
const float *pcData = (float*)(&pc.data[i*pc.step]);
float *pcDataT = (float*)(&pct.data[i*pct.step]);
const float *n1 = &pcData[3];
float *nT = &pcDataT[3];
double p[4] = {(double)pcData[0], (double)pcData[1], (double)pcData[2], 1};
double p2[4];
matrixProduct441(Pose, p, p2);
// p2[3] should normally be 1
if (fabs(p2[3])>EPS)
{
pcDataT[0] = (float)(p2[0]/p2[3]);
pcDataT[1] = (float)(p2[1]/p2[3]);
pcDataT[2] = (float)(p2[2]/p2[3]);
}
// Rotate the normals, too
double n[3] = {(double)n1[0], (double)n1[1], (double)n1[2]}, n2[3];
matrixProduct331(R, n, n2);
double nNorm = sqrt(n2[0]*n2[0]+n2[1]*n2[1]+n2[2]*n2[2]);
if (nNorm>EPS)
{
nT[0]=(float)(n2[0]/nNorm);
nT[1]=(float)(n2[1]/nNorm);
nT[2]=(float)(n2[2]/nNorm);
}
}
return pct;
}
Mat genRandomMat(int rows, int cols, double mean, double stddev, int type)
{
Mat meanMat = mean*Mat::ones(1,1,type);
Mat sigmaMat= stddev*Mat::ones(1,1,type);
RNG rng(time(0));
Mat matr(rows, cols,type);
rng.fill(matr, RNG::NORMAL, meanMat, sigmaMat);
return matr;
}
void getRandQuat(double q[4])
{
q[0] = (float)rand()/(float)(RAND_MAX);
q[1] = (float)rand()/(float)(RAND_MAX);
q[2] = (float)rand()/(float)(RAND_MAX);
q[3] = (float)rand()/(float)(RAND_MAX);
double n = sqrt(q[0]*q[0]+q[1]*q[1]+q[2]*q[2]+q[3]*q[3]);
q[0]/=n;
q[1]/=n;
q[2]/=n;
q[3]/=n;
q[0]=fabs(q[0]);
}
void getRandomRotation(double R[9])
{
double q[4];
getRandQuat(q);
quatToDCM(q, R);
}
void getRandomPose(double Pose[16])
{
double R[9], t[3];
srand((unsigned int)time(0));
getRandomRotation(R);
t[0] = (float)rand()/(float)(RAND_MAX);
t[1] = (float)rand()/(float)(RAND_MAX);
t[2] = (float)rand()/(float)(RAND_MAX);
rtToPose(R,t,Pose);
}
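// Usage sketch (illustrative only): perturbing a cloud with a random rigid pose;
// transformPCPose() above transforms the points and rotates the normals as well.
static Mat exampleRandomRigidTransform(Mat pc)
{
    double pose[16];
    getRandomPose(pose);   // random rotation, translation components in [0,1)
    return transformPCPose(pc, pose);
}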
Mat addNoisePC(Mat pc, double scale)
{
Mat randT = genRandomMat(pc.rows,pc.cols,0,scale,CV_32FC1);
return randT + pc;
}
//////////// COMPUTE NORMALS OF POINT CLOUD //////////////////////////////////
/*
The routines below use the eigenvectors of the local covariance matrix
to compute the normals of a point cloud.
......@@ -581,157 +580,156 @@ Also, view point flipping as in point cloud library is implemented
void meanCovLocalPC(const float* pc, const int ws, const int point_count, double CovMat[3][3], double Mean[4])
{
  int i;
  double accu[16]={0};
  // For each point in the cloud
  for (i = 0; i < point_count; ++i)
  {
    const float* cloud = &pc[i*ws];
    accu[0] += cloud[0] * cloud[0];
    accu[1] += cloud[0] * cloud[1];
    accu[2] += cloud[0] * cloud[2];
    accu[3] += cloud[1] * cloud[1]; // 4
    accu[4] += cloud[1] * cloud[2]; // 5
    accu[5] += cloud[2] * cloud[2]; // 8
    accu[6] += cloud[0];
    accu[7] += cloud[1];
    accu[8] += cloud[2];
  }
  for (i = 0; i < 9; ++i)
    accu[i]/=(double)point_count;
  Mean[0] = accu[6];
  Mean[1] = accu[7];
  Mean[2] = accu[8];
  Mean[3] = 0;
  CovMat[0][0] = accu[0] - accu[6] * accu[6];
  CovMat[0][1] = accu[1] - accu[6] * accu[7];
  CovMat[0][2] = accu[2] - accu[6] * accu[8];
  CovMat[1][1] = accu[3] - accu[7] * accu[7];
  CovMat[1][2] = accu[4] - accu[7] * accu[8];
  CovMat[2][2] = accu[5] - accu[8] * accu[8];
  CovMat[1][0] = CovMat[0][1];
  CovMat[2][0] = CovMat[0][2];
  CovMat[2][1] = CovMat[1][2];
}
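// meanCovLocalPCInd below is the indexed variant of meanCovLocalPC: both accumulate the raw
// first- and second-order moments of the (sub)set of points and form the 3x3 covariance as
// E[x x^T] - mu mu^T, filling only the upper triangle and mirroring it.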
void meanCovLocalPCInd(const float* pc, const int* Indices, const int ws, const int point_count, double CovMat[3][3], double Mean[4])
{
  int i;
  double accu[16]={0};
  for (i = 0; i < point_count; ++i)
  {
    const float* cloud = &pc[ Indices[i] * ws ];
    accu[0] += cloud[0] * cloud[0];
    accu[1] += cloud[0] * cloud[1];
    accu[2] += cloud[0] * cloud[2];
    accu[3] += cloud[1] * cloud[1]; // 4
    accu[4] += cloud[1] * cloud[2]; // 5
    accu[5] += cloud[2] * cloud[2]; // 8
    accu[6] += cloud[0];
    accu[7] += cloud[1];
    accu[8] += cloud[2];
  }
  for (i = 0; i < 9; ++i)
    accu[i]/=(double)point_count;
  Mean[0] = accu[6];
  Mean[1] = accu[7];
  Mean[2] = accu[8];
  Mean[3] = 0;
  CovMat[0][0] = accu[0] - accu[6] * accu[6];
  CovMat[0][1] = accu[1] - accu[6] * accu[7];
  CovMat[0][2] = accu[2] - accu[6] * accu[8];
  CovMat[1][1] = accu[3] - accu[7] * accu[7];
  CovMat[1][2] = accu[4] - accu[7] * accu[8];
  CovMat[2][2] = accu[5] - accu[8] * accu[8];
  CovMat[1][0] = CovMat[0][1];
  CovMat[2][0] = CovMat[0][2];
  CovMat[2][1] = CovMat[1][2];
}
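// computeNormalsPC3d: copy the xyz coordinates into a dense buffer, build a FLANN index, query
// the NumNeighbors nearest neighbors of every point, estimate each normal as the eigenvector of
// the smallest eigenvalue of the local covariance (eigenLowest33), and optionally flip it
// towards the given viewpoint.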
CV_EXPORTS int computeNormalsPC3d(const Mat& PC, Mat& PCNormals, const int NumNeighbors, const bool FlipViewpoint, const double viewpoint[3])
{
  int i;
  if (PC.cols!=3 && PC.cols!=6) // 3d data is expected
  {
    //return -1;
    CV_Error(cv::Error::BadImageSize, "PC should have 3 or 6 elements in its columns");
  }
  int sizes[2] = {PC.rows, 3};
  int sizesResult[2] = {PC.rows, NumNeighbors};
  float* dataset = new float[PC.rows*3];
  float* distances = new float[PC.rows*NumNeighbors];
  int* indices = new int[PC.rows*NumNeighbors];
  for (i=0; i<PC.rows; i++)
  {
    const float* src = (float*)(&PC.data[i*PC.step]);
    float* dst = (float*)(&dataset[i*3]);
    dst[0] = src[0];
    dst[1] = src[1];
    dst[2] = src[2];
  }
  Mat PCInput(2, sizes, CV_32F, dataset, 0);
  void* flannIndex = indexPCFlann(PCInput);
  Mat Indices(2, sizesResult, CV_32S, indices, 0);
  Mat Distances(2, sizesResult, CV_32F, distances, 0);
  queryPCFlann(flannIndex, PCInput, Indices, Distances);
  destroyFlann(flannIndex);
  flannIndex = 0;
  PCNormals = Mat(PC.rows, 6, CV_32F);
  for (i=0; i<PC.rows; i++)
  {
    double C[3][3], mu[4];
    const float* pci = &dataset[i*3];
    float* pcr = (float*)(&PCNormals.data[i*PCNormals.step]);
    double nr[3];
    int* indLocal = &indices[i*NumNeighbors];
    // compute covariance matrix
    meanCovLocalPCInd(dataset, indLocal, 3, NumNeighbors, C, mu);
    // eigenvectors of covariance matrix
    eigenLowest33(C, nr);
    pcr[0] = pci[0];
    pcr[1] = pci[1];
    pcr[2] = pci[2];
    if (FlipViewpoint)
    {
      flipNormalViewpoint(pci, viewpoint[0], viewpoint[1], viewpoint[2], &nr[0], &nr[1], &nr[2]);
    }
    pcr[3] = (float)nr[0];
    pcr[4] = (float)nr[1];
    pcr[5] = (float)nr[2];
  }
  delete[] indices;
  delete[] distances;
  delete[] dataset;
  return 1;
}
//////////////////////////////////////// END OF NORMAL COMPUTATIONS ///////////////////////////////////
} // namespace ppf_match_3d
} // namespace cv
......@@ -46,276 +46,274 @@ namespace cv
namespace ppf_match_3d
{
// routines for assisting sort
static int qsortPoseCmp(const void * a, const void * b)
{
  Pose3D* pose1 = *(Pose3D**)a;
  Pose3D* pose2 = *(Pose3D**)b;
  return ( pose2->numVotes - pose1->numVotes );
}
static int sortPoseClusters(const PoseCluster3D* a, const PoseCluster3D* b)
{
  return ( a->numVotes > b->numVotes );
}
// simple hashing
/*static int hashPPFSimple(const double f[4], const double AngleStep, const double DistanceStep)
{
  const unsigned char d1 = (unsigned char) (floor ((double)f[0] / (double)AngleStep));
  const unsigned char d2 = (unsigned char) (floor ((double)f[1] / (double)AngleStep));
  const unsigned char d3 = (unsigned char) (floor ((double)f[2] / (double)AngleStep));
  const unsigned char d4 = (unsigned char) (floor ((double)f[3] / (double)DistanceStep));
  int hashKey = (d1 | (d2<<8) | (d3<<16) | (d4<<24));
  return hashKey;
}*/
// quantize ppf and hash it for proper indexing
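// The three angles are binned by AngleStep and the distance by DistanceStep; the integer
// 4-tuple is then murmur-hashed into a single key, so point pairs with the same quantized
// feature map to the same hash bucket.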
static KeyType hashPPF(const double f[4], const double AngleStep, const double DistanceStep)
{
  const int d1 = (int) (floor ((double)f[0] / (double)AngleStep));
  const int d2 = (int) (floor ((double)f[1] / (double)AngleStep));
  const int d3 = (int) (floor ((double)f[2] / (double)AngleStep));
  const int d4 = (int) (floor ((double)f[3] / (double)DistanceStep));
  int key[4]={d1,d2,d3,d4};
  KeyType hashKey=0;
  murmurHash(key, 4*sizeof(int), 42, &hashKey);
  return hashKey;
}
/*static size_t hashMurmur(unsigned int key)
{
  size_t hashKey=0;
  hashMurmurx86((void*)&key, 4, 42, &hashKey);
  return hashKey;
}*/
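// computeAlpha returns the in-plane rotation angle of p2 about the x axis in the intermediate
// frame of the point pair (computeTransformRTyz is assumed here to align n1 with the x axis and
// move p1 to the origin, as in the usual PPF construction); only the y/z components of the
// transformed p2 are needed to recover it.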
static double computeAlpha(const double p1[4], const double n1[4], const double p2[4])
{
  double Tmg[3], mpt[3], row2[3], row3[3], alpha;
  computeTransformRTyz(p1, n1, row2, row3, Tmg);
  // checked row2, row3: They are correct
  mpt[1] = Tmg[1] + row2[0] * p2[0] + row2[1] * p2[1] + row2[2] * p2[2];
  mpt[2] = Tmg[2] + row3[0] * p2[0] + row3[1] * p2[1] + row3[2] * p2[2];
  alpha=atan2(-mpt[2], mpt[1]);
  if ( alpha != alpha) // NaN check
  {
    return 0;
  }
  if (sin(alpha)*mpt[2]<0.0)
    alpha=-alpha;
  return (-alpha);
}
PPF3DDetector::PPF3DDetector()
{
  samplingStepRelative = 0.05;
  distanceStepRelative = 0.05;
  SceneSampleStep = (int)(1/0.04);
  angleStepRelative = 30;
  angleStepRadians = (360.0/angleStepRelative)*M_PI/180.0;
  angle_step = angleStepRadians;
  trained = false;
  setSearchParams();
}
PPF3DDetector::PPF3DDetector(const double RelativeSamplingStep, const double RelativeDistanceStep, const double NumAngles)
{
  samplingStepRelative = RelativeSamplingStep;
  distanceStepRelative = RelativeDistanceStep;
  angleStepRelative = NumAngles;
  angleStepRadians = (360.0/angleStepRelative)*M_PI/180.0;
  //SceneSampleStep = 1.0/RelativeSceneSampleStep;
  angle_step = angleStepRadians;
  trained = false;
  setSearchParams();
}
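// Negative position/rotation thresholds select defaults derived from the relative sampling step
// and the angle step; MinMatchScore is stored but not used yet (see the TODO in clusterPoses).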
void PPF3DDetector::setSearchParams(const int numPoses, const double positionThreshold, const double rotationThreshold, const double minMatchScore, const bool useWeightedClustering)
{
  NumPoses=numPoses;
  if (positionThreshold<0)
    PositionThreshold = samplingStepRelative;
  else
    PositionThreshold = positionThreshold;
  if (rotationThreshold<0)
    RotationThreshold = ((360/angle_step) / 180.0 * M_PI);
  else
    RotationThreshold = rotationThreshold;
  UseWeightedAvg = useWeightedClustering;
  MinMatchScore = minMatchScore;
}
// compute per point PPF as in paper
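// F(p1, p2) = ( angle(n1, d), angle(n2, d), angle(n1, n2), ||d|| ) with d = p2 - p1,
// i.e. the four-dimensional point pair feature used for hashing and voting.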
void PPF3DDetector::computePPFFeatures(const double p1[4], const double n1[4],
                                       const double p2[4], const double n2[4],
                                       double f[4])
{
  /*
  Vectors are defined with length 4 instead of 3 because of:
  - Further SIMD vectorization
  - Cache alignment
  */
  double d[4] = {p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2], 0};
  double norm = TNorm3(d);
  f[3] = norm;
  if (norm)
  {
    d[0] /= f[3];
    d[1] /= f[3];
    d[2] /= f[3];
  }
  else
  {
    // TODO: Handle the degenerate case of coincident points
    f[0] = 0;
    f[1] = 0;
    f[2] = 0;
    return;
  }
  /*
  Tolga Birdal's note:
  Issues of numerical stability are of concern here.
  Bertram's suggestion: atan2(a dot b, |axb|)
  My correction:
  I guess it should be: angle = atan2(norm(cross(a,b)), dot(a,b))
  The macro is implemented accordingly.
  TAngle3 actually outputs in the range [0, pi], as Bertram suggests.
  */
  f[0] = TAngle3(n1, d);
  f[1] = TAngle3(n2, d);
  f[2] = TAngle3(n1, n2);
}
void PPF3DDetector::clearTrainingModels()
{
  if (this->hash_nodes)
  {
    free(this->hash_nodes);
    this->hash_nodes=0;
  }
  if (this->hash_table)
  {
    hashtableDestroy(this->hash_table);
    this->hash_table=0;
  }
}
PPF3DDetector::~PPF3DDetector()
{
  clearTrainingModels();
}
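// Training: subsample the model by quantization, then for every ordered pair (i, j) of sampled
// points compute the PPF, hash it, and insert a node (hash key, reference index i, row into the
// PPF matrix) into the hash table; the PPF matrix additionally stores the model alpha angle that
// is needed later during voting.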
// TODO: Check all step sizes to be positive
void PPF3DDetector::trainModel(const Mat &PC)
{
  CV_Assert(PC.type() == CV_32F || PC.type() == CV_32FC1);
  // compute bbox
  float xRange[2], yRange[2], zRange[2];
  computeBboxStd(PC, xRange, yRange, zRange);
  // compute sampling step from diameter of bbox
  float dx = xRange[1] - xRange[0];
  float dy = yRange[1] - yRange[0];
  float dz = zRange[1] - zRange[0];
  float diameter = sqrt ( dx * dx + dy * dy + dz * dz );
  float distanceStep = (float)(diameter * samplingStepRelative);
  Mat sampled = samplePCByQuantization(PC, xRange, yRange, zRange, (float)samplingStepRelative, 0);
  int size = sampled.rows*sampled.rows;
  hashtable_int* hashTable = hashtableCreate(size, NULL);
  int numPPF = sampled.rows*sampled.rows;
  PPF = Mat(numPPF, T_PPF_LENGTH, CV_32FC1);
  int ppfStep = (int)PPF.step;
  int sampledStep = (int)sampled.step;
  // TODO: Maybe I could sample 1/5th of them here. Check the performance later.
  int numRefPoints = sampled.rows;
  // pre-allocate the hash nodes
  hash_nodes = (THash*)calloc(numRefPoints*numRefPoints, sizeof(THash));
  // TODO: This loop could easily be parallelized, but hashtableInsertHashed would have to be
  // locked. Performance dropped when it was parallelized (unordered inserts into the hashtable),
  // so it is left serial for now; this is only the training stage.
  for (int i=0; i<numRefPoints; i++)
  {
    float* f1 = (float*)(&sampled.data[i * sampledStep]);
    const double p1[4] = {f1[0], f1[1], f1[2], 0};
    const double n1[4] = {f1[3], f1[4], f1[5], 0};
    //printf("///////////////////// NEW REFERENCE ////////////////////////\n");
    for (int j=0; j<numRefPoints; j++)
    {
      // cannot compute the ppf of a point with itself
      if (i!=j)
      {
        float* f2 = (float*)(&sampled.data[j * sampledStep]);
        const double p2[4] = {f2[0], f2[1], f2[2], 0};
        const double n2[4] = {f2[3], f2[4], f2[5], 0};
        double f[4]={0};
        computePPFFeatures(p1, n1, p2, n2, f);
        KeyType hashValue = hashPPF(f, angleStepRadians, distanceStep);
        double alpha = computeAlpha(p1, n1, p2);
        unsigned int corrInd = i*numRefPoints+j;
        unsigned int ppfInd = corrInd*ppfStep;
        THash* hashNode = &hash_nodes[i*numRefPoints+j];
        hashNode->id = hashValue;
        hashNode->i = i;
        hashNode->ppfInd = ppfInd;
        hashtableInsertHashed(hashTable, hashValue, (void*)hashNode);
        float* ppfRow = (float*)(&(PPF.data[ ppfInd ]));
        ppfRow[0] = (float)f[0];
        ppfRow[1] = (float)f[1];
        ppfRow[2] = (float)f[2];
        ppfRow[3] = (float)f[3];
        ppfRow[4] = (float)alpha;
      }
    }
  }
  angle_step = angleStepRadians;
  distance_step = distanceStep;
  hash_table = hashTable;
  sampled_step = sampledStep;
  ppf_step = ppfStep;
  num_ref_points = numRefPoints;
  sampledPC = sampled;
  trained = true;
}
......@@ -325,371 +323,368 @@ void PPF3DDetector::trainModel(const Mat &PC)
bool PPF3DDetector::matchPose(const Pose3D& sourcePose, const Pose3D& targetPose)
{
  // translational difference
  double dv[3] = {targetPose.t[0]-sourcePose.t[0], targetPose.t[1]-sourcePose.t[1], targetPose.t[2]-sourcePose.t[2]};
  double dNorm = sqrt(dv[0]*dv[0]+dv[1]*dv[1]+dv[2]*dv[2]);
  const double phi = fabs ( sourcePose.angle - targetPose.angle );
  return (phi<this->RotationThreshold && dNorm < this->PositionThreshold);
}
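// clusterPoses: greedy clustering of the pose hypotheses. Poses are sorted by vote count, each
// pose joins the first cluster whose representative it matches (matchPose above), otherwise it
// starts a new cluster. Clusters are then sorted by votes and collapsed to a single pose by
// (optionally vote-weighted) component-wise averaging of quaternions and translations; note
// that this naive quaternion averaging implicitly assumes the poses in a cluster are close.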
int PPF3DDetector::clusterPoses(Pose3D** poseList, int numPoses, std::vector<Pose3D*>& finalPoses)
{
  std::vector<PoseCluster3D*> poseClusters;
  poseClusters.clear();
  finalPoses.clear();
  // sort the poses for stability
  qsort(poseList, numPoses, sizeof(Pose3D*), qsortPoseCmp);
  for (int i=0; i<numPoses; i++)
  {
    Pose3D* pose = poseList[i];
    bool assigned = false;
    // search all clusters
    for (size_t j=0; j<poseClusters.size() && !assigned; j++)
    {
      const Pose3D* poseCenter = poseClusters[j]->poseList[0];
      if (matchPose(*pose, *poseCenter))
      {
        poseClusters[j]->addPose(pose);
        assigned = true;
      }
    }
    if (!assigned)
    {
      poseClusters.push_back( new PoseCluster3D(pose));
    }
  }
  // sort the clusters so that we can output multiple hypotheses
  std::sort(poseClusters.begin(), poseClusters.end(), sortPoseClusters);
  finalPoses.resize(poseClusters.size());
  // TODO: Use MinMatchScore
  if (UseWeightedAvg)
  {
#if defined _OPENMP
#pragma omp parallel for
#endif
    // uses weighting by the number of votes
    for (size_t i=0; i<poseClusters.size(); i++)
    {
      // Only the quaternions can be averaged consistently, so they are used here
      double qAvg[4]={0}, tAvg[3]={0};
      // Perform the final averaging
      PoseCluster3D* curCluster = poseClusters[i];
      std::vector<Pose3D*> curPoses = curCluster->poseList;
      int curSize = (int)curPoses.size();
      int numTotalVotes = 0;
      for (int j=0; j<curSize; j++)
        numTotalVotes += curPoses[j]->numVotes;
      double wSum=0;
      for (int j=0; j<curSize; j++)
      {
        const double w = (double)curPoses[j]->numVotes / (double)numTotalVotes;
        qAvg[0]+= w*curPoses[j]->q[0];
        qAvg[1]+= w*curPoses[j]->q[1];
        qAvg[2]+= w*curPoses[j]->q[2];
        qAvg[3]+= w*curPoses[j]->q[3];
        tAvg[0]+= w*curPoses[j]->t[0];
        tAvg[1]+= w*curPoses[j]->t[1];
        tAvg[2]+= w*curPoses[j]->t[2];
        wSum+=w;
      }
      tAvg[0]/=wSum;
      tAvg[1]/=wSum;
      tAvg[2]/=wSum;
      qAvg[0]/=wSum;
      qAvg[1]/=wSum;
      qAvg[2]/=wSum;
      qAvg[3]/=wSum;
      curPoses[0]->updatePoseQuat(qAvg, tAvg);
      curPoses[0]->numVotes=curCluster->numVotes;
      finalPoses[i]=curPoses[0]->clone();
      delete poseClusters[i];
    }
  }
  else
  {
#if defined _OPENMP
#pragma omp parallel for
#endif
    for (size_t i=0; i<poseClusters.size(); i++)
    {
      // Only the quaternions can be averaged consistently, so they are used here
      double qAvg[4]={0}, tAvg[3]={0};
      // Perform the final averaging
      PoseCluster3D* curCluster = poseClusters[i];
      std::vector<Pose3D*> curPoses = curCluster->poseList;
      int curSize = (int)curPoses.size();
      for (int j=0; j<curSize; j++)
      {
        qAvg[0]+= curPoses[j]->q[0];
        qAvg[1]+= curPoses[j]->q[1];
        qAvg[2]+= curPoses[j]->q[2];
        qAvg[3]+= curPoses[j]->q[3];
        tAvg[0]+= curPoses[j]->t[0];
        tAvg[1]+= curPoses[j]->t[1];
        tAvg[2]+= curPoses[j]->t[2];
      }
      tAvg[0]/=(double)curSize;
      tAvg[1]/=(double)curSize;
      tAvg[2]/=(double)curSize;
      qAvg[0]/=(double)curSize;
      qAvg[1]/=(double)curSize;
      qAvg[2]/=(double)curSize;
      qAvg[3]/=(double)curSize;
      curPoses[0]->updatePoseQuat(qAvg, tAvg);
      curPoses[0]->numVotes=curCluster->numVotes;
      finalPoses[i]=curPoses[0]->clone();
      // we won't need this anymore
      delete poseClusters[i];
    }
  }
  poseClusters.clear();
  return 0;
}
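// Matching: for every sceneSamplingStep-th reference point of the subsampled scene, pair it with
// all other sampled scene points, hash each PPF and look up the matching model pairs, and vote in
// a (model reference point x quantized alpha) accumulator; the peak of the accumulator yields one
// pose hypothesis per scene reference point, and the hypotheses are merged by clusterPoses.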
void PPF3DDetector::match(const Mat& pc, std::vector<Pose3D*>& results, const double relativeSceneSampleStep, const double relativeSceneDistance)
{
  if (!trained)
  {
    throw cv::Exception(cv::Error::StsError, "The model is not trained. Cannot match without training", __FUNCTION__, __FILE__, __LINE__);
  }
  CV_Assert(pc.type() == CV_32F || pc.type() == CV_32FC1);
  CV_Assert(relativeSceneSampleStep<=1 && relativeSceneSampleStep>0);
  SceneSampleStep = (int)(1.0/relativeSceneSampleStep);
  //int numNeighbors = 10;
  int numAngles = (int) (floor (2 * M_PI / angle_step));
  float distanceStep = (float)distance_step;
  unsigned int n = num_ref_points;
  Pose3D** poseList;
  int sceneSamplingStep = SceneSampleStep;
  // compute bbox
  float xRange[2], yRange[2], zRange[2];
  computeBboxStd(pc, xRange, yRange, zRange);
  // sample the point cloud
  /*float dx = xRange[1] - xRange[0];
  float dy = yRange[1] - yRange[0];
  float dz = zRange[1] - zRange[0];
  float diameter = sqrt ( dx * dx + dy * dy + dz * dz );
  float distanceSampleStep = diameter * RelativeSceneDistance;*/
  Mat sampled = samplePCByQuantization(pc, xRange, yRange, zRange, (float)relativeSceneDistance, 0);
  // allocate the accumulator : Moved this to the inside of the loop
  /*#if !defined (_OPENMP)
  unsigned int* accumulator = (unsigned int*)calloc(numAngles*n, sizeof(unsigned int));
  #endif*/
  poseList = (Pose3D**)calloc((sampled.rows/sceneSamplingStep)+4, sizeof(Pose3D*));
#if defined _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < sampled.rows; i += sceneSamplingStep)
  {
    unsigned int refIndMax = 0, alphaIndMax = 0;
    unsigned int maxVotes = 0;
    float* f1 = (float*)(&sampled.data[i * sampled.step]);
    const double p1[4] = {f1[0], f1[1], f1[2], 0};
    const double n1[4] = {f1[3], f1[4], f1[5], 0};
    double *row2, *row3, tsg[3]={0}, Rsg[9]={0}, RInv[9]={0};
    unsigned int* accumulator = (unsigned int*)calloc(numAngles*n, sizeof(unsigned int));
    computeTransformRT(p1, n1, Rsg, tsg);
    row2=&Rsg[3];
    row3=&Rsg[6];
    // Tolga Birdal's notice:
    // As a later update, we might want to look into a local neighborhood only.
    // To do this, simply search the local neighborhood by a radius look-up
    // and collect the neighbors to compute the relative pose.
    for (int j = 0; j < sampled.rows; j ++)
    {
      if (i!=j)
      {
        float* f2 = (float*)(&sampled.data[j * sampled.step]);
        const double p2[4] = {f2[0], f2[1], f2[2], 0};
        const double n2[4] = {f2[3], f2[4], f2[5], 0};
        double p2t[4], alpha_scene;
        double f[4]={0};
        computePPFFeatures(p1, n1, p2, n2, f);
        KeyType hashValue = hashPPF(f, angle_step, distanceStep);
        // we don't need to call this here, as we already estimate tsg from the scene reference point
        // double alpha = computeAlpha(p1, n1, p2);
        p2t[1] = tsg[1] + row2[0] * p2[0] + row2[1] * p2[1] + row2[2] * p2[2];
        p2t[2] = tsg[2] + row3[0] * p2[0] + row3[1] * p2[1] + row3[2] * p2[2];
        alpha_scene=atan2(-p2t[2], p2t[1]);
        if ( alpha_scene != alpha_scene) // NaN check
        {
          continue;
        }
        if (sin(alpha_scene)*p2t[2]<0.0)
          alpha_scene=-alpha_scene;
        alpha_scene=-alpha_scene;
        hashnode_i* node = hashtableGetBucketHashed(hash_table, (hashValue));
        while (node)
        {
          THash* tData = (THash*) node->data;
          int corrI = (int)tData->i;
          int ppfInd = (int)tData->ppfInd;
          float* ppfCorrScene = (float*)(&PPF.data[ppfInd]);
          double alpha_model = (double)ppfCorrScene[T_PPF_LENGTH-1];
          double alpha = alpha_model - alpha_scene;
          /* Tolga Birdal's note: Map alpha to the indices:
             atan2 generates results in (-pi, pi],
             so alpha lies in the range [-2pi, 2pi] and the quantization is
             numAngles * (alpha + 2pi) / (4pi)
          */
          //printf("%f\n", alpha);
          int alpha_index = (int)(numAngles*(alpha + 2*M_PI) / (4*M_PI));
          unsigned int accIndex = corrI * numAngles + alpha_index;
          accumulator[accIndex]++;
          node = node->next;
        }
      }
    }
    // Maximize the accumulator
    for (unsigned int k = 0; k < n; k++)
    {
      for (int j = 0; j < numAngles; j++)
      {
        const unsigned int accInd = k*numAngles + j;
        const unsigned int accVal = accumulator[ accInd ];
        if (accVal > maxVotes)
        {
          maxVotes = accVal;
          refIndMax = k;
          alphaIndMax = j;
        }
#if !defined (_OPENMP)
        accumulator[accInd ] = 0;
#endif
      }
    }
    // invert Tsg : Luckily rotation is orthogonal: Inverse = Transpose.
    // We are not required to invert.
    double tInv[3], tmg[3], Rmg[9];
    matrixTranspose33(Rsg, RInv);
    matrixProduct331(RInv, tsg, tInv);
    double TsgInv[16] = { RInv[0], RInv[1], RInv[2], -tInv[0],
                          RInv[3], RInv[4], RInv[5], -tInv[1],
                          RInv[6], RInv[7], RInv[8], -tInv[2],
                          0, 0, 0, 1
                        };
    // TODO : Compute pose
    const float* fMax = (float*)(&sampledPC.data[refIndMax * sampledPC.step]);
    const double pMax[4] = {fMax[0], fMax[1], fMax[2], 1};
    const double nMax[4] = {fMax[3], fMax[4], fMax[5], 1};
    computeTransformRT(pMax, nMax, Rmg, tmg);
    row2=&Rsg[3];
    row3=&Rsg[6];
    double Tmg[16] = { Rmg[0], Rmg[1], Rmg[2], tmg[0],
                       Rmg[3], Rmg[4], Rmg[5], tmg[1],
                       Rmg[6], Rmg[7], Rmg[8], tmg[2],
                       0, 0, 0, 1
                     };
    // convert alpha_index to alpha
    int alpha_index = alphaIndMax;
    double alpha = (alpha_index*(4*M_PI))/numAngles-2*M_PI;
    // Equation 2:
    double Talpha[16]={0};
    getUnitXRotation_44(alpha, Talpha);
    double Temp[16]={0};
    double Pose[16]={0};
    matrixProduct44(Talpha, Tmg, Temp);
    matrixProduct44(TsgInv, Temp, Pose);
    Pose3D *ppf = new Pose3D(alpha, refIndMax, maxVotes);
    ppf->updatePose(Pose);
    poseList[i/sceneSamplingStep] = ppf;
#if defined (_OPENMP)
    free(accumulator);
#endif
  }
  // TODO : Make the parameters relative if not arguments.
  //double MinMatchScore = 0.5;
  int numPosesAdded = sampled.rows/sceneSamplingStep;
  clusterPoses(poseList, numPosesAdded, results);
  // free up the used space
  sampled.release();
  for (int i=0; i<numPosesAdded; i++)
  {
    Pose3D* pose = poseList[i];
    delete pose;
    poseList[i]=0;
  }
  free(poseList);
  /*#if !defined (_OPENMP)
  free(accumulator);
  #endif*/
}
} // namespace ppf_match_3d