Commit 9e787377 authored by Pavel Rojtberg

kinfu: allow basic wrapping for bindings

To achieve this:
- move the Params struct out of the KinFu class
- use a static create() factory instead of the pimpl idiom (sketched just below)
- allow the demo to be compiled without the VIZ module
parent e351caed
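The switch from a pimpl-backed constructor to a static factory is the core API change. The following is a minimal before/after sketch (an illustration, not code from this commit), assuming only the kinfu header shown in the diff below:

```cpp
#include <opencv2/rgbd/kinfu.hpp>

void constructExample()
{
    // Before this commit: value-type params and a pimpl-backed object,
    // neither of which the binding generator can express.
    //   cv::kinfu::KinFu::Params p = cv::kinfu::KinFu::Params::defaultParams();
    //   cv::kinfu::KinFu kf(p);

    // After this commit: Params lives outside the class and everything is
    // passed around as cv::Ptr<>, which the wrapper macros can handle.
    cv::Ptr<cv::kinfu::Params> params = cv::kinfu::Params::defaultParams();
    cv::Ptr<cv::kinfu::KinFu>  kf     = cv::kinfu::KinFu::create(params);
    (void)kf; // construction only; see the demo changes below for full usage
}
```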
...@@ -15,37 +15,18 @@ namespace kinfu { ...@@ -15,37 +15,18 @@ namespace kinfu {
//! @addtogroup kinect_fusion //! @addtogroup kinect_fusion
//! @{ //! @{
/** @brief KinectFusion implementation struct CV_EXPORTS_W Params
This class implements a 3d reconstruction algorithm described in
the @cite kinectfusion paper.
It takes a sequence of depth images taken from a depth sensor
(or any other depth image source, such as a stereo camera matching algorithm or even a raymarching renderer).
The output can be obtained as a vector of points and their normals,
or can be Phong-rendered from a given camera pose.
The internal representation of the model is a voxel cube that keeps TSDF values,
which are a kind of distance to the surface (for details see the @cite kinectfusion article about TSDF).
There is no interface to that representation yet.
This implementation is based on [kinfu-remake](https://github.com/Nerei/kinfu_remake).
*/
class CV_EXPORTS KinFu
{ {
public:
struct CV_EXPORTS Params
{
/** @brief Default parameters /** @brief Default parameters
A set of parameters which provides better model quality, can be very slow. A set of parameters which provides better model quality, can be very slow.
*/ */
static Params defaultParams(); CV_WRAP static Ptr<Params> defaultParams();
/** @brief Coarse parameters /** @brief Coarse parameters
A set of parameters which provides better speed, can fail to match frames A set of parameters which provides better speed, can fail to match frames
in case of rapid sensor motion. in case of rapid sensor motion.
*/ */
static Params coarseParams(); CV_WRAP static Ptr<Params> coarseParams();
enum PlatformType enum PlatformType
{ {
...@@ -61,10 +42,10 @@ public: ...@@ -61,10 +42,10 @@ public:
PlatformType platform; PlatformType platform;
/** @brief frame size in pixels */ /** @brief frame size in pixels */
Size frameSize; CV_PROP_RW Size frameSize;
/** @brief camera intrinsics */ /** @brief camera intrinsics */
Matx33f intr; CV_PROP Matx33f intr;
/** @brief pre-scale per 1 meter for input values /** @brief pre-scale per 1 meter for input values
...@@ -72,31 +53,31 @@ public: ...@@ -72,31 +53,31 @@ public:
* 5000 per 1 meter for the 16-bit PNG files of TUM database * 5000 per 1 meter for the 16-bit PNG files of TUM database
* 1 per 1 meter for the 32-bit float images in the ROS bag files * 1 per 1 meter for the 32-bit float images in the ROS bag files
*/ */
float depthFactor; CV_PROP_RW float depthFactor;
/** @brief Depth sigma in meters for bilateral smooth */ /** @brief Depth sigma in meters for bilateral smooth */
float bilateral_sigma_depth; CV_PROP_RW float bilateral_sigma_depth;
/** @brief Spatial sigma in pixels for bilateral smooth */ /** @brief Spatial sigma in pixels for bilateral smooth */
float bilateral_sigma_spatial; CV_PROP_RW float bilateral_sigma_spatial;
/** @brief Kernel size in pixels for bilateral smooth */ /** @brief Kernel size in pixels for bilateral smooth */
int bilateral_kernel_size; CV_PROP_RW int bilateral_kernel_size;
/** @brief Number of pyramid levels for ICP */ /** @brief Number of pyramid levels for ICP */
int pyramidLevels; CV_PROP_RW int pyramidLevels;
/** @brief Resolution of voxel cube /** @brief Resolution of voxel cube
Number of voxels in each cube edge. Number of voxels in each cube edge.
*/ */
int volumeDims; CV_PROP_RW int volumeDims;
/** @brief Size of voxel cube side in meters */ /** @brief Size of voxel cube side in meters */
float volumeSize; CV_PROP_RW float volumeSize;
/** @brief Minimal camera movement in meters /** @brief Minimal camera movement in meters
Integrate new depth frame only if camera movement exceeds this value. Integrate new depth frame only if camera movement exceeds this value.
*/ */
float tsdf_min_camera_movement; CV_PROP_RW float tsdf_min_camera_movement;
/** @brief initial volume pose in meters */ /** @brief initial volume pose in meters */
Affine3f volumePose; Affine3f volumePose;
...@@ -105,44 +86,63 @@ public: ...@@ -105,44 +86,63 @@ public:
Distances that exceed this value will be truncated in voxel cube values. Distances that exceed this value will be truncated in voxel cube values.
*/ */
float tsdf_trunc_dist; CV_PROP_RW float tsdf_trunc_dist;
/** @brief max number of frames per voxel /** @brief max number of frames per voxel
Each voxel keeps running average of distances no longer than this value. Each voxel keeps running average of distances no longer than this value.
*/ */
int tsdf_max_weight; CV_PROP_RW int tsdf_max_weight;
/** @brief A length of one raycast step /** @brief A length of one raycast step
How many voxel sizes we skip per raycast step How many voxel sizes we skip per raycast step
*/ */
float raycast_step_factor; CV_PROP_RW float raycast_step_factor;
// gradient delta in voxel sizes // gradient delta in voxel sizes
// fixed at 1.0f // fixed at 1.0f
// float gradient_delta_factor; // float gradient_delta_factor;
/** @brief light pose for rendering in meters */ /** @brief light pose for rendering in meters */
Vec3f lightPose; CV_PROP Vec3f lightPose;
/** @brief distance threshold for ICP in meters */ /** @brief distance threshold for ICP in meters */
float icpDistThresh; CV_PROP_RW float icpDistThresh;
/** angle threshold for ICP in radians */ /** angle threshold for ICP in radians */
float icpAngleThresh; CV_PROP_RW float icpAngleThresh;
/** number of ICP iterations for each pyramid level */ /** number of ICP iterations for each pyramid level */
std::vector<int> icpIterations; CV_PROP std::vector<int> icpIterations;
// depth truncation is not used by default // depth truncation is not used by default
// float icp_truncate_depth_dist; //meters // float icp_truncate_depth_dist; //meters
}; };
KinFu(const Params& _params); /** @brief KinectFusion implementation
This class implements a 3d reconstruction algorithm described in
the @cite kinectfusion paper.
It takes a sequence of depth images taken from a depth sensor
(or any other depth image source, such as a stereo camera matching algorithm or even a raymarching renderer).
The output can be obtained as a vector of points and their normals,
or can be Phong-rendered from a given camera pose.
The internal representation of the model is a voxel cube that keeps TSDF values,
which are a kind of distance to the surface (for details see the @cite kinectfusion article about TSDF).
There is no interface to that representation yet.
This implementation is based on [kinfu-remake](https://github.com/Nerei/kinfu_remake).
*/
class CV_EXPORTS_W KinFu
{
public:
CV_WRAP static Ptr<KinFu> create(const Ptr<Params>& _params);
virtual ~KinFu(); virtual ~KinFu();
/** @brief Get current parameters */ /** @brief Get current parameters */
const Params& getParams() const; virtual const Params& getParams() const = 0;
void setParams(const Params&); virtual void setParams(const Params&) = 0;
/** @brief Renders a volume into an image /** @brief Renders a volume into an image
...@@ -154,7 +154,7 @@ public: ...@@ -154,7 +154,7 @@ public:
which is the camera pose of the last frame. which is the camera pose of the last frame.
*/ */
void render(OutputArray image, const Affine3f cameraPose = Affine3f::Identity()) const; CV_WRAP virtual void render(OutputArray image, const Matx44f& cameraPose = Matx44f::eye()) const = 0;
/** @brief Gets points and normals of current 3d mesh /** @brief Gets points and normals of current 3d mesh
...@@ -164,7 +164,7 @@ public: ...@@ -164,7 +164,7 @@ public:
@param points vector of points which are 4-float vectors @param points vector of points which are 4-float vectors
@param normals vector of normals which are 4-float vectors @param normals vector of normals which are 4-float vectors
*/ */
void getCloud(OutputArray points, OutputArray normals) const; CV_WRAP virtual void getCloud(OutputArray points, OutputArray normals) const = 0;
/** @brief Gets points of current 3d mesh /** @brief Gets points of current 3d mesh
...@@ -172,22 +172,22 @@ public: ...@@ -172,22 +172,22 @@ public:
@param points vector of points which are 4-float vectors @param points vector of points which are 4-float vectors
*/ */
void getPoints(OutputArray points) const; CV_WRAP virtual void getPoints(OutputArray points) const = 0;
/** @brief Calculates normals for given points /** @brief Calculates normals for given points
@param points input vector of points which are 4-float vectors @param points input vector of points which are 4-float vectors
@param normals output vector of corresponding normals which are 4-float vectors @param normals output vector of corresponding normals which are 4-float vectors
*/ */
void getNormals(InputArray points, OutputArray normals) const; CV_WRAP virtual void getNormals(InputArray points, OutputArray normals) const = 0;
/** @brief Resets the algorithm /** @brief Resets the algorithm
Clears current model and resets a pose. Clears current model and resets a pose.
*/ */
void reset(); CV_WRAP virtual void reset() = 0;
/** @brief Get current pose in voxel cube space */ /** @brief Get current pose in voxel cube space */
const Affine3f getPose() const; virtual const Affine3f getPose() const = 0;
/** @brief Process next depth frame /** @brief Process next depth frame
...@@ -197,12 +197,10 @@ public: ...@@ -197,12 +197,10 @@ public:
@param depth one-channel image whose size and depth scale are described in the algorithm's parameters @param depth one-channel image whose size and depth scale are described in the algorithm's parameters
@return true if the new frame was successfully aligned with the current scene, false otherwise @return true if the new frame was successfully aligned with the current scene, false otherwise
*/ */
bool update(InputArray depth); CV_WRAP virtual bool update(InputArray depth) = 0;
private: private:
class KinFuImpl; class KinFuImpl;
cv::Ptr<KinFuImpl> impl;
}; };
//! @} //! @}
......
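To make the header above concrete, here is a small end-to-end sketch of the new interface (an illustration, not code from this commit); the constant depth frame is a placeholder, and a real application would feed sensor frames scaled by params->depthFactor, as the demo below does:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/rgbd/kinfu.hpp>

using namespace cv;
using namespace cv::kinfu;

int main()
{
    // Coarse preset: faster, but may lose tracking under rapid motion.
    Ptr<Params> params = Params::coarseParams();
    params->frameSize = Size(640, 480);   // CV_PROP_RW fields stay tweakable

    Ptr<KinFu> kf = KinFu::create(params);

    // Placeholder constant 16-bit depth frame; real input is scaled by
    // params->depthFactor (e.g. 5000 raw units per meter for TUM-style PNGs).
    Mat depth(params->frameSize, CV_16U, Scalar(5000));

    for(int i = 0; i < 10; i++)
    {
        if(!kf->update(depth))         // integrate the frame and track the camera
        {
            kf->reset();               // tracking failed: clear model and pose
            continue;
        }

        Mat rendered;
        kf->render(rendered);          // Phong render from the last frame pose

        Mat points, normals;
        kf->getCloud(points, normals); // 4-float points and matching normals
    }
    return 0;
}
```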
...@@ -10,14 +10,14 @@ ...@@ -10,14 +10,14 @@
#include <opencv2/highgui.hpp> #include <opencv2/highgui.hpp>
#include <opencv2/rgbd/kinfu.hpp> #include <opencv2/rgbd/kinfu.hpp>
#ifdef HAVE_OPENCV_VIZ
#include <opencv2/viz.hpp>
using namespace cv; using namespace cv;
using namespace cv::kinfu; using namespace cv::kinfu;
using namespace std; using namespace std;
#ifdef HAVE_OPENCV_VIZ
#include <opencv2/viz.hpp>
#endif
static vector<string> readDepth(std::string fileList); static vector<string> readDepth(std::string fileList);
static vector<string> readDepth(std::string fileList) static vector<string> readDepth(std::string fileList)
...@@ -113,7 +113,7 @@ public: ...@@ -113,7 +113,7 @@ public:
return depthFileList.empty() && !(vc.isOpened()); return depthFileList.empty() && !(vc.isOpened());
} }
void updateParams(KinFu::Params& params) void updateParams(Params& params)
{ {
if (vc.isOpened()) if (vc.isOpened())
{ {
...@@ -143,6 +143,7 @@ public: ...@@ -143,6 +143,7 @@ public:
bool useKinect2Workarounds; bool useKinect2Workarounds;
}; };
#ifdef HAVE_OPENCV_VIZ
const std::string vizWindowName = "cloud"; const std::string vizWindowName = "cloud";
struct PauseCallbackArgs struct PauseCallbackArgs
...@@ -163,11 +164,12 @@ void pauseCallback(const viz::MouseEvent& me, void* args) ...@@ -163,11 +164,12 @@ void pauseCallback(const viz::MouseEvent& me, void* args)
PauseCallbackArgs pca = *((PauseCallbackArgs*)(args)); PauseCallbackArgs pca = *((PauseCallbackArgs*)(args));
viz::Viz3d window(vizWindowName); viz::Viz3d window(vizWindowName);
Mat rendered; Mat rendered;
pca.kf.render(rendered, window.getViewerPose()); pca.kf.render(rendered, window.getViewerPose().matrix);
imshow("render", rendered); imshow("render", rendered);
waitKey(1); waitKey(1);
} }
} }
#endif
static const char* keys = static const char* keys =
{ {
...@@ -220,23 +222,26 @@ int main(int argc, char **argv) ...@@ -220,23 +222,26 @@ int main(int argc, char **argv)
return -1; return -1;
} }
KinFu::Params params; Ptr<Params> params;
if(coarse) if(coarse)
params = KinFu::Params::coarseParams(); params = Params::coarseParams();
else else
params = KinFu::Params::defaultParams(); params = Params::defaultParams();
// These params can be different for each depth sensor // These params can be different for each depth sensor
ds.updateParams(params); ds.updateParams(*params);
// Scene-specific params should be tuned for each scene individually // Scene-specific params should be tuned for each scene individually
//params.volumePose = params.volumePose.translate(Vec3f(0.f, 0.f, 0.5f)); //params.volumePose = params.volumePose.translate(Vec3f(0.f, 0.f, 0.5f));
//params.tsdf_max_weight = 16; //params.tsdf_max_weight = 16;
KinFu kf(params); Ptr<KinFu> kf = KinFu::create(params);
#ifdef HAVE_OPENCV_VIZ
cv::viz::Viz3d window(vizWindowName); cv::viz::Viz3d window(vizWindowName);
window.setViewerPose(Affine3f::Identity()); window.setViewerPose(Affine3f::Identity());
bool pause = false;
#endif
// TODO: can we use UMats for that? // TODO: can we use UMats for that?
Mat rendered; Mat rendered;
...@@ -245,13 +250,12 @@ int main(int argc, char **argv) ...@@ -245,13 +250,12 @@ int main(int argc, char **argv)
int64 prevTime = getTickCount(); int64 prevTime = getTickCount();
bool pause = false;
for(Mat frame = ds.getDepth(); !frame.empty(); frame = ds.getDepth()) for(Mat frame = ds.getDepth(); !frame.empty(); frame = ds.getDepth())
{ {
#ifdef HAVE_OPENCV_VIZ
if(pause) if(pause)
{ {
kf.getCloud(points, normals); kf->getCloud(points, normals);
if(!points.empty() && !normals.empty()) if(!points.empty() && !normals.empty())
{ {
viz::WCloud cloudWidget(points, viz::Color::white()); viz::WCloud cloudWidget(points, viz::Color::white());
...@@ -260,9 +264,9 @@ int main(int argc, char **argv) ...@@ -260,9 +264,9 @@ int main(int argc, char **argv)
window.showWidget("normals", cloudNormals); window.showWidget("normals", cloudNormals);
window.showWidget("cube", viz::WCube(Vec3d::all(0), window.showWidget("cube", viz::WCube(Vec3d::all(0),
Vec3d::all(kf.getParams().volumeSize)), Vec3d::all(kf->getParams().volumeSize)),
kf.getParams().volumePose); kf->getParams().volumePose);
PauseCallbackArgs pca(kf); PauseCallbackArgs pca(*kf);
window.registerMouseCallback(pauseCallback, (void*)&pca); window.registerMouseCallback(pauseCallback, (void*)&pca);
window.showWidget("text", viz::WText(cv::String("Move camera in this window. " window.showWidget("text", viz::WText(cv::String("Move camera in this window. "
"Close the window or press Q to resume"), Point())); "Close the window or press Q to resume"), Point()));
...@@ -274,22 +278,24 @@ int main(int argc, char **argv) ...@@ -274,22 +278,24 @@ int main(int argc, char **argv)
pause = false; pause = false;
} }
else else
#endif
{ {
Mat cvt8; Mat cvt8;
float depthFactor = kf.getParams().depthFactor; float depthFactor = kf->getParams().depthFactor;
convertScaleAbs(frame, cvt8, 0.25*256. / depthFactor); convertScaleAbs(frame, cvt8, 0.25*256. / depthFactor);
imshow("depth", cvt8); imshow("depth", cvt8);
if(!kf.update(frame)) if(!kf->update(frame))
{ {
kf.reset(); kf->reset();
std::cout << "reset" << std::endl; std::cout << "reset" << std::endl;
} }
#ifdef HAVE_OPENCV_VIZ
else else
{ {
if(coarse) if(coarse)
{ {
kf.getCloud(points, normals); kf->getCloud(points, normals);
if(!points.empty() && !normals.empty()) if(!points.empty() && !normals.empty())
{ {
viz::WCloud cloudWidget(points, viz::Color::white()); viz::WCloud cloudWidget(points, viz::Color::white());
...@@ -301,13 +307,14 @@ int main(int argc, char **argv) ...@@ -301,13 +307,14 @@ int main(int argc, char **argv)
//window.showWidget("worldAxes", viz::WCoordinateSystem()); //window.showWidget("worldAxes", viz::WCoordinateSystem());
window.showWidget("cube", viz::WCube(Vec3d::all(0), window.showWidget("cube", viz::WCube(Vec3d::all(0),
Vec3d::all(kf.getParams().volumeSize)), Vec3d::all(kf->getParams().volumeSize)),
kf.getParams().volumePose); kf->getParams().volumePose);
window.setViewerPose(kf.getPose()); window.setViewerPose(kf->getPose());
window.spinOnce(1, true); window.spinOnce(1, true);
} }
#endif
kf.render(rendered); kf->render(rendered);
} }
int64 newTime = getTickCount(); int64 newTime = getTickCount();
...@@ -322,12 +329,14 @@ int main(int argc, char **argv) ...@@ -322,12 +329,14 @@ int main(int argc, char **argv)
switch (c) switch (c)
{ {
case 'r': case 'r':
kf.reset(); kf->reset();
break; break;
case 'q': case 'q':
return 0; return 0;
#ifdef HAVE_OPENCV_VIZ
case 'p': case 'p':
pause = true; pause = true;
#endif
default: default:
break; break;
} }
...@@ -335,12 +344,3 @@ int main(int argc, char **argv) ...@@ -335,12 +344,3 @@ int main(int argc, char **argv)
return 0; return 0;
} }
#else
int main(int /* argc */, char ** /* argv */)
{
std::cout << "This demo requires viz module" << std::endl;
return 0;
}
#endif
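The demo changes above all follow the same pattern: viz-only functionality moves behind HAVE_OPENCV_VIZ guards instead of disabling the whole program. A condensed sketch of that structure (a restatement for clarity, not the literal demo code):

```cpp
#include <opencv2/highgui.hpp>
#include <opencv2/rgbd/kinfu.hpp>

#ifdef HAVE_OPENCV_VIZ
#include <opencv2/viz.hpp>   // only the point-cloud view and pause mode need viz
#endif

int main()
{
    cv::Ptr<cv::kinfu::Params> params = cv::kinfu::Params::defaultParams();
    cv::Ptr<cv::kinfu::KinFu>  kf     = cv::kinfu::KinFu::create(params);

#ifdef HAVE_OPENCV_VIZ
    cv::viz::Viz3d window("cloud");
    bool pause = false;               // pause/cloud inspection exists only with viz
#endif

    cv::Mat depth, rendered;
    // per-frame loop (details elided):
    //   kf->update(depth), kf->reset() on failure
    //   kf->render(rendered); cv::imshow("render", rendered);
    // The cloud widgets, viewer-pose updates and the 'p' (pause) key are all
    // wrapped in the same #ifdef, so the demo builds and renders without viz.
    return 0;
}
```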
...@@ -500,15 +500,15 @@ bool ICPGPU::estimateTransform(cv::Affine3f& /*transform*/, cv::Ptr<Frame> /*_ol ...@@ -500,15 +500,15 @@ bool ICPGPU::estimateTransform(cv::Affine3f& /*transform*/, cv::Ptr<Frame> /*_ol
throw std::runtime_error("Not implemented"); throw std::runtime_error("Not implemented");
} }
cv::Ptr<ICP> makeICP(cv::kinfu::KinFu::Params::PlatformType t, cv::Ptr<ICP> makeICP(cv::kinfu::Params::PlatformType t,
const cv::kinfu::Intr _intrinsics, const std::vector<int> &_iterations, const cv::kinfu::Intr _intrinsics, const std::vector<int> &_iterations,
float _angleThreshold, float _distanceThreshold) float _angleThreshold, float _distanceThreshold)
{ {
switch (t) switch (t)
{ {
case cv::kinfu::KinFu::Params::PlatformType::PLATFORM_CPU: case cv::kinfu::Params::PlatformType::PLATFORM_CPU:
return cv::makePtr<ICPCPU>(_intrinsics, _iterations, _angleThreshold, _distanceThreshold); return cv::makePtr<ICPCPU>(_intrinsics, _iterations, _angleThreshold, _distanceThreshold);
case cv::kinfu::KinFu::Params::PlatformType::PLATFORM_GPU: case cv::kinfu::Params::PlatformType::PLATFORM_GPU:
return cv::makePtr<ICPGPU>(_intrinsics, _iterations, _angleThreshold, _distanceThreshold); return cv::makePtr<ICPGPU>(_intrinsics, _iterations, _angleThreshold, _distanceThreshold);
default: default:
return cv::Ptr<ICP>(); return cv::Ptr<ICP>();
......
...@@ -30,7 +30,7 @@ protected: ...@@ -30,7 +30,7 @@ protected:
cv::kinfu::Intr intrinsics; cv::kinfu::Intr intrinsics;
}; };
cv::Ptr<ICP> makeICP(cv::kinfu::KinFu::Params::PlatformType t, cv::Ptr<ICP> makeICP(cv::kinfu::Params::PlatformType t,
const cv::kinfu::Intr _intrinsics, const std::vector<int> &_iterations, const cv::kinfu::Intr _intrinsics, const std::vector<int> &_iterations,
float _angleThreshold, float _distanceThreshold); float _angleThreshold, float _distanceThreshold);
......
...@@ -12,29 +12,29 @@ ...@@ -12,29 +12,29 @@
namespace cv { namespace cv {
namespace kinfu { namespace kinfu {
class KinFu::KinFuImpl class KinFu::KinFuImpl : public KinFu
{ {
public: public:
KinFuImpl(const KinFu::Params& _params); KinFuImpl(const Params& _params);
virtual ~KinFuImpl(); virtual ~KinFuImpl();
const KinFu::Params& getParams() const; const Params& getParams() const CV_OVERRIDE;
void setParams(const KinFu::Params&); void setParams(const Params&) CV_OVERRIDE;
void render(OutputArray image, const Affine3f cameraPose = Affine3f::Identity()) const; void render(OutputArray image, const Matx44f& cameraPose) const CV_OVERRIDE;
void getCloud(OutputArray points, OutputArray normals) const; void getCloud(OutputArray points, OutputArray normals) const CV_OVERRIDE;
void getPoints(OutputArray points) const; void getPoints(OutputArray points) const CV_OVERRIDE;
void getNormals(InputArray points, OutputArray normals) const; void getNormals(InputArray points, OutputArray normals) const CV_OVERRIDE;
void reset(); void reset() CV_OVERRIDE;
const Affine3f getPose() const; const Affine3f getPose() const CV_OVERRIDE;
bool update(InputArray depth); bool update(InputArray depth) CV_OVERRIDE;
private: private:
KinFu::Params params; Params params;
cv::Ptr<FrameGenerator> frameGenerator; cv::Ptr<FrameGenerator> frameGenerator;
cv::Ptr<ICP> icp; cv::Ptr<ICP> icp;
...@@ -45,7 +45,7 @@ private: ...@@ -45,7 +45,7 @@ private:
cv::Ptr<Frame> frame; cv::Ptr<Frame> frame;
}; };
KinFu::Params KinFu::Params::defaultParams() Ptr<Params> Params::defaultParams()
{ {
Params p; Params p;
...@@ -107,36 +107,36 @@ KinFu::Params KinFu::Params::defaultParams() ...@@ -107,36 +107,36 @@ KinFu::Params KinFu::Params::defaultParams()
// depth truncation is not used by default // depth truncation is not used by default
//p.icp_truncate_depth_dist = 0.f; //meters, disabled //p.icp_truncate_depth_dist = 0.f; //meters, disabled
return p; return makePtr<Params>(p);
} }
KinFu::Params KinFu::Params::coarseParams() Ptr<Params> Params::coarseParams()
{ {
Params p = defaultParams(); Ptr<Params> p = defaultParams();
// first non-zero numbers are accepted // first non-zero numbers are accepted
const int iters[] = {5, 3, 2}; const int iters[] = {5, 3, 2};
p.icpIterations.clear(); p->icpIterations.clear();
for(size_t i = 0; i < sizeof(iters)/sizeof(int); i++) for(size_t i = 0; i < sizeof(iters)/sizeof(int); i++)
{ {
if(iters[i]) if(iters[i])
{ {
p.icpIterations.push_back(iters[i]); p->icpIterations.push_back(iters[i]);
} }
else else
break; break;
} }
p.pyramidLevels = (int)p.icpIterations.size(); p->pyramidLevels = (int)p->icpIterations.size();
p.volumeDims = 128; //number of voxels p->volumeDims = 128; //number of voxels
p.raycast_step_factor = 0.75f; //in voxel sizes p->raycast_step_factor = 0.75f; //in voxel sizes
return p; return p;
} }
KinFu::KinFuImpl::KinFuImpl(const KinFu::Params &_params) : KinFu::KinFuImpl::KinFuImpl(const Params &_params) :
params(_params), params(_params),
frameGenerator(makeFrameGenerator(params.platform)), frameGenerator(makeFrameGenerator(params.platform)),
icp(makeICP(params.platform, params.intr, params.icpIterations, params.icpAngleThresh, params.icpDistThresh)), icp(makeICP(params.platform, params.intr, params.icpIterations, params.icpAngleThresh, params.icpDistThresh)),
...@@ -160,12 +160,12 @@ KinFu::KinFuImpl::~KinFuImpl() ...@@ -160,12 +160,12 @@ KinFu::KinFuImpl::~KinFuImpl()
} }
const KinFu::Params& KinFu::KinFuImpl::getParams() const const Params& KinFu::KinFuImpl::getParams() const
{ {
return params; return params;
} }
void KinFu::KinFuImpl::setParams(const KinFu::Params& p) void KinFu::KinFuImpl::setParams(const Params& p)
{ {
params = p; params = p;
} }
...@@ -225,10 +225,12 @@ bool KinFu::KinFuImpl::update(InputArray _depth) ...@@ -225,10 +225,12 @@ bool KinFu::KinFuImpl::update(InputArray _depth)
} }
void KinFu::KinFuImpl::render(OutputArray image, const Affine3f cameraPose) const void KinFu::KinFuImpl::render(OutputArray image, const Matx44f& _cameraPose) const
{ {
ScopeTime st("kinfu render"); ScopeTime st("kinfu render");
Affine3f cameraPose(_cameraPose);
const Affine3f id = Affine3f::Identity(); const Affine3f id = Affine3f::Identity();
if((cameraPose.rotation() == pose.rotation() && cameraPose.translation() == pose.translation()) || if((cameraPose.rotation() == pose.rotation() && cameraPose.translation() == pose.translation()) ||
(cameraPose.rotation() == id.rotation() && cameraPose.translation() == id.translation())) (cameraPose.rotation() == id.rotation() && cameraPose.translation() == id.translation()))
...@@ -263,57 +265,12 @@ void KinFu::KinFuImpl::getNormals(InputArray points, OutputArray normals) const ...@@ -263,57 +265,12 @@ void KinFu::KinFuImpl::getNormals(InputArray points, OutputArray normals) const
// importing class // importing class
KinFu::KinFu(const Params& _params) Ptr<KinFu> KinFu::create(const Ptr<Params>& _params)
{
impl = makePtr<KinFu::KinFuImpl>(_params);
}
KinFu::~KinFu() { }
const KinFu::Params& KinFu::getParams() const
{ {
return impl->getParams(); return makePtr<KinFu::KinFuImpl>(*_params);
} }
void KinFu::setParams(const Params& p) KinFu::~KinFu() {}
{
impl->setParams(p);
}
const Affine3f KinFu::getPose() const
{
return impl->getPose();
}
void KinFu::reset()
{
impl->reset();
}
void KinFu::getCloud(cv::OutputArray points, cv::OutputArray normals) const
{
impl->getCloud(points, normals);
}
void KinFu::getPoints(OutputArray points) const
{
impl->getPoints(points);
}
void KinFu::getNormals(InputArray points, OutputArray normals) const
{
impl->getNormals(points, normals);
}
bool KinFu::update(InputArray depth)
{
return impl->update(depth);
}
void KinFu::render(cv::OutputArray image, const Affine3f cameraPose) const
{
impl->render(image, cameraPose);
}
} // namespace kinfu } // namespace kinfu
} // namespace cv } // namespace cv
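One detail worth calling out from the implementation above: the public render() now takes a cv::Matx44f because cv::Affine3f is not exposed to the bindings, and KinFuImpl::render() converts it back internally. A short sketch of that round trip (illustration only):

```cpp
#include <opencv2/core/affine.hpp>

using namespace cv;

static void poseRoundTrip()
{
    Affine3f pose = Affine3f::Identity().translate(Vec3f(0.f, 0.f, 0.5f));

    Matx44f asMatrix = pose.matrix;   // what a wrapped caller passes to render()
    Affine3f restored(asMatrix);      // what the implementation works with

    CV_Assert(restored.translation() == pose.translation());
}
```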
...@@ -442,13 +442,13 @@ void FrameGPU::getDepth(OutputArray /* depth */) const ...@@ -442,13 +442,13 @@ void FrameGPU::getDepth(OutputArray /* depth */) const
throw std::runtime_error("Not implemented"); throw std::runtime_error("Not implemented");
} }
cv::Ptr<FrameGenerator> makeFrameGenerator(cv::kinfu::KinFu::Params::PlatformType t) cv::Ptr<FrameGenerator> makeFrameGenerator(cv::kinfu::Params::PlatformType t)
{ {
switch (t) switch (t)
{ {
case cv::kinfu::KinFu::Params::PlatformType::PLATFORM_CPU: case cv::kinfu::Params::PlatformType::PLATFORM_CPU:
return cv::makePtr<FrameGeneratorCPU>(); return cv::makePtr<FrameGeneratorCPU>();
case cv::kinfu::KinFu::Params::PlatformType::PLATFORM_GPU: case cv::kinfu::Params::PlatformType::PLATFORM_GPU:
return cv::makePtr<FrameGeneratorGPU>(); return cv::makePtr<FrameGeneratorGPU>();
default: default:
return cv::Ptr<FrameGenerator>(); return cv::Ptr<FrameGenerator>();
......
...@@ -115,7 +115,7 @@ public: ...@@ -115,7 +115,7 @@ public:
virtual ~FrameGenerator() {} virtual ~FrameGenerator() {}
}; };
cv::Ptr<FrameGenerator> makeFrameGenerator(cv::kinfu::KinFu::Params::PlatformType t); cv::Ptr<FrameGenerator> makeFrameGenerator(cv::kinfu::Params::PlatformType t);
} // namespace kinfu } // namespace kinfu
} // namespace cv } // namespace cv
......
...@@ -1482,7 +1482,7 @@ Size FastICPOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType ...@@ -1482,7 +1482,7 @@ Size FastICPOdometry::prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType
// mask isn't used by FastICP // mask isn't used by FastICP
Ptr<FrameGenerator> fg = makeFrameGenerator(KinFu::Params::PlatformType::PLATFORM_CPU); Ptr<FrameGenerator> fg = makeFrameGenerator(Params::PlatformType::PLATFORM_CPU);
Ptr<FrameCPU> f = (*fg)().dynamicCast<FrameCPU>(); Ptr<FrameCPU> f = (*fg)().dynamicCast<FrameCPU>();
Intr intr(cameraMatrix); Intr intr(cameraMatrix);
float depthFactor = 1.f; // user should rescale depth manually float depthFactor = 1.f; // user should rescale depth manually
...@@ -1517,7 +1517,7 @@ bool FastICPOdometry::computeImpl(const Ptr<OdometryFrame>& srcFrame, ...@@ -1517,7 +1517,7 @@ bool FastICPOdometry::computeImpl(const Ptr<OdometryFrame>& srcFrame,
{ {
kinfu::Intr intr(cameraMatrix); kinfu::Intr intr(cameraMatrix);
std::vector<int> iterations = iterCounts; std::vector<int> iterations = iterCounts;
Ptr<kinfu::ICP> icp = kinfu::makeICP(kinfu::KinFu::Params::PlatformType::PLATFORM_CPU, Ptr<kinfu::ICP> icp = kinfu::makeICP(kinfu::Params::PlatformType::PLATFORM_CPU,
intr, intr,
iterations, iterations,
angleThreshold, angleThreshold,
......
...@@ -1199,16 +1199,16 @@ void TSDFVolumeGPU::fetchPointsNormals(OutputArray /*points*/, OutputArray /*nor ...@@ -1199,16 +1199,16 @@ void TSDFVolumeGPU::fetchPointsNormals(OutputArray /*points*/, OutputArray /*nor
throw std::runtime_error("Not implemented"); throw std::runtime_error("Not implemented");
} }
cv::Ptr<TSDFVolume> makeTSDFVolume(cv::kinfu::KinFu::Params::PlatformType t, cv::Ptr<TSDFVolume> makeTSDFVolume(cv::kinfu::Params::PlatformType t,
int _res, float _size, cv::Affine3f _pose, float _truncDist, int _maxWeight, int _res, float _size, cv::Affine3f _pose, float _truncDist, int _maxWeight,
float _raycastStepFactor) float _raycastStepFactor)
{ {
switch (t) switch (t)
{ {
case cv::kinfu::KinFu::Params::PlatformType::PLATFORM_CPU: case cv::kinfu::Params::PlatformType::PLATFORM_CPU:
return cv::makePtr<TSDFVolumeCPU>(_res, _size, _pose, _truncDist, _maxWeight, return cv::makePtr<TSDFVolumeCPU>(_res, _size, _pose, _truncDist, _maxWeight,
_raycastStepFactor); _raycastStepFactor);
case cv::kinfu::KinFu::Params::PlatformType::PLATFORM_GPU: case cv::kinfu::Params::PlatformType::PLATFORM_GPU:
return cv::makePtr<TSDFVolumeGPU>(_res, _size, _pose, _truncDist, _maxWeight, return cv::makePtr<TSDFVolumeGPU>(_res, _size, _pose, _truncDist, _maxWeight,
_raycastStepFactor); _raycastStepFactor);
default: default:
......
...@@ -38,7 +38,7 @@ public: ...@@ -38,7 +38,7 @@ public:
cv::Affine3f pose; cv::Affine3f pose;
}; };
cv::Ptr<TSDFVolume> makeTSDFVolume(cv::kinfu::KinFu::Params::PlatformType t, cv::Ptr<TSDFVolume> makeTSDFVolume(cv::kinfu::Params::PlatformType t,
int _res, float _size, cv::Affine3f _pose, float _truncDist, int _maxWeight, int _res, float _size, cv::Affine3f _pose, float _truncDist, int _maxWeight,
float _raycastStepFactor); float _raycastStepFactor);
......
...@@ -262,12 +262,11 @@ static const bool display = false; ...@@ -262,12 +262,11 @@ static const bool display = false;
TEST( KinectFusion, lowDense ) TEST( KinectFusion, lowDense )
{ {
kinfu::KinFu::Params params; Ptr<kinfu::Params> params = kinfu::Params::coarseParams();
params = kinfu::KinFu::Params::coarseParams();
RotatingScene scene(params.frameSize, params.intr, params.depthFactor); RotatingScene scene(params->frameSize, params->intr, params->depthFactor);
kinfu::KinFu kf(params); Ptr<kinfu::KinFu> kf = kinfu::KinFu::create(params);
std::vector<Affine3f> poses = scene.getPoses(); std::vector<Affine3f> poses = scene.getPoses();
Affine3f startPoseGT = poses[0], startPoseKF; Affine3f startPoseGT = poses[0], startPoseKF;
...@@ -278,9 +277,9 @@ TEST( KinectFusion, lowDense ) ...@@ -278,9 +277,9 @@ TEST( KinectFusion, lowDense )
Mat depth = scene.depth(pose); Mat depth = scene.depth(pose);
ASSERT_TRUE(kf.update(depth)); ASSERT_TRUE(kf->update(depth));
kfPose = kf.getPose(); kfPose = kf->getPose();
if(i == 0) if(i == 0)
startPoseKF = kfPose; startPoseKF = kfPose;
...@@ -288,9 +287,9 @@ TEST( KinectFusion, lowDense ) ...@@ -288,9 +287,9 @@ TEST( KinectFusion, lowDense )
if(display) if(display)
{ {
imshow("depth", depth*(1.f/params.depthFactor/4.f)); imshow("depth", depth*(1.f/params->depthFactor/4.f));
Mat rendered; Mat rendered;
kf.render(rendered); kf->render(rendered);
imshow("render", rendered); imshow("render", rendered);
waitKey(10); waitKey(10);
} }
...@@ -302,12 +301,10 @@ TEST( KinectFusion, lowDense ) ...@@ -302,12 +301,10 @@ TEST( KinectFusion, lowDense )
TEST( KinectFusion, highDense ) TEST( KinectFusion, highDense )
{ {
kinfu::KinFu::Params params; Ptr<kinfu::Params> params = kinfu::Params::defaultParams();
CubeSpheresScene scene(params->frameSize, params->intr, params->depthFactor);
params = kinfu::KinFu::Params::defaultParams(); Ptr<kinfu::KinFu> kf = kinfu::KinFu::create(params);
CubeSpheresScene scene(params.frameSize, params.intr, params.depthFactor);
kinfu::KinFu kf(params);
std::vector<Affine3f> poses = scene.getPoses(); std::vector<Affine3f> poses = scene.getPoses();
Affine3f startPoseGT = poses[0], startPoseKF; Affine3f startPoseGT = poses[0], startPoseKF;
...@@ -318,9 +315,9 @@ TEST( KinectFusion, highDense ) ...@@ -318,9 +315,9 @@ TEST( KinectFusion, highDense )
Mat depth = scene.depth(pose); Mat depth = scene.depth(pose);
ASSERT_TRUE(kf.update(depth)); ASSERT_TRUE(kf->update(depth));
kfPose = kf.getPose(); kfPose = kf->getPose();
if(i == 0) if(i == 0)
startPoseKF = kfPose; startPoseKF = kfPose;
...@@ -328,9 +325,9 @@ TEST( KinectFusion, highDense ) ...@@ -328,9 +325,9 @@ TEST( KinectFusion, highDense )
if(display) if(display)
{ {
imshow("depth", depth*(1.f/params.depthFactor/4.f)); imshow("depth", depth*(1.f/params->depthFactor/4.f));
Mat rendered; Mat rendered;
kf.render(rendered); kf->render(rendered);
imshow("render", rendered); imshow("render", rendered);
waitKey(10); waitKey(10);
} }
......