Commit ace94d2e authored by Alexey Spizhevoy

fixed bug in opencv_stitching (corrected resize images step), added matches checking (both 1->2 and 2->1 matches must be present)

parent 3ed42fcd
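Note (illustration only, not code from this commit): the mutual-match check described above amounts to keeping a 1->2 match only if the reverse 2->1 match was also found. A minimal standalone sketch of that rule, with made-up function names:

```cpp
#include <set>
#include <utility>
#include <vector>
#include "opencv2/features2d/features2d.hpp"

// Illustrative sketch: keep a match (i, j) only if it was found both when
// matching image 1 against image 2 and when matching image 2 against image 1.
static std::vector<cv::DMatch> crossCheck(const std::vector<cv::DMatch> &matches12,
                                          const std::vector<cv::DMatch> &matches21)
{
    std::set<std::pair<int, int> > forward;
    for (size_t i = 0; i < matches12.size(); ++i)
        forward.insert(std::make_pair(matches12[i].queryIdx, matches12[i].trainIdx));

    std::vector<cv::DMatch> mutual;
    for (size_t i = 0; i < matches21.size(); ++i)
    {
        const cv::DMatch &m = matches21[i];
        // In the 2->1 direction query/train are swapped, so look up (train, query).
        if (forward.find(std::make_pair(m.trainIdx, m.queryIdx)) != forward.end())
            mutual.push_back(cv::DMatch(m.trainIdx, m.queryIdx, m.distance));
    }
    return mutual;
}
```

This is the same mechanism the matcher changes below implement with a std::set of index pairs.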
@@ -75,7 +75,7 @@ void printUsage()
         "  --work_megapix <float>\n"
         "      Resolution for image registration step. The default is 0.6 Mpx.\n"
         "  --match_conf <float>\n"
-        "      Confidence for feature matching step. The default is 0.7.\n"
+        "      Confidence for feature matching step. The default is 0.65.\n"
         "  --conf_thresh <float>\n"
         "      Threshold for two images are from the same panorama confidence.\n"
         "      The default is 1.0.\n"
@@ -320,11 +320,14 @@ int main(int argc, char* argv[])
     Mat full_img, img;
     vector<Mat> images(num_images);
+    vector<Size> full_img_sizes(num_images);
     double seam_work_aspect = 1;
 
     for (int i = 0; i < num_images; ++i)
     {
         full_img = imread(img_names[i]);
+        full_img_sizes[i] = full_img.size();
+
         if (full_img.empty())
         {
             LOGLN("Can't open image " << img_names[i]);
@@ -376,14 +379,17 @@ int main(int argc, char* argv[])
     vector<int> indices = leaveBiggestComponent(features, pairwise_matches, conf_thresh);
     vector<Mat> img_subset;
     vector<string> img_names_subset;
+    vector<Size> full_img_sizes_subset;
     for (size_t i = 0; i < indices.size(); ++i)
    {
         img_names_subset.push_back(img_names[indices[i]]);
         img_subset.push_back(images[indices[i]]);
+        full_img_sizes_subset.push_back(full_img_sizes[indices[i]]);
     }
 
     images = img_subset;
     img_names = img_names_subset;
+    full_img_sizes = full_img_sizes_subset;
 
     // Check if we still have enough images
     num_images = static_cast<int>(img_names.size());
@@ -519,16 +525,21 @@ int main(int argc, char* argv[])
             warper = Warper::createByCameraFocal(warped_image_scale, warp_type);
 
             // Update corners and sizes
-            Rect dst_roi = resultRoi(corners, sizes);
             for (int i = 0; i < num_images; ++i)
             {
                 // Update camera focal
                 cameras[i].focal *= compose_work_aspect;
 
                 // Update corner and size
-                corners[i] = dst_roi.tl() + (corners[i] - dst_roi.tl()) * compose_seam_aspect;
-                sizes[i] = Size(static_cast<int>((sizes[i].width + 1) * compose_seam_aspect),
-                                static_cast<int>((sizes[i].height + 1) * compose_seam_aspect));
+                Size sz = full_img_sizes[i];
+                if (abs(compose_scale - 1) > 1e-1)
+                {
+                    sz.width = cvRound(full_img_sizes[i].width * compose_scale);
+                    sz.height = cvRound(full_img_sizes[i].height * compose_scale);
+                }
+                Rect roi = warper->warpRoi(sz, static_cast<float>(cameras[i].focal), cameras[i].R);
+                corners[i] = roi.tl();
+                sizes[i] = roi.size();
             }
         }
 
         if (abs(compose_scale - 1) > 1e-1)
@@ -539,7 +550,7 @@ int main(int argc, char* argv[])
         Size img_size = img.size();
 
         // Warp the current image
        warper->warp(img, static_cast<float>(cameras[img_idx].focal), cameras[img_idx].R,
                      img_warped);
 
        // Warp the current image mask
@@ -587,7 +598,7 @@ int main(int argc, char* argv[])
    }
 
    Mat result, result_mask;
    blender->blend(result, result_mask);
 
    LOGLN("Compositing, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
@@ -311,7 +311,7 @@ namespace
             const DMatch& m1 = pair_matches[i][1];
             if (m0.distance < (1.f - match_conf_) * m1.distance)
             {
-                matches_info.matches.push_back(m0);
+                //matches_info.matches.push_back(m0);
                 matches.insert(make_pair(m0.queryIdx, m0.trainIdx));
             }
         }
@@ -326,7 +326,7 @@ namespace
             const DMatch& m0 = pair_matches[i][0];
             const DMatch& m1 = pair_matches[i][1];
             if (m0.distance < (1.f - match_conf_) * m1.distance)
-                if (matches.find(make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
+                if (matches.find(make_pair(m0.trainIdx, m0.queryIdx)) != matches.end())
                     matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
         }
     }
@@ -352,7 +352,7 @@ namespace
             const DMatch& m1 = pair_matches[i][1];
             if (m0.distance < (1.f - match_conf_) * m1.distance)
             {
-                matches_info.matches.push_back(m0);
+                //matches_info.matches.push_back(m0);
                 matches.insert(make_pair(m0.queryIdx, m0.trainIdx));
             }
         }
@@ -368,7 +368,7 @@ namespace
             const DMatch& m0 = pair_matches[i][0];
             const DMatch& m1 = pair_matches[i][1];
             if (m0.distance < (1.f - match_conf_) * m1.distance)
-                if (matches.find(make_pair(m0.trainIdx, m0.queryIdx)) == matches.end())
+                if (matches.find(make_pair(m0.trainIdx, m0.queryIdx)) != matches.end())
                     matches_info.matches.push_back(DMatch(m0.trainIdx, m0.queryIdx, m0.distance));
         }
     }
@@ -38,119 +38,122 @@
 // or tort (including negligence or otherwise) arising in any way out of
 // the use of this software, even if advised of the possibility of such damage.
 //
 //M*/
 #ifndef __OPENCV_WARPERS_HPP__
 #define __OPENCV_WARPERS_HPP__
 
 #include "precomp.hpp"
 
 class Warper
 {
 public:
     enum { PLANE, CYLINDRICAL, SPHERICAL };
 
     static cv::Ptr<Warper> createByCameraFocal(float focal, int type);
 
     virtual ~Warper() {}
 
     virtual cv::Point warp(const cv::Mat &src, float focal, const cv::Mat& R, cv::Mat &dst,
                            int interp_mode = cv::INTER_LINEAR, int border_mode = cv::BORDER_REFLECT) = 0;
+
+    virtual cv::Rect warpRoi(const cv::Size &sz, float focal, const cv::Mat &R) = 0;
 };
 
 
 struct ProjectorBase
 {
     void setTransformation(const cv::Mat& R);
 
     cv::Size size;
     float focal;
     float r[9];
     float rinv[9];
     float scale;
 };
 
 
 template <class P>
 class WarperBase : public Warper
 {
 public:
     cv::Point warp(const cv::Mat &src, float focal, const cv::Mat &R, cv::Mat &dst,
                    int interp_mode, int border_mode);
 
+    cv::Rect warpRoi(const cv::Size &sz, float focal, const cv::Mat &R);
+
 protected:
     // Detects ROI of the destination image. It's correct for any projection.
     virtual void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);
 
     // Detects ROI of the destination image by walking over image border.
     // Correctness for any projection isn't guaranteed.
     void detectResultRoiByBorder(cv::Point &dst_tl, cv::Point &dst_br);
 
     cv::Size src_size_;
     P projector_;
 };
 
 
 struct PlaneProjector : ProjectorBase
 {
     void mapForward(float x, float y, float &u, float &v);
     void mapBackward(float u, float v, float &x, float &y);
 
     float plane_dist;
 };
 
 
 // Projects image onto z = plane_dist plane
 class PlaneWarper : public WarperBase<PlaneProjector>
 {
 public:
     PlaneWarper(float plane_dist = 1.f, float scale = 1.f)
     {
         projector_.plane_dist = plane_dist;
         projector_.scale = scale;
     }
 
 private:
     void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);
 };
 
 
 struct SphericalProjector : ProjectorBase
 {
     void mapForward(float x, float y, float &u, float &v);
     void mapBackward(float u, float v, float &x, float &y);
 };
 
 
 // Projects image onto unit sphere with origin at (0, 0, 0).
 // Poles are located at (0, -1, 0) and (0, 1, 0) points.
 class SphericalWarper : public WarperBase<SphericalProjector>
 {
 public:
     SphericalWarper(float scale = 300.f) { projector_.scale = scale; }
 
 private:
     void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br);
 };
 
 
 struct CylindricalProjector : ProjectorBase
 {
     void mapForward(float x, float y, float &u, float &v);
     void mapBackward(float u, float v, float &x, float &y);
 };
 
 
 // Projects image onto x * x + z * z = 1 cylinder
 class CylindricalWarper : public WarperBase<CylindricalProjector>
 {
 public:
     CylindricalWarper(float scale = 300.f) { projector_.scale = scale; }
 
 private:
     void detectResultRoi(cv::Point &dst_tl, cv::Point &dst_br)
     {
         WarperBase<CylindricalProjector>::detectResultRoiByBorder(dst_tl, dst_br);
     }
 };
 
 #include "warpers_inl.hpp"
 
 #endif // __OPENCV_WARPERS_HPP__
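The header above only declares warpRoi; its implementation is not part of this excerpt. A minimal sketch of how WarperBase<P>::warpRoi could be built on top of detectResultRoi, assuming the projector must be configured the same way warp() configures it, might look like this (illustrative only, not the commit's actual code):

```cpp
// Illustrative sketch. Assumes detectResultRoi() reports the inclusive
// bottom-right corner and relies on src_size_/projector_ being set first.
template <class P>
cv::Rect WarperBase<P>::warpRoi(const cv::Size &sz, float focal, const cv::Mat &R)
{
    src_size_ = sz;

    projector_.size = sz;
    projector_.focal = focal;
    projector_.setTransformation(R);

    cv::Point dst_tl, dst_br;
    detectResultRoi(dst_tl, dst_br);

    // +1 because dst_br is treated as the last covered pixel, not a past-the-end bound.
    return cv::Rect(dst_tl, cv::Point(dst_br.x + 1, dst_br.y + 1));
}
```

The stitching sample above uses warpRoi to recompute each image's corner and size at compositing resolution directly from the scaled full image size, instead of rescaling the seam-resolution estimates.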
@@ -32,7 +32,11 @@ int main()
 #include <cuda.h>
 #include <cuda_runtime.h>
+#include <GL/gl.h>
+#include <cudaGL.h>
 #include "opencv2/core/internal.hpp" // For TBB wrappers
+#include "tbb/tbb.h"
+#include "tbb/mutex.h"
 
 using namespace std;
 using namespace cv;
@@ -54,7 +58,7 @@ inline void safeCall_(int code, const char* expr, const char* file, int line)
 }
 
 // Each GPU is associated with its own context
-CUcontext contexts[2];
+CUcontext contexts[/*2*/1];
 
 void inline contextOn(int id)
 {
@@ -76,6 +80,10 @@ GpuMat d_result[2];
 // CPU result
 Mat result;
 
+int some[2];
+
+tbb::mutex mutex;
+
 int main(int argc, char** argv)
 {
     if (argc < 3)
@@ -85,11 +93,11 @@ int main(int argc, char** argv)
     }
 
     int num_devices = getCudaEnabledDeviceCount();
-    if (num_devices < 2)
-    {
-        std::cout << "Two or more GPUs are required\n";
-        return -1;
-    }
+//    if (num_devices < 2)
+//    {
+//        std::cout << "Two or more GPUs are required\n";
+//        return -1;
+//    }
 
     for (int i = 0; i < num_devices; ++i)
     {
@@ -123,13 +131,14 @@ int main(int argc, char** argv)
     // Create context for GPU #0
     CUdevice device;
     safeCall(cuDeviceGet(&device, 0));
-    safeCall(cuCtxCreate(&contexts[0], 0, device));
+    safeCall(cuGLCtxCreate(&contexts[0], 0, device));
+    //safeCall(cuCtxCreate(&contexts[0], 0, device));
     contextOff();
 
-    // Create context for GPU #1
-    safeCall(cuDeviceGet(&device, 1));
-    safeCall(cuCtxCreate(&contexts[1], 0, device));
-    contextOff();
+//    // Create context for GPU #1
+//    safeCall(cuDeviceGet(&device, 0));
+//    safeCall(cuCtxCreate(&contexts[1], 0, device));
+//    contextOff();
 
     // Split source images for processing on GPU #0
     contextOn(0);
@@ -139,15 +148,20 @@ int main(int argc, char** argv)
     contextOff();
 
     // Split source images for processing on the GPU #1
-    contextOn(1);
+    contextOn(0);
     d_left[1].upload(left.rowRange(left.rows / 2, left.rows));
     d_right[1].upload(right.rowRange(right.rows / 2, right.rows));
     bm[1] = new StereoBM_GPU();
     contextOff();
 
+    some[0] = some[1] = 0;
+
     // Execute calculation in two threads using two GPUs
-    int devices[] = {0, 1};
-    parallel_do(devices, devices + 2, Worker());
+    vector<int> devices;
+    for (int i = 0; i < 4; ++i)
+        devices.push_back(rand()%2);
+    tbb::parallel_do(&devices[0], &devices[devices.size() - 1], Worker());
+    cout << some[0] << " " << some[1] << endl;
 
     // Release the first GPU resources
     contextOn(0);
@@ -159,7 +173,7 @@ int main(int argc, char** argv)
     contextOff();
 
     // Release the second GPU resources
-    contextOn(1);
+    contextOn(0);
     imshow("GPU #1 result", Mat(d_result[1]));
     d_left[1].release();
     d_right[1].release();
@@ -175,7 +189,9 @@ int main(int argc, char** argv)
 
 void Worker::operator()(int device_id) const
 {
-    contextOn(device_id);
+    mutex.lock();
+    contextOn(0);
 
     bm[device_id]->operator()(d_left[device_id], d_right[device_id],
                               d_result[device_id]);
@@ -184,13 +200,16 @@ void Worker::operator()(int device_id) const
               << "): finished\n";
 
     contextOff();
+
+    mutex.unlock();
 }
 
 void destroyContexts()
 {
     safeCall(cuCtxDestroy(contexts[0]));
-    safeCall(cuCtxDestroy(contexts[1]));
+    //safeCall(cuCtxDestroy(contexts[1]));
 }
 
 #endif
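For reference, tbb::parallel_do processes the half-open iterator range [first, last). A minimal standalone sketch of the same call pattern used above, independent of this sample and with illustrative names:

```cpp
#include <cstdio>
#include <vector>
#include "tbb/tbb.h"

// Minimal illustration of tbb::parallel_do over a half-open range [first, last).
struct PrintItem
{
    void operator()(int item) const { std::printf("item %d\n", item); }
};

int main()
{
    std::vector<int> items;
    for (int i = 0; i < 4; ++i)
        items.push_back(i);

    // Past-the-end pointer as the second argument, so every element is processed.
    tbb::parallel_do(&items[0], &items[0] + items.size(), PrintItem());
    return 0;
}
```

Passing `&items[0] + items.size()` makes the past-the-end bound of the range explicit.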