Commit 1ba7c728 authored by Jiri Horner's avatar Jiri Horner Committed by Alexander Alekhin

Merge pull request #12827 from hrnr:stitching_4

[evolution] Stitching for OpenCV 4.0

* stitching: wrap Stitcher::create for bindings

* provide a method for consistent stitcher usage across languages

* samples: add python stitching sample

* port cpp stitching sample to python

* stitching: consolidate Stitcher create methods

* remove Stitcher::createDefault: it returns Stitcher rather than Ptr<Stitcher>, which makes the API inconsistent
* deprecate cv::createStitcher and cv::createStitcherScans in favor of Stitcher::create

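A minimal sketch of the consolidated creation API (the deprecated free functions still compile, but warn):

    #include <opencv2/stitching.hpp>

    int main()
    {
        // Stitcher::create is now the single factory for both modes and is
        // wrapped for bindings; it replaces Stitcher::createDefault,
        // cv::createStitcher and cv::createStitcherScans.
        cv::Ptr<cv::Stitcher> pano_stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
        cv::Ptr<cv::Stitcher> scan_stitcher = cv::Stitcher::create(cv::Stitcher::SCANS);
        return 0;
    }
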
* stitching: avoid anonymous enum in Stitcher

* ORIG_RESOL should be double
* add documentation

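ORIG_RESOL is now a typed constant (static constexpr const double ORIG_RESOL = -1.0;) instead of an anonymous enum member, so it matches the double-typed resolution setters. A fragment, assuming stitcher is a Ptr<Stitcher>:

    // Request compositing at the original input resolution.
    stitcher->setCompositingResol(cv::Stitcher::ORIG_RESOL);
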
* stitching: improve documentation in Stitcher

* stitching: expose estimator in Stitcher

* remove ABI hack

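With the estimator exposed (and the dynamic_cast-on-matcher workaround removed), the estimation step is configured like any other component; a sketch:

    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::SCANS);
    // The estimator is now a public, settable component; previously it was
    // chosen internally via a dynamic_cast on the matcher type.
    stitcher->setEstimator(cv::makePtr<cv::detail::AffineBasedEstimator>());
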
* stitching: drop try_use_gpu flag

* OCL will be used automatically through T-API in OCL-enabled paths
* CUDA won't be used unless the user sets CUDA-enabled classes manually

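A hedged sketch of opting into GPU paths after the flag removal (assumes a CUDA-enabled build):

    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
    // OpenCL needs no flag: feeding UMat inputs takes the T-API paths automatically.
    // CUDA is opt-in only, by setting CUDA-enabled components explicitly:
    #ifdef HAVE_OPENCV_CUDAWARPING
    stitcher->setWarper(cv::makePtr<cv::SphericalWarperGpu>());
    #endif
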
* stitching: drop FeaturesFinder

* use Feature2D instead of FeaturesFinder
* interoperability with features2d module
* remove the hard dependency on xfeatures2d

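Any cv::Feature2D now plugs in directly, so the detail::*FeaturesFinder wrappers (and the mandatory xfeatures2d dependency) go away; a sketch:

    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create();
    stitcher->setFeaturesFinder(cv::ORB::create()); // any features2d detector works
    // detail::computeImageFeatures() bridges Feature2D to detail::ImageFeatures
    // for code that drives the detail pipeline directly.
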
* features2d: fix compute and detect to work with UMat vectors

* correctly pass UMats as UMats to allow OCL paths
* support vector of UMats as output arg

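A sketch of the repaired detect/compute overloads driving the OCL path with UMat vectors (file names are placeholders):

    #include <opencv2/core.hpp>
    #include <opencv2/features2d.hpp>
    #include <opencv2/imgcodecs.hpp>

    int main()
    {
        std::vector<cv::UMat> images(2);
        cv::imread("a1.png", cv::IMREAD_GRAYSCALE).copyTo(images[0]);
        cv::imread("a2.png", cv::IMREAD_GRAYSCALE).copyTo(images[1]);

        std::vector<std::vector<cv::KeyPoint> > keypoints;
        std::vector<cv::UMat> descriptors; // vector<UMat> output is now supported

        cv::Ptr<cv::Feature2D> orb = cv::ORB::create();
        orb->detect(images, keypoints);                // UMats stay UMats -> OCL paths
        orb->compute(images, keypoints, descriptors);  // no silent download to Mat
        return 0;
    }
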
* stitching: use nearest interpolation for resizing masks

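Rationale: bilinear resizing of a binary keypoint mask produces intermediate gray values along its edges, while INTER_NEAREST keeps the mask strictly 0/255. A fragment mirroring Stitcher::matchImages() (variable names assumed):

    cv::UMat scaled_mask;
    cv::resize(mask, scaled_mask, cv::Size(), work_scale, work_scale, cv::INTER_NEAREST);
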
* fix warnings
parent be9b676d
@@ -71,29 +71,38 @@ void Feature2D::detect( InputArray image,
}
void Feature2D::detect( InputArrayOfArrays _images,
void Feature2D::detect( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints,
InputArrayOfArrays _masks )
InputArrayOfArrays masks )
{
CV_INSTRUMENT_REGION();
vector<Mat> images, masks;
int nimages = (int)images.total();
_images.getMatVector(images);
size_t i, nimages = images.size();
if( !_masks.empty() )
if (!masks.empty())
{
_masks.getMatVector(masks);
CV_Assert(masks.size() == nimages);
CV_Assert(masks.total() == (size_t)nimages);
}
keypoints.resize(nimages);
for( i = 0; i < nimages; i++ )
if (images.isMatVector())
{
for (int i = 0; i < nimages; i++)
{
detect(images.getMat(i), keypoints[i], masks.empty() ? noArray() : masks.getMat(i));
}
}
else
{
detect(images[i], keypoints[i], masks.empty() ? Mat() : masks[i] );
// assume UMats
for (int i = 0; i < nimages; i++)
{
detect(images.getUMat(i), keypoints[i], masks.empty() ? noArray() : masks.getUMat(i));
}
}
}
/*
@@ -116,29 +125,40 @@ void Feature2D::compute( InputArray image,
detectAndCompute(image, noArray(), keypoints, descriptors, true);
}
void Feature2D::compute( InputArrayOfArrays _images,
void Feature2D::compute( InputArrayOfArrays images,
std::vector<std::vector<KeyPoint> >& keypoints,
OutputArrayOfArrays _descriptors )
OutputArrayOfArrays descriptors )
{
CV_INSTRUMENT_REGION();
if( !_descriptors.needed() )
if( !descriptors.needed() )
return;
vector<Mat> images;
_images.getMatVector(images);
size_t i, nimages = images.size();
int nimages = (int)images.total();
CV_Assert( keypoints.size() == nimages );
CV_Assert( _descriptors.kind() == _InputArray::STD_VECTOR_MAT );
vector<Mat>& descriptors = *(vector<Mat>*)_descriptors.getObj();
descriptors.resize(nimages);
for( i = 0; i < nimages; i++ )
CV_Assert( keypoints.size() == (size_t)nimages );
// resize descriptors to appropriate size and compute
if (descriptors.isMatVector())
{
vector<Mat>& vec = *(vector<Mat>*)descriptors.getObj();
vec.resize(nimages);
for (int i = 0; i < nimages; i++)
{
compute(images.getMat(i), keypoints[i], vec[i]);
}
}
else if (descriptors.isUMatVector())
{
vector<UMat>& vec = *(vector<UMat>*)descriptors.getObj();
vec.resize(nimages);
for (int i = 0; i < nimages; i++)
{
compute(images.getUMat(i), keypoints[i], vec[i]);
}
}
else
{
compute(images[i], keypoints[i], descriptors[i]);
CV_Error(Error::StsBadArg, "descriptors must be vector<Mat> or vector<UMat>");
}
}
@@ -10,7 +10,7 @@ class stitching_test(NewOpenCVTests):
img1 = self.get_sample('stitching/a1.png')
img2 = self.get_sample('stitching/a2.png')
stitcher = cv.createStitcher(False)
stitcher = cv.Stitcher.create(cv.Stitcher_PANORAMA)
(_result, pano) = stitcher.stitch((img1, img2))
#cv.imshow("pano", pano)
@@ -113,6 +113,10 @@ namespace cv {
A basic example on image stitching
*/
/** @example samples/python/stitching.py
A basic example on image stitching in Python.
*/
/** @example samples/cpp/stitching_detailed.cpp
A detailed example on image stitching
*/
@@ -124,15 +128,22 @@ be able to achieve higher stitching stability and quality of the final images at
familiar with the theory is recommended.
@note
- A basic example on image stitching can be found at
- A basic example on image stitching can be found at
opencv_source_code/samples/cpp/stitching.cpp
- A detailed example on image stitching can be found at
- A basic example on image stitching in Python can be found at
opencv_source_code/samples/python/stitching.py
- A detailed example on image stitching can be found at
opencv_source_code/samples/cpp/stitching_detailed.cpp
*/
class CV_EXPORTS_W Stitcher
{
public:
enum { ORIG_RESOL = -1 };
/**
* When setting a resolution for stitching, this value is a placeholder
* for preserving the original resolution.
*/
static constexpr const double ORIG_RESOL = -1.0;
enum Status
{
OK = 0,
@@ -140,6 +151,7 @@ public:
ERR_HOMOGRAPHY_EST_FAIL = 2,
ERR_CAMERA_PARAMS_ADJUST_FAIL = 3
};
enum Mode
{
/** Mode for creating photo panoramas. Expects images under perspective
@@ -157,22 +169,14 @@ public:
};
// Stitcher() {}
/** @brief Creates a stitcher with the default parameters.
@param try_use_gpu Flag indicating whether GPU should be used whenever it's possible.
@return Stitcher class instance.
*/
static Stitcher createDefault(bool try_use_gpu = false);
/** @brief Creates a Stitcher configured in one of the stitching modes.
@param mode Scenario for stitcher operation. This is usually determined by source of images
to stitch and their transformation. Default parameters will be chosen for operation in given
scenario.
@param try_use_gpu Flag indicating whether GPU should be used whenever it's possible.
@return Stitcher class instance.
*/
static Ptr<Stitcher> create(Mode mode = PANORAMA, bool try_use_gpu = false);
CV_WRAP static Ptr<Stitcher> create(Mode mode = Stitcher::PANORAMA);
CV_WRAP double registrationResol() const { return registr_resol_; }
CV_WRAP void setRegistrationResol(double resol_mpx) { registr_resol_ = resol_mpx; }
@@ -192,9 +196,9 @@ public:
detail::WaveCorrectKind waveCorrectKind() const { return wave_correct_kind_; }
void setWaveCorrectKind(detail::WaveCorrectKind kind) { wave_correct_kind_ = kind; }
Ptr<detail::FeaturesFinder> featuresFinder() { return features_finder_; }
const Ptr<detail::FeaturesFinder> featuresFinder() const { return features_finder_; }
void setFeaturesFinder(Ptr<detail::FeaturesFinder> features_finder)
Ptr<Feature2D> featuresFinder() { return features_finder_; }
const Ptr<Feature2D> featuresFinder() const { return features_finder_; }
void setFeaturesFinder(Ptr<Feature2D> features_finder)
{ features_finder_ = features_finder; }
Ptr<detail::FeaturesMatcher> featuresMatcher() { return features_matcher_; }
@@ -214,12 +218,10 @@ public:
void setBundleAdjuster(Ptr<detail::BundleAdjusterBase> bundle_adjuster)
{ bundle_adjuster_ = bundle_adjuster; }
/* TODO OpenCV ABI 4.x
Ptr<detail::Estimator> estimator() { return estimator_; }
const Ptr<detail::Estimator> estimator() const { return estimator_; }
void setEstimator(Ptr<detail::Estimator> estimator)
{ estimator_ = estimator; }
*/
Ptr<WarperCreator> warper() { return warper_; }
const Ptr<WarperCreator> warper() const { return warper_; }
@@ -238,18 +240,16 @@ public:
const Ptr<detail::Blender> blender() const { return blender_; }
void setBlender(Ptr<detail::Blender> b) { blender_ = b; }
/** @overload */
CV_WRAP Status estimateTransform(InputArrayOfArrays images);
/** @brief These functions try to match the given images and to estimate rotations of each camera.
@note Use the functions only if you're aware of the stitching pipeline, otherwise use
Stitcher::stitch.
@param images Input images.
@param rois Region of interest rectangles.
@param masks Masks for each input image specifying where to look for keypoints (optional).
@return Status code.
*/
Status estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois);
CV_WRAP Status estimateTransform(InputArrayOfArrays images, InputArrayOfArrays masks = noArray());
/** @overload */
CV_WRAP Status composePanorama(OutputArray pano);
@@ -271,19 +271,17 @@ public:
/** @brief These functions try to stitch the given images.
@param images Input images.
@param rois Region of interest rectangles.
@param masks Masks for each input image specifying where to look for keypoints (optional).
@param pano Final pano.
@return Status code.
*/
Status stitch(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois, OutputArray pano);
CV_WRAP Status stitch(InputArrayOfArrays images, InputArrayOfArrays masks, OutputArray pano);
std::vector<int> component() const { return indices_; }
std::vector<detail::CameraParams> cameras() const { return cameras_; }
CV_WRAP double workScale() const { return work_scale_; }
private:
//Stitcher() {}
Status matchImages();
Status estimateCameraParams();
@@ -291,13 +289,11 @@ private:
double seam_est_resol_;
double compose_resol_;
double conf_thresh_;
Ptr<detail::FeaturesFinder> features_finder_;
Ptr<Feature2D> features_finder_;
Ptr<detail::FeaturesMatcher> features_matcher_;
cv::UMat matching_mask_;
Ptr<detail::BundleAdjusterBase> bundle_adjuster_;
/* TODO OpenCV ABI 4.x
Ptr<detail::Estimator> estimator_;
*/
bool do_wave_correct_;
detail::WaveCorrectKind wave_correct_kind_;
Ptr<WarperCreator> warper_;
@@ -306,7 +302,7 @@ private:
Ptr<detail::Blender> blender_;
std::vector<cv::UMat> imgs_;
std::vector<std::vector<cv::Rect> > rois_;
std::vector<cv::UMat> masks_;
std::vector<cv::Size> full_img_sizes_;
std::vector<detail::ImageFeatures> features_;
std::vector<detail::MatchesInfo> pairwise_matches_;
@@ -319,8 +315,15 @@ private:
double warped_image_scale_;
};
CV_EXPORTS_W Ptr<Stitcher> createStitcher(bool try_use_gpu = false);
CV_EXPORTS_W Ptr<Stitcher> createStitcherScans(bool try_use_gpu = false);
/**
* @deprecated use Stitcher::create
*/
CV_DEPRECATED Ptr<Stitcher> createStitcher(bool try_use_gpu = false);
/**
* @deprecated use Stitcher::create
*/
CV_DEPRECATED Ptr<Stitcher> createStitcherScans(bool try_use_gpu = false);
//! @} stitching
@@ -48,10 +48,6 @@
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
# include "opencv2/xfeatures2d/cuda.hpp"
#endif
namespace cv {
namespace detail {
@@ -67,149 +63,17 @@ struct CV_EXPORTS ImageFeatures
UMat descriptors;
};
/** @brief Feature finders base class */
class CV_EXPORTS FeaturesFinder
{
public:
virtual ~FeaturesFinder() {}
/** @overload */
void operator ()(InputArray image, ImageFeatures &features);
/** @brief Finds features in the given image.
@param image Source image
@param features Found features
@param rois Regions of interest
@sa detail::ImageFeatures, Rect_
*/
void operator ()(InputArray image, ImageFeatures &features, const std::vector<cv::Rect> &rois);
/** @brief Finds features in the given images in parallel.
@param images Source images
@param features Found features for each image
@param rois Regions of interest for each image
@sa detail::ImageFeatures, Rect_
*/
void operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features,
const std::vector<std::vector<cv::Rect> > &rois);
/** @overload */
void operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features);
/** @brief Frees unused memory allocated before if there is any. */
virtual void collectGarbage() {}
/* TODO OpenCV ABI 4.x
reimplement this as public method similar to FeaturesMatcher and remove private function hack
@return True, if it's possible to use the same finder instance in parallel, false otherwise
bool isThreadSafe() const { return is_thread_safe_; }
*/
protected:
/** @brief This method must implement features finding logic in order to make the wrappers
detail::FeaturesFinder::operator()_ work.
@param image Source image
@param features Found features
@sa detail::ImageFeatures */
virtual void find(InputArray image, ImageFeatures &features) = 0;
/** @brief uses dynamic_cast to determine thread-safety
@return True, if it's possible to use the same finder instance in parallel, false otherwise
*/
bool isThreadSafe() const;
};
/** @brief SURF features finder.
@sa detail::FeaturesFinder, SURF
*/
class CV_EXPORTS SurfFeaturesFinder : public FeaturesFinder
{
public:
SurfFeaturesFinder(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4,
int num_octaves_descr = /*4*/3, int num_layers_descr = /*2*/4);
private:
void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
Ptr<FeatureDetector> detector_;
Ptr<DescriptorExtractor> extractor_;
Ptr<Feature2D> surf;
};
/** @brief SIFT features finder.
@sa detail::FeaturesFinder, SIFT
*/
class CV_EXPORTS SiftFeaturesFinder : public FeaturesFinder
{
public:
SiftFeaturesFinder();
private:
void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
Ptr<Feature2D> sift;
};
/** @brief ORB features finder. :
@sa detail::FeaturesFinder, ORB
*/
class CV_EXPORTS OrbFeaturesFinder : public FeaturesFinder
{
public:
OrbFeaturesFinder(Size _grid_size = Size(3,1), int nfeatures=1500, float scaleFactor=1.3f, int nlevels=5);
private:
void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
Ptr<ORB> orb;
Size grid_size;
};
/** @brief AKAZE features finder. :
@sa detail::FeaturesFinder, AKAZE
*/
class CV_EXPORTS AKAZEFeaturesFinder : public detail::FeaturesFinder
{
public:
AKAZEFeaturesFinder(AKAZE::DescriptorType descriptor_type = AKAZE::DESCRIPTOR_MLDB,
int descriptor_size = 0,
int descriptor_channels = 3,
float threshold = 0.001f,
int nOctaves = 4,
int nOctaveLayers = 4,
KAZE::DiffusivityType diffusivity = KAZE::DIFF_PM_G2);
private:
void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
Ptr<AKAZE> akaze;
};
#ifdef HAVE_OPENCV_XFEATURES2D
class CV_EXPORTS SurfFeaturesFinderGpu : public FeaturesFinder
{
public:
SurfFeaturesFinderGpu(double hess_thresh = 300., int num_octaves = 3, int num_layers = 4,
int num_octaves_descr = 4, int num_layers_descr = 2);
void collectGarbage() CV_OVERRIDE;
private:
void find(InputArray image, ImageFeatures &features) CV_OVERRIDE;
cuda::GpuMat image_;
cuda::GpuMat gray_image_;
cuda::SURF_CUDA surf_;
cuda::GpuMat keypoints_;
cuda::GpuMat descriptors_;
int num_octaves_, num_layers_;
int num_octaves_descr_, num_layers_descr_;
};
#endif
CV_EXPORTS void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArrayOfArrays images,
std::vector<ImageFeatures> &features,
InputArrayOfArrays masks = noArray());
CV_EXPORTS void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArray image,
ImageFeatures &features,
InputArray mask = noArray());
/** @brief Structure containing information about matches between two images.
#ifdef HAVE_OPENCV_STITCHING
typedef Stitcher::Status Status;
typedef Stitcher::Mode Mode;
#endif
@@ -35,7 +35,7 @@ OCL_PERF_TEST_P(stitch, a123, TEST_DETECTORS)
_imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
vector<UMat> imgs = ToUMat(_imgs);
Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
@@ -44,14 +44,14 @@ OCL_PERF_TEST_P(stitch, a123, TEST_DETECTORS)
while(next())
{
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher.stitch(imgs, pano);
stitcher->stitch(imgs, pano);
stopTimer();
}
@@ -69,7 +69,7 @@ OCL_PERF_TEST_P(stitch, b12, TEST_DETECTORS)
imgs.push_back( imread( getDataPath("stitching/b1.png") ) );
imgs.push_back( imread( getDataPath("stitching/b2.png") ) );
Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
@@ -78,14 +78,14 @@ OCL_PERF_TEST_P(stitch, b12, TEST_DETECTORS)
while(next())
{
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher.stitch(imgs, pano);
stitcher->stitch(imgs, pano);
stopTimer();
}
@@ -116,7 +116,7 @@ OCL_PERF_TEST_P(stitch, boat, TEST_DETECTORS)
_imgs.push_back( imread( getDataPath("stitching/boat6.jpg") ) );
vector<UMat> imgs = ToUMat(_imgs);
Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
: makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
@@ -125,14 +125,14 @@ OCL_PERF_TEST_P(stitch, boat, TEST_DETECTORS)
while(next())
{
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher.stitch(imgs, pano);
stitcher->stitch(imgs, pano);
stopTimer();
}
@@ -28,13 +28,9 @@ PERF_TEST_P(bundleAdjuster, affine, testing::Combine(TEST_DETECTORS, AFFINE_FUNC
string detector = get<0>(GetParam());
string affine_fun = get<1>(GetParam());
Ptr<detail::FeaturesFinder> finder;
Ptr<Feature2D> finder = getFeatureFinder(detector);
Ptr<detail::FeaturesMatcher> matcher;
Ptr<detail::BundleAdjusterBase> bundle_adjuster;
if (detector == "surf")
finder = makePtr<detail::SurfFeaturesFinder>();
else if (detector == "orb")
finder = makePtr<detail::OrbFeaturesFinder>();
if (affine_fun == "affinePartial")
{
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false);
@@ -54,7 +50,7 @@ PERF_TEST_P(bundleAdjuster, affine, testing::Combine(TEST_DETECTORS, AFFINE_FUNC
std::vector<detail::CameraParams> cameras;
std::vector<detail::CameraParams> cameras2;
(*finder)(images, features);
computeImageFeatures(finder, images, features);
(*matcher)(features, pairwise_matches);
if (!(*estimator)(features, pairwise_matches, cameras))
FAIL() << "estimation failed. this should never happen.";
@@ -29,11 +29,11 @@ PERF_TEST_P(FeaturesFinderVec, ParallelFeaturesFinder, NUMBER_IMAGES)
vector<Mat> imgs(GetParam(), img);
vector<detail::ImageFeatures> features(imgs.size());
Ptr<detail::FeaturesFinder> featuresFinder = makePtr<detail::OrbFeaturesFinder>();
Ptr<Feature2D> finder = ORB::create();
TEST_CYCLE()
{
(*featuresFinder)(imgs, features);
detail::computeImageFeatures(finder, imgs, features);
}
SANITY_CHECK_NOTHING();
@@ -45,12 +45,12 @@ PERF_TEST_P(FeaturesFinderVec, SerialFeaturesFinder, NUMBER_IMAGES)
vector<Mat> imgs(GetParam(), img);
vector<detail::ImageFeatures> features(imgs.size());
Ptr<detail::FeaturesFinder> featuresFinder = makePtr<detail::OrbFeaturesFinder>();
Ptr<Feature2D> finder = ORB::create();
TEST_CYCLE()
{
for (size_t i = 0; i < imgs.size(); ++i)
(*featuresFinder)(imgs[i], features[i]);
detail::computeImageFeatures(finder, imgs[i], features[i]);
}
SANITY_CHECK_NOTHING();
@@ -65,16 +65,14 @@ PERF_TEST_P( match, bestOf2Nearest, TEST_DETECTORS)
resize(img1_full, img1, Size(), scale1, scale1, INTER_LINEAR_EXACT);
resize(img2_full, img2, Size(), scale2, scale2, INTER_LINEAR_EXACT);
Ptr<detail::FeaturesFinder> finder;
Ptr<Feature2D> finder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> matcher;
if (GetParam() == "surf")
{
finder = makePtr<detail::SurfFeaturesFinder>();
matcher = makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
}
else if (GetParam() == "orb")
{
finder = makePtr<detail::OrbFeaturesFinder>();
matcher = makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE);
}
else
@@ -83,8 +81,8 @@ PERF_TEST_P( match, bestOf2Nearest, TEST_DETECTORS)
}
detail::ImageFeatures features1, features2;
(*finder)(img1, features1);
(*finder)(img2, features2);
detail::computeImageFeatures(finder, img1, features1);
detail::computeImageFeatures(finder, img2, features2);
detail::MatchesInfo pairwise_matches;
@@ -118,18 +116,16 @@ PERF_TEST_P( matchVector, bestOf2NearestVectorFeatures, testing::Combine(
resize(img1_full, img1, Size(), scale1, scale1, INTER_LINEAR_EXACT);
resize(img2_full, img2, Size(), scale2, scale2, INTER_LINEAR_EXACT);
Ptr<detail::FeaturesFinder> finder;
Ptr<detail::FeaturesMatcher> matcher;
string detectorName = get<0>(GetParam());
int featuresVectorSize = get<1>(GetParam());
Ptr<Feature2D> finder = getFeatureFinder(detectorName);
Ptr<detail::FeaturesMatcher> matcher;
if (detectorName == "surf")
{
finder = makePtr<detail::SurfFeaturesFinder>();
matcher = makePtr<detail::BestOf2NearestMatcher>(false, SURF_MATCH_CONFIDENCE);
}
else if (detectorName == "orb")
{
finder = makePtr<detail::OrbFeaturesFinder>();
matcher = makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE);
}
else
@@ -138,8 +134,8 @@ PERF_TEST_P( matchVector, bestOf2NearestVectorFeatures, testing::Combine(
}
detail::ImageFeatures features1, features2;
(*finder)(img1, features1);
(*finder)(img2, features2);
detail::computeImageFeatures(finder, img1, features1);
detail::computeImageFeatures(finder, img2, features2);
vector<detail::ImageFeatures> features;
vector<detail::MatchesInfo> pairwise_matches;
for(int i = 0; i < featuresVectorSize/2; i++)
@@ -183,16 +179,14 @@ PERF_TEST_P( match, affineBestOf2Nearest, TEST_DETECTORS)
resize(img1_full, img1, Size(), scale1, scale1, INTER_LINEAR_EXACT);
resize(img2_full, img2, Size(), scale2, scale2, INTER_LINEAR_EXACT);
Ptr<detail::FeaturesFinder> finder;
Ptr<Feature2D> finder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> matcher;
if (GetParam() == "surf")
{
finder = makePtr<detail::SurfFeaturesFinder>();
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false, false, SURF_MATCH_CONFIDENCE);
}
else if (GetParam() == "orb")
{
finder = makePtr<detail::OrbFeaturesFinder>();
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false, false, ORB_MATCH_CONFIDENCE);
}
else
@@ -201,8 +195,8 @@ PERF_TEST_P( match, affineBestOf2Nearest, TEST_DETECTORS)
}
detail::ImageFeatures features1, features2;
(*finder)(img1, features1);
(*finder)(img2, features2);
detail::computeImageFeatures(finder, img1, features1);
detail::computeImageFeatures(finder, img2, features2);
detail::MatchesInfo pairwise_matches;
@@ -242,18 +236,16 @@ PERF_TEST_P( matchVector, affineBestOf2NearestVectorFeatures, testing::Combine(
resize(img1_full, img1, Size(), scale1, scale1, INTER_LINEAR_EXACT);
resize(img2_full, img2, Size(), scale2, scale2, INTER_LINEAR_EXACT);
Ptr<detail::FeaturesFinder> finder;
Ptr<detail::FeaturesMatcher> matcher;
string detectorName = get<0>(GetParam());
int featuresVectorSize = get<1>(GetParam());
Ptr<Feature2D> finder = getFeatureFinder(detectorName);
Ptr<detail::FeaturesMatcher> matcher;
if (detectorName == "surf")
{
finder = makePtr<detail::SurfFeaturesFinder>();
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false, false, SURF_MATCH_CONFIDENCE);
}
else if (detectorName == "orb")
{
finder = makePtr<detail::OrbFeaturesFinder>();
matcher = makePtr<detail::AffineBestOf2NearestMatcher>(false, false, ORB_MATCH_CONFIDENCE);
}
else
@@ -262,8 +254,8 @@ PERF_TEST_P( matchVector, affineBestOf2NearestVectorFeatures, testing::Combine(
}
detail::ImageFeatures features1, features2;
(*finder)(img1, features1);
(*finder)(img2, features2);
detail::computeImageFeatures(finder, img1, features1);
detail::computeImageFeatures(finder, img2, features2);
vector<detail::ImageFeatures> features;
vector<detail::MatchesInfo> pairwise_matches;
for(int i = 0; i < featuresVectorSize/2; i++)
@@ -288,12 +280,12 @@ PERF_TEST_P( matchVector, affineBestOf2NearestVectorFeatures, testing::Combine(
if (pairwise_matches[i].src_img_idx < 0)
continue;
EXPECT_TRUE(pairwise_matches[i].matches.size() > 400);
EXPECT_GT(pairwise_matches[i].matches.size(), 200u);
EXPECT_FALSE(pairwise_matches[i].H.empty());
++matches_count;
}
EXPECT_TRUE(matches_count > 0);
EXPECT_GT(matches_count, 0u);
SANITY_CHECK_NOTHING();
}
@@ -4,19 +4,25 @@
#include "opencv2/ts.hpp"
#include "opencv2/stitching.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d/nonfree.hpp"
#endif
namespace cv
{
static inline Ptr<detail::FeaturesFinder> getFeatureFinder(const std::string& name)
static inline Ptr<Feature2D> getFeatureFinder(const std::string& name)
{
if (name == "orb")
return makePtr<detail::OrbFeaturesFinder>();
return ORB::create();
#ifdef HAVE_OPENCV_XFEATURES2D
else if (name == "surf")
return makePtr<detail::SurfFeaturesFinder>();
return xfeatures2d::SURF::create();
#endif
else if (name == "akaze")
return makePtr<detail::AKAZEFeaturesFinder>();
return AKAZE::create();
else
return Ptr<detail::FeaturesFinder>();
return Ptr<Feature2D>();
}
} // namespace cv
@@ -31,7 +31,7 @@ PERF_TEST_P(stitch, a123, TEST_DETECTORS)
imgs.push_back( imread( getDataPath("stitching/a2.png") ) );
imgs.push_back( imread( getDataPath("stitching/a3.png") ) );
Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
@@ -41,14 +41,14 @@ PERF_TEST_P(stitch, a123, TEST_DETECTORS)
while(next())
{
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher.stitch(imgs, pano);
stitcher->stitch(imgs, pano);
stopTimer();
}
@@ -66,7 +66,7 @@ PERF_TEST_P(stitch, b12, TEST_DETECTORS)
imgs.push_back( imread( getDataPath("stitching/b1.png") ) );
imgs.push_back( imread( getDataPath("stitching/b2.png") ) );
Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(GetParam());
Ptr<Feature2D> featuresFinder = getFeatureFinder(GetParam());
Ptr<detail::FeaturesMatcher> featuresMatcher = GetParam() == "orb"
? makePtr<detail::BestOf2NearestMatcher>(false, ORB_MATCH_CONFIDENCE)
@@ -76,14 +76,14 @@ PERF_TEST_P(stitch, b12, TEST_DETECTORS)
while(next())
{
Stitcher stitcher = Stitcher::createDefault();
stitcher.setFeaturesFinder(featuresFinder);
stitcher.setFeaturesMatcher(featuresMatcher);
stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setRegistrationResol(WORK_MEGAPIX);
Ptr<Stitcher> stitcher = Stitcher::create();
stitcher->setFeaturesFinder(featuresFinder);
stitcher->setFeaturesMatcher(featuresMatcher);
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setRegistrationResol(WORK_MEGAPIX);
startTimer();
stitcher.stitch(imgs, pano);
stitcher->stitch(imgs, pano);
stopTimer();
}
@@ -101,7 +101,7 @@ PERF_TEST_P(stitchDatasets, affine, testing::Combine(AFFINE_DATASETS, TEST_DETEC
Mat pano;
vector<Mat> imgs;
int width, height, allowed_diff = 20;
Ptr<detail::FeaturesFinder> featuresFinder = getFeatureFinder(detector);
Ptr<Feature2D> featuresFinder = getFeatureFinder(detector);
if(dataset == "budapest")
{
@@ -116,6 +116,10 @@ PERF_TEST_P(stitchDatasets, affine, testing::Combine(AFFINE_DATASETS, TEST_DETEC
// this dataset is big, the results between surf and orb differ slightly,
// but both are still good
allowed_diff = 50;
// we need to boost ORB number of features to be able to stitch this dataset
// SURF works just fine with default settings
if(detector == "orb")
featuresFinder = ORB::create(1500);
}
else if (dataset == "newspaper")
{
@@ -128,7 +132,7 @@ PERF_TEST_P(stitchDatasets, affine, testing::Combine(AFFINE_DATASETS, TEST_DETEC
// we need to boost ORB number of features to be able to stitch this dataset
// SURF works just fine with default settings
if(detector == "orb")
featuresFinder = makePtr<detail::OrbFeaturesFinder>(Size(3,1), 3000);
featuresFinder = ORB::create(3000);
}
else if (dataset == "prague")
{
@@ -149,7 +153,7 @@ PERF_TEST_P(stitchDatasets, affine, testing::Combine(AFFINE_DATASETS, TEST_DETEC
while(next())
{
Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::SCANS, false);
Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::SCANS);
stitcher->setFeaturesFinder(featuresFinder);
if (cv::ocl::useOpenCL())
@@ -48,16 +48,6 @@ using namespace cv;
using namespace cv::detail;
using namespace cv::cuda;
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d.hpp"
using xfeatures2d::SURF;
using xfeatures2d::SIFT;
#else
# if defined(_MSC_VER)
# pragma warning(disable:4702) // unreachable code
# endif
#endif
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
@@ -121,35 +111,6 @@ private:
};
struct FindFeaturesBody : ParallelLoopBody
{
FindFeaturesBody(FeaturesFinder &finder, InputArrayOfArrays images,
std::vector<ImageFeatures> &features, const std::vector<std::vector<cv::Rect> > *rois)
: finder_(finder), images_(images), features_(features), rois_(rois) {}
void operator ()(const Range &r) const CV_OVERRIDE
{
for (int i = r.start; i < r.end; ++i)
{
Mat image = images_.getMat(i);
if (rois_)
finder_(image, features_[i], (*rois_)[i]);
else
finder_(image, features_[i]);
}
}
private:
FeaturesFinder &finder_;
InputArrayOfArrays images_;
std::vector<ImageFeatures> &features_;
const std::vector<std::vector<cv::Rect> > *rois_;
// to cease visual studio warning
void operator =(const FindFeaturesBody&);
};
//////////////////////////////////////////////////////////////////////////////
typedef std::set<std::pair<int,int> > MatchesSet;
@@ -318,339 +279,40 @@ void GpuMatcher::collectGarbage()
namespace cv {
namespace detail {
void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features)
void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArrayOfArrays images,
std::vector<ImageFeatures> &features,
InputArrayOfArrays masks)
{
find(image, features);
features.img_size = image.size();
}
// compute all features
std::vector<std::vector<KeyPoint>> keypoints;
std::vector<UMat> descriptors;
// TODO: replace with a single call to a new overload of detectAndCompute
featuresFinder->detect(images, keypoints, masks);
featuresFinder->compute(images, keypoints, descriptors);
void FeaturesFinder::operator ()(InputArray image, ImageFeatures &features, const std::vector<Rect> &rois)
{
std::vector<ImageFeatures> roi_features(rois.size());
size_t total_kps_count = 0;
int total_descriptors_height = 0;
for (size_t i = 0; i < rois.size(); ++i)
{
find(image.getUMat()(rois[i]), roi_features[i]);
total_kps_count += roi_features[i].keypoints.size();
total_descriptors_height += roi_features[i].descriptors.rows;
}
features.img_size = image.size();
features.keypoints.resize(total_kps_count);
features.descriptors.create(total_descriptors_height,
roi_features[0].descriptors.cols,
roi_features[0].descriptors.type());
int kp_idx = 0;
int descr_offset = 0;
for (size_t i = 0; i < rois.size(); ++i)
{
for (size_t j = 0; j < roi_features[i].keypoints.size(); ++j, ++kp_idx)
{
features.keypoints[kp_idx] = roi_features[i].keypoints[j];
features.keypoints[kp_idx].pt.x += (float)rois[i].x;
features.keypoints[kp_idx].pt.y += (float)rois[i].y;
}
UMat subdescr = features.descriptors.rowRange(
descr_offset, descr_offset + roi_features[i].descriptors.rows);
roi_features[i].descriptors.copyTo(subdescr);
descr_offset += roi_features[i].descriptors.rows;
}
}
void FeaturesFinder::operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features)
{
// store to ImageFeatures
size_t count = images.total();
features.resize(count);
FindFeaturesBody body(*this, images, features, NULL);
if (isThreadSafe())
parallel_for_(Range(0, static_cast<int>(count)), body);
else
body(Range(0, static_cast<int>(count)));
}
void FeaturesFinder::operator ()(InputArrayOfArrays images, std::vector<ImageFeatures> &features,
const std::vector<std::vector<cv::Rect> > &rois)
{
CV_Assert(rois.size() == images.total());
size_t count = images.total();
features.resize(count);
FindFeaturesBody body(*this, images, features, &rois);
if (isThreadSafe())
parallel_for_(Range(0, static_cast<int>(count)), body);
else
body(Range(0, static_cast<int>(count)));
}
bool FeaturesFinder::isThreadSafe() const
{
#ifdef HAVE_OPENCL
if (ocl::isOpenCLActivated())
{
return false;
}
#endif
if (dynamic_cast<const SurfFeaturesFinder*>(this))
{
return true;
}
else if (dynamic_cast<const OrbFeaturesFinder*>(this))
{
return true;
}
else
{
return false;
}
}
SurfFeaturesFinder::SurfFeaturesFinder(double hess_thresh, int num_octaves, int num_layers,
int num_octaves_descr, int num_layers_descr)
{
#ifdef HAVE_OPENCV_XFEATURES2D
if (num_octaves_descr == num_octaves && num_layers_descr == num_layers)
{
Ptr<SURF> surf_ = SURF::create();
if( !surf_ )
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
surf_->setHessianThreshold(hess_thresh);
surf_->setNOctaves(num_octaves);
surf_->setNOctaveLayers(num_layers);
surf = surf_;
}
else
{
Ptr<SURF> sdetector_ = SURF::create();
Ptr<SURF> sextractor_ = SURF::create();
if( !sdetector_ || !sextractor_ )
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
sdetector_->setHessianThreshold(hess_thresh);
sdetector_->setNOctaves(num_octaves);
sdetector_->setNOctaveLayers(num_layers);
sextractor_->setNOctaves(num_octaves_descr);
sextractor_->setNOctaveLayers(num_layers_descr);
detector_ = sdetector_;
extractor_ = sextractor_;
}
#else
CV_UNUSED(hess_thresh);
CV_UNUSED(num_octaves);
CV_UNUSED(num_layers);
CV_UNUSED(num_octaves_descr);
CV_UNUSED(num_layers_descr);
CV_Error( Error::StsNotImplemented, "OpenCV was built without SURF support" );
#endif
}
void SurfFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
UMat gray_image;
CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));
if(image.type() == CV_8UC3)
{
cvtColor(image, gray_image, COLOR_BGR2GRAY);
}
else
{
gray_image = image.getUMat();
}
if (!surf)
CV_Assert(count == keypoints.size() && count == descriptors.size());
for (size_t i = 0; i < count; ++i)
{
detector_->detect(gray_image, features.keypoints);
extractor_->compute(gray_image, features.keypoints, features.descriptors);
}
else
{
UMat descriptors;
surf->detectAndCompute(gray_image, Mat(), features.keypoints, descriptors);
features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
features[i].img_size = images.size(int(i));
features[i].keypoints = std::move(keypoints[i]);
features[i].descriptors = std::move(descriptors[i]);
}
}
SiftFeaturesFinder::SiftFeaturesFinder()
void computeImageFeatures(
const Ptr<Feature2D> &featuresFinder,
InputArray image,
ImageFeatures &features,
InputArray mask)
{
#ifdef HAVE_OPENCV_XFEATURES2D
Ptr<SIFT> sift_ = SIFT::create();
if( !sift_ )
CV_Error( Error::StsNotImplemented, "OpenCV was built without SIFT support" );
sift = sift_;
#else
CV_Error( Error::StsNotImplemented, "OpenCV was built without SIFT support" );
#endif
}
void SiftFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
UMat gray_image;
CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));
if(image.type() == CV_8UC3)
{
cvtColor(image, gray_image, COLOR_BGR2GRAY);
}
else
{
gray_image = image.getUMat();
}
UMat descriptors;
sift->detectAndCompute(gray_image, Mat(), features.keypoints, descriptors);
features.descriptors = descriptors.reshape(1, (int)features.keypoints.size());
}
OrbFeaturesFinder::OrbFeaturesFinder(Size _grid_size, int n_features, float scaleFactor, int nlevels)
{
grid_size = _grid_size;
orb = ORB::create(n_features * (99 + grid_size.area())/100/grid_size.area(), scaleFactor, nlevels);
}
void OrbFeaturesFinder::find(InputArray image, ImageFeatures &features)
{
UMat gray_image;
CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC4) || (image.type() == CV_8UC1));
if (image.type() == CV_8UC3) {
cvtColor(image, gray_image, COLOR_BGR2GRAY);
} else if (image.type() == CV_8UC4) {
cvtColor(image, gray_image, COLOR_BGRA2GRAY);
} else if (image.type() == CV_8UC1) {
gray_image = image.getUMat();
} else {
CV_Error(Error::StsUnsupportedFormat, "");
}
if (grid_size.area() == 1)
orb->detectAndCompute(gray_image, Mat(), features.keypoints, features.descriptors);
else
{
features.keypoints.clear();
features.descriptors.release();
std::vector<KeyPoint> points;
Mat _descriptors;
UMat descriptors;
for (int r = 0; r < grid_size.height; ++r)
for (int c = 0; c < grid_size.width; ++c)
{
int xl = c * gray_image.cols / grid_size.width;
int yl = r * gray_image.rows / grid_size.height;
int xr = (c+1) * gray_image.cols / grid_size.width;
int yr = (r+1) * gray_image.rows / grid_size.height;
// LOGLN("OrbFeaturesFinder::find: gray_image.empty=" << (gray_image.empty()?"true":"false") << ", "
// << " gray_image.size()=(" << gray_image.size().width << "x" << gray_image.size().height << "), "
// << " yl=" << yl << ", yr=" << yr << ", "
// << " xl=" << xl << ", xr=" << xr << ", gray_image.data=" << ((size_t)gray_image.data) << ", "
// << "gray_image.dims=" << gray_image.dims << "\n");
UMat gray_image_part=gray_image(Range(yl, yr), Range(xl, xr));
// LOGLN("OrbFeaturesFinder::find: gray_image_part.empty=" << (gray_image_part.empty()?"true":"false") << ", "
// << " gray_image_part.size()=(" << gray_image_part.size().width << "x" << gray_image_part.size().height << "), "
// << " gray_image_part.dims=" << gray_image_part.dims << ", "
// << " gray_image_part.data=" << ((size_t)gray_image_part.data) << "\n");
orb->detectAndCompute(gray_image_part, UMat(), points, descriptors);
features.keypoints.reserve(features.keypoints.size() + points.size());
for (std::vector<KeyPoint>::iterator kp = points.begin(); kp != points.end(); ++kp)
{
kp->pt.x += xl;
kp->pt.y += yl;
features.keypoints.push_back(*kp);
}
_descriptors.push_back(descriptors.getMat(ACCESS_READ));
}
// TODO optimize copyTo()
//features.descriptors = _descriptors.getUMat(ACCESS_READ);
_descriptors.copyTo(features.descriptors);
}
}
AKAZEFeaturesFinder::AKAZEFeaturesFinder(AKAZE::DescriptorType descriptor_type,
int descriptor_size,
int descriptor_channels,
float threshold,
int nOctaves,
int nOctaveLayers,
KAZE::DiffusivityType diffusivity)
{
akaze = AKAZE::create(descriptor_type, descriptor_size, descriptor_channels,
threshold, nOctaves, nOctaveLayers, diffusivity);
}
void AKAZEFeaturesFinder::find(InputArray image, detail::ImageFeatures &features)
{
CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1));
akaze->detectAndCompute(image, noArray(), features.keypoints, features.descriptors);
}
#ifdef HAVE_OPENCV_XFEATURES2D
SurfFeaturesFinderGpu::SurfFeaturesFinderGpu(double hess_thresh, int num_octaves, int num_layers,
int num_octaves_descr, int num_layers_descr)
{
surf_.keypointsRatio = 0.1f;
surf_.hessianThreshold = hess_thresh;
surf_.extended = false;
num_octaves_ = num_octaves;
num_layers_ = num_layers;
num_octaves_descr_ = num_octaves_descr;
num_layers_descr_ = num_layers_descr;
}
void SurfFeaturesFinderGpu::find(InputArray image, ImageFeatures &features)
{
CV_Assert(image.depth() == CV_8U);
ensureSizeIsEnough(image.size(), image.type(), image_);
image_.upload(image);
ensureSizeIsEnough(image.size(), CV_8UC1, gray_image_);
#ifdef HAVE_OPENCV_CUDAIMGPROC
cv::cuda::cvtColor(image_, gray_image_, COLOR_BGR2GRAY);
#else
cvtColor(image_, gray_image_, COLOR_BGR2GRAY);
#endif
surf_.nOctaves = num_octaves_;
surf_.nOctaveLayers = num_layers_;
surf_.upright = false;
surf_(gray_image_, GpuMat(), keypoints_);
surf_.nOctaves = num_octaves_descr_;
surf_.nOctaveLayers = num_layers_descr_;
surf_.upright = true;
surf_(gray_image_, GpuMat(), keypoints_, descriptors_, true);
surf_.downloadKeypoints(keypoints_, features.keypoints);
descriptors_.download(features.descriptors);
}
void SurfFeaturesFinderGpu::collectGarbage()
{
surf_.releaseMemory();
image_.release();
gray_image_.release();
keypoints_.release();
descriptors_.release();
features.img_size = image.size();
featuresFinder->detectAndCompute(image, mask, features.keypoints, features.descriptors);
}
#endif
//////////////////////////////////////////////////////////////////////////////
@@ -87,10 +87,6 @@
# include "opencv2/cudalegacy.hpp"
#endif
#ifdef HAVE_OPENCV_XFEATURES2D
# include "opencv2/xfeatures2d/cuda.hpp"
#endif
#include "opencv2/core/private.hpp"
#include "util_log.hpp"
@@ -44,67 +44,40 @@
namespace cv {
Stitcher Stitcher::createDefault(bool try_use_gpu)
Ptr<Stitcher> Stitcher::create(Mode mode)
{
Stitcher stitcher;
stitcher.setRegistrationResol(0.6);
stitcher.setSeamEstimationResol(0.1);
stitcher.setCompositingResol(ORIG_RESOL);
stitcher.setPanoConfidenceThresh(1);
stitcher.setWaveCorrection(true);
stitcher.setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
stitcher.setFeaturesMatcher(makePtr<detail::BestOf2NearestMatcher>(try_use_gpu));
stitcher.setBundleAdjuster(makePtr<detail::BundleAdjusterRay>());
#ifdef HAVE_OPENCV_CUDALEGACY
if (try_use_gpu && cuda::getCudaEnabledDeviceCount() > 0)
{
#ifdef HAVE_OPENCV_XFEATURES2D
stitcher.setFeaturesFinder(makePtr<detail::SurfFeaturesFinderGpu>());
#else
stitcher.setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
#endif
stitcher.setWarper(makePtr<SphericalWarperGpu>());
stitcher.setSeamFinder(makePtr<detail::GraphCutSeamFinderGpu>());
}
else
#endif
{
#ifdef HAVE_OPENCV_XFEATURES2D
stitcher.setFeaturesFinder(makePtr<detail::SurfFeaturesFinder>());
#else
stitcher.setFeaturesFinder(makePtr<detail::OrbFeaturesFinder>());
#endif
stitcher.setWarper(makePtr<SphericalWarper>());
stitcher.setSeamFinder(makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR));
}
Ptr<Stitcher> stitcher = makePtr<Stitcher>();
stitcher.setExposureCompensator(makePtr<detail::BlocksGainCompensator>());
stitcher.setBlender(makePtr<detail::MultiBandBlender>(try_use_gpu));
stitcher->setRegistrationResol(0.6);
stitcher->setSeamEstimationResol(0.1);
stitcher->setCompositingResol(ORIG_RESOL);
stitcher->setPanoConfidenceThresh(1);
stitcher->setSeamFinder(makePtr<detail::GraphCutSeamFinder>(detail::GraphCutSeamFinderBase::COST_COLOR));
stitcher->setBlender(makePtr<detail::MultiBandBlender>(false));
stitcher->setFeaturesFinder(ORB::create());
stitcher.work_scale_ = 1;
stitcher.seam_scale_ = 1;
stitcher.seam_work_aspect_ = 1;
stitcher.warped_image_scale_ = 1;
return stitcher;
}
Ptr<Stitcher> Stitcher::create(Mode mode, bool try_use_gpu)
{
Stitcher stit = createDefault(try_use_gpu);
Ptr<Stitcher> stitcher = makePtr<Stitcher>(stit);
stitcher->work_scale_ = 1;
stitcher->seam_scale_ = 1;
stitcher->seam_work_aspect_ = 1;
stitcher->warped_image_scale_ = 1;
switch (mode)
{
case PANORAMA: // PANORAMA is the default
// already setup
// mostly already setup
stitcher->setEstimator(makePtr<detail::HomographyBasedEstimator>());
stitcher->setWaveCorrection(true);
stitcher->setWaveCorrectKind(detail::WAVE_CORRECT_HORIZ);
stitcher->setFeaturesMatcher(makePtr<detail::BestOf2NearestMatcher>(false));
stitcher->setBundleAdjuster(makePtr<detail::BundleAdjusterRay>());
stitcher->setWarper(makePtr<SphericalWarper>());
stitcher->setExposureCompensator(makePtr<detail::BlocksGainCompensator>());
break;
case SCANS:
stitcher->setEstimator(makePtr<detail::AffineBasedEstimator>());
stitcher->setWaveCorrection(false);
stitcher->setFeaturesMatcher(makePtr<detail::AffineBestOf2NearestMatcher>(false, try_use_gpu));
stitcher->setFeaturesMatcher(makePtr<detail::AffineBestOf2NearestMatcher>(false, false));
stitcher->setBundleAdjuster(makePtr<detail::BundleAdjusterAffinePartial>());
stitcher->setWarper(makePtr<AffineWarper>());
stitcher->setExposureCompensator(makePtr<detail::NoExposureCompensator>());
@@ -119,20 +92,12 @@ Ptr<Stitcher> Stitcher::create(Mode mode, bool try_use_gpu)
}
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images)
{
CV_INSTRUMENT_REGION();
return estimateTransform(images, std::vector<std::vector<Rect> >());
}
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois)
Stitcher::Status Stitcher::estimateTransform(InputArrayOfArrays images, InputArrayOfArrays masks)
{
CV_INSTRUMENT_REGION();
images.getUMatVector(imgs_);
rois_ = rois;
masks.getUMatVector(masks_);
Status status;
@@ -407,20 +372,15 @@ Stitcher::Status Stitcher::composePanorama(InputArrayOfArrays images, OutputArra
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, OutputArray pano)
{
CV_INSTRUMENT_REGION();
Status status = estimateTransform(images);
if (status != OK)
return status;
return composePanorama(pano);
return stitch(images, noArray(), pano);
}
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, const std::vector<std::vector<Rect> > &rois, OutputArray pano)
Stitcher::Status Stitcher::stitch(InputArrayOfArrays images, InputArrayOfArrays masks, OutputArray pano)
{
CV_INSTRUMENT_REGION();
Status status = estimateTransform(images, rois);
Status status = estimateTransform(images, masks);
if (status != OK)
return status;
return composePanorama(pano);
@@ -440,7 +400,6 @@ Stitcher::Status Stitcher::matchImages()
seam_scale_ = 1;
bool is_work_scale_set = false;
bool is_seam_scale_set = false;
UMat full_img, img;
features_.resize(imgs_.size());
seam_est_imgs_.resize(imgs_.size());
full_img_sizes_.resize(imgs_.size());
@@ -451,16 +410,14 @@ Stitcher::Status Stitcher::matchImages()
#endif
std::vector<UMat> feature_find_imgs(imgs_.size());
std::vector<std::vector<Rect> > feature_find_rois(rois_.size());
std::vector<UMat> feature_find_masks(masks_.size());
for (size_t i = 0; i < imgs_.size(); ++i)
{
full_img = imgs_[i];
full_img_sizes_[i] = full_img.size();
full_img_sizes_[i] = imgs_[i].size();
if (registr_resol_ < 0)
{
img = full_img;
feature_find_imgs[i] = imgs_[i];
work_scale_ = 1;
is_work_scale_set = true;
}
@@ -468,50 +425,34 @@
{
if (!is_work_scale_set)
{
work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img.size().area()));
work_scale_ = std::min(1.0, std::sqrt(registr_resol_ * 1e6 / full_img_sizes_[i].area()));
is_work_scale_set = true;
}
resize(full_img, img, Size(), work_scale_, work_scale_, INTER_LINEAR_EXACT);
resize(imgs_[i], feature_find_imgs[i], Size(), work_scale_, work_scale_, INTER_LINEAR_EXACT);
}
if (!is_seam_scale_set)
{
seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img.size().area()));
seam_scale_ = std::min(1.0, std::sqrt(seam_est_resol_ * 1e6 / full_img_sizes_[i].area()));
seam_work_aspect_ = seam_scale_ / work_scale_;
is_seam_scale_set = true;
}
if (rois_.empty())
feature_find_imgs[i] = img;
else
{
feature_find_rois[i].resize(rois_[i].size());
for (size_t j = 0; j < rois_[i].size(); ++j)
if (!masks_.empty())
{
Point tl(cvRound(rois_[i][j].x * work_scale_), cvRound(rois_[i][j].y * work_scale_));
Point br(cvRound(rois_[i][j].br().x * work_scale_), cvRound(rois_[i][j].br().y * work_scale_));
feature_find_rois[i][j] = Rect(tl, br);
}
feature_find_imgs[i] = img;
resize(masks_[i], feature_find_masks[i], Size(), work_scale_, work_scale_, INTER_NEAREST);
}
features_[i].img_idx = (int)i;
LOGLN("Features in image #" << i+1 << ": " << features_[i].keypoints.size());
resize(full_img, img, Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
seam_est_imgs_[i] = img.clone();
resize(imgs_[i], seam_est_imgs_[i], Size(), seam_scale_, seam_scale_, INTER_LINEAR_EXACT);
}
// find features possibly in parallel
if (rois_.empty())
(*features_finder_)(feature_find_imgs, features_);
else
(*features_finder_)(feature_find_imgs, features_, feature_find_rois);
detail::computeImageFeatures(features_finder_, feature_find_imgs, features_, feature_find_masks);
// Do it to save memory
features_finder_->collectGarbage();
full_img.release();
img.release();
feature_find_imgs.clear();
feature_find_rois.clear();
feature_find_masks.clear();
LOGLN("Finding features, time: " << ((getTickCount() - t) / getTickFrequency()) << " sec");
@@ -550,16 +491,8 @@ Stitcher::Status Stitcher::matchImages()
Stitcher::Status Stitcher::estimateCameraParams()
{
/* TODO OpenCV ABI 4.x
get rid of this dynamic_cast hack and use estimator_
*/
Ptr<detail::Estimator> estimator;
if (dynamic_cast<detail::AffineBestOf2NearestMatcher*>(features_matcher_.get()))
estimator = makePtr<detail::AffineBasedEstimator>();
else
estimator = makePtr<detail::HomographyBasedEstimator>();
if (!(*estimator)(features_, pairwise_matches_, cameras_))
// estimate homography in global frame
if (!(*estimator_)(features_, pairwise_matches_, cameras_))
return ERR_HOMOGRAPHY_EST_FAIL;
for (size_t i = 0; i < cameras_.size(); ++i)
......@@ -602,17 +535,17 @@ Stitcher::Status Stitcher::estimateCameraParams()
}
Ptr<Stitcher> createStitcher(bool try_use_gpu)
CV_DEPRECATED Ptr<Stitcher> createStitcher(bool /*ignored*/)
{
CV_INSTRUMENT_REGION();
return Stitcher::create(Stitcher::PANORAMA, try_use_gpu);
return Stitcher::create(Stitcher::PANORAMA);
}
Ptr<Stitcher> createStitcherScans(bool try_use_gpu)
CV_DEPRECATED Ptr<Stitcher> createStitcherScans(bool /*ignored*/)
{
CV_INSTRUMENT_REGION();
return Stitcher::create(Stitcher::SCANS, try_use_gpu);
return Stitcher::create(Stitcher::SCANS);
}
} // namespace cv
@@ -48,54 +48,61 @@ namespace opencv_test { namespace {
TEST(SurfFeaturesFinder, CanFindInROIs)
{
Ptr<detail::FeaturesFinder> finder = makePtr<detail::SurfFeaturesFinder>();
Ptr<Feature2D> finder = xfeatures2d::SURF::create();
Mat img = imread(string(cvtest::TS::ptr()->get_data_path()) + "cv/shared/lena.png");
vector<Rect> rois;
rois.push_back(Rect(0, 0, img.cols / 2, img.rows / 2));
rois.push_back(Rect(img.cols / 2, img.rows / 2, img.cols - img.cols / 2, img.rows - img.rows / 2));
// construct mask
Mat mask = Mat::zeros(img.size(), CV_8U);
for (const Rect &roi : rois)
{
Mat(mask, roi) = 1;
}
detail::ImageFeatures roi_features;
(*finder)(img, roi_features, rois);
detail::computeImageFeatures(finder, img, roi_features, mask);
int tl_rect_count = 0, br_rect_count = 0, bad_count = 0;
for (size_t i = 0; i < roi_features.keypoints.size(); ++i)
for (const auto &keypoint : roi_features.keypoints)
{
Point2f pt = roi_features.keypoints[i].pt;
if (pt.x >= rois[0].x && pt.y >= rois[0].y && pt.x <= rois[0].br().x && pt.y <= rois[0].br().y)
if (rois[0].contains(keypoint.pt))
tl_rect_count++;
else if (pt.x >= rois[1].x && pt.y >= rois[1].y && pt.x <= rois[1].br().x && pt.y <= rois[1].br().y)
else if (rois[1].contains(keypoint.pt))
br_rect_count++;
else
bad_count++;
}
ASSERT_GT(tl_rect_count, 0);
ASSERT_GT(br_rect_count, 0);
ASSERT_EQ(bad_count, 0);
EXPECT_GT(tl_rect_count, 0);
EXPECT_GT(br_rect_count, 0);
EXPECT_EQ(bad_count, 0);
}
#endif // HAVE_OPENCV_XFEATURES2D
TEST(ParallelFeaturesFinder, IsSameWithSerial)
{
Ptr<detail::FeaturesFinder> para_finder = makePtr<detail::OrbFeaturesFinder>();
Ptr<detail::FeaturesFinder> serial_finder = makePtr<detail::OrbFeaturesFinder>();
Ptr<Feature2D> para_finder = ORB::create();
Ptr<Feature2D> serial_finder = ORB::create();
Mat img = imread(string(cvtest::TS::ptr()->get_data_path()) + "stitching/a3.png", IMREAD_GRAYSCALE);
vector<Mat> imgs(50, img);
detail::ImageFeatures serial_features;
vector<detail::ImageFeatures> para_features(imgs.size());
(*serial_finder)(img, serial_features);
(*para_finder)(imgs, para_features);
detail::computeImageFeatures(serial_finder, img, serial_features);
detail::computeImageFeatures(para_finder, imgs, para_features);
// results must be the same
for(size_t i = 0; i < para_features.size(); ++i)
{
Mat diff_descriptors = serial_features.descriptors.getMat(ACCESS_READ) != para_features[i].descriptors.getMat(ACCESS_READ);
ASSERT_EQ(countNonZero(diff_descriptors), 0);
ASSERT_EQ(serial_features.img_size, para_features[i].img_size);
ASSERT_EQ(serial_features.keypoints.size(), para_features[i].keypoints.size());
EXPECT_EQ(countNonZero(diff_descriptors), 0);
EXPECT_EQ(serial_features.img_size, para_features[i].img_size);
EXPECT_EQ(serial_features.keypoints.size(), para_features[i].keypoints.size());
}
}
@@ -9,4 +9,8 @@
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d/nonfree.hpp"
#endif
#endif
@@ -8,7 +8,6 @@
using namespace std;
using namespace cv;
bool try_use_gpu = false;
bool divide_images = false;
Stitcher::Mode mode = Stitcher::PANORAMA;
vector<Mat> imgs;
@@ -24,7 +23,7 @@ int main(int argc, char* argv[])
//![stitching]
Mat pano;
Ptr<Stitcher> stitcher = Stitcher::create(mode, try_use_gpu);
Ptr<Stitcher> stitcher = Stitcher::create(mode);
Stitcher::Status status = stitcher->stitch(imgs, pano);
if (status != Stitcher::OK)
@@ -47,9 +46,6 @@ void printUsage(char** argv)
"Flags:\n"
" --d3\n"
" internally creates three chunks of each image to increase stitching success\n"
" --try_use_gpu (yes|no)\n"
" Try to use GPU. The default value is 'no'. All default values\n"
" are for CPU mode.\n"
" --mode (panorama|scans)\n"
" Determines configuration of stitcher. The default is 'panorama',\n"
" mode suitable for creating photo panoramas. Option 'scans' is suitable\n"
@@ -75,19 +71,6 @@ int parseCmdArgs(int argc, char** argv)
printUsage(argv);
return EXIT_FAILURE;
}
else if (string(argv[i]) == "--try_use_gpu")
{
if (string(argv[i + 1]) == "no")
try_use_gpu = false;
else if (string(argv[i + 1]) == "yes")
try_use_gpu = true;
else
{
cout << "Bad --try_use_gpu flag value\n";
return EXIT_FAILURE;
}
i++;
}
else if (string(argv[i]) == "--d3")
{
divide_images = true;
@@ -17,6 +17,10 @@
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#ifdef HAVE_OPENCV_XFEATURES2D
#include "opencv2/xfeatures2d/nonfree.hpp"
#endif
#define ENABLE_LOG 1
#define LOG(msg) std::cout << msg
#define LOGLN(msg) std::cout << msg << std::endl
@@ -374,23 +378,20 @@ int main(int argc, char* argv[])
int64 t = getTickCount();
#endif
Ptr<FeaturesFinder> finder;
if (features_type == "surf")
Ptr<Feature2D> finder;
if (features_type == "orb")
{
#ifdef HAVE_OPENCV_XFEATURES2D
if (try_cuda && cuda::getCudaEnabledDeviceCount() > 0)
finder = makePtr<SurfFeaturesFinderGpu>();
else
#endif
finder = makePtr<SurfFeaturesFinder>();
finder = ORB::create();
}
else if (features_type == "orb")
#ifdef HAVE_OPENCV_XFEATURES2D
else if (features_type == "surf")
{
finder = makePtr<OrbFeaturesFinder>();
finder = xfeatures2d::SURF::create();
}
else if (features_type == "sift") {
finder = makePtr<SiftFeaturesFinder>();
finder = xfeatures2d::SIFT::create();
}
#endif
else
{
cout << "Unknown 2D features type: '" << features_type << "'.\n";
@@ -435,7 +436,7 @@ int main(int argc, char* argv[])
is_seam_scale_set = true;
}
(*finder)(img, features[i]);
computeImageFeatures(finder, img, features[i]);
features[i].img_idx = i;
LOGLN("Features in image #" << i+1 << ": " << features[i].keypoints.size());
@@ -443,7 +444,6 @@ int main(int argc, char* argv[])
images[i] = img.clone();
}
finder->collectGarbage();
full_img.release();
img.release();
#!/usr/bin/env python
'''
Stitching sample
================
Shows how to use the Stitcher API from Python in a simple way to stitch
panoramas or scans.
'''
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import sys
modes = (cv.Stitcher_PANORAMA, cv.Stitcher_SCANS)
parser = argparse.ArgumentParser(description='Stitching sample.')
parser.add_argument('--mode',
type = int, choices = modes, default = cv.Stitcher_PANORAMA,
help = 'Determines configuration of stitcher. The default is `PANORAMA` (%d), '
'mode suitable for creating photo panoramas. Option `SCANS` (%d) is suitable '
'for stitching materials under affine transformation, such as scans.' % modes)
parser.add_argument('--output', default = 'result.jpg',
help = 'Resulting image. The default is `result.jpg`.')
parser.add_argument('img', nargs='+', help = 'input images')
args = parser.parse_args()
# read input images
imgs = []
for img_name in args.img:
img = cv.imread(img_name)
if img is None:
print("can't read image " + img_name)
sys.exit(-1)
imgs.append(img)
stitcher = cv.Stitcher.create(args.mode)
status, pano = stitcher.stitch(imgs)
if status != cv.Stitcher_OK:
print("Can't stitch images, error code = %d" % status)
sys.exit(-1)
cv.imwrite(args.output, pano)
print("stitching completed successfully. %s saved!" % args.output)