Commit 472c2106 authored Nov 19, 2014 by Maksim Shabunin

Doxygen documentation: flann, photo and stitching modules

parent 8e9ea0e3

Showing 19 changed files with 663 additions and 47 deletions (+663 / -47)
Doxyfile.in              doc/Doxyfile.in                                                  +2    -1
flann.hpp                modules/flann/include/opencv2/flann.hpp                          +141  -2
photo.hpp                modules/photo/include/opencv2/photo.hpp                          +0    -0
cuda.hpp                 modules/photo/include/opencv2/photo/cuda.hpp                     +64   -5
photo_c.h                modules/photo/include/opencv2/photo/photo_c.h                    +5    -0
stitching.hpp            modules/stitching/include/opencv2/stitching.hpp                  +75   -1
autocalib.hpp            .../stitching/include/opencv2/stitching/detail/autocalib.hpp    +23   -2
blenders.hpp             ...s/stitching/include/opencv2/stitching/detail/blenders.hpp    +31   -5
camera.hpp               ...les/stitching/include/opencv2/stitching/detail/camera.hpp    +9    -0
exposure_compensate.hpp  .../include/opencv2/stitching/detail/exposure_compensate.hpp    +29   -3
matchers.hpp             ...s/stitching/include/opencv2/stitching/detail/matchers.hpp    +79   -7
motion_estimators.hpp    ...ng/include/opencv2/stitching/detail/motion_estimators.hpp    +70   -7
seam_finders.hpp         ...itching/include/opencv2/stitching/detail/seam_finders.hpp    +29   -5
timelapsers.hpp          ...titching/include/opencv2/stitching/detail/timelapsers.hpp    +5    -0
util.hpp                 modules/stitching/include/opencv2/stitching/detail/util.hpp     +5    -0
util_inl.hpp             ...s/stitching/include/opencv2/stitching/detail/util_inl.hpp    +4    -0
warpers.hpp              ...es/stitching/include/opencv2/stitching/detail/warpers.hpp    +74   -6
warpers_inl.hpp          ...titching/include/opencv2/stitching/detail/warpers_inl.hpp    +4    -0
warpers.hpp              modules/stitching/include/opencv2/stitching/warpers.hpp         +14   -3
doc/Doxyfile.in  View file @ 472c2106

...
@@ -241,7 +241,8 @@ PREDEFINED = __cplusplus=1 \
                           CV_INLINE= \
                           CV_NORETURN= \
                           CV_DEFAULT(x)=" = x" \
-                          CV_NEON=1
+                          CV_NEON=1 \
+                          FLANN_DEPRECATED=
 EXPAND_AS_DEFINED      =
 SKIP_FUNCTION_MACROS   = YES
 TAGFILES               =
...
modules/flann/include/opencv2/flann.hpp  View file @ 472c2106

...
@@ -47,6 +47,15 @@
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/flann/flann_base.hpp"

/**
@defgroup flann Clustering and Search in Multi-Dimensional Spaces

This section documents OpenCV's interface to the FLANN library. FLANN (Fast Library for Approximate
Nearest Neighbors) is a library that contains a collection of algorithms optimized for fast nearest
neighbor search in large datasets and for high dimensional features. More information about FLANN
can be found in @cite Muja2009 .
*/

namespace cvflann
{
    CV_EXPORTS flann_distance_t flann_distance_type();
...
@@ -59,6 +68,10 @@ namespace cv
namespace flann
{

//! @addtogroup flann
//! @{

template <typename T> struct CvType {};
template <> struct CvType<unsigned char> { static int type() { return CV_8U; } };
template <> struct CvType<char> { static int type() { return CV_8S; } };
...
@@ -88,7 +101,9 @@ using ::cvflann::ChiSquareDistance;
using ::cvflann::KL_Divergence;

/** @brief The FLANN nearest neighbor index class. This class is templated with the type of elements for which
the index is built.
*/
template <typename Distance>
class GenericIndex
{
...
@@ -96,10 +111,108 @@ public:
    typedef typename Distance::ElementType ElementType;
    typedef typename Distance::ResultType DistanceType;

    /** @brief Constructs a nearest neighbor search index for a given dataset.

    @param features Matrix containing the features (points) to index. The size of the matrix is
    num\_features x feature\_dimensionality and the data type of the elements in the matrix must
    coincide with the type of the index.
    @param params Structure containing the index parameters. The type of index that will be
    constructed depends on the type of this parameter. See the description.
    @param distance

    The method constructs a fast search structure from a set of features using the specified algorithm
    with specified parameters, as defined by params. params is a reference to one of the following class
    IndexParams descendants:

    - **LinearIndexParams** When passing an object of this type, the index will perform a linear,
    brute-force search. :
    @code
    struct LinearIndexParams : public IndexParams
    {
    };
    @endcode
    - **KDTreeIndexParams** When passing an object of this type the index constructed will consist of
    a set of randomized kd-trees which will be searched in parallel. :
    @code
    struct KDTreeIndexParams : public IndexParams
    {
        KDTreeIndexParams( int trees = 4 );
    };
    @endcode
    - **KMeansIndexParams** When passing an object of this type the index constructed will be a
    hierarchical k-means tree. :
    @code
    struct KMeansIndexParams : public IndexParams
    {
        KMeansIndexParams(
            int branching = 32,
            int iterations = 11,
            flann_centers_init_t centers_init = CENTERS_RANDOM,
            float cb_index = 0.2 );
    };
    @endcode
    - **CompositeIndexParams** When using a parameters object of this type the index created
    combines the randomized kd-trees and the hierarchical k-means tree. :
    @code
    struct CompositeIndexParams : public IndexParams
    {
        CompositeIndexParams(
            int trees = 4,
            int branching = 32,
            int iterations = 11,
            flann_centers_init_t centers_init = CENTERS_RANDOM,
            float cb_index = 0.2 );
    };
    @endcode
    - **LshIndexParams** When using a parameters object of this type the index created uses
    multi-probe LSH (by Multi-Probe LSH: Efficient Indexing for High-Dimensional Similarity Search
    by Qin Lv, William Josephson, Zhe Wang, Moses Charikar, Kai Li., Proceedings of the 33rd
    International Conference on Very Large Data Bases (VLDB). Vienna, Austria. September 2007) :
    @code
    struct LshIndexParams : public IndexParams
    {
        LshIndexParams(
            unsigned int table_number,
            unsigned int key_size,
            unsigned int multi_probe_level );
    };
    @endcode
    - **AutotunedIndexParams** When passing an object of this type the index created is
    automatically tuned to offer the best performance, by choosing the optimal index type
    (randomized kd-trees, hierarchical kmeans, linear) and parameters for the dataset provided. :
    @code
    struct AutotunedIndexParams : public IndexParams
    {
        AutotunedIndexParams(
            float target_precision = 0.9,
            float build_weight = 0.01,
            float memory_weight = 0,
            float sample_fraction = 0.1 );
    };
    @endcode
    - **SavedIndexParams** This object type is used for loading a previously saved index from the
    disk. :
    @code
    struct SavedIndexParams : public IndexParams
    {
        SavedIndexParams( String filename );
    };
    @endcode
    */
    GenericIndex(const Mat& features, const ::cvflann::IndexParams& params, Distance distance = Distance());

    ~GenericIndex();

    /** @brief Performs a K-nearest neighbor search for a given query point using the index.

    @param query The query point
    @param indices Vector that will contain the indices of the K-nearest neighbors found. It must have
    at least knn size.
    @param dists Vector that will contain the distances to the K-nearest neighbors found. It must have
    at least knn size.
    @param knn Number of nearest neighbors to search for.
    @param params SearchParams
    */
    void knnSearch(const std::vector<ElementType>& query, std::vector<int>& indices,
                   std::vector<DistanceType>& dists, int knn, const ::cvflann::SearchParams& params);
    void knnSearch(const Mat& queries, Mat& indices, Mat& dists, int knn, const ::cvflann::SearchParams& params);
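For orientation, a minimal usage sketch of the index class documented above (illustrative only, not part of this commit; `features` and `queries` are assumed to be CV_32F matrices with one point per row):

    // Build a randomized kd-tree index over the feature rows and query the 5 nearest neighbours.
    cv::flann::GenericIndex< cvflann::L2<float> > index(features, cvflann::KDTreeIndexParams(4));
    const int knn = 5;
    cv::Mat indices(queries.rows, knn, CV_32S);
    cv::Mat dists(queries.rows, knn, CV_32F);
    index.knnSearch(queries, indices, dists, knn, cvflann::SearchParams(32));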
...
@@ -123,6 +236,7 @@ private:
    ::cvflann::Index<Distance>* nnIndex;
};

//! @cond IGNORED
#define FLANN_DISTANCE_CHECK \
    if ( ::cvflann::flann_distance_type() != cvflann::FLANN_DIST_L2) { \
...
@@ -218,6 +332,8 @@ int GenericIndex<Distance>::radiusSearch(const Mat& query, Mat& indices, Mat& di
    return nnIndex->radiusSearch(m_query, m_indices, m_dists, radius, searchParams);
}
//! @endcond

/**
 * @deprecated Use GenericIndex class instead
 */
...
@@ -283,6 +399,8 @@ template <typename T>
class FLANN_DEPRECATED Index_;
#endif

//! @cond IGNORED
template <typename T>
Index_<T>::Index_(const Mat& dataset, const ::cvflann::IndexParams& params)
{
...
@@ -377,7 +495,25 @@ int Index_<T>::radiusSearch(const Mat& query, Mat& indices, Mat& dists, Distance
    if (nnIndex_L2) return nnIndex_L2->radiusSearch(m_query, m_indices, m_dists, radius, searchParams);
}
//! @endcond

/** @brief Clusters features using hierarchical k-means algorithm.

@param features The points to be clustered. The matrix must have elements of type
Distance::ElementType.
@param centers The centers of the clusters obtained. The matrix must have type
Distance::ResultType. The number of rows in this matrix represents the number of clusters desired,
however, because of the way the cut in the hierarchical tree is chosen, the number of clusters
computed will be the highest number of the form (branching-1)\*k+1 that's lower than the number of
clusters desired, where branching is the tree's branching factor (see description of the
KMeansIndexParams).
@param params Parameters used in the construction of the hierarchical k-means tree.
@param d Distance to be used for clustering.

The method clusters the given feature vectors by constructing a hierarchical k-means tree and
choosing a cut in the tree that minimizes the cluster's variance. It returns the number of clusters
found.
*/
template <typename Distance>
int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::KMeansIndexParams& params,
                           Distance d = Distance())
...
@@ -396,7 +532,8 @@ int hierarchicalClustering(const Mat& features, Mat& centers, const ::cvflann::K
    return ::cvflann::hierarchicalClustering<Distance>(m_features, m_centers, params, d);
}
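A quick sketch of how this function is called (illustrative only, not part of the commit; `features` is again a CV_32F matrix of points and the default KMeansIndexParams branching of 32 is assumed):

    // Cluster the feature rows into (at most) 100 centers using the default k-means tree parameters.
    cv::Mat centers(100, features.cols, CV_32F);
    int found = cv::flann::hierarchicalClustering< cvflann::L2<float> >(
        features, centers, cvflann::KMeansIndexParams());
    // `found` has the form (branching-1)*k+1 and may be smaller than the 100 clusters requested.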
/** @deprecated
*/
template <typename ELEM_TYPE, typename DIST_TYPE>
FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers,
                                            const ::cvflann::KMeansIndexParams& params)
{
...
@@ -417,6 +554,8 @@ FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, c
    }
}

//! @} flann

} } // namespace cv::flann

#endif
modules/photo/include/opencv2/photo.hpp  View file @ 472c2106

This diff is collapsed.
modules/photo/include/opencv2/photo/cuda.hpp  View file @ 472c2106

...
@@ -47,18 +47,75 @@
namespace cv { namespace cuda {

-//! Brute force non-local means algorith (slow but universal)
//! @addtogroup photo_denoise
//! @{

/** @brief Performs pure non local means denoising without any simplification, and thus it is not fast.

@param src Source image. Supports only CV\_8UC1, CV\_8UC2 and CV\_8UC3.
@param dst Destination image.
@param h Filter sigma regulating filter strength for color.
@param search\_window Size of search window.
@param block\_size Size of block used for computing weights.
@param borderMode Border type. See borderInterpolate for details. BORDER\_REFLECT101 ,
BORDER\_REPLICATE , BORDER\_CONSTANT , BORDER\_REFLECT and BORDER\_WRAP are supported for now.
@param s Stream for the asynchronous version.

@sa fastNlMeansDenoising
*/
CV_EXPORTS void nonLocalMeans(const GpuMat& src, GpuMat& dst, float h,
                              int search_window = 21, int block_size = 7,
                              int borderMode = BORDER_DEFAULT, Stream& s = Stream::Null());
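As an illustrative sketch of the API documented above (not part of this commit; assumes a CUDA-capable build and an 8-bit BGR `src` image):

    // Upload the image, run brute-force non-local means on the GPU, download the result.
    cv::cuda::GpuMat d_src, d_dst;
    d_src.upload(src);                               // src: cv::Mat of type CV_8UC3
    cv::cuda::nonLocalMeans(d_src, d_dst, 10.0f);    // h = 10, default window/block sizes
    cv::Mat dst;
    d_dst.download(dst);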
-//! Fast (but approximate)version of non-local means algorith similar to CPU function (running sums technique)
/** @brief The class implements fast approximate Non Local Means Denoising algorithm.
*/
class CV_EXPORTS FastNonLocalMeansDenoising
{
public:
-    //! Simple method, recommended for grayscale images (though it supports multichannel images)
    /** @brief Perform image denoising using Non-local Means Denoising algorithm
    <http://www.ipol.im/pub/algo/bcm_non_local_means_denoising> with several computational
    optimizations. Noise is expected to be Gaussian white noise.

    @param src Input 8-bit 1-channel, 2-channel or 3-channel image.
    @param dst Output image with the same size and type as src .
    @param h Parameter regulating filter strength. Big h value perfectly removes noise but also
    removes image details, smaller h value preserves details but also preserves some noise
    @param search\_window Size in pixels of the window that is used to compute weighted average for
    given pixel. Should be odd. Affects performance linearly: greater search\_window - greater
    denoising time. Recommended value 21 pixels
    @param block\_size Size in pixels of the template patch that is used to compute weights. Should be
    odd. Recommended value 7 pixels
    @param s Stream for the asynchronous invocations.

    This function is expected to be applied to grayscale images. For colored images look at
    FastNonLocalMeansDenoising::labMethod.

    @sa fastNlMeansDenoising
    */
    void simpleMethod(const GpuMat& src, GpuMat& dst, float h,
                      int search_window = 21, int block_size = 7, Stream& s = Stream::Null());

-    //! Processes luminance and color components separatelly
-    void labMethod(const GpuMat& src, GpuMat& dst, float h_luminance, float h_color,
-                   int search_window = 21, int block_size = 7, Stream& s = Stream::Null());
    /** @brief Modification of FastNonLocalMeansDenoising::simpleMethod for color images

    @param src Input 8-bit 3-channel image.
    @param dst Output image with the same size and type as src .
    @param h\_luminance Parameter regulating filter strength. Big h value perfectly removes noise but
    also removes image details, smaller h value preserves details but also preserves some noise
    @param photo_render float The same as h but for color components. For most images a value of 10 will be
    enough to remove colored noise and not distort colors
    @param search\_window Size in pixels of the window that is used to compute weighted average for
    given pixel. Should be odd. Affects performance linearly: greater search\_window - greater
    denoising time. Recommended value 21 pixels
    @param block\_size Size in pixels of the template patch that is used to compute weights. Should be
    odd. Recommended value 7 pixels
    @param s Stream for the asynchronous invocations.

    The function converts image to CIELAB colorspace and then separately denoises L and AB components
    with given h parameters using FastNonLocalMeansDenoising::simpleMethod function.

    @sa fastNlMeansDenoisingColored
    */
+    void labMethod(const GpuMat& src, GpuMat& dst, float h_luminance, float photo_render,
+                   int search_window = 21, int block_size = 7, Stream& s = Stream::Null());

private:
...
@@ -66,6 +123,8 @@ private:
    GpuMat lab, l, ab;
};
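A minimal usage sketch of this class as documented above (illustrative only, not part of the commit):

    // Grayscale denoising on the GPU with the fast approximate algorithm.
    cv::cuda::FastNonLocalMeansDenoising denoiser;
    cv::cuda::GpuMat d_gray, d_denoised;
    d_gray.upload(gray);                      // gray: CV_8UC1 cv::Mat
    denoiser.simpleMethod(d_gray, d_denoised, 10.0f);
    // For an 8-bit BGR input, labMethod(d_bgr, d_out, 10.0f, 10.0f) denoises L and AB channels separately.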
//! @} photo

}} // namespace cv { namespace cuda {

#endif /* __OPENCV_PHOTO_CUDA_HPP__ */
modules/photo/include/opencv2/photo/photo_c.h  View file @ 472c2106

...
@@ -49,6 +49,10 @@
extern "C" {
#endif

/** @addtogroup photo_c
@{
*/

/* Inpainting algorithms */
enum
{
...
@@ -61,6 +65,7 @@ enum
CVAPI(void) cvInpaint( const CvArr* src, const CvArr* inpaint_mask,
                       CvArr* dst, double inpaintRange, int flags );
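A brief usage sketch of this legacy C interface (illustrative only, not part of the commit; assumes 8-bit images loaded through the old C image I/O and the CV_INPAINT_TELEA flag declared earlier in this header):

    /* Reconstruct the region marked by a non-zero 8-bit mask using Telea's method. */
    IplImage* src  = cvLoadImage("damaged.png", CV_LOAD_IMAGE_COLOR);
    IplImage* mask = cvLoadImage("mask.png", CV_LOAD_IMAGE_GRAYSCALE);
    IplImage* dst  = cvCloneImage(src);
    cvInpaint(src, mask, dst, 3.0, CV_INPAINT_TELEA);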
/** @} */

#ifdef __cplusplus
} //extern "C"
...
modules/stitching/include/opencv2/stitching.hpp  View file @ 472c2106

...
@@ -53,8 +53,46 @@
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"

/**
@defgroup stitching Images stitching

This figure illustrates the stitching module pipeline implemented in the Stitcher class. Using that
class it's possible to configure/remove some steps, i.e. adjust the stitching pipeline according to
the particular needs. All building blocks from the pipeline are available in the detail namespace;
one can combine and use them separately.

The implemented stitching pipeline is very similar to the one proposed in @cite BL07.

![image](StitchingPipeline.jpg)

@{
    @defgroup stitching_match Features Finding and Images Matching
    @defgroup stitching_rotation Rotation Estimation
    @defgroup stitching_autocalib Autocalibration
    @defgroup stitching_warp Images Warping
    @defgroup stitching_seam Seam Estimation
    @defgroup stitching_exposure Exposure Compensation
    @defgroup stitching_blend Image Blenders
@}
*/

namespace cv {

//! @addtogroup stitching
//! @{

/** @brief High level image stitcher.

It's possible to use this class without being aware of the entire stitching pipeline. However, to
achieve higher stitching stability and better quality of the final images, at least some familiarity
with the theory is recommended.

@note
    -   A basic example on image stitching can be found at
        opencv\_source\_code/samples/cpp/stitching.cpp
    -   A detailed example on image stitching can be found at
        opencv\_source\_code/samples/cpp/stitching\_detailed.cpp
*/
class CV_EXPORTS_W Stitcher
{
public:
...
@@ -68,7 +106,11 @@ public:
    };

    // Stitcher() {}
-    // Creates stitcher with default parameters
    /** @brief Creates a stitcher with the default parameters.

    @param try\_use\_gpu Flag indicating whether GPU should be used whenever it's possible.
    @return Stitcher class instance.
    */
    static Stitcher createDefault(bool try_use_gpu = false);

    CV_WRAP double registrationResol() const { return registr_resol_; }
...
@@ -128,13 +170,43 @@ public:
    const Ptr<detail::Blender> blender() const { return blender_; }
    void setBlender(Ptr<detail::Blender> b) { blender_ = b; }

    /** @overload */
    CV_WRAP Status estimateTransform(InputArrayOfArrays images);
    /** @brief These functions try to match the given images and to estimate rotations of each camera.

    @note Use the functions only if you're aware of the stitching pipeline, otherwise use
    Stitcher::stitch.

    @param images Input images.
    @param rois Region of interest rectangles.
    @return Status code.
    */
    Status estimateTransform(InputArrayOfArrays images, const std::vector<std::vector<Rect> >& rois);

    /** @overload */
    CV_WRAP Status composePanorama(OutputArray pano);
    /** @brief These functions try to compose the given images (or images stored internally from the other function
    calls) into the final pano under the assumption that the image transformations were estimated
    before.

    @note Use the functions only if you're aware of the stitching pipeline, otherwise use
    Stitcher::stitch.

    @param images Input images.
    @param pano Final pano.
    @return Status code.
    */
    Status composePanorama(InputArrayOfArrays images, OutputArray pano);

    /** @overload */
    CV_WRAP Status stitch(InputArrayOfArrays images, OutputArray pano);
    /** @brief These functions try to stitch the given images.

    @param images Input images.
    @param rois Region of interest rectangles.
    @param pano Final pano.
    @return Status code.
    */
    Status stitch(InputArrayOfArrays images, const std::vector<std::vector<Rect> >& rois, OutputArray pano);
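For orientation, a minimal end-to-end sketch of the high-level API documented above (illustrative only, not part of this commit):

    // Stitch a set of overlapping photos into a single panorama.
    std::vector<cv::Mat> imgs;                       // assumed: filled with overlapping input images
    cv::Mat pano;
    cv::Stitcher stitcher = cv::Stitcher::createDefault(false /* try_use_gpu */);
    cv::Stitcher::Status status = stitcher.stitch(imgs, pano);
    if (status != cv::Stitcher::OK)
        pano.release();                              // stitching failed; status holds the error code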
    std::vector<int> component() const { return indices_; }
...
@@ -178,6 +250,8 @@ private:
CV_EXPORTS_W Ptr<Stitcher> createStitcher(bool try_use_gpu = false);

//! @} stitching

} // namespace cv

#endif // __OPENCV_STITCHING_STITCHER_HPP__
modules/stitching/include/opencv2/stitching/detail/autocalib.hpp  View file @ 472c2106

...
@@ -49,16 +49,37 @@
namespace cv {
namespace detail {

-// See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
-// by Heung-Yeung Shum and Richard Szeliski.
//! @addtogroup stitching_autocalib
//! @{

/** @brief Tries to estimate focal lengths from the given homography under the assumption that the camera
undergoes rotations around its centre only.

@param H Homography.
@param f0 Estimated focal length along X axis.
@param f1 Estimated focal length along Y axis.
@param f0\_ok True, if f0 was estimated successfully, false otherwise.
@param f1\_ok True, if f1 was estimated successfully, false otherwise.

See "Construction of Panoramic Image Mosaics with Global and Local Alignment"
by Heung-Yeung Shum and Richard Szeliski.
*/
void CV_EXPORTS focalsFromHomography(const Mat& H, double& f0, double& f1, bool& f0_ok, bool& f1_ok);
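A short usage sketch (illustrative, not part of the commit; `H` is a 3x3 homography between two images taken by a rotating camera):

    double f0 = 0, f1 = 0;
    bool f0_ok = false, f1_ok = false;
    cv::detail::focalsFromHomography(H, f0, f1, f0_ok, f1_ok);
    double focal = 0;
    if (f0_ok && f1_ok)
        focal = std::sqrt(f0 * f1);          // a common choice: geometric mean of the two estimates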
/** @brief Estimates focal lengths for each given camera.

@param features Features of images.
@param pairwise\_matches Matches between all image pairs.
@param focals Estimated focal lengths for each camera.
*/
void CV_EXPORTS estimateFocal(const std::vector<ImageFeatures>& features,
                              const std::vector<MatchesInfo>& pairwise_matches,
                              std::vector<double>& focals);

bool CV_EXPORTS calibrateRotatingCamera(const std::vector<Mat>& Hs, Mat& K);

//! @} stitching_autocalib

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/blenders.hpp  View file @ 472c2106

...
@@ -48,8 +48,13 @@
namespace cv {
namespace detail {

//! @addtogroup stitching_blend
//! @{

-// Simple blender which puts one image over another
/** @brief Base class for all blenders.

Simple blender which puts one image over another
*/
class CV_EXPORTS Blender
{
public:
...
@@ -58,9 +63,26 @@ public:
    enum { NO, FEATHER, MULTI_BAND };
    static Ptr<Blender> createDefault(int type, bool try_gpu = false);

    /** @brief Prepares the blender for blending.

    @param corners Source images top-left corners
    @param sizes Source image sizes
    */
    void prepare(const std::vector<Point>& corners, const std::vector<Size>& sizes);
    /** @overload */
    virtual void prepare(Rect dst_roi);
    /** @brief Processes the image.

    @param img Source image
    @param mask Source image mask
    @param tl Source image top-left corner
    */
    virtual void feed(InputArray img, InputArray mask, Point tl);
    /** @brief Blends and returns the final pano.

    @param dst Final pano
    @param dst\_mask Final pano mask
    */
    virtual void blend(InputOutputArray dst, InputOutputArray dst_mask);

protected:
...
@@ -68,7 +90,8 @@ protected:
    Rect dst_roi_;
};
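To see how these pieces fit together, a rough sketch of the blending workflow (illustrative only, not part of the commit; `corners`, `sizes`, the warped images and their masks come from earlier pipeline stages):

    // Feed warped images into a multi-band blender and produce the final panorama.
    cv::Ptr<cv::detail::Blender> blender =
        cv::detail::Blender::createDefault(cv::detail::Blender::MULTI_BAND, false);
    blender->prepare(corners, sizes);                  // top-left corners and sizes of the warped images
    for (size_t i = 0; i < images_warped.size(); ++i)
        blender->feed(images_warped[i], masks_warped[i], corners[i]);   // images converted to CV_16SC3
    cv::Mat result, result_mask;
    blender->blend(result, result_mask);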
/** @brief Simple blender which mixes images at its borders.
*/
class CV_EXPORTS FeatherBlender : public Blender
{
public:
...
@@ -81,8 +104,8 @@ public:
    void feed(InputArray img, InputArray mask, Point tl);
    void blend(InputOutputArray dst, InputOutputArray dst_mask);

-    // Creates weight maps for fixed set of source images by their masks and top-left corners.
-    // Final image can be obtained by simple weighting of the source images.
+    //! Creates weight maps for fixed set of source images by their masks and top-left corners.
+    //! Final image can be obtained by simple weighting of the source images.
    Rect createWeightMaps(const std::vector<UMat>& masks, const std::vector<Point>& corners,
                          std::vector<UMat>& weight_maps);
...
@@ -94,7 +117,8 @@ private:
inline FeatherBlender::FeatherBlender(float _sharpness) { setSharpness(_sharpness); }

/** @brief Blender which uses multi-band blending algorithm (see @cite BA83).
*/
class CV_EXPORTS MultiBandBlender : public Blender
{
public:
...
@@ -131,6 +155,8 @@ void CV_EXPORTS createLaplacePyrGpu(InputArray img, int num_levels, std::vector<
void CV_EXPORTS restoreImageFromLaplacePyr(std::vector<UMat>& pyr);
void CV_EXPORTS restoreImageFromLaplacePyrGpu(std::vector<UMat>& pyr);

//! @}

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/camera.hpp  View file @ 472c2106

...
@@ -48,6 +48,13 @@
namespace cv {
namespace detail {

//! @addtogroup stitching
//! @{

/** @brief Describes camera parameters.

@note Translation is assumed to be zero during the whole stitching pipeline. :
*/
struct CV_EXPORTS CameraParams
{
    CameraParams();
...
@@ -63,6 +70,8 @@ struct CV_EXPORTS CameraParams
    Mat t; // Translation
};

//! @}

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp  View file @ 472c2106

...
@@ -48,6 +48,11 @@
namespace cv {
namespace detail {

//! @addtogroup stitching_exposure
//! @{

/** @brief Base class for all exposure compensators.
*/
class CV_EXPORTS ExposureCompensator
{
public:
...
@@ -56,14 +61,29 @@ public:
    enum { NO, GAIN, GAIN_BLOCKS };
    static Ptr<ExposureCompensator> createDefault(int type);

    /**
    @param corners Source image top-left corners
    @param images Source images
    @param masks Image masks to update (second value in pair specifies the value which should be used
    to detect where image is)
    */
    void feed(const std::vector<Point>& corners, const std::vector<UMat>& images,
              const std::vector<UMat>& masks);
    /** @overload */
    virtual void feed(const std::vector<Point>& corners, const std::vector<UMat>& images,
                      const std::vector<std::pair<UMat, uchar> >& masks) = 0;
    /** @brief Compensate exposure in the specified image.

    @param index Image index
    @param corner Image top-left corner
    @param image Image to process
    @param mask Image mask
    */
    virtual void apply(int index, Point corner, InputOutputArray image, InputArray mask) = 0;
};
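A brief sketch of how an exposure compensator is typically driven (illustrative, not part of the commit; the warped images, masks and corners are std::vector<cv::UMat> / std::vector<cv::Point> produced by the warping step):

    // Estimate per-image gains, then fix each warped image in place before seam finding and blending.
    cv::Ptr<cv::detail::ExposureCompensator> compensator =
        cv::detail::ExposureCompensator::createDefault(cv::detail::ExposureCompensator::GAIN);
    compensator->feed(corners, images_warped, masks_warped);
    for (size_t i = 0; i < images_warped.size(); ++i)
        compensator->apply(int(i), corners[i], images_warped[i], masks_warped[i]);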
/** @brief Stub exposure compensator which does nothing.
*/
class CV_EXPORTS NoExposureCompensator : public ExposureCompensator
{
public:
...
@@ -72,7 +92,9 @@ public:
    void apply(int /*index*/, Point /*corner*/, InputOutputArray /*image*/, InputArray /*mask*/) { }
};

/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image
intensities, see @cite BL07 and @cite WJ10 for details.
*/
class CV_EXPORTS GainCompensator : public ExposureCompensator
{
public:
...
@@ -85,7 +107,9 @@ private:
    Mat_<double> gains_;
};

/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image block
intensities, see @cite UES01 for details.
*/
class CV_EXPORTS BlocksGainCompensator : public ExposureCompensator
{
public:
...
@@ -100,6 +124,8 @@ private:
    std::vector<UMat> gain_maps_;
};

//! @}

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/matchers.hpp  View file @ 472c2106

...
@@ -55,6 +55,10 @@
namespace cv {
namespace detail {

//! @addtogroup stitching_match
//! @{

/** @brief Structure containing image keypoints and descriptors. */
struct CV_EXPORTS ImageFeatures
{
    int img_idx;
...
@@ -63,20 +67,40 @@ struct CV_EXPORTS ImageFeatures
    UMat descriptors;
};

/** @brief Feature finders base class */
class CV_EXPORTS FeaturesFinder
{
public:
    virtual ~FeaturesFinder() {}
    /** @overload */
    void operator ()(InputArray image, ImageFeatures &features);
    /** @brief Finds features in the given image.

    @param image Source image
    @param features Found features
    @param rois Regions of interest

    @sa detail::ImageFeatures, Rect\_
    */
    void operator ()(InputArray image, ImageFeatures &features, const std::vector<cv::Rect> &rois);
    /** @brief Frees unused memory allocated before if there is any. */
    virtual void collectGarbage() {}

protected:
    /** @brief This method must implement features finding logic in order to make the wrappers
    detail::FeaturesFinder::operator()\_ work.

    @param image Source image
    @param features Found features

    @sa detail::ImageFeatures */
    virtual void find(InputArray image, ImageFeatures &features) = 0;
};

/** @brief SURF features finder.

@sa detail::FeaturesFinder, SURF
*/
class CV_EXPORTS SurfFeaturesFinder : public FeaturesFinder
{
public:
...
@@ -91,6 +115,10 @@ private:
    Ptr<Feature2D> surf;
};

/** @brief ORB features finder. :

@sa detail::FeaturesFinder, ORB
*/
class CV_EXPORTS OrbFeaturesFinder : public FeaturesFinder
{
public:
...
@@ -126,50 +154,92 @@ private:
};
#endif

/** @brief Structure containing information about matches between two images.

It's assumed that there is a homography between those images.
*/
struct CV_EXPORTS MatchesInfo
{
    MatchesInfo();
    MatchesInfo(const MatchesInfo &other);
    const MatchesInfo& operator =(const MatchesInfo &other);

-    int src_img_idx, dst_img_idx;       // Images indices (optional)
+    int src_img_idx, dst_img_idx;       //!< Images indices (optional)
    std::vector<DMatch> matches;
-    std::vector<uchar> inliers_mask;    // Geometrically consistent matches mask
-    int num_inliers;                    // Number of geometrically consistent matches
-    Mat H;                              // Estimated homography
-    double confidence;                  // Confidence two images are from the same panorama
+    std::vector<uchar> inliers_mask;    //!< Geometrically consistent matches mask
+    int num_inliers;                    //!< Number of geometrically consistent matches
+    Mat H;                              //!< Estimated homography
+    double confidence;                  //!< Confidence two images are from the same panorama
};

/** @brief Feature matchers base class. */
class CV_EXPORTS FeaturesMatcher
{
public:
    virtual ~FeaturesMatcher() {}

    /** @overload
    @param features1 First image features
    @param features2 Second image features
    @param matches\_info Found matches
    */
    void operator ()(const ImageFeatures &features1, const ImageFeatures &features2,
                     MatchesInfo &matches_info) { match(features1, features2, matches_info); }

    /** @brief Performs images matching.

    @param features Features of the source images
    @param pairwise\_matches Found pairwise matches
    @param mask Mask indicating which image pairs must be matched

    The function is parallelized with the TBB library.

    @sa detail::MatchesInfo
    */
    void operator ()(const std::vector<ImageFeatures> &features, std::vector<MatchesInfo> &pairwise_matches,
                     const cv::UMat &mask = cv::UMat());

    /** @return True, if it's possible to use the same matcher instance in parallel, false otherwise
    */
    bool isThreadSafe() const { return is_thread_safe_; }

    /** @brief Frees unused memory allocated before if there is any.
    */
    virtual void collectGarbage() {}

protected:
    FeaturesMatcher(bool is_thread_safe = false) : is_thread_safe_(is_thread_safe) {}

    /** @brief This method must implement matching logic in order to make the wrappers
    detail::FeaturesMatcher::operator()\_ work.

    @param features1 first image features
    @param features2 second image features
    @param matches\_info found matches
    */
    virtual void match(const ImageFeatures &features1, const ImageFeatures &features2,
                       MatchesInfo &matches_info) = 0;

    bool is_thread_safe_;
};

/** @brief Features matcher which finds two best matches for each feature and leaves the best one only if the
ratio between descriptor distances is greater than the threshold match\_conf

@sa detail::FeaturesMatcher
*/
class CV_EXPORTS BestOf2NearestMatcher : public FeaturesMatcher
{
public:
    /** @brief Constructs a "best of 2 nearest" matcher.

    @param try\_use\_gpu Should try to use GPU or not
    @param match\_conf Match distances ratio threshold
    @param num\_matches\_thresh1 Minimum number of matches required for the 2D projective transform
    estimation used in the inliers classification step
    @param num\_matches\_thresh2 Minimum number of matches required for the 2D projective transform
    re-estimation on inliers
    */
    BestOf2NearestMatcher(bool try_use_gpu = false, float match_conf = 0.3f,
                          int num_matches_thresh1 = 6, int num_matches_thresh2 = 6);
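Putting the finder and matcher together, a rough sketch of the matching step (illustrative only, not part of this commit; `imgs` is a std::vector<cv::Mat> of input images):

    // Detect ORB features in every image, then match all pairs with the ratio-test matcher.
    std::vector<cv::detail::ImageFeatures> features(imgs.size());
    cv::detail::OrbFeaturesFinder finder;
    for (size_t i = 0; i < imgs.size(); ++i)
    {
        finder(imgs[i], features[i]);
        features[i].img_idx = int(i);
    }
    finder.collectGarbage();

    std::vector<cv::detail::MatchesInfo> pairwise_matches;
    cv::detail::BestOf2NearestMatcher matcher(false, 0.3f);
    matcher(features, pairwise_matches);
    matcher.collectGarbage();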
...
@@ -197,6 +267,8 @@ protected:
    int range_width_;
};

//! @} stitching_match

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp  View file @ 472c2106

...
@@ -51,23 +51,50 @@
namespace cv {
namespace detail {

//! @addtogroup stitching_rotation
//! @{

/** @brief Rotation estimator base class.

It takes features of all images, pairwise matches between all images and estimates rotations of all
cameras.

@note The coordinate system origin is implementation-dependent, but you can always normalize the
rotations with respect to the first camera, for instance. :
*/
class CV_EXPORTS Estimator
{
public:
    virtual ~Estimator() {}

    /** @brief Estimates camera parameters.

    @param features Features of images
    @param pairwise\_matches Pairwise matches of images
    @param cameras Estimated camera parameters
    @return True in case of success, false otherwise
    */
    bool operator ()(const std::vector<ImageFeatures> &features,
                     const std::vector<MatchesInfo> &pairwise_matches,
                     std::vector<CameraParams> &cameras)
        { return estimate(features, pairwise_matches, cameras); }

protected:
    /** @brief This method must implement camera parameters estimation logic in order to make the wrapper
    detail::Estimator::operator()\_ work.

    @param features Features of images
    @param pairwise\_matches Pairwise matches of images
    @param cameras Estimated camera parameters
    @return True in case of success, false otherwise
    */
    virtual bool estimate(const std::vector<ImageFeatures> &features,
                          const std::vector<MatchesInfo> &pairwise_matches,
                          std::vector<CameraParams> &cameras) = 0;
};
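A condensed sketch of the rotation estimation stage built on these classes (illustrative only, not part of the commit; `features` and `pairwise_matches` come from the matching step):

    // Initial camera estimate from homographies, then refinement by ray-based bundle adjustment.
    cv::detail::HomographyBasedEstimator estimator;
    std::vector<cv::detail::CameraParams> cameras;
    bool ok = estimator(features, pairwise_matches, cameras);

    if (ok)
    {
        for (size_t i = 0; i < cameras.size(); ++i)
            cameras[i].R.convertTo(cameras[i].R, CV_32F);   // the adjusters expect CV_32F rotations

        cv::detail::BundleAdjusterRay adjuster;
        adjuster.setConfThresh(1.0);
        ok = adjuster(features, pairwise_matches, cameras);
    }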
/** @brief Homography based rotation estimator.
*/
class CV_EXPORTS HomographyBasedEstimator : public Estimator
{
public:
...
@@ -82,7 +109,8 @@ private:
    bool is_focals_estimated_;
};

/** @brief Base class for all camera parameters refinement methods.
*/
class CV_EXPORTS BundleAdjusterBase : public Estimator
{
public:
...
@@ -100,6 +128,11 @@ public:
    void setTermCriteria(const TermCriteria& term_criteria) { term_criteria_ = term_criteria; }

protected:
    /** @brief Construct a bundle adjuster base instance.

    @param num\_params\_per\_cam Number of parameters per camera
    @param num\_errs\_per\_measurement Number of error terms (components) per match
    */
    BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)
        : num_params_per_cam_(num_params_per_cam),
          num_errs_per_measurement_(num_errs_per_measurement)
...
@@ -114,9 +147,26 @@ protected:
                          const std::vector<MatchesInfo> &pairwise_matches,
                          std::vector<CameraParams> &cameras);

    /** @brief Sets initial camera parameters to refine.

    @param cameras Camera parameters
    */
    virtual void setUpInitialCameraParams(const std::vector<CameraParams> &cameras) = 0;
    /** @brief Gets the refined camera parameters.

    @param cameras Refined camera parameters
    */
    virtual void obtainRefinedCameraParams(std::vector<CameraParams> &cameras) const = 0;
    /** @brief Calculates error vector.

    @param err Error column-vector of length total\_num\_matches \* num\_errs\_per\_measurement
    */
    virtual void calcError(Mat &err) = 0;
    /** @brief Calculates the cost function jacobian.

    @param jac Jacobian matrix of dimensions
    (total\_num\_matches \* num\_errs\_per\_measurement) x (num\_images \* num\_params\_per\_cam)
    */
    virtual void calcJacobian(Mat &jac) = 0;

    // 3x3 8U mask, where 0 means don't refine respective parameter, != 0 means refine
...
@@ -145,9 +195,12 @@ protected:
};

-// Minimizes reprojection error.
-// It can estimate focal length, aspect ratio, principal point.
-// You can affect only on them via the refinement mask.
/** @brief Implementation of the camera parameters refinement algorithm which minimizes sum of the reprojection
error squares

It can estimate focal length, aspect ratio, principal point.
You can affect only them via the refinement mask.
*/
class CV_EXPORTS BundleAdjusterReproj : public BundleAdjusterBase
{
public:
...
@@ -163,8 +216,11 @@ private:
};

-// Minimizes sun of ray-to-ray distances.
-// It can estimate focal length. It ignores the refinement mask for now.
/** @brief Implementation of the camera parameters refinement algorithm which minimizes sum of the distances
between the rays passing through the camera center and a feature. :

It can estimate focal length. It ignores the refinement mask for now.
*/
class CV_EXPORTS BundleAdjusterRay : public BundleAdjusterBase
{
public:
...
@@ -186,6 +242,11 @@ enum WaveCorrectKind
    WAVE_CORRECT_VERT
};

/** @brief Tries to make panorama more horizontal (or vertical).

@param rmats Camera rotation matrices.
@param kind Correction kind, see detail::WaveCorrectKind.
*/
void CV_EXPORTS waveCorrect(std::vector<Mat> &rmats, WaveCorrectKind kind);
...
@@ -205,6 +266,8 @@ void CV_EXPORTS findMaxSpanningTree(
    int num_images, const std::vector<MatchesInfo> &pairwise_matches,
    Graph &span_tree, std::vector<int> &centers);

//! @} stitching_rotation

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/seam_finders.hpp  View file @ 472c2106

...
@@ -50,22 +50,35 @@
namespace cv {
namespace detail {

//! @addtogroup stitching_seam
//! @{

/** @brief Base class for a seam estimator.
*/
class CV_EXPORTS SeamFinder
{
public:
    virtual ~SeamFinder() {}
    /** @brief Estimates seams.

    @param src Source images
    @param corners Source image top-left corners
    @param masks Source image masks to update
    */
    virtual void find(const std::vector<UMat> &src, const std::vector<Point> &corners,
                      std::vector<UMat> &masks) = 0;
};
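As a quick usage sketch (illustrative, not part of the commit; `images_f` are the warped images converted to CV_32F, which the graph-cut finder works on, and the masks are updated in place):

    // Estimate seams between overlapping warped images.
    cv::Ptr<cv::detail::SeamFinder> seam_finder =
        cv::makePtr<cv::detail::GraphCutSeamFinder>(cv::detail::GraphCutSeamFinderBase::COST_COLOR);
    seam_finder->find(images_f, corners, masks_warped);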
/** @brief Stub seam estimator which does nothing.
*/
class CV_EXPORTS NoSeamFinder : public SeamFinder
{
public:
    void find(const std::vector<UMat>&, const std::vector<Point>&, std::vector<UMat>&) {}
};

/** @brief Base class for all pairwise seam estimators.
*/
class CV_EXPORTS PairwiseSeamFinder : public SeamFinder
{
public:
...
@@ -74,6 +87,12 @@ public:
protected:
    void run();
    /** @brief Resolves masks intersection of two specified images in the given ROI.

    @param first First image index
    @param second Second image index
    @param roi Region of interest
    */
    virtual void findInPair(size_t first, size_t second, Rect roi) = 0;

    std::vector<UMat> images_;
...
@@ -82,7 +101,8 @@ protected:
    std::vector<UMat> masks_;
};

/** @brief Voronoi diagram-based seam estimator.
*/
class CV_EXPORTS VoronoiSeamFinder : public PairwiseSeamFinder
{
public:
...
@@ -201,14 +221,16 @@ private:
    std::set<std::pair<int, int> > edges_;
};

/** @brief Base class for all minimum graph-cut-based seam estimators.
*/
class CV_EXPORTS GraphCutSeamFinderBase
{
public:
    enum CostType { COST_COLOR, COST_COLOR_GRAD };
};

/** @brief Minimum graph cut-based seam estimator. See details in @cite V03.
*/
class CV_EXPORTS GraphCutSeamFinder : public GraphCutSeamFinderBase, public SeamFinder
{
public:
...
@@ -253,6 +275,8 @@ private:
};
#endif

//! @}

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/timelapsers.hpp  View file @ 472c2106

...
@@ -49,6 +49,9 @@
namespace cv {
namespace detail {

//! @addtogroup stitching
//! @{

// Base Timelapser class, takes a sequence of images, applies appropriate shift, stores result in dst_.
class CV_EXPORTS Timelapser
...
@@ -80,6 +83,8 @@ public:
    virtual void initialize(const std::vector<Point> &corners, const std::vector<Size> &sizes);
};

//! @}

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/util.hpp  View file @ 472c2106

...
@@ -99,6 +99,9 @@
namespace cv {
namespace detail {

//! @addtogroup stitching
//! @{

class CV_EXPORTS DisjointSets
{
public:
...
@@ -158,6 +161,8 @@ CV_EXPORTS void selectRandomSubset(int count, int size, std::vector<int> &subset
CV_EXPORTS int& stitchingLogLevel();

//! @}

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/util_inl.hpp  View file @ 472c2106

...
@@ -47,6 +47,8 @@
#include "opencv2/core.hpp"
#include "util.hpp" // Make your IDE see declarations

//! @cond IGNORED

namespace cv {
namespace detail {
...
@@ -124,4 +126,6 @@ static inline double sqr(double x) { return x * x; }
} // namespace detail
} // namespace cv

//! @endcond

#endif // __OPENCV_STITCHING_UTIL_INL_HPP__
modules/stitching/include/opencv2/stitching/detail/warpers.hpp  View file @ 472c2106

...
@@ -51,28 +51,76 @@
namespace cv {
namespace detail {

//! @addtogroup stitching_warp
//! @{

/** @brief Rotation-only model image warper interface.
*/
class CV_EXPORTS RotationWarper
{
public:
    virtual ~RotationWarper() {}

    /** @brief Projects the image point.

    @param pt Source point
    @param K Camera intrinsic parameters
    @param R Camera rotation matrix
    @return Projected point
    */
    virtual Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) = 0;

    /** @brief Builds the projection maps according to the given camera data.

    @param src\_size Source image size
    @param K Camera intrinsic parameters
    @param R Camera rotation matrix
    @param xmap Projection map for the x axis
    @param ymap Projection map for the y axis
    @return Projected image minimum bounding box
    */
    virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) = 0;

    /** @brief Projects the image.

    @param src Source image
    @param K Camera intrinsic parameters
    @param R Camera rotation matrix
    @param interp\_mode Interpolation mode
    @param border\_mode Border extrapolation mode
    @param dst Projected image
    @return Projected image top-left corner
    */
    virtual Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
                       OutputArray dst) = 0;

    /** @brief Projects the image backward.

    @param src Projected image
    @param K Camera intrinsic parameters
    @param R Camera rotation matrix
    @param interp\_mode Interpolation mode
    @param border\_mode Border extrapolation mode
    @param dst\_size Backward-projected image size
    @param dst Backward-projected image
    */
    virtual void warpBackward(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode,
                              Size dst_size, OutputArray dst) = 0;

    /**
    @param src\_size Source image bounding box
    @param K Camera intrinsic parameters
    @param R Camera rotation matrix
    @return Projected image minimum bounding box
    */
    virtual Rect warpRoi(Size src_size, InputArray K, InputArray R) = 0;

    virtual float getScale() const { return 1.f; }
    virtual void setScale(float) {}
};
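A compact sketch of how a concrete warper implementing this interface is used (illustrative only, not part of the commit; `cameras[i]` is a detail::CameraParams from the estimation step and `imgs[i]` the corresponding image):

    // Warp one image onto the unit sphere using the camera's intrinsics K and rotation R.
    cv::detail::SphericalWarper warper(1000.0f);          // scale, roughly the focal length in pixels
    cv::Mat K, R32;
    cameras[i].K().convertTo(K, CV_32F);
    cameras[i].R.convertTo(R32, CV_32F);
    cv::Mat img_warped;
    cv::Point tl = warper.warp(imgs[i], K, R32, cv::INTER_LINEAR, cv::BORDER_REFLECT, img_warped);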
/** @brief Base class for warping logic implementation.
*/
struct CV_EXPORTS ProjectorBase
{
    void setCameraParams(InputArray K = Mat::eye(3, 3, CV_32F),
...
@@ -87,7 +135,8 @@ struct CV_EXPORTS ProjectorBase
    float t[3];
};

/** @brief Base class for rotation-based warper using a detail::ProjectorBase\_ derived class.
*/
template <class P>
class CV_EXPORTS RotationWarperBase : public RotationWarper
{
...
@@ -126,10 +175,15 @@ struct CV_EXPORTS PlaneProjector : ProjectorBase
    void mapBackward(float u, float v, float &x, float &y);
};

/** @brief Warper that maps an image onto the z = 1 plane.
*/
class CV_EXPORTS PlaneWarper : public RotationWarperBase<PlaneProjector>
{
public:
    /** @brief Construct an instance of the plane warper class.

    @param scale Projected image scale multiplier
    */
    PlaneWarper(float scale = 1.f) { projector_.scale = scale; }

    Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T);
...
@@ -154,11 +208,18 @@ struct CV_EXPORTS SphericalProjector : ProjectorBase
};

-// Projects image onto unit sphere with origin at (0, 0, 0).
-// Poles are located at (0, -1, 0) and (0, 1, 0) points.
/** @brief Warper that maps an image onto the unit sphere located at the origin.

Projects image onto unit sphere with origin at (0, 0, 0).
Poles are located at (0, -1, 0) and (0, 1, 0) points.
*/
class CV_EXPORTS SphericalWarper : public RotationWarperBase<SphericalProjector>
{
public:
    /** @brief Construct an instance of the spherical warper class.

    @param scale Projected image scale multiplier
    */
    SphericalWarper(float scale) { projector_.scale = scale; }

    Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
...
@@ -175,10 +236,15 @@ struct CV_EXPORTS CylindricalProjector : ProjectorBase
};

-// Projects image onto x * x + z * z = 1 cylinder
/** @brief Warper that maps an image onto the x\*x + z\*z = 1 cylinder.
*/
class CV_EXPORTS CylindricalWarper : public RotationWarperBase<CylindricalProjector>
{
public:
    /** @brief Construct an instance of the cylindrical warper class.

    @param scale Projected image scale multiplier
    */
    CylindricalWarper(float scale) { projector_.scale = scale; }

    Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap);
...
@@ -508,6 +574,8 @@ protected:
    }
};

//! @} stitching_warp

} // namespace detail
} // namespace cv
...
modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp  View file @ 472c2106

...
@@ -47,6 +47,8 @@
#include "warpers.hpp" // Make your IDE see declarations
#include <limits>

//! @cond IGNORED

namespace cv {
namespace detail {
...
@@ -767,4 +769,6 @@ void PlanePortraitProjector::mapBackward(float u0, float v0, float &x, float &y)
} // namespace detail
} // namespace cv

//! @endcond

#endif // __OPENCV_STITCHING_WARPERS_INL_HPP__
modules/stitching/include/opencv2/stitching/warpers.hpp  View file @ 472c2106

...
@@ -47,6 +47,11 @@
namespace cv {

//! @addtogroup stitching_warp
//! @{

/** @brief Image warper factories base class.
*/
class WarperCreator
{
public:
...
@@ -54,21 +59,25 @@ public:
    virtual Ptr<detail::RotationWarper> create(float scale) const = 0;
};

/** @brief Plane warper factory class.

@sa detail::PlaneWarper
*/
class PlaneWarper : public WarperCreator
{
public:
    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::PlaneWarper>(scale); }
};

/** @brief Cylindrical warper factory class.

@sa detail::CylindricalWarper
*/
class CylindricalWarper : public WarperCreator
{
public:
    Ptr<detail::RotationWarper> create(float scale) const { return makePtr<detail::CylindricalWarper>(scale); }
};
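These factories let the warper type be chosen at run time, for example from a command-line option. A small sketch (illustrative only, not part of the commit; `warp_type` is an assumed std::string option and `focal` an assumed focal length in pixels):

    // Pick a warper factory by name, then create the actual RotationWarper with the focal as scale.
    cv::Ptr<cv::WarperCreator> warper_creator;
    if (warp_type == "plane")
        warper_creator = cv::makePtr<cv::PlaneWarper>();
    else if (warp_type == "cylindrical")
        warper_creator = cv::makePtr<cv::CylindricalWarper>();
    else
        warper_creator = cv::makePtr<cv::SphericalWarper>();

    cv::Ptr<cv::detail::RotationWarper> warper = warper_creator->create(float(focal));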
/** @brief Spherical warper factory class */
class SphericalWarper : public WarperCreator
{
public:
...
@@ -167,6 +176,8 @@ public:
};
#endif

//! @} stitching_warp

} // namespace cv

#endif // __OPENCV_STITCHING_WARPER_CREATORS_HPP__