Commit bd1fd59f authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents 3f421223 de8eda5f
@@ -8,8 +8,19 @@ function Utils(errorOutputId) { // eslint-disable-line no-unused-vars
     script.setAttribute('async', '');
     script.setAttribute('type', 'text/javascript');
     script.addEventListener('load', () => {
+        if (cv.getBuildInformation)
+        {
             console.log(cv.getBuildInformation());
             onloadCallback();
+        }
+        else
+        {
+            // WASM
+            cv['onRuntimeInitialized']=()=>{
+                console.log(cv.getBuildInformation());
+                onloadCallback();
+            }
+        }
     });
     script.addEventListener('error', () => {
         self.printError('Failed to load ' + OPENCV_URL);
......
Create calibration pattern {#tutorial_camera_calibration_pattern}
=========================================

The goal of this tutorial is to learn how to create a calibration pattern.

You can find a chessboard pattern at https://github.com/opencv/opencv/blob/master/doc/pattern.png

You can find a circleboard pattern at https://github.com/opencv/opencv/blob/master/doc/acircles_pattern.png
Create your own pattern
---------------

If you want to create your own pattern, you will need Python to run https://github.com/opencv/opencv/blob/master/doc/pattern_tools/gen_pattern.py

Examples:

Create a checkerboard pattern in the file chessboard.svg with 9 rows, 6 columns and a square size of 20mm:

    python gen_pattern.py -o chessboard.svg --rows 9 --columns 6 --type checkerboard --square_size 20

Create a circle board pattern in the file circleboard.svg with 7 rows, 5 columns and a radius of 15mm:

    python gen_pattern.py -o circleboard.svg --rows 7 --columns 5 --type circles --square_size 15

Create a circle board pattern in the file acircleboard.svg with 7 rows, 5 columns, a square size of 10mm and less spacing between circles:

    python gen_pattern.py -o acircleboard.svg --rows 7 --columns 5 --type acircles --square_size 10 --radius_rate 2
If you want to change the measurement unit, use the -u option (mm, inches, px, m).

If you want to change the page size, use the -w and -h options (see the example below).
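For instance, a chessboard sized for an A4 page might be generated as follows (this command is only a sketch: the output name is made up, and it assumes -w and -h take the page width and height in the unit chosen with -u):

    python gen_pattern.py -o chessboard_a4.svg --rows 9 --columns 6 --type checkerboard --square_size 20 -u mm -w 210 -h 297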
If you want to create a ChArUco board, read the tutorial Detection of ChArUco Corners in opencv_contrib (https://docs.opencv.org/3.4/df/d4a/tutorial_charuco_detection.html).
\ No newline at end of file
@@ -3,6 +3,14 @@ Camera calibration and 3D reconstruction (calib3d module) {#tutorial_table_of_co
 Although we get most of our images in a 2D format they do come from a 3D world. Here you will learn how to find out 3D world information from 2D images.

+- @subpage tutorial_camera_calibration_pattern
+
+  *Compatibility:* \> OpenCV 2.0
+
+  *Author:* Laurent Berger
+
+  You will learn how to create a calibration pattern.
+
 - @subpage tutorial_camera_calibration_square_chess

   *Compatibility:* \> OpenCV 2.0
......
@@ -12,7 +12,7 @@ Tutorial was written for the following versions of corresponding software:
 - Download and install Android Studio from https://developer.android.com/studio.
-- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-3.4.5-android-sdk.zip`).
+- Get the latest pre-built OpenCV for Android release from https://github.com/opencv/opencv/releases and unpack it (for example, `opencv-4.1.0-android-sdk.zip`).
 - Download MobileNet object detection model from https://github.com/chuanqi305/MobileNet-SSD. We need a configuration file `MobileNetSSD_deploy.prototxt` and weights `MobileNetSSD_deploy.caffemodel`.
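The tutorial wires these two files up through the Java bindings on Android; purely as an illustration of how the dnn module consumes them, a desktop C++ sketch could look like this (the input file name and the preprocessing constants 0.007843, 300x300 and mean 127.5 are values commonly quoted for this MobileNet-SSD model, not something taken from this page):

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // Load the configuration and weights downloaded in the step above.
    cv::dnn::Net net = cv::dnn::readNetFromCaffe("MobileNetSSD_deploy.prototxt",
                                                 "MobileNetSSD_deploy.caffemodel");
    cv::Mat img = cv::imread("input.jpg");                       // hypothetical test image
    cv::Mat blob = cv::dnn::blobFromImage(img, 0.007843,         // scale factor
                                          cv::Size(300, 300),    // network input size
                                          cv::Scalar(127.5, 127.5, 127.5));
    net.setInput(blob);
    cv::Mat detections = net.forward();                          // 1x1xNx7 detection matrix
    return 0;
}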
......
@@ -104,8 +104,9 @@ void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints
     Vec4d k = _D.depth() == CV_32F ? (Vec4d)*_D.getMat().ptr<Vec4f>(): *_D.getMat().ptr<Vec4d>();
+    const bool isJacobianNeeded = jacobian.needed();
     JacobianRow *Jn = 0;
-    if (jacobian.needed())
+    if (isJacobianNeeded)
     {
         int nvars = 2 + 2 + 1 + 4 + 3 + 3; // f, c, alpha, k, om, T,
         jacobian.create(2*(int)n, nvars, CV_64F);
@@ -153,7 +154,7 @@ void cv::fisheye::projectPoints(InputArray objectPoints, OutputArray imagePoints
         else
             xpd[i] = final_point;
-        if (jacobian.needed())
+        if (isJacobianNeeded)
         {
             //Vec3d Xi = pdepth == CV_32F ? (Vec3d)Xf[i] : Xd[i];
             //Vec3d Y = aff*Xi;
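For context, here is a hedged sketch of a caller that exercises the jacobian branch guarded by isJacobianNeeded above (the intrinsics, distortion values and point coordinates are invented for illustration):

#include <opencv2/calib3d.hpp>
#include <vector>

int main()
{
    std::vector<cv::Point3d> objectPoints = { cv::Point3d(0.1, 0.2, 1.0),
                                              cv::Point3d(-0.3, 0.1, 2.0) };
    cv::Matx33d K(500, 0, 320,
                  0, 500, 240,
                  0,   0,   1);                      // illustrative camera matrix
    cv::Vec4d D(0.01, -0.02, 0.0, 0.0);              // illustrative fisheye distortion
    cv::Vec3d rvec(0, 0, 0), tvec(0, 0, 0);

    std::vector<cv::Point2d> imagePoints;
    cv::Mat jacobian;
    // Passing a real OutputArray for 'jacobian' makes jacobian.needed() return true,
    // which is the value the patch caches in isJacobianNeeded.
    cv::fisheye::projectPoints(objectPoints, imagePoints, rvec, tvec, K, D, 0.0, jacobian);
    return 0;
}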
......
@@ -4430,6 +4430,7 @@ public:
         : size_(size), originPtr_(ptr), alignment_(alignment), ptr_(ptr), allocatedPtr_(NULL)
     {
         CV_DbgAssert((alignment & (alignment - 1)) == 0); // check for 2^n
+        CV_DbgAssert(!readAccess || ptr);
         if (((size_t)ptr_ & (alignment - 1)) != 0)
         {
             allocatedPtr_ = new uchar[size_ + alignment - 1];
@@ -4483,6 +4484,7 @@ public:
         : size_(rows*step), originPtr_(ptr), alignment_(alignment), ptr_(ptr), allocatedPtr_(NULL), rows_(rows), cols_(cols), step_(step)
     {
         CV_DbgAssert((alignment & (alignment - 1)) == 0); // check for 2^n
+        CV_DbgAssert(!readAccess || ptr != NULL);
         if (ptr == 0 || ((size_t)ptr_ & (alignment - 1)) != 0)
         {
             allocatedPtr_ = new uchar[size_ + extrabytes + alignment - 1];
......
@@ -76,7 +76,7 @@ TEST(Imgcodecs_Tiff, write_read_16bit_big_little_endian)
         // Write sample TIFF file
        FILE* fp = fopen(filename.c_str(), "wb");
        ASSERT_TRUE(fp != NULL);
-       ASSERT_EQ((size_t)1, fwrite(tiff_sample_data, 86, 1, fp));
+       ASSERT_EQ((size_t)1, fwrite(tiff_sample_data[i], 86, 1, fp));
        fclose(fp);
        Mat img = imread(filename, IMREAD_UNCHANGED);
......
@@ -360,6 +360,8 @@ public:
     {
         CV_TRACE_FUNCTION();
+        CV_DbgAssert(cn > 0);
         Mat dx, dy;
         AutoBuffer<short> dxMax(0), dyMax(0);
         std::deque<uchar*> stack, borderPeaksLocal;
......
@@ -48,16 +48,35 @@
 /**
 @defgroup photo Computational Photography
+This module includes photo processing algorithms
 @{
+    @defgroup photo_inpaint Inpainting
     @defgroup photo_denoise Denoising
     @defgroup photo_hdr HDR imaging
 This section describes high dynamic range imaging algorithms namely tonemapping, exposure alignment,
 camera calibration with multiple exposures and exposure fusion.
+    @defgroup photo_decolor Contrast Preserving Decolorization
+Useful links:
+http://www.cse.cuhk.edu.hk/leojia/projects/color2gray/index.html
     @defgroup photo_clone Seamless Cloning
+Useful links:
+https://www.learnopencv.com/seamless-cloning-using-opencv-python-cpp
     @defgroup photo_render Non-Photorealistic Rendering
-    @defgroup photo_c C API
+Useful links:
+http://www.inf.ufrgs.br/~eslgastal/DomainTransform
+https://www.learnopencv.com/non-photorealistic-rendering-using-opencv-python-c/
 @}
 */
@@ -67,24 +86,13 @@ namespace cv
 //! @addtogroup photo
 //! @{
+//! @addtogroup photo_inpaint
+//! @{
 //! the inpainting algorithm
 enum
 {
-    INPAINT_NS = 0, // Navier-Stokes algorithm
+    INPAINT_NS = 0, //!< Use Navier-Stokes based method
-    INPAINT_TELEA = 1 // A. Telea algorithm
+    INPAINT_TELEA = 1 //!< Use the algorithm proposed by Alexandru Telea @cite Telea04
-};
-enum
-{
-    NORMAL_CLONE = 1,
-    MIXED_CLONE = 2,
-    MONOCHROME_TRANSFER = 3
-};
-enum
-{
-    RECURS_FILTER = 1,
-    NORMCONV_FILTER = 2
 };
 /** @brief Restores the selected region in an image using the region neighborhood.

@@ -95,9 +103,7 @@ needs to be inpainted.
 @param dst Output image with the same size and type as src .
 @param inpaintRadius Radius of a circular neighborhood of each point inpainted that is considered
 by the algorithm.
-@param flags Inpainting method that could be one of the following:
--   **INPAINT_NS** Navier-Stokes based method [Navier01]
--   **INPAINT_TELEA** Method by Alexandru Telea @cite Telea04 .
+@param flags Inpainting method that could be cv::INPAINT_NS or cv::INPAINT_TELEA

 The function reconstructs the selected image area from the pixel near the area boundary. The
 function may be used to remove dust and scratches from a scanned photo, or to remove undesirable

@@ -112,6 +118,8 @@ objects from still images or video. See <http://en.wikipedia.org/wiki/Inpainting
 CV_EXPORTS_W void inpaint( InputArray src, InputArray inpaintMask,
         OutputArray dst, double inpaintRadius, int flags );
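A minimal sketch of calling the function as documented (file names and the 3-pixel radius are illustrative, not taken from the commit):

#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src  = cv::imread("damaged.jpg");                      // hypothetical input photo
    cv::Mat mask = cv::imread("mask.png", cv::IMREAD_GRAYSCALE);   // non-zero where pixels must be restored
    cv::Mat dst;
    cv::inpaint(src, mask, dst, 3.0, cv::INPAINT_TELEA);           // 3-pixel neighborhood, Telea method
    cv::imwrite("restored.jpg", dst);
    return 0;
}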
+//! @} photo_inpaint

 //! @addtogroup photo_denoise
 //! @{

@@ -678,6 +686,9 @@ CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();
 //! @} photo_hdr

+//! @addtogroup photo_decolor
+//! @{

 /** @brief Transforms a color image to a grayscale image. It is a basic tool in digital printing, stylized
 black-and-white photograph rendering, and in many single channel image processing applications
 @cite CL12 .

@@ -690,9 +701,24 @@ This function is to be applied on color images.
 */
 CV_EXPORTS_W void decolor( InputArray src, OutputArray grayscale, OutputArray color_boost);
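As a sketch of how the decolorization call above is typically used (the input name is illustrative):

#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat color = cv::imread("photo.jpg");   // 8-bit 3-channel input
    cv::Mat gray, boost;
    cv::decolor(color, gray, boost);           // gray: contrast-preserving grayscale, boost: color-boosted image
    cv::imwrite("gray.png", gray);
    return 0;
}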
+//! @} photo_decolor

 //! @addtogroup photo_clone
 //! @{

+//! seamlessClone algorithm flags
+enum
+{
+    /** The power of the method is fully expressed when inserting objects with complex outlines into a new background */
+    NORMAL_CLONE = 1,
+    /** The classic method, color-based selection and alpha masking might be time consuming and often leaves an undesirable
+    halo. Seamless cloning, even averaged with the original image, is not effective. Mixed seamless cloning based on a loose selection proves effective. */
+    MIXED_CLONE = 2,
+    /** Monochrome transfer allows the user to easily replace certain features of one object by alternative features. */
+    MONOCHROME_TRANSFER = 3
+};

 /** @example samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp
 An example using seamlessClone function
 */
@@ -707,15 +733,7 @@ content @cite PM03 .
 @param mask Input 8-bit 1 or 3-channel image.
 @param p Point in dst image where object is placed.
 @param blend Output image with the same size and type as dst.
-@param flags Cloning method that could be one of the following:
--   **NORMAL_CLONE** The power of the method is fully expressed when inserting objects with
-    complex outlines into a new background
--   **MIXED_CLONE** The classic method, color-based selection and alpha masking might be time
-    consuming and often leaves an undesirable halo. Seamless cloning, even averaged with the
-    original image, is not effective. Mixed seamless cloning based on a loose selection proves
-    effective.
--   **MONOCHROME_TRANSFER** Monochrome transfer allows the user to easily replace certain features of
-    one object by alternative features.
+@param flags Cloning method that could be cv::NORMAL_CLONE, cv::MIXED_CLONE or cv::MONOCHROME_TRANSFER
 */
 CV_EXPORTS_W void seamlessClone( InputArray src, InputArray dst, InputArray mask, Point p,
         OutputArray blend, int flags);
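A short sketch of the call documented above (the image names and the insertion point are made up):

#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src  = cv::imread("airplane.jpg");        // object to insert
    cv::Mat dst  = cv::imread("sky.jpg");             // destination background
    cv::Mat mask = cv::imread("airplane_mask.png");   // white over the object, black elsewhere
    cv::Mat blend;
    cv::Point center(dst.cols / 2, dst.rows / 2);     // where the object is placed in dst
    cv::seamlessClone(src, dst, mask, center, blend, cv::NORMAL_CLONE);
    cv::imwrite("blended.jpg", blend);
    return 0;
}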
@@ -750,18 +768,16 @@ CV_EXPORTS_W void illuminationChange(InputArray src, InputArray mask, OutputArra
         float alpha = 0.2f, float beta = 0.4f);

 /** @brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
-washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge
-Detector is used.
+washes out the texture of the selected region, giving its contents a flat aspect. Here Canny Edge %Detector is used.

 @param src Input 8-bit 3-channel image.
 @param mask Input 8-bit 1 or 3-channel image.
 @param dst Output image with the same size and type as src.
-@param low_threshold Range from 0 to 100.
+@param low_threshold %Range from 0 to 100.
 @param high_threshold Value \> 100.
 @param kernel_size The size of the Sobel kernel to be used.

-**NOTE:**
+@note
 The algorithm assumes that the color of the source image is close to that of the destination. This
 assumption means that when the colors don't match, the source image color gets tinted toward the
 color of the destination image.
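For reference, a hedged sketch of a textureFlattening call using the parameters documented above (file names and threshold values are illustrative):

#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src  = cv::imread("object.jpg");
    cv::Mat mask = cv::imread("object_mask.png");      // selects the region whose texture is flattened
    cv::Mat dst;
    cv::textureFlattening(src, mask, dst, 30, 130, 3); // low_threshold in [0,100], high_threshold > 100, 3x3 Sobel
    cv::imwrite("flattened.jpg", dst);
    return 0;
}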
@@ -775,16 +791,21 @@ CV_EXPORTS_W void textureFlattening(InputArray src, InputArray mask, OutputArray
 //! @addtogroup photo_render
 //! @{

+//! Edge preserving filters
+enum
+{
+    RECURS_FILTER = 1, //!< Recursive Filtering
+    NORMCONV_FILTER = 2 //!< Normalized Convolution Filtering
+};

 /** @brief Filtering is the fundamental operation in image and video processing. Edge-preserving smoothing
 filters are used in many different applications @cite EM11 .

 @param src Input 8-bit 3-channel image.
 @param dst Output 8-bit 3-channel image.
-@param flags Edge preserving filters:
--   **RECURS_FILTER** = 1
--   **NORMCONV_FILTER** = 2
-@param sigma_s Range between 0 to 200.
-@param sigma_r Range between 0 to 1.
+@param flags Edge preserving filters: cv::RECURS_FILTER or cv::NORMCONV_FILTER
+@param sigma_s %Range between 0 to 200.
+@param sigma_r %Range between 0 to 1.
 */
 CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flags = 1,
         float sigma_s = 60, float sigma_r = 0.4f);
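A brief sketch of the call (the input name and the choice of cv::RECURS_FILTER are illustrative):

#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src = cv::imread("portrait.jpg");
    cv::Mat dst;
    cv::edgePreservingFilter(src, dst, cv::RECURS_FILTER, 60.0f, 0.4f);  // smooth while keeping edges
    cv::imwrite("smoothed.jpg", dst);
    return 0;
}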
@@ -793,8 +814,8 @@ CV_EXPORTS_W void edgePreservingFilter(InputArray src, OutputArray dst, int flag
 @param src Input 8-bit 3-channel image.
 @param dst Output image with the same size and type as src.
-@param sigma_s Range between 0 to 200.
-@param sigma_r Range between 0 to 1.
+@param sigma_s %Range between 0 to 200.
+@param sigma_r %Range between 0 to 1.
 */
 CV_EXPORTS_W void detailEnhance(InputArray src, OutputArray dst, float sigma_s = 10,
         float sigma_r = 0.15f);
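A similar sketch for detailEnhance, using the default sigmas (the input name is illustrative):

#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src = cv::imread("landscape.jpg");
    cv::Mat dst;
    cv::detailEnhance(src, dst, 10.0f, 0.15f);   // boosts fine detail while respecting edges
    cv::imwrite("detailed.jpg", dst);
    return 0;
}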
@@ -807,9 +828,9 @@ An example using non-photorealistic line drawing functions
 @param src Input 8-bit 3-channel image.
 @param dst1 Output 8-bit 1-channel image.
 @param dst2 Output image with the same size and type as src.
-@param sigma_s Range between 0 to 200.
-@param sigma_r Range between 0 to 1.
-@param shade_factor Range between 0 to 0.1.
+@param sigma_s %Range between 0 to 200.
+@param sigma_r %Range between 0 to 1.
+@param shade_factor %Range between 0 to 0.1.
 */
 CV_EXPORTS_W void pencilSketch(InputArray src, OutputArray dst1, OutputArray dst2,
         float sigma_s = 60, float sigma_r = 0.07f, float shade_factor = 0.02f);
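A sketch of pencilSketch as documented above (file names illustrative):

#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src = cv::imread("portrait.jpg");
    cv::Mat gray_sketch, color_sketch;
    cv::pencilSketch(src, gray_sketch, color_sketch, 60.0f, 0.07f, 0.02f);
    cv::imwrite("sketch_gray.png", gray_sketch);    // 1-channel pencil drawing
    cv::imwrite("sketch_color.png", color_sketch);  // color pencil drawing
    return 0;
}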
@@ -820,8 +841,8 @@ contrast while preserving, or enhancing, high-contrast features.
 @param src Input 8-bit 3-channel image.
 @param dst Output image with the same size and type as src.
-@param sigma_s Range between 0 to 200.
-@param sigma_r Range between 0 to 1.
+@param sigma_s %Range between 0 to 200.
+@param sigma_r %Range between 0 to 1.
 */
 CV_EXPORTS_W void stylization(InputArray src, OutputArray dst, float sigma_s = 60,
         float sigma_r = 0.45f);
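And a last sketch for stylization (the input name is illustrative):

#include <opencv2/photo.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat src = cv::imread("city.jpg");
    cv::Mat dst;
    cv::stylization(src, dst, 60.0f, 0.45f);   // flattens low-contrast regions, keeps strong edges
    cv::imwrite("stylized.jpg", dst);
    return 0;
}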
......
@@ -1803,9 +1803,11 @@ bool CvCaptureCAM_V4L::setProperty( int property_id, double _value )
         if (bool(value)) {
             convert_rgb = convertableToRgb();
             return convert_rgb;
-        }
+        }else{
             convert_rgb = false;
+            releaseFrame();
             return true;
+        }
     case cv::CAP_PROP_FOURCC:
     {
         if (palette == static_cast<__u32>(value))
......
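The change above means that turning CAP_PROP_CONVERT_RGB off now also releases the queued frame on the V4L backend. A hedged sketch of how that property is driven from user code (the device index is illustrative, and whether raw frames are actually delivered depends on the driver):

#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap(0, cv::CAP_V4L2);                 // open the first V4L2 camera
    bool ok = cap.set(cv::CAP_PROP_CONVERT_RGB, false);    // request raw, non-BGR frames
    cv::Mat frame;
    if (ok && cap.read(frame))
    {
        // 'frame' now holds the driver's native pixel format rather than converted BGR data.
    }
    return 0;
}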