Commit bf0075a5 authored by Alexander Alekhin's avatar Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents d442856f 2a32a5ad
//! [charucohdr]
#include <opencv2/aruco/charuco.hpp>
//! [charucohdr]
#include <opencv2/highgui.hpp>
#include <iostream>
#include <string>
namespace {
const char* about = "A tutorial code on ChArUco board creation and detection of a ChArUco board with and without camera calibration";
const char* keys = "{c | | Put value of c=1 to create charuco board;\nc=2 to detect charuco board without camera calibration;\nc=3 to detect charuco board with camera calibration and Pose Estimation}";
}
void createBoard();
void detectCharucoBoardWithCalibrationPose();
void detectCharucoBoardWithoutCalibration();
static bool readCameraParameters(std::string filename, cv::Mat& camMatrix, cv::Mat& distCoeffs)
{
cv::FileStorage fs(filename, cv::FileStorage::READ);
if (!fs.isOpened())
return false;
fs["camera_matrix"] >> camMatrix;
fs["distortion_coefficients"] >> distCoeffs;
return true;
}
void createBoard()
{
cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
//! [createBoard]
cv::Ptr<cv::aruco::CharucoBoard> board = cv::aruco::CharucoBoard::create(5, 7, 0.04f, 0.02f, dictionary);
cv::Mat boardImage;
board->draw(cv::Size(600, 500), boardImage, 10, 1);
//! [createBoard]
cv::imwrite("BoardImage.jpg", boardImage);
}
//! [detwcp]
void detectCharucoBoardWithCalibrationPose()
{
cv::VideoCapture inputVideo;
inputVideo.open(0);
//! [matdiscoff]
cv::Mat cameraMatrix, distCoeffs;
std::string filename = "calib.txt";
bool readOk = readCameraParameters(filename, cameraMatrix, distCoeffs);
//! [matdiscoff]
if (!readOk) {
std::cerr << "Invalid camera file" << std::endl;
} else {
//! [dictboard]
cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
cv::Ptr<cv::aruco::CharucoBoard> board = cv::aruco::CharucoBoard::create(5, 7, 0.04f, 0.02f, dictionary);
cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
//! [dictboard]
while (inputVideo.grab()) {
//! [inputImg]
cv::Mat image;
//! [inputImg]
cv::Mat imageCopy;
inputVideo.retrieve(image);
image.copyTo(imageCopy);
//! [midcornerdet]
std::vector<int> markerIds;
std::vector<std::vector<cv::Point2f> > markerCorners;
cv::aruco::detectMarkers(image, board->dictionary, markerCorners, markerIds, params);
//! [midcornerdet]
// if at least one marker detected
if (markerIds.size() > 0) {
cv::aruco::drawDetectedMarkers(imageCopy, markerCorners, markerIds);
//! [charidcor]
std::vector<cv::Point2f> charucoCorners;
std::vector<int> charucoIds;
cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, image, board, charucoCorners, charucoIds, cameraMatrix, distCoeffs);
//! [charidcor]
// if at least one charuco corner detected
if (charucoIds.size() > 0) {
cv::Scalar color = cv::Scalar(255, 0, 0);
//! [detcor]
cv::aruco::drawDetectedCornersCharuco(imageCopy, charucoCorners, charucoIds, color);
//! [detcor]
cv::Vec3d rvec, tvec;
//! [pose]
// cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board, cameraMatrix, distCoeffs, rvec, tvec);
//! [pose]
bool valid = cv::aruco::estimatePoseCharucoBoard(charucoCorners, charucoIds, board, cameraMatrix, distCoeffs, rvec, tvec);
// if charuco pose is valid
if (valid)
cv::aruco::drawAxis(imageCopy, cameraMatrix, distCoeffs, rvec, tvec, 0.1f);
}
}
cv::imshow("out", imageCopy);
char key = (char)cv::waitKey(30);
if (key == 27)
break;
}
}
}
//! [detwcp]
//! [detwc]
void detectCharucoBoardWithoutCalibration()
{
cv::VideoCapture inputVideo;
inputVideo.open(0);
cv::Ptr<cv::aruco::Dictionary> dictionary = cv::aruco::getPredefinedDictionary(cv::aruco::DICT_6X6_250);
cv::Ptr<cv::aruco::CharucoBoard> board = cv::aruco::CharucoBoard::create(5, 7, 0.04f, 0.02f, dictionary);
cv::Ptr<cv::aruco::DetectorParameters> params = cv::aruco::DetectorParameters::create();
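// ArUco marker corner refinement is turned off here; the ChArUco corners are
// subpixel-refined later, during interpolateCornersCharuco(), anyway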
params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_NONE;
while (inputVideo.grab()) {
cv::Mat image, imageCopy;
inputVideo.retrieve(image);
image.copyTo(imageCopy);
std::vector<int> markerIds;
std::vector<std::vector<cv::Point2f> > markerCorners;
cv::aruco::detectMarkers(image, board->dictionary, markerCorners, markerIds, params);
//or
//cv::aruco::detectMarkers(image, dictionary, markerCorners, markerIds, params);
// if at least one marker detected
if (markerIds.size() > 0) {
cv::aruco::drawDetectedMarkers(imageCopy, markerCorners, markerIds);
//! [charidcorwc]
std::vector<cv::Point2f> charucoCorners;
std::vector<int> charucoIds;
cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, image, board, charucoCorners, charucoIds);
//! [charidcorwc]
// if at least one charuco corner detected
if (charucoIds.size() > 0)
cv::aruco::drawDetectedCornersCharuco(imageCopy, charucoCorners, charucoIds, cv::Scalar(255, 0, 0));
}
cv::imshow("out", imageCopy);
char key = (char)cv::waitKey(30);
if (key == 27)
break;
}
}
//! [detwc]
int main(int argc, char* argv[])
{
cv::CommandLineParser parser(argc, argv, keys);
parser.about(about);
if (argc < 2) {
parser.printMessage();
return 0;
}
int choose = parser.get<int>("c");
switch (choose) {
case 1:
createBoard();
std::cout << "An image named BoardImg.jpg is generated in folder containing this file" << std::endl;
break;
case 2:
detectCharucoBoardWithoutCalibration();
break;
case 3:
detectCharucoBoardWithCalibrationPose();
break;
default:
break;
}
return 0;
}
\ No newline at end of file
...@@ -20,6 +20,23 @@ they are very accurate in terms of subpixel accuracy.
When high precision is necessary, such as in camera calibration, Charuco boards are a better option than standard
Aruco boards.

Goal
----
In this tutorial you will learn:

- How to create a ChArUco board
- How to detect ChArUco corners without performing camera calibration
- How to detect ChArUco corners with camera calibration and pose estimation

Source code
-----------
You can find this code in `opencv_contrib/modules/aruco/samples/tutorial_charuco_create_detect.cpp`.

Here is sample code that achieves everything listed in the goal section.

@include samples/tutorial_charuco_create_detect.cpp
ChArUco Board Creation
------
...@@ -28,9 +45,7 @@ The aruco module provides the ```cv::aruco::CharucoBoard``` class that represent
This class, as the rest of the ChArUco functionalities, is defined in:

@snippet samples/tutorial_charuco_create_detect.cpp charucohdr

To define a ```CharucoBoard```, it is necessary:
...@@ -59,11 +74,7 @@ This can be easily customized by accessing to the ids vector through ```board.id
Once we have our ```CharucoBoard``` object, we can create an image to print it. This can be done with the
<code>CharucoBoard::draw()</code> method:

@snippet samples/tutorial_charuco_create_detect.cpp createBoard

- The first parameter is the size of the output image in pixels. In this case 600x500 pixels. If this is not proportional
to the board dimensions, it will be centered on the image.
...@@ -76,9 +87,9 @@ The output image will be something like this:
![](images/charucoboard.jpg)

A full working example is included in ```modules/aruco/samples/create_board_charuco.cpp```.

Note: ```create_board_charuco.cpp``` now takes its input from the command line via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like:
@code{.cpp}
"_ output path_/chboard.png" -w=5 -h=7 -sl=200 -ml=120 -d=10
@endcode
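The 600x500 pixel size used above is only meant for on-screen viewing. If the board is printed and later used for pose estimation, the physical square size should match the 0.04 m passed to ```CharucoBoard::create()```, otherwise the translation scale of the estimated pose will be off. A minimal sketch (not part of the tutorial; the 300 DPI target is an assumption and the 10-pixel margin argument of ```draw()``` is ignored) of picking an output image size for a given print resolution:

@code{.cpp}
// Sketch only: compute an image size so that, printed at `dpi`, each
// chessboard square comes out at the 0.04 m used when creating the board.
#include <cmath>
#include <iostream>

int main()
{
    const int squaresX = 5, squaresY = 7;          // board layout from the tutorial
    const float squareLengthMeters = 0.04f;        // square size from the tutorial
    const float dpi = 300.0f;                      // assumed print resolution
    const float pixelsPerMeter = dpi / 0.0254f;    // 1 inch = 0.0254 m

    const int widthPx  = (int)std::lround(squaresX * squareLengthMeters * pixelsPerMeter);
    const int heightPx = (int)std::lround(squaresY * squareLengthMeters * pixelsPerMeter);

    std::cout << "draw the board into a " << widthPx << "x" << heightPx
              << " pixel image for printing at " << dpi << " DPI" << std::endl;
    return 0;
}
@endcode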
...@@ -89,69 +100,53 @@ ChArUco Board Detection
When you detect a ChArUco board, what you are actually detecting is each of the chessboard corners of the board.

Each corner on a ChArUco board has a unique identifier (id) assigned. These ids go from 0 to the total number of corners in the board.
The detection of a ChArUco board can be broken down into the following steps:

- **Taking the input image**

@snippet samples/tutorial_charuco_create_detect.cpp inputImg

The original image where the markers are to be detected. The image is necessary to perform subpixel refinement of the ChArUco corners.

- **Reading the camera calibration parameters (only for detection with camera calibration)**

@snippet samples/tutorial_charuco_create_detect.cpp matdiscoff

The parameters of readCameraParameters are:

- filename - the path to calib.txt, the calibration file produced by calibrate_camera_charuco.cpp
- cameraMatrix and distCoeffs - the optional camera calibration parameters

This function takes these parameters as input and returns a boolean value indicating whether the camera calibration parameters are valid. For detection of corners without calibration, this step is not required.
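For reference, readCameraParameters() (shown in the sample above) expects a ```cv::FileStorage``` file containing ```camera_matrix``` and ```distortion_coefficients``` entries. A minimal sketch of writing a compatible file by hand; the numeric values below are placeholders, not a real calibration:

@code{.cpp}
// Sketch only: write a calib.txt that readCameraParameters() can load.
// The matrix values are placeholders, not a real calibration.
#include <opencv2/core.hpp>

int main()
{
    cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 800, 0, 320,
                                                        0, 800, 240,
                                                        0,   0,   1);
    cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

    // FORMAT_YAML is requested explicitly so the file is written as YAML
    // regardless of the ".txt" extension.
    cv::FileStorage fs("calib.txt", cv::FileStorage::WRITE | cv::FileStorage::FORMAT_YAML);
    fs << "camera_matrix" << cameraMatrix;
    fs << "distortion_coefficients" << distCoeffs;
    return 0;
}
@endcode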
- **Detecting the markers**

@snippet samples/tutorial_charuco_create_detect.cpp dictboard
@snippet samples/tutorial_charuco_create_detect.cpp midcornerdet

The parameters of detectMarkers are:

- image - the input image
- dictionary - pointer to the Dictionary/Set of markers that will be searched for
- markerCorners - vector of detected marker corners
- markerIds - vector of identifiers of the detected markers
- params - marker detection parameters

The detection of the ChArUco corners is based on the previously detected markers: first the markers are detected, and then the ChArUco corners are interpolated from them.

- **Interpolation of ChArUco corners from markers**

For detection with calibration:

@snippet samples/tutorial_charuco_create_detect.cpp charidcor

For detection without calibration:

@snippet samples/tutorial_charuco_create_detect.cpp charidcorwc

The function that detects the ChArUco corners is cv::aruco::interpolateCornersCharuco(). It returns the number of ChArUco corners interpolated, and its outputs are:

- ```std::vector<cv::Point2f> charucoCorners``` : list of image positions of the detected corners.
- ```std::vector<int> charucoIds``` : ids for each of the detected corners in ```charucoCorners```.

If calibration parameters are provided, the ChArUco corners are interpolated by, first, estimating a rough pose from the ArUco markers
and, then, reprojecting the ChArUco corners back to the image.
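The sample only checks that ```charucoIds``` is non-empty, but the return value of ```interpolateCornersCharuco()``` can also be used directly. A minimal sketch (a hypothetical helper, not part of the sample) that wraps detection and interpolation and reports how many corners were recovered:

@code{.cpp}
// Hypothetical helper (not in the sample): detects the board markers,
// interpolates the ChArUco corners and returns how many corners were found.
#include <opencv2/aruco/charuco.hpp>
#include <vector>

static int countCharucoCorners(const cv::Mat& image,
                               const cv::Ptr<cv::aruco::CharucoBoard>& board)
{
    std::vector<int> markerIds;
    std::vector<std::vector<cv::Point2f> > markerCorners;
    cv::aruco::detectMarkers(image, board->dictionary, markerCorners, markerIds);
    if (markerIds.empty())
        return 0;

    std::vector<cv::Point2f> charucoCorners;
    std::vector<int> charucoIds;
    // The return value is the number of interpolated ChArUco corners.
    return cv::aruco::interpolateCornersCharuco(markerCorners, markerIds, image, board,
                                                charucoCorners, charucoIds);
}
@endcode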
...@@ -176,11 +171,9 @@ After the ChArUco corners have been interpolated, a subpixel refinement is perfo
Once we have interpolated the ChArUco corners, we would probably want to draw them to see if their detections are correct.
This can be easily done using the ```drawDetectedCornersCharuco()``` function:

@snippet samples/tutorial_charuco_create_detect.cpp detcor

- ```imageCopy``` is the image where the corners will be drawn (it will normally be the same image where the corners were detected).
- The ```outputImage``` will be a clone of ```inputImage``` with the corners drawn.
- ```charucoCorners``` and ```charucoIds``` are the detected Charuco corners from the ```interpolateCornersCharuco()``` function.
- Finally, the last parameter is the (optional) color we want to draw the corners with, of type ```cv::Scalar```.
...@@ -199,43 +192,7 @@ In the presence of occlusion. like in the following image, although some corners
Finally, this is a full example of ChArUco detection (without using calibration parameters):

@snippet samples/tutorial_charuco_create_detect.cpp detwc

Sample video:
...@@ -243,13 +200,15 @@ Sample video:
<iframe width="420" height="315" src="https://www.youtube.com/embed/Nj44m_N_9FY" frameborder="0" allowfullscreen></iframe>
@endhtmlonly

A full working example is included in ```modules/aruco/samples/detect_board_charuco.cpp```.

Note: The samples now take input from the command line via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like:
@code{.cpp}
-c="_path_/calib.txt" -dp="_path_/detector_params.yml" -w=5 -h=7 -sl=0.04 -ml=0.02 -d=10
@endcode

Here calib.txt is the output file generated by calibrate_camera_charuco.cpp.
ChArUco Pose Estimation
------
...@@ -260,9 +219,7 @@ of the ```CharucoBoard``` is placed in the board plane with the Z axis pointing
The function for pose estimation is ```estimatePoseCharucoBoard()```:

@snippet samples/tutorial_charuco_create_detect.cpp pose

- The ```charucoCorners``` and ```charucoIds``` parameters are the detected charuco corners from the ```interpolateCornersCharuco()```
function.
...@@ -278,50 +235,9 @@ The axis can be drawn using ```drawAxis()``` to check the pose is correctly esti
A full example of ChArUco detection with pose estimation:

@snippet samples/tutorial_charuco_create_detect.cpp detwcp
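```estimatePoseCharucoBoard()``` returns the board pose as a Rodrigues rotation vector ```rvec``` and a translation vector ```tvec```, both expressed in the camera coordinate frame. A minimal sketch (using placeholder values rather than a real detection) of converting ```rvec``` into a 3x3 rotation matrix with cv::Rodrigues():

@code{.cpp}
// Sketch only: turn a (placeholder) board pose into a rotation matrix and
// print the board origin expressed in the camera frame.
#include <opencv2/calib3d.hpp>
#include <iostream>

int main()
{
    cv::Vec3d rvec(0.1, -0.2, 0.05);   // placeholder rotation (axis-angle)
    cv::Vec3d tvec(0.0, 0.0, 0.5);     // placeholder translation in meters

    cv::Mat R;
    cv::Rodrigues(rvec, R);            // axis-angle -> 3x3 rotation matrix

    std::cout << "R = " << R << std::endl;
    std::cout << "board origin in camera frame = " << cv::Mat(tvec) << std::endl;
    return 0;
}
@endcode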
A full working example is included in ```modules/aruco/samples/detect_board_charuco.cpp```.

Note: The samples now take input from the command line via the [OpenCV Commandline Parser](http://docs.opencv.org/trunk/d0/d2e/classcv_1_1CommandLineParser.html#gsc.tab=0). For this file the example parameters will look like:
@code{.cpp}
......
...@@ -75,20 +75,21 @@ public:
* @param nbrOfSmallBins Number of bins between 0 and "histThresh". Default value is 10.
* @param nbrOfLargeBins Number of bins between "histThresh" and 32*pi*pi (highest edge reliability value). Default value is 5.
*/
struct CV_EXPORTS_W_SIMPLE Params
{
    CV_WRAP Params();
    CV_PROP_RW int width;
    CV_PROP_RW int height;
    CV_PROP_RW float histThresh;
    CV_PROP_RW int nbrOfSmallBins;
    CV_PROP_RW int nbrOfLargeBins;
};
/**
* @brief Constructor
* @param parameters HistogramPhaseUnwrapping parameters HistogramPhaseUnwrapping::Params: width,height of the phase map and histogram characteristics.
*/
CV_WRAP
static Ptr<HistogramPhaseUnwrapping> create( const HistogramPhaseUnwrapping::Params &parameters =
                                             HistogramPhaseUnwrapping::Params() );
......
#ifdef HAVE_OPENCV_PHASE_UNWRAPPING
typedef cv::phase_unwrapping::HistogramPhaseUnwrapping::Params HistogramPhaseUnwrapping_Params;
#endif
...@@ -712,7 +712,10 @@ void HistogramPhaseUnwrapping_Impl::addIncrement( OutputArray unwrappedPhaseMap
int rows = params.height;
int cols = params.width;
if( uPhaseMap.empty() )
{
    uPhaseMap.create(rows, cols, CV_32FC1);
    uPhaseMap = Scalar::all(0);
}
int nbrOfPixels = static_cast<int>(pixels.size());
for( int i = 0; i < nbrOfPixels; ++i )
{
......
...@@ -119,7 +119,7 @@ public:
* @param shadowMask Mask used to discard shadow regions.
*/
CV_WRAP
virtual void unwrapPhaseMap( InputArray wrappedPhaseMap,
                             OutputArray unwrappedPhaseMap,
                             cv::Size camSize,
                             InputArray shadowMask = noArray() ) = 0;
......
#!/usr/bin/env python
# Python 2/3 compatibility
from __future__ import print_function
import unittest
import os, numpy
import cv2 as cv
from tests_common import NewOpenCVTests
class structured_light_test(NewOpenCVTests):
def test_unwrap(self):
paramsPsp = cv.structured_light_SinusoidalPattern_Params();
paramsFtp = cv.structured_light_SinusoidalPattern_Params();
paramsFaps = cv.structured_light_SinusoidalPattern_Params();
paramsPsp.methodId = cv.structured_light.PSP;
paramsFtp.methodId = cv.structured_light.FTP;
paramsFaps.methodId = cv.structured_light.FAPS;
sinusPsp = cv.structured_light.SinusoidalPattern_create(paramsPsp)
sinusFtp = cv.structured_light.SinusoidalPattern_create(paramsFtp)
sinusFaps = cv.structured_light.SinusoidalPattern_create(paramsFaps)
captures = []
for i in range(0,3):
capture = self.get_sample('/cv/structured_light/data/capture_sin_%d.jpg'%i, cv.IMREAD_GRAYSCALE)
if capture is None:
raise unittest.SkipTest("Missing files with test data")
captures.append(capture)
rows,cols = captures[0].shape
unwrappedPhaseMapPspRef = self.get_sample('/cv/structured_light/data/unwrappedPspTest.jpg',
cv.IMREAD_GRAYSCALE)
unwrappedPhaseMapFtpRef = self.get_sample('/cv/structured_light/data/unwrappedFtpTest.jpg',
cv.IMREAD_GRAYSCALE)
unwrappedPhaseMapFapsRef = self.get_sample('/cv/structured_light/data/unwrappedFapsTest.jpg',
cv.IMREAD_GRAYSCALE)
wrappedPhaseMap,shadowMask = sinusPsp.computePhaseMap(captures);
unwrappedPhaseMap = sinusPsp.unwrapPhaseMap(wrappedPhaseMap, (cols, rows), shadowMask=shadowMask)
unwrappedPhaseMap8 = unwrappedPhaseMap*1 + 128
unwrappedPhaseMap8 = numpy.uint8(unwrappedPhaseMap8)
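# accumulate the signed difference against the reference map; the mean
# difference (ratio) must stay within 0.2 grey levels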
sumOfDiff = 0
count = 0
for i in range(rows):
for j in range(cols):
ref = int(unwrappedPhaseMapPspRef[i, j])
comp = int(unwrappedPhaseMap8[i, j])
sumOfDiff += (ref - comp)
count += 1
ratio = sumOfDiff/float(count)
self.assertLessEqual(ratio, 0.2)
wrappedPhaseMap,shadowMask = sinusFtp.computePhaseMap(captures);
unwrappedPhaseMap = sinusFtp.unwrapPhaseMap(wrappedPhaseMap, (cols, rows), shadowMask=shadowMask)
unwrappedPhaseMap8 = unwrappedPhaseMap*1 + 128
unwrappedPhaseMap8 = numpy.uint8(unwrappedPhaseMap8)
sumOfDiff = 0
count = 0
for i in range(rows):
for j in range(cols):
ref = int(unwrappedPhaseMapFtpRef[i, j])
comp = int(unwrappedPhaseMap8[i, j])
sumOfDiff += (ref - comp)
count += 1
ratio = sumOfDiff/float(count)
self.assertLessEqual(ratio, 0.2)
wrappedPhaseMap,shadowMask2 = sinusFaps.computePhaseMap(captures);
unwrappedPhaseMap = sinusFaps.unwrapPhaseMap(wrappedPhaseMap, (cols, rows), shadowMask=shadowMask2)
unwrappedPhaseMap8 = unwrappedPhaseMap*1 + 128
unwrappedPhaseMap8 = numpy.uint8(unwrappedPhaseMap8)
sumOfDiff = 0
count = 0
for i in range(rows):
for j in range(cols):
ref = int(unwrappedPhaseMapFapsRef[i, j])
comp = int(unwrappedPhaseMap8[i, j])
sumOfDiff += (ref - comp)
count += 1
ratio = sumOfDiff/float(count)
self.assertLessEqual(ratio, 0.2)
if __name__ == '__main__':
NewOpenCVTests.bootstrap()