Commit 5a0c0440 authored by Vladislav Sovrasov's avatar Vladislav Sovrasov

Add interactive calibration app

parent ec63343f
......@@ -4,3 +4,4 @@ link_libraries(${OPENCV_LINKER_LIBS})
add_subdirectory(traincascade)
add_subdirectory(createsamples)
add_subdirectory(annotation)
add_subdirectory(interactive-calibration)
# Build script for the opencv_interactive-calibration application.
set(OPENCV_INTERACTIVECALIBRATION_DEPS opencv_core opencv_aruco opencv_highgui opencv_calib3d opencv_videoio)
ocv_check_dependencies(${OPENCV_INTERACTIVECALIBRATION_DEPS})

if(NOT OCV_DEPENDENCIES_FOUND)
  return()
endif()

# Optional LAPACK support: defines USE_LAPACK for the sources (used by the
# fast QR-based solver path). Activated only when the LAPACKE header is
# found in addition to the LAPACK libraries.
find_package(LAPACK)
if(LAPACK_FOUND)
  find_file(LAPACK_HEADER "lapacke.h")
  if(LAPACK_HEADER)
    add_definitions(-DUSE_LAPACK)
    link_libraries(${LAPACK_LIBRARIES})
  endif()
endif()

project(interactive-calibration)
set(the_target opencv_interactive-calibration)

# NOTE(review): these ocv_target_* calls run before ocv_add_executable
# creates the target; OpenCV's wrappers are expected to defer/record the
# settings — confirm against cmake/OpenCVUtils.cmake.
ocv_target_include_directories(${the_target} PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}" "${OpenCV_SOURCE_DIR}/include/opencv")
ocv_target_include_modules_recurse(${the_target} ${OPENCV_INTERACTIVECALIBRATION_DEPS})

# All sources and headers in this directory belong to the application.
file(GLOB SRCS *.cpp)
file(GLOB HDRS *.h*)

set(interactive-calibration_files ${SRCS} ${HDRS})

ocv_add_executable(${the_target} ${interactive-calibration_files})
ocv_target_link_libraries(${the_target} ${OPENCV_INTERACTIVECALIBRATION_DEPS})

set_target_properties(${the_target} PROPERTIES
  DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
  ARCHIVE_OUTPUT_DIRECTORY ${LIBRARY_OUTPUT_PATH}
  RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH}
  INSTALL_NAME_DIR lib
  OUTPUT_NAME "opencv_interactive-calibration")

if(ENABLE_SOLUTION_FOLDERS)
  set_target_properties(${the_target} PROPERTIES FOLDER "applications")
endif()

# Install the executable; under INSTALL_CREATE_DISTRIB only Release builds
# of shared-library configurations are packaged.
if(INSTALL_CREATE_DISTRIB)
  if(BUILD_SHARED_LIBS)
    install(TARGETS ${the_target} RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} CONFIGURATIONS Release COMPONENT dev)
  endif()
else()
  install(TARGETS ${the_target} OPTIONAL RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT dev)
endif()
#ifndef CALIB_COMMON_HPP
#define CALIB_COMMON_HPP
#include <memory>
#include <opencv2/core.hpp>
#include <vector>
#include <string>
namespace calib
{
// Milliseconds an overlay message (Qt HighGUI builds) stays visible.
#define OVERLAY_DELAY 1000
// Default/display bounds for captured frames.
#define IMAGE_MAX_WIDTH 1280
#define IMAGE_MAX_HEIGHT 960

// Shows a transient message to the user: overlay on the main window when
// Qt HighGUI is available (returns true), stdout otherwise (returns false).
bool showOverlayMessage(const std::string& message);

enum InputType { Video, Pictures };
enum InputVideoSource { Camera, File };
enum TemplateType { AcirclesGrid, Chessboard, chAruco, DoubleAcirclesGrid };

static const std::string mainWindowName = "Calibration";
static const std::string gridWindowName = "Board locations";
// Hot-key reference printed at application start.
// Fix: the "u" line was missing its trailing '\n', which glued it to the
// "d" line in the printed help text.
static const std::string consoleHelp = "Hot keys:\nesc - exit application\n"
                                       "s - save current data to .xml file\n"
                                       "r - delete last frame\n"
                                       "u - enable/disable applying undistortion\n"
                                       "d - delete all frames\n"
                                       "v - switch visualization";

// ~95% confidence interval multiplier (normal distribution quantile).
static const double sigmaMult = 1.96;
struct calibrationData
{
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
cv::Mat stdDeviations;
cv::Mat perViewErrors;
std::vector<cv::Mat> rvecs;
std::vector<cv::Mat> tvecs;
double totalAvgErr;
cv::Size imageSize;
std::vector<std::vector<cv::Point2f> > imagePoints;
std::vector< std::vector<cv::Point3f> > objectPoints;
std::vector<cv::Mat> allCharucoCorners;
std::vector<cv::Mat> allCharucoIds;
cv::Mat undistMap1, undistMap2;
calibrationData()
{
imageSize = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT);
}
};
struct cameraParameters
{
cv::Mat cameraMatrix;
cv::Mat distCoeffs;
cv::Mat stdDeviations;
double avgError;
cameraParameters(){}
cameraParameters(cv::Mat& _cameraMatrix, cv::Mat& _distCoeffs, cv::Mat& _stdDeviations, double _avgError = 0) :
cameraMatrix(_cameraMatrix), distCoeffs(_distCoeffs), stdDeviations(_stdDeviations), avgError(_avgError)
{}
};
// User-configurable capture and board settings. Most fields are assigned
// by parametersController (command line + advanced XML file); the
// constructor only sets defaults for the XML-configurable subset.
struct captureParameters
{
    InputType captureMethod;        // video stream or still pictures
    InputVideoSource source;        // live camera or video file
    TemplateType board;             // calibration pattern type
    cv::Size boardSize;             // pattern size in corners/circles
    int charucoDictName;            // predefined aruco dictionary id
    int calibrationStep;            // frames between calibration launches
    float charucoSquareLenght, charucoMarkerSize;  // chAruco geometry (pixels)
    float captureDelay;             // delay between automatic captures
    float squareSize;               // distance between neighbouring corners/circles
    float templDst;                 // distance between parts of the dual-circles template
    std::string videoFileName;      // used when source == File
    bool flipVertical;              // flip input frames (see -flip option)
    int camID;                      // used when source == Camera
    int fps;
    cv::Size cameraResolution;
    int maxFramesNum;               // dataset size at which frame filtering starts
    int minFramesNum;               // frames required before auto-tuning kicks in

    captureParameters()
    {
        calibrationStep = 1;
        captureDelay = 500.f;
        maxFramesNum = 30;
        minFramesNum = 10;
        fps = 30;
        cameraResolution = cv::Size(IMAGE_MAX_WIDTH, IMAGE_MAX_HEIGHT);
        // NOTE(review): board, source, squareSize, templDst, flipVertical,
        // camID etc. are left unset here — they are assigned by
        // parametersController::loadFromParser before first use; confirm.
    }
};
// Advanced solver/filter settings, overridable via the XML file passed
// with -pf (see defaultConfig.xml).
struct internalParameters
{
    double solverEps;      // Levenberg-Marquardt solver precision
    int solverMaxIters;    // solver iteration limit
    bool fastSolving;      // use the QR-based solver path
    double filterAlpha;    // frames filter convolution parameter

    // Defaults mirror the values shipped in defaultConfig.xml.
    internalParameters() :
        solverEps(1e-7),
        solverMaxIters(30),
        fastSolving(false),
        filterAlpha(0.1)
    {}
};
}
#endif
This diff is collapsed.
#ifndef CALIB_CONTROLLER_HPP
#define CALIB_CONTROLLER_HPP
#include "calibCommon.hpp"
#include <stack>
#include <string>
#include <ostream>
namespace calib {
// Evaluates calibration quality after each run and decides whether the
// current calibration is acceptable; can also auto-tune calibration flags.
class calibController
{
protected:
    cv::Ptr<calibrationData> mCalibData;
    int mCalibFlags;                 // current calibration flags
    unsigned mMinFramesNum;          // frames required before quality checks apply
    bool mNeedTuning;                // automatic flag tuning enabled
    bool mConfIntervalsState;        // presumably: confidence intervals acceptable — see .cpp
    bool mCoverageQualityState;      // presumably: image-plane coverage acceptable — see .cpp

    // Estimates how well the captured points cover the image plane.
    double estimateCoverageQuality();
public:
    calibController();
    calibController(cv::Ptr<calibrationData> data, int initialFlags, bool autoTuning,
                    int minFramesNum);

    // Re-evaluates all quality criteria (called after each calibration run).
    void updateState();

    bool getCommonCalibrationState() const;

    bool getFramesNumberState() const;
    // NOTE: "Intrervals" typo is part of the public interface; kept as-is.
    bool getConfidenceIntrervalsState() const;
    bool getRMSState() const;
    bool getPointsCoverageState() const;
    // Returns the (possibly auto-tuned) flags for the next calibration run.
    int getNewFlags() const;
};
// Owns operations on the captured dataset: frame filtering, an undo stack
// of camera parameters, persistence to XML and console reporting.
class calibDataController
{
protected:
    cv::Ptr<calibrationData> mCalibData;
    std::stack<cameraParameters> mParamsStack;  // parameter history for undo
    std::string mParamsFileName;                // output .xml path (set via setParametersFileName)
    unsigned mMaxFramesNum;                     // target dataset size for the filter
    double mAlpha;                              // frames filter convolution parameter

    // Quality estimate of the dataset with one frame excluded — presumably
    // used by filterFrames() to pick a frame to drop; confirm in .cpp.
    double estimateGridSubsetQuality(size_t excludedIndex);
public:
    calibDataController(cv::Ptr<calibrationData> data, int maxFrames, double convParameter);
    calibDataController();

    // Reduces the dataset when it exceeds mMaxFramesNum.
    void filterFrames();
    void setParametersFileName(const std::string& name);
    void deleteLastFrame();
    // Pushes the current parameters onto the undo stack.
    void rememberCurrentParameters();
    void deleteAllData();
    bool saveCurrentCameraParameters() const;
    void printParametersToConsole(std::ostream &output) const;
    // Recomputes the cached undistortion maps after a calibration run.
    void updateUndistortMap();
};
}
#endif
#include "calibPipeline.hpp"
#include <opencv2/highgui.hpp>
#include <stdexcept>
using namespace calib;
#define CAP_DELAY 10
// Probes the maximal resolution the capture device supports: request an
// absurdly large frame and read back the size the driver clamps it to.
cv::Size CalibPipeline::getCameraResolution()
{
    mCapture.set(cv::CAP_PROP_FRAME_WIDTH, 10000);
    mCapture.set(cv::CAP_PROP_FRAME_HEIGHT, 10000);
    const int maxWidth = (int)mCapture.get(cv::CAP_PROP_FRAME_WIDTH);
    const int maxHeight = (int)mCapture.get(cv::CAP_PROP_FRAME_HEIGHT);
    return cv::Size(maxWidth, maxHeight);
}
// Stores the capture configuration; the capture device itself is opened
// lazily on the first call to start().
CalibPipeline::CalibPipeline(captureParameters params) :
    mCaptureParams(params)
{
}
// Runs the capture loop: grabs frames, feeds them through the processor
// chain, shows the result and maps hot keys to exit statuses. Returns the
// reason the loop stopped so main() can act on it and re-enter.
// Throws std::runtime_error when the video source cannot be opened.
PipelineExitStatus CalibPipeline::start(std::vector<cv::Ptr<FrameProcessor> > processors)
{
    if(mCaptureParams.source == Camera && !mCapture.isOpened())
    {
        // First entry with a live camera: open it and negotiate a resolution
        // as close as possible to the requested one, preserving the camera's
        // aspect ratio when it cannot deliver the full requested size.
        mCapture.open(mCaptureParams.camID);
        cv::Size maxRes = getCameraResolution();
        cv::Size neededRes = mCaptureParams.cameraResolution;

        if(maxRes.width < neededRes.width) {
            double aR = (double)maxRes.width / maxRes.height;
            mCapture.set(cv::CAP_PROP_FRAME_WIDTH, neededRes.width);
            mCapture.set(cv::CAP_PROP_FRAME_HEIGHT, neededRes.width/aR);
        }
        else if(maxRes.height < neededRes.height) {
            double aR = (double)maxRes.width / maxRes.height;
            mCapture.set(cv::CAP_PROP_FRAME_HEIGHT, neededRes.height);
            mCapture.set(cv::CAP_PROP_FRAME_WIDTH, neededRes.height*aR);
        }
        else {
            mCapture.set(cv::CAP_PROP_FRAME_HEIGHT, neededRes.height);
            mCapture.set(cv::CAP_PROP_FRAME_WIDTH, neededRes.width);
        }
        // Autofocus would change the intrinsics between frames; disable it.
        mCapture.set(cv::CAP_PROP_AUTOFOCUS, 0);
    }
    else if (mCaptureParams.source == File && !mCapture.isOpened())
        mCapture.open(mCaptureParams.videoFileName);
    mImageSize = cv::Size((int)mCapture.get(cv::CAP_PROP_FRAME_WIDTH), (int)mCapture.get(cv::CAP_PROP_FRAME_HEIGHT));

    if(!mCapture.isOpened())
        throw std::runtime_error("Unable to open video source");

    cv::Mat frame, processedFrame;
    while(mCapture.grab()) {
        mCapture.retrieve(frame);
        if(mCaptureParams.flipVertical)
            cv::flip(frame, frame, -1);

        // Each processor may annotate/transform the frame it returns.
        frame.copyTo(processedFrame);
        for (std::vector<cv::Ptr<FrameProcessor> >::iterator it = processors.begin(); it != processors.end(); ++it)
            processedFrame = (*it)->processFrame(processedFrame);
        cv::imshow(mainWindowName, processedFrame);
        int key = cv::waitKey(CAP_DELAY);

        // Hot keys — see consoleHelp in calibCommon.hpp.
        if(key == 27) // esc
            return Finished;
        else if (key == 114) // r
            return DeleteLastFrame;
        else if (key == 100) // d
            return DeleteAllFrames;
        else if (key == 115) // s
            return SaveCurrentData;
        else if (key == 117) // u
            return SwitchUndistort;
        else if (key == 118) // v
            return SwitchVisualisation;

        // A processor reporting "processed" means enough new data has been
        // captured to run a calibration iteration.
        for (std::vector<cv::Ptr<FrameProcessor> >::iterator it = processors.begin(); it != processors.end(); ++it)
            if((*it)->isProcessed())
                return Calibrate;
    }

    // Video stream ended.
    return Finished;
}
// Returns the actual frame size reported by the opened capture source.
// Valid only after start() has been called at least once.
cv::Size CalibPipeline::getImageSize() const
{
    return mImageSize;
}
#ifndef CALIB_PIPELINE_HPP
#define CALIB_PIPELINE_HPP
#include <vector>
#include <opencv2/highgui.hpp>
#include "calibCommon.hpp"
#include "frameProcessor.hpp"
namespace calib
{
// Reason the capture loop in CalibPipeline::start() returned; main() uses
// this to dispatch the requested action and then re-enters the loop.
enum PipelineExitStatus { Finished,           // esc pressed or stream ended
                          DeleteLastFrame,    // 'r'
                          Calibrate,          // enough new frames captured
                          DeleteAllFrames,    // 'd'
                          SaveCurrentData,    // 's'
                          SwitchUndistort,    // 'u'
                          SwitchVisualisation // 'v'
                        };
// Owns the video source and the main capture/processing loop.
class CalibPipeline
{
protected:
    captureParameters mCaptureParams;
    cv::Size mImageSize;          // actual size of captured frames
    cv::VideoCapture mCapture;

    // Probes the maximal resolution supported by the camera.
    cv::Size getCameraResolution();

public:
    CalibPipeline(captureParameters params);

    // Runs the capture loop until a hot key or a calibration trigger.
    PipelineExitStatus start(std::vector<cv::Ptr<FrameProcessor> > processors);
    cv::Size getImageSize() const;
};
}
#endif
This diff is collapsed.
#ifndef CV_CALIBRATION_FORK_HPP
#define CV_CALIBRATION_FORK_HPP
#include <opencv2/core.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/calib3d/calib3d_c.h>
// Fork of OpenCV's camera-calibration internals, extended to also output
// per-parameter standard deviations and per-view re-projection errors,
// and to accept an extra flag (CALIB_USE_QR) selecting a QR-based solver.
namespace cvfork
{

using namespace cv;

// Number of intrinsic parameters handled by the fork's solver.
#define CV_CALIB_NINTRINSIC 18
// Extra calibration flag: use QR decomposition in the LM solver.
#define CALIB_USE_QR (1 << 18)

// Same contract as cv::calibrateCamera, plus stdDeviations (per-parameter
// standard deviations) and perViewErrors outputs.
double calibrateCamera(InputArrayOfArrays objectPoints,
                       InputArrayOfArrays imagePoints, Size imageSize,
                       InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
                       OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray stdDeviations,
                       OutputArray perViewErrors, int flags = 0, TermCriteria criteria = TermCriteria(
                       TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );

// C-API counterpart of the forked calibrateCamera (see cvCalibrateCamera2),
// extended with stdDeviations_vector / perViewErrors_vector outputs.
double cvCalibrateCamera2( const CvMat* object_points,
                           const CvMat* image_points,
                           const CvMat* point_counts,
                           CvSize image_size,
                           CvMat* camera_matrix,
                           CvMat* distortion_coeffs,
                           CvMat* rotation_vectors CV_DEFAULT(NULL),
                           CvMat* translation_vectors CV_DEFAULT(NULL),
                           CvMat* stdDeviations_vector CV_DEFAULT(NULL),
                           CvMat* perViewErrors_vector CV_DEFAULT(NULL),
                           int flags CV_DEFAULT(0),
                           CvTermCriteria term_crit CV_DEFAULT(cvTermCriteria(
                           CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON)) );

// ChArUco-board variant, mirroring cv::aruco::calibrateCameraCharuco with
// the same extra outputs as above.
double calibrateCameraCharuco(InputArrayOfArrays _charucoCorners, InputArrayOfArrays _charucoIds,
                              Ptr<aruco::CharucoBoard> &_board, Size imageSize,
                              InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
                              OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, OutputArray _stdDeviations, OutputArray _perViewErrors,
                              int flags = 0, TermCriteria criteria = TermCriteria(
                              TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) );

// Levenberg-Marquardt solver derived from OpenCV's CvLevMarq — presumably
// overriding the update/step to support the QR path; confirm in the .cpp.
class CvLevMarqFork : public CvLevMarq
{
public:
    CvLevMarqFork( int nparams, int nerrs, CvTermCriteria criteria=
                   cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON),
                   bool completeSymmFlag=false );
    bool updateAlt( const CvMat*& _param, CvMat*& _JtJ, CvMat*& _JtErr, double*& _errNorm );
    void step();
    ~CvLevMarqFork();
};

}
#endif
<?xml version="1.0"?>
<opencv_storage>
<charuco_dict>0</charuco_dict>
<charuco_square_lenght>200</charuco_square_lenght>
<charuco_marker_size>100</charuco_marker_size>
<calibration_step>1</calibration_step>
<max_frames_num>30</max_frames_num>
<min_frames_num>10</min_frames_num>
<solver_eps>1e-7</solver_eps>
<solver_max_iters>30</solver_max_iters>
<fast_solver>0</fast_solver>
<frame_filter_conv_param>0.1</frame_filter_conv_param>
<camera_resolution>800 600</camera_resolution>
</opencv_storage>
\ No newline at end of file
This diff is collapsed.
#ifndef FRAME_PROCESSOR_HPP
#define FRAME_PROCESSOR_HPP
#include <opencv2/core.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/calib3d.hpp>
#include "calibCommon.hpp"
#include "calibController.hpp"
namespace calib
{
// Interface for per-frame processing stages chained by CalibPipeline.
class FrameProcessor
{
protected:

public:
    virtual ~FrameProcessor();
    // Consumes a frame and returns the (possibly annotated) frame to pass on.
    virtual cv::Mat processFrame(const cv::Mat& frame) = 0;
    // True when this stage has gathered enough data to trigger calibration.
    virtual bool isProcessed() const = 0;
    // Clears per-iteration state; called by main() after each pipeline exit.
    virtual void resetState() = 0;
};
// Detects the calibration pattern in incoming frames and, when detection
// succeeds and the capture conditions are met, stores the pattern points
// into the shared calibrationData.
class CalibProcessor : public FrameProcessor
{
protected:
    cv::Ptr<calibrationData> mCalibData;
    TemplateType mBoardType;
    cv::Size mBoardSize;
    std::vector<cv::Point2f> mTemplateLocations;   // presumably recent pattern positions for the auto-capture check — confirm in .cpp
    std::vector<cv::Point2f> mCurrentImagePoints;  // points detected in the current frame
    cv::Mat mCurrentCharucoCorners;
    cv::Mat mCurrentCharucoIds;

    cv::Ptr<cv::SimpleBlobDetector> mBlobDetectorPtr;  // detector for circle-grid patterns
    cv::Ptr<cv::aruco::Dictionary> mArucoDictionary;
    cv::Ptr<cv::aruco::CharucoBoard> mCharucoBoard;

    int mNeededFramesNum;
    unsigned mDelayBetweenCaptures;
    int mCapuredFrames;          // NOTE: "Capured" typo predates this review; renaming would touch the .cpp
    double mMaxTemplateOffset;
    float mSquareSize;
    float mTemplDist;

    // One detector per supported pattern type; each returns detection success.
    bool detectAndParseChessboard(const cv::Mat& frame);
    bool detectAndParseChAruco(const cv::Mat& frame);
    bool detectAndParseACircles(const cv::Mat& frame);
    bool detectAndParseDualACircles(const cv::Mat& frame);
    void saveFrameData();
    void showCaptureMessage(const cv::Mat &frame, const std::string& message);
    bool checkLastFrame();

public:
    CalibProcessor(cv::Ptr<calibrationData> data, captureParameters& capParams);
    virtual cv::Mat processFrame(const cv::Mat& frame);
    virtual bool isProcessed() const;
    virtual void resetState();
    ~CalibProcessor();
};
// How captured boards are visualised: as an overlay grid or in a separate
// window (see the gridWindowName window created in main()).
enum visualisationMode {Grid, Window};

// Renders calibration state on top of the frames produced by CalibProcessor.
class ShowProcessor : public FrameProcessor
{
protected:
    cv::Ptr<calibrationData> mCalibdata;
    cv::Ptr<calibController> mController;
    TemplateType mBoardType;
    visualisationMode mVisMode;
    bool mNeedUndistort;      // apply undistortion before display (toggled via 'u')
    double mGridViewScale;
    double mTextSize;

    void drawBoard(cv::Mat& img, cv::InputArray points);
    void drawGridPoints(const cv::Mat& frame);
public:
    ShowProcessor(cv::Ptr<calibrationData> data, cv::Ptr<calibController> controller, TemplateType board);
    virtual cv::Mat processFrame(const cv::Mat& frame);
    virtual bool isProcessed() const;
    virtual void resetState();

    void setVisualizationMode(visualisationMode mode);
    void switchVisualizationMode();
    void clearBoardsView();
    void updateBoardsView();

    void switchUndistort();
    void setUndistort(bool isEnabled);
    ~ShowProcessor();
};
}
#endif
This diff is collapsed.
#ifndef LINALG_HPP
#define LINALG_HPP
#include <opencv2/core.hpp>
// cvfork reimplementations of cv::invert / cv::solve used by the
// calibration fork — presumably with LAPACK-backed paths when USE_LAPACK
// is defined (see CMakeLists); confirm in linalg.cpp.
namespace cvfork {
double invert( cv::InputArray _src, cv::OutputArray _dst, int method );
bool solve(cv::InputArray _src, cv::InputArray _src2arg, cv::OutputArray _dst, int method );
}
#endif
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/aruco/charuco.hpp>
#include <opencv2/cvconfig.h>
#include <opencv2/highgui.hpp>
#include <string>
#include <vector>
#include <stdexcept>
#include <algorithm>
#include <iostream>
#include "calibCommon.hpp"
#include "calibPipeline.hpp"
#include "frameProcessor.hpp"
#include "cvCalibrationFork.hpp"
#include "calibController.hpp"
#include "parametersController.hpp"
#include "rotationConverters.hpp"
using namespace calib;
// Command-line option table for cv::CommandLineParser.
// Fix: "daulCircles" typo in the user-facing help for -dst ("dualCircles"
// is the template name documented for -t and in the tutorial).
const std::string keys  =
        "{v        |         | Input from video file }"
        "{ci       | 0       | Default camera id }"
        "{flip     | false   | Vertical flip of input frames }"
        "{t        | circles | Template for calibration (circles, chessboard, dualCircles, chAruco) }"
        "{sz       | 16.3    | Distance between two nearest centers of circles or squares on calibration board}"
        "{dst      | 295     | Distance between white and black parts of dualCircles template}"
        "{w        |         | Width of template (in corners or circles)}"
        "{h        |         | Height of template (in corners or circles)}"
        "{of       | cameraParameters.xml | Output file name}"
        "{ft       | true    | Auto tuning of calibration flags}"
        "{vis      | grid    | Captured boards visualisation (grid, window)}"
        "{d        | 0.8     | Min delay between captures}"
        "{pf       | defaultConfig.xml| Advanced application parameters}"
        "{help     |         | Print help}";
// Shows a short status message to the user. With Qt HighGUI the message is
// drawn as an overlay on the main window for OVERLAY_DELAY ms and true is
// returned; otherwise it is printed to stdout and false is returned so
// callers know no overlay was shown.
bool calib::showOverlayMessage(const std::string& message)
{
#ifdef HAVE_QT
    cv::displayOverlay(mainWindowName, message, OVERLAY_DELAY);
    return true;
#else
    std::cout << message << std::endl;
    return false;
#endif
}
// HighGUI button callback: drops the most recent captured frame.
// `data` is a cv::Ptr<calibDataController>* (see createButton in main()).
static void deleteButton(int state, void* data)
{
    (void)state; // unused; required by the HighGUI callback signature
    (static_cast<cv::Ptr<calibDataController>*>(data))->get()->deleteLastFrame();
    calib::showOverlayMessage("Last frame deleted");
}
// HighGUI button callback: clears the whole captured dataset.
// `data` is a cv::Ptr<calibDataController>* (see createButton in main()).
static void deleteAllButton(int state, void* data)
{
    (void)state; // unused; required by the HighGUI callback signature
    (static_cast<cv::Ptr<calibDataController>*>(data))->get()->deleteAllData();
    calib::showOverlayMessage("All frames deleted");
}
// HighGUI button callback: writes the current camera parameters to the
// output XML file; reports success via an overlay message.
// `data` is a cv::Ptr<calibDataController>*.
static void saveCurrentParamsButton(int state, void* data)
{
    (void)state; // unused; required by the HighGUI callback signature
    if((static_cast<cv::Ptr<calibDataController>*>(data))->get()->saveCurrentCameraParameters())
        calib::showOverlayMessage("Calibration parameters saved");
}
#ifdef HAVE_QT
// Qt-only button callback: toggles grid/window board visualisation.
// `data` is a cv::Ptr<FrameProcessor>* holding a ShowProcessor.
static void switchVisualizationModeButton(int state, void* data)
{
    (void)state; // unused; required by the HighGUI callback signature
    ShowProcessor* processor = static_cast<ShowProcessor*>(((cv::Ptr<FrameProcessor>*)data)->get());
    processor->switchVisualizationMode();
}
// Qt-only checkbox callback: enables/disables the undistorted preview.
// `state` carries the checkbox value; `data` is a cv::Ptr<FrameProcessor>*
// holding a ShowProcessor.
static void undistortButton(int state, void* data)
{
    ShowProcessor* processor = static_cast<ShowProcessor*>(((cv::Ptr<FrameProcessor>*)data)->get());
    processor->setUndistort(static_cast<bool>(state));
    calib::showOverlayMessage(std::string("Undistort is ") +
                              (static_cast<bool>(state) ? std::string("on") : std::string("off")));
}
#endif //HAVE_QT
// Application entry point: parses parameters, wires up the processors and
// controllers, then loops on the capture pipeline, dispatching on the
// status it returns until the user exits.
int main(int argc, char** argv)
{
    cv::CommandLineParser parser(argc, argv, keys);
    if(parser.has("help")) {
        parser.printMessage();
        return 0;
    }
    std::cout << consoleHelp << std::endl;

    parametersController paramsController;
    if(!paramsController.loadFromParser(parser))
        return 0;

    captureParameters capParams = paramsController.getCaptureParameters();
    internalParameters intParams = paramsController.getInternalParameters();
    cv::TermCriteria solverTermCrit = cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS,
                                                       intParams.solverMaxIters, intParams.solverEps);
    cv::Ptr<calibrationData> globalData(new calibrationData);
    // For camera input the requested resolution is assumed; for video files
    // the real size is taken from the pipeline after the first run.
    if(!parser.has("v")) globalData->imageSize = capParams.cameraResolution;

    int calibrationFlags = 0;
    if(intParams.fastSolving) calibrationFlags |= CALIB_USE_QR;
    cv::Ptr<calibController> controller(new calibController(globalData, calibrationFlags,
                                                            parser.get<bool>("ft"), capParams.minFramesNum));
    cv::Ptr<calibDataController> dataController(new calibDataController(globalData, capParams.maxFramesNum,
                                                                        intParams.filterAlpha));
    dataController->setParametersFileName(parser.get<std::string>("of"));

    cv::Ptr<FrameProcessor> capProcessor, showProcessor;
    capProcessor = cv::Ptr<FrameProcessor>(new CalibProcessor(globalData, capParams));
    showProcessor = cv::Ptr<FrameProcessor>(new ShowProcessor(globalData, controller, capParams.board));

    if(parser.get<std::string>("vis").find("window") == 0) {
        static_cast<ShowProcessor*>(showProcessor.get())->setVisualizationMode(Window);
        cv::namedWindow(gridWindowName);
        cv::moveWindow(gridWindowName, 1280, 500);
    }

    cv::Ptr<CalibPipeline> pipeline(new CalibPipeline(capParams));
    std::vector<cv::Ptr<FrameProcessor> > processors;
    processors.push_back(capProcessor);
    processors.push_back(showProcessor);

    cv::namedWindow(mainWindowName);
    cv::moveWindow(mainWindowName, 10, 10);
#ifdef HAVE_QT
    cv::createButton("Delete last frame", deleteButton, &dataController, cv::QT_PUSH_BUTTON);
    cv::createButton("Delete all frames", deleteAllButton, &dataController, cv::QT_PUSH_BUTTON);
    cv::createButton("Undistort", undistortButton, &showProcessor, cv::QT_CHECKBOX, false);
    cv::createButton("Save current parameters", saveCurrentParamsButton, &dataController, cv::QT_PUSH_BUTTON);
    cv::createButton("Switch visualisation mode", switchVisualizationModeButton, &showProcessor, cv::QT_PUSH_BUTTON);
#endif //HAVE_QT
    try {
        bool pipelineFinished = false;
        while(!pipelineFinished)
        {
            PipelineExitStatus exitStatus = pipeline->start(processors);
            if (exitStatus == Finished) {
                // Persist results on exit if calibration reached a good state.
                if(controller->getCommonCalibrationState())
                    saveCurrentParamsButton(0, &dataController);
                pipelineFinished = true;
                continue;
            }
            else if (exitStatus == Calibrate) {
                // Remember the previous parameters (for undo), then rerun
                // calibration with the flags chosen by the controller.
                dataController->rememberCurrentParameters();
                globalData->imageSize = pipeline->getImageSize();
                calibrationFlags = controller->getNewFlags();

                if(capParams.board != chAruco) {
                    globalData->totalAvgErr =
                            cvfork::calibrateCamera(globalData->objectPoints, globalData->imagePoints,
                                                    globalData->imageSize, globalData->cameraMatrix,
                                                    globalData->distCoeffs, cv::noArray(), cv::noArray(),
                                                    globalData->stdDeviations, globalData->perViewErrors,
                                                    calibrationFlags, solverTermCrit);
                }
                else {
                    cv::Ptr<cv::aruco::Dictionary> dictionary =
                            cv::aruco::getPredefinedDictionary(cv::aruco::PREDEFINED_DICTIONARY_NAME(capParams.charucoDictName));
                    cv::Ptr<cv::aruco::CharucoBoard> charucoboard =
                            cv::aruco::CharucoBoard::create(capParams.boardSize.width, capParams.boardSize.height,
                                                            capParams.charucoSquareLenght, capParams.charucoMarkerSize, dictionary);
                    globalData->totalAvgErr =
                            cvfork::calibrateCameraCharuco(globalData->allCharucoCorners, globalData->allCharucoIds,
                                                           charucoboard, globalData->imageSize,
                                                           globalData->cameraMatrix, globalData->distCoeffs,
                                                           cv::noArray(), cv::noArray(), globalData->stdDeviations,
                                                           globalData->perViewErrors, calibrationFlags, solverTermCrit);
                }
                dataController->updateUndistortMap();
                dataController->printParametersToConsole(std::cout);
                controller->updateState();
                for(int j = 0; j < capParams.calibrationStep; j++)
                    dataController->filterFrames();
                static_cast<ShowProcessor*>(showProcessor.get())->updateBoardsView();
            }
            else if (exitStatus == DeleteLastFrame) {
                deleteButton(0, &dataController);
                static_cast<ShowProcessor*>(showProcessor.get())->updateBoardsView();
            }
            else if (exitStatus == DeleteAllFrames) {
                deleteAllButton(0, &dataController);
                static_cast<ShowProcessor*>(showProcessor.get())->updateBoardsView();
            }
            else if (exitStatus == SaveCurrentData) {
                saveCurrentParamsButton(0, &dataController);
            }
            else if (exitStatus == SwitchUndistort)
                static_cast<ShowProcessor*>(showProcessor.get())->switchUndistort();
            else if (exitStatus == SwitchVisualisation)
                static_cast<ShowProcessor*>(showProcessor.get())->switchVisualizationMode();

            for (std::vector<cv::Ptr<FrameProcessor> >::iterator it = processors.begin(); it != processors.end(); ++it)
                (*it)->resetState();
        }
    }
    // Fix: catch by const reference — the original caught std::runtime_error
    // by value, which copies the exception (and would slice derived types).
    catch (const std::runtime_error& exp) {
        std::cout << exp.what() << std::endl;
    }

    return 0;
}
#include "parametersController.hpp"

#include <algorithm>
#include <cctype>
#include <iostream>
// Reads `value` from `node` when the node exists in the file storage.
// Returns true on success, false when the node is absent (leaving `value`
// untouched so the caller's default survives).
template <typename T>
static bool readFromNode(cv::FileNode node, T& value)
{
    if(node.isNone())
        return false;
    node >> value;
    return true;
}
// Logs `msg` to stderr when `value` is false, then passes `value` through
// unchanged so validation checks can be chained with &&.
static bool checkAssertion(bool value, const std::string& msg)
{
    if(value)
        return true;
    std::cerr << "Error: " << msg << std::endl;
    return false;
}
// Loads advanced parameters from an XML file (see defaultConfig.xml).
// A missing/unopenable file is not an error: a warning is printed, the
// built-in defaults are kept, and true is returned.
// Returns false when any loaded value fails validation.
bool calib::parametersController::loadFromFile(const std::string &inputFileName)
{
    cv::FileStorage reader;
    reader.open(inputFileName, cv::FileStorage::READ);

    if(!reader.isOpened()) {
        // Fix: message typos ("Applicatioin stated").
        std::cerr << "Warning: Unable to open " << inputFileName <<
                     " Application started with default advanced parameters" << std::endl;
        return true;
    }

    // Each readFromNode leaves the default in place when the key is absent.
    readFromNode(reader["charuco_dict"], mCapParams.charucoDictName);
    readFromNode(reader["charuco_square_lenght"], mCapParams.charucoSquareLenght);
    readFromNode(reader["charuco_marker_size"], mCapParams.charucoMarkerSize);
    readFromNode(reader["camera_resolution"], mCapParams.cameraResolution);
    readFromNode(reader["calibration_step"], mCapParams.calibrationStep);
    readFromNode(reader["max_frames_num"], mCapParams.maxFramesNum);
    readFromNode(reader["min_frames_num"], mCapParams.minFramesNum);
    readFromNode(reader["solver_eps"], mInternalParameters.solverEps);
    readFromNode(reader["solver_max_iters"], mInternalParameters.solverMaxIters);
    readFromNode(reader["fast_solver"], mInternalParameters.fastSolving);
    readFromNode(reader["frame_filter_conv_param"], mInternalParameters.filterAlpha);

    bool retValue =
            checkAssertion(mCapParams.charucoDictName >= 0, "Dict name must be >= 0") &&
            checkAssertion(mCapParams.charucoMarkerSize > 0, "Marker size must be positive") &&
            checkAssertion(mCapParams.charucoSquareLenght > 0, "Square size must be positive") &&
            // Fix: the old message said "< 1" while the check requires > 1.
            checkAssertion(mCapParams.minFramesNum > 1, "Minimal number of frames for calibration must be greater than 1") &&
            checkAssertion(mCapParams.calibrationStep > 0, "Calibration step must be positive") &&
            checkAssertion(mCapParams.maxFramesNum > mCapParams.minFramesNum, "maxFramesNum must be greater than minFramesNum") &&
            checkAssertion(mInternalParameters.solverEps > 0, "Solver precision must be positive") &&
            checkAssertion(mInternalParameters.solverMaxIters > 0, "Max solver iterations number must be positive") &&
            checkAssertion(mInternalParameters.filterAlpha >=0 && mInternalParameters.filterAlpha <=1 ,
                           "Frame filter convolution parameter must be in [0,1] interval") &&
            checkAssertion(mCapParams.cameraResolution.width > 0 && mCapParams.cameraResolution.height > 0,
                           "Wrong camera resolution values");

    reader.release();
    return retValue;
}
// Default-constructs with the built-in defaults provided by the
// captureParameters / internalParameters constructors.
calib::parametersController::parametersController()
{
}
// Returns a copy of the capture parameters assembled from the command line
// and the advanced config file.
calib::captureParameters calib::parametersController::getCaptureParameters() const
{
    return mCapParams;
}
// Returns a copy of the advanced solver/filter parameters.
calib::internalParameters calib::parametersController::getInternalParameters() const
{
    return mInternalParameters;
}
// Fills capture parameters from the command line (see `keys` in main.cpp),
// then merges advanced parameters from the file passed with -pf.
// Returns false (after printing an error) when validation fails.
bool calib::parametersController::loadFromParser(cv::CommandLineParser &parser)
{
    mCapParams.flipVertical = parser.get<bool>("flip");
    mCapParams.captureDelay = parser.get<float>("d");
    mCapParams.squareSize = parser.get<float>("sz");
    mCapParams.templDst = parser.get<float>("dst");

    if(!checkAssertion(mCapParams.squareSize > 0, "Distance between corners or circles must be positive"))
        return false;
    if(!checkAssertion(mCapParams.templDst > 0, "Distance between parts of dual template must be positive"))
        return false;

    if (parser.has("v")) {
        mCapParams.source = File;
        mCapParams.videoFileName = parser.get<std::string>("v");
    }
    else {
        mCapParams.source = Camera;
        mCapParams.camID = parser.get<int>("ci");
    }

    // Fix: the help text documents mixed-case names (dualCircles, chAruco),
    // but the comparison was case-sensitive and rejected those spellings.
    // Normalize to lower case so both spellings are accepted.
    std::string templateType = parser.get<std::string>("t");
    std::transform(templateType.begin(), templateType.end(), templateType.begin(), ::tolower);

    if(templateType.find("circles", 0) == 0) {
        mCapParams.board = AcirclesGrid;
        mCapParams.boardSize = cv::Size(4, 11);
    }
    else if(templateType.find("chessboard", 0) == 0) {
        mCapParams.board = Chessboard;
        mCapParams.boardSize = cv::Size(7, 7);
    }
    else if(templateType.find("dualcircles", 0) == 0) {
        mCapParams.board = DoubleAcirclesGrid;
        mCapParams.boardSize = cv::Size(4, 11);
    }
    else if(templateType.find("charuco", 0) == 0) {
        mCapParams.board = chAruco;
        mCapParams.boardSize = cv::Size(6, 8);
        mCapParams.charucoDictName = 0;
        mCapParams.charucoSquareLenght = 200;
        mCapParams.charucoMarkerSize = 100;
    }
    else {
        std::cerr << "Wrong template name\n";
        return false;
    }

    if(parser.has("w") && parser.has("h")) {
        mCapParams.boardSize = cv::Size(parser.get<int>("w"), parser.get<int>("h"));
        // Fix: BOTH dimensions must be positive (the original used '||').
        if(!checkAssertion(mCapParams.boardSize.width > 0 && mCapParams.boardSize.height > 0,
                           "Board size must be positive"))
            return false;
    }

    // Fix: the original test `find(".xml") > 0` was always true, because
    // std::string::npos (returned when ".xml" is absent) is also > 0.
    // Require the output name to actually end with ".xml".
    const std::string outputFileName = parser.get<std::string>("of");
    if(!checkAssertion(outputFileName.size() > 4 &&
                       outputFileName.compare(outputFileName.size() - 4, 4, ".xml") == 0,
                       "Wrong output file name: correct format is [name].xml"))
        return false;

    loadFromFile(parser.get<std::string>("pf"));
    return true;
}
#ifndef PARAMETERS_CONTROLLER_HPP
#define PARAMETERS_CONTROLLER_HPP
#include <string>
#include <opencv2/core.hpp>
#include "calibCommon.hpp"
namespace calib {
// Gathers application parameters from the command line and the advanced
// XML config file, validates them, and hands them out as value structs.
class parametersController
{
protected:
    captureParameters mCapParams;
    internalParameters mInternalParameters;

    // Loads advanced parameters from an XML file; a missing file keeps defaults.
    bool loadFromFile(const std::string& inputFileName);
public:
    parametersController();
    // NOTE(review): declared but no definition is visible in this commit — confirm.
    parametersController(cv::Ptr<captureParameters> params);

    captureParameters getCaptureParameters() const;
    internalParameters getInternalParameters() const;

    // Entry point: parses the command line, then merges the -pf file.
    bool loadFromParser(cv::CommandLineParser& parser);
};
}
#endif
#include "rotationConverters.hpp"
#include <cmath>
#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#define CALIB_PI 3.14159265358979323846
#define CALIB_PI_2 1.57079632679489661923
// Converts between a 3x3 rotation matrix and a (pitch, yaw, roll) Euler
// triple; the direction is selected by the shape of `src`:
//   - src is 3x3        -> dst becomes a 3x1 vector of angles
//   - src is 3x1 or 1x3 -> dst becomes a 3x3 rotation matrix
// argType selects the angle units: CALIB_RADIANS or CALIB_DEGREES.
// Raises cv::Error::StsBadFlag for any other argType or input shape.
void calib::Euler(const cv::Mat& src, cv::Mat& dst, int argType)
{
    if((src.rows == 3) && (src.cols == 3))
    {
        // convert rotation matrix to 3 angles (pitch, yaw, roll)
        dst = cv::Mat(3, 1, CV_64F);
        double pitch, yaw, roll;

        // Gimbal-lock handling: when |R(0,2)| is ~1 the yaw is +-pi/2 and
        // pitch/roll are no longer independent, so roll is fixed to 0.
        if(src.at<double>(0,2) < -0.998)
        {
            pitch = -atan2(src.at<double>(1,0), src.at<double>(1,1));
            yaw = -CALIB_PI_2;
            roll = 0.;
        }
        else if(src.at<double>(0,2) > 0.998)
        {
            pitch = atan2(src.at<double>(1,0), src.at<double>(1,1));
            yaw = CALIB_PI_2;
            roll = 0.;
        }
        else
        {
            pitch = atan2(-src.at<double>(1,2), src.at<double>(2,2));
            yaw = asin(src.at<double>(0,2));
            roll = atan2(-src.at<double>(0,1), src.at<double>(0,0));
        }

        if(argType == CALIB_DEGREES)
        {
            pitch *= 180./CALIB_PI;
            yaw *= 180./CALIB_PI;
            roll *= 180./CALIB_PI;
        }
        else if(argType != CALIB_RADIANS)
            CV_Error(cv::Error::StsBadFlag, "Invalid argument type");

        dst.at<double>(0,0) = pitch;
        dst.at<double>(1,0) = yaw;
        dst.at<double>(2,0) = roll;
    }
    else if( (src.cols == 1 && src.rows == 3) ||
             (src.cols == 3 && src.rows == 1 ) )
    {
        // convert vector which contains 3 angles (pitch, yaw, roll) to rotation matrix
        double pitch, yaw, roll;
        if(src.cols == 1 && src.rows == 3)
        {
            pitch = src.at<double>(0,0);
            yaw = src.at<double>(1,0);
            roll = src.at<double>(2,0);
        }
        else{
            pitch = src.at<double>(0,0);
            yaw = src.at<double>(0,1);
            roll = src.at<double>(0,2);
        }

        if(argType == CALIB_DEGREES)
        {
            pitch *= CALIB_PI / 180.;
            yaw *= CALIB_PI / 180.;
            roll *= CALIB_PI / 180.;
        }
        else if(argType != CALIB_RADIANS)
            CV_Error(cv::Error::StsBadFlag, "Invalid argument type");

        // Compose dst = R_x(pitch) * R_y(yaw) * R_z(roll) from per-axis
        // rotations, filling each via a raw double pointer into an
        // identity-initialized matrix.
        dst = cv::Mat(3, 3, CV_64F);
        cv::Mat M(3, 3, CV_64F);
        cv::Mat i = cv::Mat::eye(3, 3, CV_64F);
        i.copyTo(dst);
        i.copyTo(M);

        // x-axis rotation written directly into dst.
        double* pR = dst.ptr<double>();
        pR[4] = cos(pitch);
        pR[7] = sin(pitch);
        pR[8] = pR[4];
        pR[5] = -pR[7];

        // y-axis rotation in M; then dst = R_x * R_y.
        double* pM = M.ptr<double>();
        pM[0] = cos(yaw);
        pM[2] = sin(yaw);
        pM[8] = pM[0];
        pM[6] = -pM[2];

        dst *= M;

        // Reset M to identity and reuse its buffer for the z-axis rotation
        // (pM stays valid: copyTo into a same-sized Mat reuses the buffer).
        i.copyTo(M);
        pM[0] = cos(roll);
        pM[3] = sin(roll);
        pM[4] = pM[0];
        pM[1] = -pM[3];

        dst *= M;
    }
    else
        CV_Error(cv::Error::StsBadFlag, "Input matrix must be 1x3, 3x1 or 3x3" );
}
// Converts a Rodrigues rotation vector (3x1 or 1x3) to Euler angles via
// an intermediate rotation matrix; argType selects radians/degrees.
void calib::RodriguesToEuler(const cv::Mat& src, cv::Mat& dst, int argType)
{
    CV_Assert((src.cols == 1 && src.rows == 3) || (src.cols == 3 && src.rows == 1));
    cv::Mat rotationMatrix;
    cv::Rodrigues(src, rotationMatrix);
    Euler(rotationMatrix, dst, argType);
}
// Converts Euler angles (3x1 or 1x3) to a Rodrigues rotation vector via
// an intermediate rotation matrix; argType selects radians/degrees.
void calib::EulerToRodrigues(const cv::Mat& src, cv::Mat& dst, int argType)
{
    CV_Assert((src.cols == 1 && src.rows == 3) || (src.cols == 3 && src.rows == 1));
    cv::Mat rotationMatrix;
    Euler(src, rotationMatrix, argType);
    cv::Rodrigues(rotationMatrix, dst);
}
// Fix: the include guard was misspelled ("RAOTATION"); harmless in
// isolation, but corrected for consistency with the file name.
#ifndef ROTATION_CONVERTERS_HPP
#define ROTATION_CONVERTERS_HPP

#include <opencv2/core.hpp>

namespace calib
{
// Angle units accepted by the converters below.
#define CALIB_RADIANS 0
#define CALIB_DEGREES 1

    // 3x3 rotation matrix <-> (pitch, yaw, roll) Euler angles
    // (direction chosen by the shape of src; see rotationConverters.cpp).
    void Euler(const cv::Mat& src, cv::Mat& dst, int argType = CALIB_RADIANS);
    // Rodrigues rotation vector -> Euler angles.
    void RodriguesToEuler(const cv::Mat& src, cv::Mat& dst, int argType = CALIB_RADIANS);
    // Euler angles -> Rodrigues rotation vector.
    void EulerToRodrigues(const cv::Mat& src, cv::Mat& dst, int argType = CALIB_RADIANS);
}
#endif
Interactive camera calibration application {#tutorial_interactive_calibration}
==============================
In the classical calibration technique, the user must collect all the data first and then run the @ref cv::calibrateCamera function
to obtain the camera parameters. If the average re-projection error is huge, or if the estimated parameters seem to be wrong, the process of
selecting or collecting data and re-running @ref cv::calibrateCamera is repeated.

The interactive calibration process instead assumes that after each new data portion the user can see the results and the error estimates,
can delete the last data portion, and finally, when the dataset is big enough, a process of automatic data selection starts.
Main application features
------
The sample application will:
- Determine the distortion matrix and confidence interval for each element
- Determine the camera matrix and confidence interval for each element
- Take input from camera or video file
- Read configuration from XML file
- Save the results into XML file
- Calculate re-projection error
- Reject pattern views at sharp angles to prevent ill-conditioned Jacobian blocks from appearing
- Auto switch calibration flags (fix aspect ratio and elements of distortion matrix if needed)
- Auto detect when calibration is done by using several criteria
- Auto capture of static patterns (user doesn't need press any keys to capture frame, just don't move pattern for a second)
Supported patterns:
- Black-white chessboard
- Asymmetrical circle pattern
- Dual asymmetrical circle pattern
- chAruco (chessboard with Aruco markers)
Description of parameters
------
The application has two groups of parameters: primary (passed through the command line) and advanced (passed through an XML file).
### Primary parameters:
All of these parameters are passed to the application through the command line.
-[parameter]=[default value]: description
- -v=[filename]: get video from filename, default input -- camera with id=0
- -ci=[0]: get video from camera with specified id
- -flip=[false]: vertical flip of input frames
- -t=[circles]: pattern for calibration (circles, chessboard, dualCircles, chAruco)
- -sz=[16.3]: distance between two nearest centers of circles or squares on calibration board
- -dst=[295]: distance between the white and black parts of the dualCircles pattern
- -w=[width]: width of pattern (in corners or circles)
- -h=[height]: height of pattern (in corners or circles)
- -of=[camParams.xml]: output file name
- -ft=[true]: auto tuning of calibration flags
- -vis=[grid]: captured boards visualization (grid, window)
- -d=[0.8]: delay between captures in seconds
- -pf=[defaultConfig.xml]: advanced application parameters file
### Advanced parameters:
By default values of advanced parameters are stored in defaultConfig.xml
@code{.xml}
<?xml version="1.0"?>
<opencv_storage>
<charuco_dict>0</charuco_dict>
<charuco_square_lenght>200</charuco_square_lenght>
<charuco_marker_size>100</charuco_marker_size>
<calibration_step>1</calibration_step>
<max_frames_num>30</max_frames_num>
<min_frames_num>10</min_frames_num>
<solver_eps>1e-7</solver_eps>
<solver_max_iters>30</solver_max_iters>
<fast_solver>0</fast_solver>
<frame_filter_conv_param>0.1</frame_filter_conv_param>
<camera_resolution>1280 720</camera_resolution>
</opencv_storage>
@endcode
- *charuco_dict*: name of special dictionary, which has been used for generation of chAruco pattern
- *charuco_square_lenght*: size of square on chAruco board (in pixels)
- *charuco_marker_size*: size of Aruco markers on chAruco board (in pixels)
- *calibration_step*: interval in frames between launches of @ref cv::calibrateCamera
- *max_frames_num*: if the number of frames for calibration is greater than this value, the frames filter starts working.
After filtration the size of the calibration dataset is equal to *max_frames_num*
- *min_frames_num*: if the number of frames is greater than this value, auto flags tuning, undistorted view and quality evaluation are turned on
- *solver_eps*: precision of Levenberg-Marquardt solver in @ref cv::calibrateCamera
- *solver_max_iters*: iterations limit of solver
- *fast_solver*: if this value is nonzero and LAPACK is found, QR decomposition is used instead of SVD in the solver.
QR is faster than SVD, but potentially less precise
- *frame_filter_conv_param*: parameter which is used in the linear convolution of the bicriterial frames filter
- *camera_resolution*: resolution of camera which is used for calibration
**Note:** *charuco_dict*, *charuco_square_lenght* and *charuco_marker_size* are used for chAruco pattern generation
(see Aruco module description for details: [Aruco tutorials](https://github.com/Itseez/opencv_contrib/tree/master/modules/aruco/tutorials))
Default chAruco pattern:
![](images/charuco_board.png)
Dual circles pattern
------
To make this pattern you need standard OpenCV circles pattern and binary inverted one.
Place two patterns on one plane so that all horizontal lines of circles in one pattern are
continuations of similar lines in another.
Measure the distance between the patterns as shown in the picture below and pass it as the **dst** command line parameter. Also measure the distance between the centers of the nearest circles and pass
this value as the **sz** command line parameter.
![](images/dualCircles.jpg)
This pattern is very sensitive to quality of production and measurements.
Data filtration
------
When the size of the calibration dataset is greater than *max_frames_num*, the data
filter starts working. It tries to remove "bad" frames from the dataset. The filter removes the frame
on which \f$loss\_function\f$ takes maximum.
\f[loss\_function(i)=\alpha RMS(i)+(1-\alpha)reducedGridQuality(i)\f]
**RMS** is an average re-projection error calculated for frame *i*, **reducedGridQuality**
is scene coverage quality evaluation without frame *i*. \f$\alpha\f$ is equal to
**frame_filter_conv_param**.
Calibration process
------
To start calibration just run the application. Place the pattern in front of the camera and hold the pattern still in some pose.
After that wait for capturing (will be shown message like "Frame #i captured").
Current focal distance and re-projection error will be shown on the main screen. Move the pattern to the next position and repeat the procedure. Try to cover the image plane
uniformly and don't show pattern on sharp angles to the image plane.
![](images/screen_charuco.jpg)
If calibration seems to be successful (confidence intervals and average re-projection
error are small, frame coverage quality and number of pattern views are big enough)
application will show a message like on screen below.
![](images/screen_finish.jpg)
Hot keys:
- Esc -- exit application
- s -- save current data to XML file
- r -- delete last frame
- d -- delete all frames
- u -- enable/disable applying of undistortion
- v -- switch visualization mode
Results
------
As result you will get camera parameters and confidence intervals for them.
Example of output XML file:
@code{.xml}
<?xml version="1.0"?>
<opencv_storage>
<calibrationDate>"Thu 07 Apr 2016 04:23:03 PM MSK"</calibrationDate>
<framesCount>21</framesCount>
<cameraResolution>
1280 720</cameraResolution>
<cameraMatrix type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>d</dt>
<data>
1.2519588293098975e+03 0. 6.6684948780852471e+02 0.
1.2519588293098975e+03 3.6298123112613683e+02 0. 0. 1.</data></cameraMatrix>
<cameraMatrix_std_dev type_id="opencv-matrix">
<rows>4</rows>
<cols>1</cols>
<dt>d</dt>
<data>
0. 1.2887048808572649e+01 2.8536856683866230e+00
2.8341737483430314e+00</data></cameraMatrix_std_dev>
<dist_coeffs type_id="opencv-matrix">
<rows>1</rows>
<cols>5</cols>
<dt>d</dt>
<data>
1.3569117181595716e-01 -8.2513063822554633e-01 0. 0.
1.6412101575010554e+00</data></dist_coeffs>
<dist_coeffs_std_dev type_id="opencv-matrix">
<rows>5</rows>
<cols>1</cols>
<dt>d</dt>
<data>
1.5570675523402111e-02 8.7229075437543435e-02 0. 0.
1.8382427901856876e-01</data></dist_coeffs_std_dev>
<avg_reprojection_error>4.2691743074130178e-01</avg_reprojection_error>
</opencv_storage>
@endcode
......@@ -30,3 +30,14 @@ how to find out from the 2D images information about the 3D world.
Real time pose estimation of a textured object using ORB features, FlannBased matcher, PnP
approach plus Ransac and Linear Kalman Filter to reject possible bad poses.
- @subpage tutorial_interactive_calibration
*Compatibility:* \> OpenCV 3.1
*Author:* Vladislav Sovrasov
Camera calibration by using either the chessboard, chAruco, asymmetrical circle or dual asymmetrical circle
pattern. The calibration process is continuous, so you can see results after each new pattern shot.
As an output you get average reprojection error, intrinsic camera parameters, distortion coefficients and
confidence intervals for all of evaluated variables.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment