Commit 250bcd9f authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents a1d87309 4446ef5e
@@ -144,6 +144,8 @@ enum CornerRefineMethod{
  * done at full resolution.
  * - aprilTagQuadSigma: What Gaussian blur should be applied to the segmented image (used for quad detection?)
  *   Parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8).
+ * - detectInvertedMarker: to check if there is a white marker. In order to generate a "white" marker just
+ *   invert a normal marker by using a tilde, ~markerImage. (default false)
  */
 struct CV_EXPORTS_W DetectorParameters {
@@ -183,6 +185,9 @@ struct CV_EXPORTS_W DetectorParameters {
     CV_PROP_RW float aprilTagMaxLineFitMse;
     CV_PROP_RW int aprilTagMinWhiteBlackDiff;
     CV_PROP_RW int aprilTagDeglitch;
+
+    // to detect white (inverted) markers
+    CV_PROP_RW bool detectInvertedMarker;
 };
...
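For context, a minimal sketch of how the new flag is used from the public API. The dictionary choice, marker id, and black padding below are illustrative, not from the commit:

#include <opencv2/aruco.hpp>
#include <vector>

using namespace cv;

int main() {
    Ptr<aruco::Dictionary> dict = aruco::getPredefinedDictionary(aruco::DICT_6X6_250);

    // Draw a normal marker, then invert it with operator~ to obtain a "white"
    // marker, as the new parameter documentation describes.
    Mat markerImage;
    aruco::drawMarker(dict, 23, 200, markerImage, 1);

    // An inverted marker has a white border, so pad with black so the
    // candidate does not touch the image border.
    Mat scene;
    copyMakeBorder(~markerImage, scene, 40, 40, 40, 40, BORDER_CONSTANT, Scalar(0));

    Ptr<aruco::DetectorParameters> params = aruco::DetectorParameters::create();
    params->detectInvertedMarker = true; // flag added by this commit (default false)

    std::vector<std::vector<Point2f> > corners;
    std::vector<int> ids;
    aruco::detectMarkers(scene, dict, corners, ids, params);
    return 0;
}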
@@ -86,7 +86,8 @@ DetectorParameters::DetectorParameters()
       aprilTagCriticalRad( (float)(10* CV_PI /180) ),
       aprilTagMaxLineFitMse(10.0),
       aprilTagMinWhiteBlackDiff(5),
-      aprilTagDeglitch(0){}
+      aprilTagDeglitch(0),
+      detectInvertedMarker(false){}

 /**
@@ -512,7 +513,19 @@ static bool _identifyOneCandidate(const Ptr<Dictionary>& dictionary, InputArray
         int(dictionary->markerSize * dictionary->markerSize * params->maxErroneousBitsInBorderRate);
     int borderErrors =
         _getBorderErrors(candidateBits, dictionary->markerSize, params->markerBorderBits);
-    if(borderErrors > maximumErrorsInBorder) return false; // border is wrong
+
+    // check if it is a white marker
+    if(params->detectInvertedMarker){
+        // to get from 255 to 1
+        Mat invertedImg = ~candidateBits-254;
+        int invBError = _getBorderErrors(invertedImg, dictionary->markerSize, params->markerBorderBits);
+        // white marker
+        if(invBError<borderErrors){
+            borderErrors = invBError;
+            invertedImg.copyTo(candidateBits);
+        }
+    }
+    if(borderErrors > maximumErrorsInBorder) return false;

     // take only inner bits
     Mat onlyBits =
...
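A note on the `~candidateBits-254` expression above: `candidateBits` appears to be a CV_8U matrix of 0/1 bit values, so bitwise NOT maps 0 to 255 and 1 to 254, and subtracting 254 then maps those to 1 and 0, flipping every bit in a single expression. A standalone illustration of the trick (not part of the commit):

#include <opencv2/core.hpp>
#include <iostream>

int main() {
    // 0/1 bit matrix, like the one produced by the aruco bit extraction
    cv::Mat bits = (cv::Mat_<uchar>(1, 4) << 0, 1, 1, 0);
    // ~bits yields 255/254; subtracting 254 maps 254 -> 0 and 255 -> 1
    cv::Mat inverted = ~bits - 254;
    std::cout << inverted << std::endl; // prints [  1,   0,   0,   1]
    return 0;
}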
@@ -182,12 +182,29 @@ static void getSyntheticRT(double yaw, double pitch, double distance, Mat &rvec,
  */
 static Mat projectMarker(Ptr<aruco::Dictionary> &dictionary, int id, Mat cameraMatrix, double yaw,
                          double pitch, double distance, Size imageSize, int markerBorder,
-                         vector< Point2f > &corners) {
+                         vector< Point2f > &corners, int encloseMarker=0) {

     // canonical image
-    Mat markerImg;
+    Mat marker, markerImg;
     const int markerSizePixels = 100;
-    aruco::drawMarker(dictionary, id, markerSizePixels, markerImg, markerBorder);
+
+    aruco::drawMarker(dictionary, id, markerSizePixels, marker, markerBorder);
+    marker.copyTo(markerImg);
+
+    if(encloseMarker){ //to enclose the marker
+        int enclose = int(marker.rows/4);
+        markerImg = Mat::zeros(marker.rows+(2*enclose), marker.cols+(enclose*2), CV_8UC1);
+
+        Mat field= markerImg.rowRange(int(enclose), int(markerImg.rows-enclose))
+                            .colRange(int(0), int(markerImg.cols));
+        field.setTo(255);
+
+        field= markerImg.rowRange(int(0), int(markerImg.rows))
+                        .colRange(int(enclose), int(markerImg.cols-enclose));
+        field.setTo(255);
+
+        field = markerImg(Rect(enclose,enclose,marker.rows,marker.cols));
+        marker.copyTo(field);
+    }

     // get rvec and tvec for the perspective
     Mat rvec, tvec;
@@ -205,10 +222,10 @@ static Mat projectMarker(Ptr<aruco::Dictionary> &dictionary, int id, Mat cameraM
     projectPoints(markerObjPoints, rvec, tvec, cameraMatrix, distCoeffs, corners);

     vector< Point2f > originalCorners;
-    originalCorners.push_back(Point2f(0, 0));
-    originalCorners.push_back(Point2f((float)markerSizePixels, 0));
-    originalCorners.push_back(Point2f((float)markerSizePixels, (float)markerSizePixels));
-    originalCorners.push_back(Point2f(0, (float)markerSizePixels));
+    originalCorners.push_back(Point2f(0+float(encloseMarker*markerSizePixels/4), 0+float(encloseMarker*markerSizePixels/4)));
+    originalCorners.push_back(originalCorners[0]+Point2f((float)markerSizePixels, 0));
+    originalCorners.push_back(originalCorners[0]+Point2f((float)markerSizePixels, (float)markerSizePixels));
+    originalCorners.push_back(originalCorners[0]+Point2f(0, (float)markerSizePixels));

     Mat transformation = getPerspectiveTransform(originalCorners, corners);
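The `encloseMarker` branch above frames the canonical marker with a white band of `marker.rows/4` pixels on each side; the two `setTo(255)` strips cover everything except the four corner squares, which stay black, and the ground-truth corners are shifted by the matching `markerSizePixels/4` offset. Assuming those corner squares are irrelevant to the test, a more compact near-equivalent would be:

// Hypothetical shorthand, not what the test uses: pad all four sides white.
// Unlike the version above, this also whitens the four corner squares.
int enclose = marker.rows / 4;
cv::copyMakeBorder(marker, markerImg, enclose, enclose, enclose, enclose,
                   cv::BORDER_CONSTANT, cv::Scalar(255));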
@@ -238,6 +255,11 @@ class CV_ArucoDetectionPerspective : public cvtest::BaseTest {
 public:
     CV_ArucoDetectionPerspective();

+    enum checkWithParameter{
+        USE_APRILTAG=1,             /// Detect marker candidates :: using AprilTag
+        DETECT_INVERTED_MARKER,     /// Check if there is a white marker
+    };
+
 protected:
     void run(int);
 };
@@ -246,9 +268,10 @@ class CV_ArucoDetectionPerspective : public cvtest::BaseTest {
 CV_ArucoDetectionPerspective::CV_ArucoDetectionPerspective() {}

-void CV_ArucoDetectionPerspective::run(int aprilDecimate) {
+void CV_ArucoDetectionPerspective::run(int tryWith) {
     int iter = 0;
+    int szEnclosed = 0;
     Mat cameraMatrix = Mat::eye(3, 3, CV_64FC1);
     Size imgSize(500, 500);
     cameraMatrix.at< double >(0, 0) = cameraMatrix.at< double >(1, 1) = 650;
@@ -257,29 +280,35 @@ void CV_ArucoDetectionPerspective::run(int aprilDecimate) {
     Ptr<aruco::Dictionary> dictionary = aruco::getPredefinedDictionary(aruco::DICT_6X6_250);

     // detect from different positions
-    for(double distance = 0.1; distance <= 0.5; distance += 0.2) {
-        for(int pitch = 0; pitch < 360; pitch += 60) {
-            for(int yaw = 30; yaw <= 90; yaw += 50) {
+    for(double distance = 0.1; distance < 0.7; distance += 0.2) {
+        for(int pitch = 0; pitch < 360; pitch += (distance == 0.1? 60:180)) {
+            for(int yaw = 70; yaw <= 120; yaw += 40){
                 int currentId = iter % 250;
                 int markerBorder = iter % 2 + 1;
                 iter++;
                 vector< Point2f > groundTruthCorners;
-
-                // create synthetic image
-                Mat img =
-                    projectMarker(dictionary, currentId, cameraMatrix, deg2rad(yaw), deg2rad(pitch),
-                                  distance, imgSize, markerBorder, groundTruthCorners);
-
-                // detect markers
-                vector< vector< Point2f > > corners;
-                vector< int > ids;
                 Ptr<aruco::DetectorParameters> params = aruco::DetectorParameters::create();
                 params->minDistanceToBorder = 1;
                 params->markerBorderBits = markerBorder;
-                if(aprilDecimate == 1){
-                    params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_APRILTAG;
+
+                /// create synthetic image
+                Mat img=
+                    projectMarker(dictionary, currentId, cameraMatrix, deg2rad(yaw), deg2rad(pitch),
+                                  distance, imgSize, markerBorder, groundTruthCorners, szEnclosed);
+
+                // marker :: Inverted
+                if(CV_ArucoDetectionPerspective::DETECT_INVERTED_MARKER == tryWith){
+                    img = ~img;
+                    params->detectInvertedMarker = true;
+                }
+
+                if(CV_ArucoDetectionPerspective::USE_APRILTAG == tryWith){
+                    params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_APRILTAG;
                 }
+
+                // detect markers
+                vector< vector< Point2f > > corners;
+                vector< int > ids;
                 aruco::detectMarkers(img, dictionary, corners, ids, params);

                 // check results
@@ -293,14 +322,33 @@ void CV_ArucoDetectionPerspective::run(int aprilDecimate) {
                 }
                 for(int c = 0; c < 4; c++) {
                     double dist = cv::norm(groundTruthCorners[c] - corners[0][c]); // TODO cvtest
-                    if(dist > 5) {
-                        ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
-                        ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
-                        return;
+                    if(CV_ArucoDetectionPerspective::DETECT_INVERTED_MARKER == tryWith){
+                        if(szEnclosed && dist > 3){
+                            ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
+                            ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+                            return;
+                        }
+                        if(!szEnclosed && dist >15){
+                            ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
+                            ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+                            return;
+                        }
+                    }
+                    else{
+                        if(dist > 5) {
+                            ts->printf(cvtest::TS::LOG, "Incorrect marker corners position");
+                            ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);
+                            return;
+                        }
                     }
                 }
             }
         }
+        // change the state :: to detect an enclosed inverted marker
+        if( CV_ArucoDetectionPerspective::DETECT_INVERTED_MARKER == tryWith && distance == 0.1 ){
+            distance -= 0.1;
+            szEnclosed++;
+        }
     }
 }
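Reading of the state-change block above: after the first pass at `distance == 0.1`, `distance` is rewound by 0.1 and `szEnclosed` flips to 1, so the inverted-marker run appears to cover the closest distance with a bare inverted marker (15 px corner tolerance, since its outer corners have no enclosing white border to anchor them) and the remaining distances with an enclosed marker (3 px tolerance).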
@@ -488,10 +536,16 @@ void CV_ArucoBitCorrection::run(int) {
 }

 typedef CV_ArucoDetectionPerspective CV_AprilTagDetectionPerspective;
+typedef CV_ArucoDetectionPerspective CV_InvertedArucoDetectionPerspective;
+
+TEST(CV_InvertedArucoDetectionPerspective, algorithmic) {
+    CV_InvertedArucoDetectionPerspective test;
+    test.safe_run(CV_ArucoDetectionPerspective::DETECT_INVERTED_MARKER);
+}

 TEST(CV_AprilTagDetectionPerspective, algorithmic) {
     CV_AprilTagDetectionPerspective test;
-    test.safe_run(1);
+    test.safe_run(CV_ArucoDetectionPerspective::USE_APRILTAG);
 }

 TEST(CV_ArucoDetectionSimple, algorithmic) {
...
@@ -74,8 +74,8 @@ public:
     @endcode
     */
     CV_WRAP virtual bool fit( InputArray image,
-                              InputArray faces,
-                              OutputArrayOfArrays landmarks ) = 0;
+                              const std::vector<Rect>& faces,
+                              CV_OUT std::vector<std::vector<Point2f> >& landmarks ) = 0;
 }; /* Facemark*/
...
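With `fit` now taking concrete vector types instead of `InputArray`/`OutputArrayOfArrays`, a caller passes the face ROIs and receives the landmarks directly. A minimal sketch under assumed inputs (the model file, image path, and face rectangle are illustrative):

#include <opencv2/face.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

using namespace cv;

int main() {
    Ptr<face::Facemark> facemark = face::FacemarkLBF::create();
    facemark->loadModel("lbfmodel.yaml");                  // assumed pre-trained model

    Mat image = imread("face.jpg");                        // assumed input image
    std::vector<Rect> faces = { Rect(80, 60, 200, 200) };  // ROIs, now a plain vector<Rect>

    std::vector<std::vector<Point2f> > landmarks;          // output, now a concrete nested vector
    bool ok = facemark->fit(image, faces, landmarks);
    return ok ? 0 : 1;
}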
@@ -146,7 +146,7 @@ public:
     };

     //! overload with additional Config structures
-    virtual bool fitConfig( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks, const std::vector<Config> &runtime_params ) = 0;
+    virtual bool fitConfig( InputArray image, const std::vector<Rect>& roi, std::vector<std::vector<Point2f> >& _landmarks, const std::vector<Config> &runtime_params ) = 0;

     //! initializer
...
@@ -73,7 +73,7 @@ public:
     void loadModel(String fs) CV_OVERRIDE;
     bool setFaceDetector(FN_FaceDetector f, void* userdata) CV_OVERRIDE;
     bool getFaces(InputArray image, OutputArray faces) CV_OVERRIDE;
-    bool fit(InputArray image, InputArray faces, OutputArrayOfArrays landmarks ) CV_OVERRIDE;
+    bool fit(InputArray image, const std::vector<Rect>& faces, CV_OUT std::vector<std::vector<Point2f> >& landmarks ) CV_OVERRIDE;
     void training(String imageList, String groundTruth);
     bool training(vector<Mat>& images, vector< vector<Point2f> >& landmarks,string filename,Size scale,string modelFilename) CV_OVERRIDE;
     // Destructor for the class.
...
@@ -103,12 +103,11 @@ public:
     bool getData(void * items) CV_OVERRIDE;

-    bool fitConfig( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks, const std::vector<Config> &runtime_params ) CV_OVERRIDE;
+    bool fitConfig( InputArray image, const std::vector<Rect>& roi, std::vector<std::vector<Point2f> >& _landmarks, const std::vector<Config> &runtime_params ) CV_OVERRIDE;

 protected:
-    bool fit( InputArray image, InputArray faces, OutputArrayOfArrays landmarks ) CV_OVERRIDE;
-    //bool fit( InputArray image, InputArray faces, InputOutputArray landmarks, void * runtime_params);//!< from many ROIs
+    bool fit( InputArray image, const std::vector<Rect>& faces, CV_OUT std::vector<std::vector<Point2f> >& landmarks ) CV_OVERRIDE;
     bool fitImpl( const Mat image, std::vector<Point2f>& landmarks,const Mat R,const Point2f T,const float scale, const int sclIdx=0 );
     bool addTrainingSample(InputArray image, InputArray landmarks) CV_OVERRIDE;
@@ -323,19 +322,18 @@ void FacemarkAAMImpl::training(void* parameters){
     if(params.verbose) printf("Training is completed\n");
 }

-bool FacemarkAAMImpl::fit( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks )
+bool FacemarkAAMImpl::fit( InputArray image, const std::vector<Rect>& roi, CV_OUT std::vector<std::vector<Point2f> >& _landmarks )
 {
     std::vector<Config> config; // empty
     return fitConfig(image, roi, _landmarks, config);
 }

-bool FacemarkAAMImpl::fitConfig( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks, const std::vector<Config> &configs )
+bool FacemarkAAMImpl::fitConfig( InputArray image, const std::vector<Rect>& roi, std::vector<std::vector<Point2f> >& _landmarks, const std::vector<Config> &configs )
 {
-    std::vector<Rect> & faces = *(std::vector<Rect> *)roi.getObj();
+    const std::vector<Rect> & faces = roi;
     if(faces.size()<1) return false;

-    std::vector<std::vector<Point2f> > & landmarks =
-        *(std::vector<std::vector<Point2f> >*) _landmarks.getObj();
+    std::vector<std::vector<Point2f> > & landmarks = _landmarks;
     landmarks.resize(faces.size());

     Mat img = image.getMat();
...
@@ -115,7 +115,7 @@ public:
 protected:
-    bool fit( InputArray image, InputArray faces, OutputArrayOfArrays landmarks ) CV_OVERRIDE;//!< from many ROIs
+    bool fit( InputArray image, const std::vector<Rect> & faces, std::vector<std::vector<Point2f> > & landmarks ) CV_OVERRIDE;//!< from many ROIs
     bool fitImpl( const Mat image, std::vector<Point2f> & landmarks );//!< from a face
     bool addTrainingSample(InputArray image, InputArray landmarks) CV_OVERRIDE;
@@ -370,14 +370,12 @@ void FacemarkLBFImpl::training(void* parameters){
     isModelTrained = true;
 }

-bool FacemarkLBFImpl::fit( InputArray image, InputArray roi, OutputArrayOfArrays _landmarks )
+bool FacemarkLBFImpl::fit( InputArray image, const std::vector<Rect> & roi, CV_OUT std::vector<std::vector<Point2f> > & _landmarks )
 {
-    // FIXIT
-    std::vector<Rect> & faces = *(std::vector<Rect> *)roi.getObj();
+    const std::vector<Rect> & faces = roi;
     if (faces.empty()) return false;

-    std::vector<std::vector<Point2f> > & landmarks =
-        *(std::vector<std::vector<Point2f> >*) _landmarks.getObj();
+    std::vector<std::vector<Point2f> > & landmarks = _landmarks;
     landmarks.resize(faces.size());
...
@@ -168,15 +168,15 @@ void FacemarkKazemiImpl :: loadModel(String filename){
     f.close();
     isModelLoaded = true;
 }
-bool FacemarkKazemiImpl::fit(InputArray img, InputArray roi, OutputArrayOfArrays landmarks){
+bool FacemarkKazemiImpl::fit(InputArray img, const std::vector<Rect>& roi, CV_OUT std::vector<std::vector<Point2f> >& landmarks){
     if(!isModelLoaded){
         String error_message = "No model loaded. Aborting....";
         CV_Error(Error::StsBadArg, error_message);
         return false;
     }
     Mat image = img.getMat();
-    std::vector<Rect> & faces = *(std::vector<Rect>*)roi.getObj();
-    std::vector<std::vector<Point2f> > & shapes = *(std::vector<std::vector<Point2f> >*) landmarks.getObj();
+    const std::vector<Rect> & faces = roi;
+    std::vector<std::vector<Point2f> > & shapes = landmarks;
     shapes.resize(faces.size());
     if(image.empty()){
...
@@ -558,9 +558,14 @@ namespace cv {
             center = Point((int)(img_plane_rotated.cols / 2.0), (int)(img_plane_rotated.rows / 2.0));
             rot = cv::getRotationMatrix2D(center, -45.0, 1.0);
-            warpAffine(tmp_gradiant, tmp_rot, rot, bbox.size());
+            // Using this bigger box avoids clipping the ends of narrow images
+            Rect bbox2 = cv::RotatedRect(center, img_plane_rotated.size(), -45.0).boundingRect();
+            warpAffine(tmp_gradiant, tmp_rot, rot, bbox2.size());

-            tmp_gradiant = tmp_rot(Rect((bbox.width - img.cols) / 2, (bbox.height - img.rows) / 2, img.cols, img.rows));
+            // for narrow images, bbox might be less tall or wide than img
+            int start_x = std::max(0, (bbox.width - img.cols) / 2);
+            int start_y = std::max(0, (bbox.height - img.rows) / 2);
+            tmp_gradiant = tmp_rot(Rect(start_x, start_y, img.cols, img.rows));

             threshold(tmp_gradiant, tmp_gradiant_pos, 0, 0, THRESH_TOZERO);
             threshold(tmp_gradiant, tmp_gradiant_neg, 0, 0, THRESH_TOZERO_INV);
@@ -573,9 +578,12 @@ namespace cv {
             center = Point((int)(img_plane_rotated.cols / 2.0), (int)(img_plane_rotated.rows / 2.0));
             rot = cv::getRotationMatrix2D(center, -45.0, 1.0);
-            warpAffine(tmp_gradiant, tmp_rot, rot, bbox.size());
+            bbox2 = cv::RotatedRect(center, img_plane_rotated.size(), -45.0).boundingRect();
+            warpAffine(tmp_gradiant, tmp_rot, rot, bbox2.size());

-            tmp_gradiant = tmp_rot(Rect((bbox.width - img.cols) / 2, (bbox.height - img.rows) / 2, img.cols, img.rows));
+            start_x = std::max(0, (bbox.width - img.cols) / 2);
+            start_y = std::max(0, (bbox.height - img.rows) / 2);
+            tmp_gradiant = tmp_rot(Rect(start_x, start_y, img.cols, img.rows));

             threshold(tmp_gradiant, tmp_gradiant_pos, 0, 0, THRESH_TOZERO);
             threshold(tmp_gradiant, tmp_gradiant_neg, 0, 0, THRESH_TOZERO_INV);
...
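The fix replaces the undersized `bbox` output canvas with `bbox2`, the bounding rectangle of the rotated image plane, so `warpAffine` has room for the whole rotated result; the `std::max(0, ...)` clamps then keep the crop rectangle inside the canvas for narrow images. For reference, a self-contained sketch of the general rotate-without-clipping pattern (the translation adjustment here is the common general-purpose variant; the patch itself only enlarges the output size, which is all its subsequent cropping needs):

#include <opencv2/imgproc.hpp>

// Rotate src by `angle` degrees without clipping any corner (illustrative helper).
static cv::Mat rotateNoClip(const cv::Mat& src, double angle) {
    cv::Point2f center(src.cols / 2.0f, src.rows / 2.0f);
    cv::Mat rot = cv::getRotationMatrix2D(center, angle, 1.0);
    // Bounding box big enough to hold every rotated corner.
    cv::Rect bbox = cv::RotatedRect(center, src.size(), (float)angle).boundingRect();
    // Shift the transform so the rotated content lands centered in the new canvas.
    rot.at<double>(0, 2) += bbox.width / 2.0 - center.x;
    rot.at<double>(1, 2) += bbox.height / 2.0 - center.y;
    cv::Mat dst;
    cv::warpAffine(src, dst, rot, bbox.size());
    return dst;
}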