Commit 6e38b6aa authored by Vladimir Dudnik

removed trailing backspaces, reduced the number of warnings (under MSVC2010 x64) for size_t to int conversion, added handling of sample launch without parameters (samples should not terminate abnormally when no parameters are supplied)
parent 092beae2
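
The recurring fix in the calibration and feature hunks below is an explicit narrowing cast wherever a 64-bit size_t feeds a 32-bit int, which is what silences MSVC's size_t-to-int conversion warnings (C4267) in x64 builds. A minimal standalone sketch of the idiom, not code from this commit:

#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> keypoints(42);

    // On MSVC2010 x64, size_t is 64 bits wide, so this assignment
    // would emit warning C4267 (conversion from 'size_t' to 'int'):
    //     int n = keypoints.size();

    // The pattern applied throughout the commit: make the narrowing explicit.
    int n = (int)keypoints.size();

    for( int i = 0; i < n; i++ )
        keypoints[i] = i;

    std::printf( "%d keypoints\n", n );
    return 0;
}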
@@ -1126,16 +1126,16 @@ CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
if( _dpdr != dpdr )
cvConvert( _dpdr, dpdr );
if( _dpdt != dpdt )
cvConvert( _dpdt, dpdt );
if( _dpdf != dpdf )
cvConvert( _dpdf, dpdf );
if( _dpdc != dpdc )
cvConvert( _dpdc, dpdc );
if( _dpdk != dpdk )
cvConvert( _dpdk, dpdk );
}
@@ -1663,7 +1663,7 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
if( !proceed )
break;
reprojErr = 0;
for( i = 0, pos = 0; i < nimages; i++, pos += ni )
@@ -1756,7 +1756,7 @@ CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
cvConvert( &src, &dst );
}
}
return std::sqrt(reprojErr/total);
}
@@ -2268,7 +2268,7 @@ double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1
cvConvertScale( &F, matF, fabs(f[8]) > 0 ? 1./f[8] : 1 );
}
}
return std::sqrt(reprojErr/(pointsTotal*2));
}
@@ -2282,14 +2282,14 @@ icvGetRectangles( const CvMat* cameraMatrix, const CvMat* distCoeffs,
int x, y, k;
cv::Ptr<CvMat> _pts = cvCreateMat(1, N*N, CV_32FC2);
CvPoint2D32f* pts = (CvPoint2D32f*)(_pts->data.ptr);
for( y = k = 0; y < N; y++ )
for( x = 0; x < N; x++ )
pts[k++] = cvPoint2D32f((float)x*imgSize.width/(N-1),
(float)y*imgSize.height/(N-1));
cvUndistortPoints(_pts, _pts, cameraMatrix, distCoeffs, R, newCameraMatrix);
float iX0=-FLT_MAX, iX1=FLT_MAX, iY0=-FLT_MAX, iY1=FLT_MAX;
float oX0=FLT_MAX, oX1=-FLT_MAX, oY0=FLT_MAX, oY1=-FLT_MAX;
// find the inscribed rectangle.
@@ -2302,7 +2302,7 @@ icvGetRectangles( const CvMat* cameraMatrix, const CvMat* distCoeffs,
oX1 = MAX(oX1, p.x);
oY0 = MIN(oY0, p.y);
oY1 = MAX(oY1, p.y);
if( x == 0 )
iX0 = MAX(iX0, p.x);
if( x == N-1 )
@@ -2314,7 +2314,7 @@ icvGetRectangles( const CvMat* cameraMatrix, const CvMat* distCoeffs,
}
inner = cv::Rect_<float>(iX0, iY0, iX1-iX0, iY1-iY0);
outer = cv::Rect_<float>(oX0, oY0, oX1-oX0, oY1-oY0);
}
}
void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
@@ -2327,7 +2327,7 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
double _om[3], _t[3], _uu[3]={0,0,0}, _r_r[3][3], _pp[3][4];
double _ww[3], _wr[3][3], _z[3] = {0,0,0}, _ri[3][3];
cv::Rect_<float> inner1, inner2, outer1, outer2;
CvMat om = cvMat(3, 1, CV_64F, _om);
CvMat t = cvMat(3, 1, CV_64F, _t);
CvMat uu = cvMat(3, 1, CV_64F, _uu);
@@ -2439,12 +2439,12 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
_pp[1][2] = cc_new[1].y;
_pp[idx][3] = _t[idx]*fc_new; // baseline * focal length
cvConvert(&pp, _P2);
alpha = MIN(alpha, 1.);
icvGetRectangles( _cameraMatrix1, _distCoeffs1, _R1, _P1, imageSize, inner1, outer1 );
icvGetRectangles( _cameraMatrix2, _distCoeffs2, _R2, _P2, imageSize, inner2, outer2 );
{
newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imageSize;
double cx1_0 = cc_new[0].x;
@@ -2456,7 +2456,7 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
double cx2 = newImgSize.width*cx2_0/imageSize.width;
double cy2 = newImgSize.height*cy2_0/imageSize.height;
double s = 1.;
if( alpha >= 0 )
{
double s0 = std::max(std::max(std::max((double)cx1/(cx1_0 - inner1.x), (double)cy1/(cy1_0 - inner1.y)),
@@ -2466,7 +2466,7 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
(double)(newImgSize.width - cx2)/(inner2.x + inner2.width - cx2_0)),
(double)(newImgSize.height - cy2)/(inner2.y + inner2.height - cy2_0)),
s0);
double s1 = std::min(std::min(std::min((double)cx1/(cx1_0 - outer1.x), (double)cy1/(cy1_0 - outer1.y)),
(double)(newImgSize.width - cx1)/(outer1.x + outer1.width - cx1_0)),
(double)(newImgSize.height - cy1)/(outer1.y + outer1.height - cy1_0));
@@ -2474,25 +2474,25 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
(double)(newImgSize.width - cx2)/(outer2.x + outer2.width - cx2_0)),
(double)(newImgSize.height - cy2)/(outer2.y + outer2.height - cy2_0)),
s1);
s = s0*(1 - alpha) + s1*alpha;
}
fc_new *= s;
cc_new[0] = cvPoint2D64f(cx1, cy1);
cc_new[1] = cvPoint2D64f(cx2, cy2);
cvmSet(_P1, 0, 0, fc_new);
cvmSet(_P1, 1, 1, fc_new);
cvmSet(_P1, 0, 2, cx1);
cvmSet(_P1, 1, 2, cy1);
cvmSet(_P2, 0, 0, fc_new);
cvmSet(_P2, 1, 1, fc_new);
cvmSet(_P2, 0, 2, cx2);
cvmSet(_P2, 1, 2, cy2);
cvmSet(_P2, idx, 3, s*cvmGet(_P2, idx, 3));
if(roi1)
{
*roi1 = cv::Rect(cvCeil((inner1.x - cx1_0)*s + cx1),
@@ -2500,7 +2500,7 @@ void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
cvFloor(inner1.width*s), cvFloor(inner1.height*s))
& cv::Rect(0, 0, newImgSize.width, newImgSize.height);
}
if(roi2)
{
*roi2 = cv::Rect(cvCeil((inner2.x - cx2_0)*s + cx2),
@@ -2533,18 +2533,18 @@ void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCo
{
cv::Rect_<float> inner, outer;
icvGetRectangles( cameraMatrix, distCoeffs, 0, cameraMatrix, imgSize, inner, outer );
newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imgSize;
double M[3][3];
CvMat matM = cvMat(3, 3, CV_64F, M);
cvConvert(cameraMatrix, &matM);
double cx0 = M[0][2];
double cy0 = M[1][2];
double cx = (newImgSize.width-1)*0.5;
double cy = (newImgSize.height-1)*0.5;
double s0 = std::max(std::max(std::max((double)cx/(cx0 - inner.x), (double)cy/(cy0 - inner.y)),
(double)cx/(inner.x + inner.width - cx0)),
(double)cy/(inner.y + inner.height - cy0));
@@ -2552,13 +2552,13 @@ void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCo
(double)cx/(outer.x + outer.width - cx0)),
(double)cy/(outer.y + outer.height - cy0));
double s = s0*(1 - alpha) + s1*alpha;
M[0][0] *= s;
M[1][1] *= s;
M[0][2] = cx;
M[1][2] = cy;
cvConvert(&matM, newCameraMatrix);
if( validPixROI )
{
inner = cv::Rect_<float>((float)((inner.x - cx0)*s + cx),
@@ -2774,7 +2774,7 @@ void cv::reprojectImageTo3D( InputArray _disparity,
{
Mat disparity = _disparity.getMat(), Q = _Qmat.getMat();
int stype = disparity.type();
CV_Assert( stype == CV_8UC1 || stype == CV_16SC1 ||
stype == CV_32SC1 || stype == CV_32FC1 );
CV_Assert( Q.size() == Size(4,4) );
@@ -2786,18 +2786,18 @@ void cv::reprojectImageTo3D( InputArray _disparity,
dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), 3);
CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
}
__3dImage.create(disparity.size(), CV_MAKETYPE(dtype, 3));
Mat _3dImage = __3dImage.getMat();
const double bigZ = 10000.;
double q[4][4];
Mat _Q(4, 4, CV_64F, q);
Q.convertTo(_Q, CV_64F);
int x, cols = disparity.cols;
CV_Assert( cols >= 0 );
vector<float> _sbuf(cols+1), _dbuf(cols*3+1);
float* sbuf = &_sbuf[0], *dbuf = &_dbuf[0];
double minDisparity = FLT_MAX;
@@ -2884,7 +2884,7 @@ void cvReprojectImageTo3D( const CvArr* disparityImage,
CV_Assert( disp.size() == _3dimg.size() );
int dtype = _3dimg.type();
CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
cv::reprojectImageTo3D(disp, _3dimg, mq, handleMissingValues != 0, dtype );
}
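
For reference, reprojectImageTo3D applies the 4x4 reprojection matrix Q to every pixel: (X, Y, Z, W)^T = Q * (x, y, disparity(x, y), 1)^T, and stores (X/W, Y/W, Z/W) in the output image; when handleMissingValues is set, pixels whose disparity equals the minimal (missing) value are pushed out to the large depth bigZ seen above. This note summarizes the surrounding code and the function's documented behavior; it is not a change made by this commit.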
@@ -3131,7 +3131,7 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints,
objPtMat.create(1, (int)total, CV_32FC3);
imgPtMat1.create(1, (int)total, CV_32FC2);
Point2f* imgPtData2 = 0;
if( imgPtMat2 )
{
imgPtMat2->create(1, (int)total, CV_32FC2);
@@ -3140,7 +3140,7 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints,
Point3f* objPtData = objPtMat.ptr<Point3f>();
Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();
for( i = 0; i < nimages; i++, j += ni )
{
Mat objpt = objectPoints.getMat(i);
@@ -3162,7 +3162,7 @@ static void collectCalibrationData( InputArrayOfArrays objectPoints,
}
}
static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
{
Mat cameraMatrix = Mat::eye(3, 3, rtype);
@@ -3178,7 +3178,7 @@ static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
distCoeffs0.size() == Size(1, 5) ||
distCoeffs0.size() == Size(1, 8) ||
distCoeffs0.size() == Size(4, 1) ||
distCoeffs0.size() == Size(5, 1) ||
distCoeffs0.size() == Size(5, 1) ||
distCoeffs0.size() == Size(8, 1) )
{
Mat dstCoeffs(distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows));
@@ -3186,8 +3186,8 @@ static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
}
return distCoeffs;
}
}
} // namespace cv
void cv::Rodrigues(InputArray _src, OutputArray _dst, OutputArray _jacobian)
@@ -3232,60 +3232,60 @@ void cv::composeRT( InputArray _rvec1, InputArray _tvec1,
_rvec3.create(rvec1.size(), rtype);
_tvec3.create(tvec1.size(), rtype);
Mat rvec3 = _rvec3.getMat(), tvec3 = _tvec3.getMat();
CvMat c_rvec1 = rvec1, c_tvec1 = tvec1, c_rvec2 = rvec2,
c_tvec2 = tvec2, c_rvec3 = rvec3, c_tvec3 = tvec3;
CvMat c_dr3dr1, c_dr3dt1, c_dr3dr2, c_dr3dt2, c_dt3dr1, c_dt3dt1, c_dt3dr2, c_dt3dt2;
CvMat *p_dr3dr1=0, *p_dr3dt1=0, *p_dr3dr2=0, *p_dr3dt2=0, *p_dt3dr1=0, *p_dt3dt1=0, *p_dt3dr2=0, *p_dt3dt2=0;
if( _dr3dr1.needed() )
{
_dr3dr1.create(3, 3, rtype);
p_dr3dr1 = &(c_dr3dr1 = _dr3dr1.getMat());
}
if( _dr3dt1.needed() )
{
_dr3dt1.create(3, 3, rtype);
p_dr3dt1 = &(c_dr3dt1 = _dr3dt1.getMat());
}
if( _dr3dr2.needed() )
{
_dr3dr2.create(3, 3, rtype);
p_dr3dr2 = &(c_dr3dr2 = _dr3dr2.getMat());
}
if( _dr3dt2.needed() )
{
_dr3dt2.create(3, 3, rtype);
p_dr3dt2 = &(c_dr3dt2 = _dr3dt2.getMat());
}
if( _dt3dr1.needed() )
{
_dt3dr1.create(3, 3, rtype);
p_dt3dr1 = &(c_dt3dr1 = _dt3dr1.getMat());
}
if( _dt3dt1.needed() )
{
_dt3dt1.create(3, 3, rtype);
p_dt3dt1 = &(c_dt3dt1 = _dt3dt1.getMat());
}
if( _dt3dr2.needed() )
{
_dt3dr2.create(3, 3, rtype);
p_dt3dr2 = &(c_dt3dr2 = _dt3dr2.getMat());
}
if( _dt3dt2.needed() )
{
_dt3dt2.create(3, 3, rtype);
p_dt3dt2 = &(c_dt3dt2 = _dt3dt2.getMat());
}
cvComposeRT(&c_rvec1, &c_tvec1, &c_rvec2, &c_tvec2, &c_rvec3, &c_tvec3,
p_dr3dr1, p_dr3dt1, p_dr3dr2, p_dr3dt2,
p_dt3dr1, p_dt3dt1, p_dt3dr2, p_dt3dt2);
@@ -3304,10 +3304,10 @@ void cv::projectPoints( InputArray _opoints,
Mat opoints = _opoints.getMat();
int npoints = opoints.checkVector(3), depth = opoints.depth();
CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F));
CvMat dpdrot, dpdt, dpdf, dpdc, dpddist;
CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0;
_ipoints.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
CvMat c_imagePoints = _ipoints.getMat();
CvMat c_objectPoints = opoints;
@@ -3318,7 +3318,7 @@ void cv::projectPoints( InputArray _opoints,
CvMat c_rvec = rvec, c_tvec = tvec;
CvMat c_distCoeffs = distCoeffs;
int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1;
if( _jacobian.needed() )
{
_jacobian.create(npoints*2, 3+3+2+2+ndistCoeffs, CV_64F);
@@ -3361,7 +3361,8 @@ double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
if( !(flags & CALIB_RATIONAL_MODEL) )
distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);
size_t i, nimages = _objectPoints.total();
int i;
size_t nimages = _objectPoints.total();
CV_Assert( nimages > 0 );
Mat objPt, imgPt, npoints, rvecM((int)nimages, 3, CV_64FC1), tvecM((int)nimages, 3, CV_64FC1);
collectCalibrationData( _objectPoints, _imagePoints, noArray(),
@@ -3373,10 +3374,10 @@ double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
double reprojErr = cvCalibrateCamera2(&c_objPt, &c_imgPt, &c_npoints, imageSize,
&c_cameraMatrix, &c_distCoeffs, &c_rvecM,
&c_tvecM, flags );
_rvecs.create(nimages, 1, CV_64FC3);
_tvecs.create(nimages, 1, CV_64FC3);
for( i = 0; i < nimages; i++ )
_rvecs.create((int)nimages, 1, CV_64FC3);
_tvecs.create((int)nimages, 1, CV_64FC3);
for( i = 0; i < (int)nimages; i++ )
{
_rvecs.create(3, 1, CV_64F, i, true);
_tvecs.create(3, 1, CV_64F, i, true);
@@ -3386,7 +3387,7 @@ double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
}
cameraMatrix.copyTo(_cameraMatrix);
distCoeffs.copyTo(_distCoeffs);
return reprojErr;
}
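
For context on the rectification hunks above: the scale factor s in cvStereoRectify and cvGetOptimalNewCameraMatrix blends two zoom levels, s0 (zoom in so only valid undistorted pixels from the inscribed rectangle remain) and s1 (zoom out so all source pixels from the outer rectangle remain), under the free-scaling parameter alpha (capped at 1 and applied only when non-negative, as the code shows). A hypothetical standalone helper illustrating the expression, with a made-up name:

// blendScale mirrors s = s0*(1 - alpha) + s1*alpha from the code above:
// alpha = 0 crops to valid pixels only, alpha = 1 keeps all source pixels.
static double blendScale( double s0, double s1, double alpha )
{
    return s0*(1 - alpha) + s1*alpha;
}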
......
@@ -1965,7 +1965,7 @@ public:
virtual bool empty() const;
protected:
virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
RTreeClassifier classifier_;
static const int BORDER_SIZE = 16;
......
@@ -376,13 +376,13 @@ void OpponentColorDescriptorExtractor::computeImpl( const Mat& bgrImage, vector<
channelKeypoints[ci].insert( channelKeypoints[ci].begin(), keypoints.begin(), keypoints.end() );
// Use class_id member to get indices into initial keypoints vector
for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
channelKeypoints[ci][ki].class_id = ki;
channelKeypoints[ci][ki].class_id = (int)ki;
descriptorExtractor->compute( opponentChannels[ci], channelKeypoints[ci], channelDescriptors[ci] );
idxs[ci].resize( channelKeypoints[ci].size() );
for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
{
idxs[ci][ki] = ki;
idxs[ci][ki] = (int)ki;
}
std::sort( idxs[ci].begin(), idxs[ci].end(), KP_LessThan(channelKeypoints[ci]) );
}
......
@@ -127,7 +127,7 @@ HarrisResponse::HarrisResponse(const cv::Mat& image, double k) :
dX_offsets_.resize(7 * 9);
dY_offsets_.resize(7 * 9);
std::vector<int>::iterator dX_offsets = dX_offsets_.begin(), dY_offsets = dY_offsets_.begin();
unsigned int image_step = image.step1();
unsigned int image_step = (unsigned int)image.step1();
for (size_t y = 0; y <= 6 * image_step; y += image_step)
{
int dX_offset = y + 2, dY_offset = y + 2 * image_step;
......
@@ -93,7 +93,7 @@ icvFarthestNode( CvSpillTreeNode* node,
}
return result;
}
// clone a new tree node
static inline CvSpillTreeNode*
icvCloneSpillTreeNode( CvSpillTreeNode* node )
......
@@ -20,7 +20,7 @@ void help()
"see facedetect.cmd for one call:\n"
"./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye.xml\" --scale=1.3 \n"
"Hit any key to quit.\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n" << endl;
"Using OpenCV version " << CV_VERSION << "\n" << endl;
}
void detectAndDraw( Mat& img,
@@ -49,7 +49,7 @@ int main( int argc, const char** argv )
for( int i = 1; i < argc; i++ )
{
cout << "Processing " << i << " " << argv[i] << endl;
cout << "Processing " << i << " " << argv[i] << endl;
if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
{
cascadeName.assign( argv[i] + cascadeOptLen );
@@ -111,7 +111,7 @@ int main( int argc, const char** argv )
if( capture )
{
cout << "In capture ..." << endl;
cout << "In capture ..." << endl;
for(;;)
{
IplImage* iplImg = cvQueryFrame( capture );
@@ -130,12 +130,13 @@ int main( int argc, const char** argv )
}
waitKey(0);
_cleanup_:
cvReleaseCapture( &capture );
}
else
{
cout << "In image read" << endl;
cout << "In image read" << endl;
if( !image.empty() )
{
detectAndDraw( image, cascade, nestedCascade, scale );
@@ -239,6 +240,6 @@ void detectAndDraw( Mat& img,
radius = cvRound((nr->width + nr->height)*0.25*scale);
circle( img, center, radius, color, 3, 8, 0 );
}
}
cv::imshow( "result", img );
}
cv::imshow( "result", img );
}
@@ -16,14 +16,13 @@
using namespace std;
void help()
{
printf(
"This program demonstrated the use of the SURF Detector and Descriptor using\n"
"either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
"on planar objects.\n"
"Call:\n"
"./find_obj [<object_filename default box.png> <scene_filename default box_in_scene.png>]\n\n"
);
printf(
"This program demonstrated the use of the SURF Detector and Descriptor using\n"
"either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
"on planar objects.\n"
"Usage:\n"
"./find_obj <object_filename> <scene_filename>, default is box.png and box_in_scene.png\n\n");
return;
}
// define whether to use approximate nearest-neighbor search
@@ -214,8 +213,19 @@ int main(int argc, char** argv)
const char* object_filename = argc == 3 ? argv[1] : "box.png";
const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";
CvMemStorage* storage = cvCreateMemStorage(0);
help();
IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
if( !object || !image )
{
fprintf( stderr, "Can not load %s and/or %s\n",
object_filename, scene_filename );
exit(-1);
}
CvMemStorage* storage = cvCreateMemStorage(0);
cvNamedWindow("Object", 1);
cvNamedWindow("Object Correspond", 1);
@@ -232,30 +242,24 @@ int main(int argc, char** argv)
{{255,255,255}}
};
IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
if( !object || !image )
{
fprintf( stderr, "Can not load %s and/or %s\n"
"Usage: find_obj [<object_filename> <scene_filename>]\n",
object_filename, scene_filename );
exit(-1);
}
IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);
cvCvtColor( object, object_color, CV_GRAY2BGR );
CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
CvSeq* objectKeypoints = 0, *objectDescriptors = 0;
CvSeq* imageKeypoints = 0, *imageDescriptors = 0;
int i;
CvSURFParams params = cvSURFParams(500, 1);
double tt = (double)cvGetTickCount();
cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params );
printf("Object Descriptors: %d\n", objectDescriptors->total);
cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params );
printf("Image Descriptors: %d\n", imageDescriptors->total);
tt = (double)cvGetTickCount() - tt;
printf( "Extraction time = %gms\n", tt/(cvGetTickFrequency()*1000.));
CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};
CvPoint dst_corners[4];
IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
......
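
The sample changes above all apply the same defensive pattern: fall back to the bundled default input files when the program is launched without parameters, and verify every image load before use so the sample exits cleanly instead of crashing on a null pointer. A minimal sketch of that pattern using the era's C API (hypothetical standalone example, not code from this commit):

#include <cstdio>
#include <opencv2/highgui/highgui.hpp>

int main( int argc, char** argv )
{
    // Launching without parameters falls back to a default image.
    const char* filename = argc > 1 ? argv[1] : "box.png";

    IplImage* img = cvLoadImage( filename, CV_LOAD_IMAGE_GRAYSCALE );
    if( !img )
    {
        // Report and return normally rather than terminating abnormally later.
        fprintf( stderr, "Can not load %s\n", filename );
        return -1;
    }

    cvNamedWindow( "image", 1 );
    cvShowImage( "image", img );
    cvWaitKey(0);
    cvReleaseImage( &img );
    return 0;
}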
@@ -12,14 +12,17 @@ using namespace cv;
void help()
{
cout << "This program shows the use of the Calonder point descriptor classifier"
"SURF is used to detect interest points, Calonder is used to describe/match these points\n"
"Format:" << endl <<
"SURF is used to detect interest points, Calonder is used to describe/match these points\n"
"Format:" << endl <<
" classifier_file(to write) test_image file_with_train_images_filenames(txt)" <<
" or" << endl <<
" classifier_file(to read) test_image"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
<< endl;
" classifier_file(to read) test_image" << "\n" << endl <<
"Using OpenCV version " << CV_VERSION << "\n" << endl;
return;
}
/*
* Generates random perspective transform of image
*/
@@ -131,7 +134,7 @@ void testCalonderClassifier( const string& classifierFilename, const string& img
Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
for( size_t mi = 0; mi < matches.size(); mi++ )
{
if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>(mi,0)) < 4 ) // inlier
if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>((int)mi,0)) < 4 ) // inlier
matchesMask[mi] = 1;
}
@@ -148,7 +151,7 @@ int main( int argc, char **argv )
{
if( argc != 4 && argc != 3 )
{
help();
help();
return -1;
}
......
@@ -12,43 +12,46 @@ using namespace cv;
void help()
{
printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n"
"descriptor classifier"
"Usage:\n"
"./find_obj_ferns [<object_filename default: box.png> <scene_filename default:box_in_scene.png>]\n"
"\n");
"descriptor classifier\n"
"Usage:\n"
"./find_obj_ferns <object_filename> <scene_filename>, default: box.png and box_in_scene.png\n\n");
return;
}
int main(int argc, char** argv)
{
int i;
const char* object_filename = argc > 1 ? argv[1] : "box.png";
const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";
int i;
help();
cvNamedWindow("Object", 1);
cvNamedWindow("Image", 1);
cvNamedWindow("Object Correspondence", 1);
Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
Mat image;
double imgscale = 1;
Mat _image = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
resize(_image, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
help();
Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
Mat scene = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data || !image.data )
if( !object.data || !scene.data )
{
fprintf( stderr, "Can not load %s and/or %s\n"
"Usage: find_obj_ferns [<object_filename> <scene_filename>]\n",
fprintf( stderr, "Can not load %s and/or %s\n",
object_filename, scene_filename );
exit(-1);
}
double imgscale = 1;
Mat image;
resize(scene, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
cvNamedWindow("Object", 1);
cvNamedWindow("Image", 1);
cvNamedWindow("Object Correspondence", 1);
Size patchSize(32, 32);
LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2);
ldetector.setVerbose(true);
PlanarObjectDetector detector;
vector<Mat> objpyr, imgpyr;
int blurKSize = 3;
double sigma = 0;
@@ -56,10 +59,10 @@ int main(int argc, char** argv)
GaussianBlur(image, image, Size(blurKSize, blurKSize), sigma, sigma);
buildPyramid(object, objpyr, ldetector.nOctaves-1);
buildPyramid(image, imgpyr, ldetector.nOctaves-1);
vector<KeyPoint> objKeypoints, imgKeypoints;
PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);
PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);
string model_filename = format("%s_model.xml.gz", object_filename);
printf("Trying to load %s ...\n", model_filename.c_str());
FileStorage fs(model_filename, FileStorage::READ);
@@ -76,7 +79,7 @@ int main(int argc, char** argv)
ldetector.getMostStable2D(object, objKeypoints, 100, gen);
printf("Done.\nStep 2. Training ferns-based planar object detector ...\n");
detector.setVerbose(true);
detector.train(objpyr, objKeypoints, patchSize.width, 100, 11, 10000, ldetector, gen);
printf("Done.\nStep 3. Saving the model to %s ...\n", model_filename.c_str());
if( fs.open(model_filename, FileStorage::WRITE) )
@@ -84,7 +87,7 @@ int main(int argc, char** argv)
}
printf("Now find the keypoints in the image, try recognize them and compute the homography matrix\n");
fs.release();
vector<Point2f> dst_corners;
Mat correspond( object.rows + image.rows, std::max(object.cols, image.cols), CV_8UC3);
correspond = Scalar(0.);
@@ -92,20 +95,20 @@ int main(int argc, char** argv)
cvtColor(object, part, CV_GRAY2BGR);
part = Mat(correspond, Rect(0, object.rows, image.cols, image.rows));
cvtColor(image, part, CV_GRAY2BGR);
vector<int> pairs;
Mat H;
double t = (double)getTickCount();
objKeypoints = detector.getModelPoints();
ldetector(imgpyr, imgKeypoints, 300);
std::cout << "Object keypoints: " << objKeypoints.size() << "\n";
std::cout << "Image keypoints: " << imgKeypoints.size() << "\n";
bool found = detector(imgpyr, imgKeypoints, H, dst_corners, &pairs);
t = (double)getTickCount() - t;
printf("%gms\n", t*1000/getTickFrequency());
if( found )
{
for( i = 0; i < 4; i++ )
@@ -116,14 +119,14 @@ int main(int argc, char** argv)
Point(r2.x, r2.y+object.rows), Scalar(0,0,255) );
}
}
for( i = 0; i < (int)pairs.size(); i += 2 )
{
line( correspond, objKeypoints[pairs[i]].pt,
imgKeypoints[pairs[i+1]].pt + Point2f(0,(float)object.rows),
Scalar(0,255,0) );
}
imshow( "Object Correspondence", correspond );
Mat objectColor;
cvtColor(object, objectColor, CV_GRAY2BGR);
@@ -139,10 +142,12 @@ int main(int argc, char** argv)
circle( imageColor, imgKeypoints[i].pt, 2, Scalar(0,0,255), -1 );
circle( imageColor, imgKeypoints[i].pt, (1 << imgKeypoints[i].octave)*15, Scalar(0,255,0), 1 );
}
imwrite("correspond.png", correspond );
imshow( "Object", objectColor );
imshow( "Image", imageColor );
waitKey(0);
return 0;
}
@@ -13,14 +13,15 @@
using namespace cv;
using namespace std;
void myhelp()
void help()
{
printf("\nSigh: This program is not complete/will be replaced. \n"
"So: Use this just to see hints of how to use things like Rodrigues\n"
" conversions, finding the fundamental matrix, using descriptor\n"
" finding and matching in features2d and using camera parameters\n"
);
printf("\nSigh: This program is not complete/will be replaced. \n"
"So: Use this just to see hints of how to use things like Rodrigues\n"
" conversions, finding the fundamental matrix, using descriptor\n"
" finding and matching in features2d and using camera parameters\n"
"Usage: build3dmodel -i <intrinsics_filename>\n"
"\t[-d <detector>] [-de <descriptor_extractor>] -m <model_name>\n\n");
return;
}
@@ -33,12 +34,12 @@ static bool readCameraMatrix(const string& filename,
fs["image_height"] >> calibratedImageSize.height;
fs["distortion_coefficients"] >> distCoeffs;
fs["camera_matrix"] >> cameraMatrix;
if( distCoeffs.type() != CV_64F )
distCoeffs = Mat_<double>(distCoeffs);
if( cameraMatrix.type() != CV_64F )
cameraMatrix = Mat_<double>(cameraMatrix);
return true;
}
@@ -154,19 +155,19 @@ static void findConstrainedCorrespondences(const Mat& _F,
{
float F[9]={0};
int dsize = descriptors1.cols;
Mat Fhdr = Mat(3, 3, CV_32F, F);
_F.convertTo(Fhdr, CV_32F);
matches.clear();
for( size_t i = 0; i < keypoints1.size(); i++ )
for( int i = 0; i < (int)keypoints1.size(); i++ )
{
Point2f p1 = keypoints1[i].pt;
double bestDist1 = DBL_MAX, bestDist2 = DBL_MAX;
int bestIdx1 = -1, bestIdx2 = -1;
const float* d1 = descriptors1.ptr<float>(i);
for( size_t j = 0; j < keypoints2.size(); j++ )
for( int j = 0; j < (int)keypoints2.size(); j++ )
{
Point2f p2 = keypoints2[j].pt;
double e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +
@@ -177,7 +178,7 @@ static void findConstrainedCorrespondences(const Mat& _F,
const float* d2 = descriptors2.ptr<float>(j);
double dist = 0;
int k = 0;
for( ; k <= dsize - 8; k += 8 )
{
float t0 = d1[k] - d2[k], t1 = d1[k+1] - d2[k+1];
@@ -186,11 +187,11 @@ static void findConstrainedCorrespondences(const Mat& _F,
float t6 = d1[k+6] - d2[k+6], t7 = d1[k+7] - d2[k+7];
dist += t0*t0 + t1*t1 + t2*t2 + t3*t3 +
t4*t4 + t5*t5 + t6*t6 + t7*t7;
if( dist >= bestDist2 )
break;
}
if( dist < bestDist2 )
{
for( ; k < dsize; k++ )
@@ -198,7 +199,7 @@ static void findConstrainedCorrespondences(const Mat& _F,
float t = d1[k] - d2[k];
dist += t*t;
}
if( dist < bestDist1 )
{
bestDist2 = bestDist1;
@@ -213,7 +214,7 @@ static void findConstrainedCorrespondences(const Mat& _F,
}
}
}
if( bestIdx1 >= 0 && bestDist1 < bestDist2*ratio )
{
Point2f p2 = keypoints1[bestIdx1].pt;
@@ -224,21 +225,21 @@ static void findConstrainedCorrespondences(const Mat& _F,
continue;
double threshold = bestDist1/ratio;
const float* d22 = descriptors2.ptr<float>(bestIdx1);
size_t i1 = 0;
for( ; i1 < keypoints1.size(); i1++ )
int i1 = 0;
for( ; i1 < (int)keypoints1.size(); i1++ )
{
if( i1 == i )
continue;
Point2f p1 = keypoints1[i1].pt;
const float* d11 = descriptors1.ptr<float>(i1);
double dist = 0;
e = p2.x*(F[0]*p1.x + F[1]*p1.y + F[2]) +
p2.y*(F[3]*p1.x + F[4]*p1.y + F[5]) +
F[6]*p1.x + F[7]*p1.y + F[8];
if( fabs(e) > eps )
continue;
for( int k = 0; k < dsize; k++ )
{
float t = d11[k] - d22[k];
@@ -246,7 +247,7 @@ static void findConstrainedCorrespondences(const Mat& _F,
if( dist >= threshold )
break;
}
if( dist < threshold )
break;
}
@@ -334,7 +335,7 @@ void triangulatePoint_test(void)
randu(tvec1, Scalar::all(-10), Scalar::all(10));
randu(rvec2, Scalar::all(-10), Scalar::all(10));
randu(tvec2, Scalar::all(-10), Scalar::all(10));
randu(objptmat, Scalar::all(-10), Scalar::all(10));
double eps = 1e-2;
randu(deltamat1, Scalar::all(-eps), Scalar::all(eps));
@@ -343,10 +344,10 @@ void triangulatePoint_test(void)
Mat_<float> cameraMatrix(3,3);
double fx = 1000., fy = 1010., cx = 400.5, cy = 300.5;
cameraMatrix << fx, 0, cx, 0, fy, cy, 0, 0, 1;
projectPoints(Mat(objpt)+Mat(delta1), rvec1, tvec1, cameraMatrix, Mat(), imgpt1);
projectPoints(Mat(objpt)+Mat(delta2), rvec2, tvec2, cameraMatrix, Mat(), imgpt2);
vector<Point3f> objptt(n);
vector<Point2f> pts(2);
vector<Mat> Rv(2), tv(2);
@@ -390,10 +391,10 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
PointModel& model )
{
int progressBarSize = 10;
const double Feps = 5;
const double DescriptorRatio = 0.7;
vector<vector<KeyPoint> > allkeypoints;
vector<int> dstart;
vector<float> alldescriptorsVec;
@@ -402,23 +403,23 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
int descriptorSize = 0;
Mat descriptorbuf;
Set2i pairs, keypointsIdxMap;
model.points.clear();
model.didx.clear();
dstart.push_back(0);
size_t nimages = imageList.size();
size_t nimagePairs = (nimages - 1)*nimages/2 - nimages;
printf("\nComputing descriptors ");
// 1. find all the keypoints and all the descriptors
for( size_t i = 0; i < nimages; i++ )
{
Mat img = imread(imageList[i], 1), gray;
cvtColor(img, gray, CV_BGR2GRAY);
vector<KeyPoint> keypoints;
detector->detect(gray, keypoints);
descriptorExtractor->compute(gray, keypoints, descriptorbuf);
@@ -426,7 +427,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
for( size_t k = 0; k < keypoints.size(); k++ )
keypoints[k].pt += roiofs;
allkeypoints.push_back(keypoints);
Mat buf = descriptorbuf;
if( !buf.isContinuous() || buf.type() != CV_32F )
{
@@ -434,41 +435,41 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
descriptorbuf.convertTo(buf, CV_32F);
}
descriptorSize = buf.cols;
size_t prev = alldescriptorsVec.size();
size_t delta = buf.rows*buf.cols;
alldescriptorsVec.resize(prev + delta);
std::copy(buf.ptr<float>(), buf.ptr<float>() + delta,
alldescriptorsVec.begin() + prev);
dstart.push_back(dstart.back() + keypoints.size());
dstart.push_back(dstart.back() + (int)keypoints.size());
Mat R, t;
unpackPose(poseList[i], R, t);
Rs.push_back(R);
ts.push_back(t);
if( (i+1)*progressBarSize/nimages > i*progressBarSize/nimages )
{
putchar('.');
fflush(stdout);
}
}
Mat alldescriptors(alldescriptorsVec.size()/descriptorSize, descriptorSize, CV_32F,
Mat alldescriptors((int)alldescriptorsVec.size()/descriptorSize, descriptorSize, CV_32F,
&alldescriptorsVec[0]);
printf("\nOk. total images = %d. total keypoints = %d\n",
(int)nimages, alldescriptors.rows);
printf("\nFinding correspondences ");
int pairsFound = 0;
vector<Point2f> pts_k(2);
vector<Mat> Rs_k(2), ts_k(2);
//namedWindow("img1", 1);
//namedWindow("img2", 1);
// 2. find pairwise correspondences
for( size_t i = 0; i < nimages; i++ )
for( size_t j = i+1; j < nimages; j++ )
@@ -477,47 +478,47 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
const vector<KeyPoint>& keypoints2 = allkeypoints[j];
Mat descriptors1 = alldescriptors.rowRange(dstart[i], dstart[i+1]);
Mat descriptors2 = alldescriptors.rowRange(dstart[j], dstart[j+1]);
Mat F = getFundamentalMat(Rs[i], ts[i], Rs[j], ts[j], cameraMatrix);
findConstrainedCorrespondences( F, keypoints1, keypoints2,
descriptors1, descriptors2,
pairwiseMatches, Feps, DescriptorRatio );
//pairsFound += (int)pairwiseMatches.size();
//Mat img1 = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)i), 1);
//Mat img2 = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)j), 1);
//double avg_err = 0;
for( size_t k = 0; k < pairwiseMatches.size(); k++ )
{
int i1 = pairwiseMatches[k][0], i2 = pairwiseMatches[k][1];
pts_k[0] = keypoints1[i1].pt;
pts_k[1] = keypoints2[i2].pt;
Rs_k[0] = Rs[i]; Rs_k[1] = Rs[j];
ts_k[0] = ts[i]; ts_k[1] = ts[j];
Point3f objpt = triangulatePoint(pts_k, Rs_k, ts_k, cameraMatrix);
vector<Point3f> objpts;
objpts.push_back(objpt);
vector<Point2f> imgpts1, imgpts2;
projectPoints(Mat(objpts), Rs_k[0], ts_k[0], cameraMatrix, Mat(), imgpts1);
projectPoints(Mat(objpts), Rs_k[1], ts_k[1], cameraMatrix, Mat(), imgpts2);
double e1 = norm(imgpts1[0] - keypoints1[i1].pt);
double e2 = norm(imgpts2[0] - keypoints2[i2].pt);
if( e1 + e2 > 5 )
continue;
pairsFound++;
//model.points.push_back(objpt);
pairs[Pair2i(i1+dstart[i], i2+dstart[j])] = 1;
pairs[Pair2i(i2+dstart[j], i1+dstart[i])] = 1;
keypointsIdxMap[Pair2i(i,i1)] = 1;
keypointsIdxMap[Pair2i(j,i2)] = 1;
keypointsIdxMap[Pair2i((int)i,i1)] = 1;
keypointsIdxMap[Pair2i((int)j,i2)] = 1;
//CV_Assert(e1 < 5 && e2 < 5);
//Scalar color(rand()%256,rand()%256, rand()%256);
//circle(img1, keypoints1[i1].pt, 2, color, -1, CV_AA);
@@ -527,41 +528,41 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
//imshow("img1", img1);
//imshow("img2", img2);
//waitKey();
if( (i+1)*progressBarSize/nimagePairs > i*progressBarSize/nimagePairs )
{
putchar('.');
fflush(stdout);
}
}
printf("\nOk. Total pairs = %d\n", pairsFound );
// 3. build the keypoint clusters
vector<Pair2i> keypointsIdx;
Set2i::iterator kpidx_it = keypointsIdxMap.begin(), kpidx_end = keypointsIdxMap.end();
for( ; kpidx_it != kpidx_end; ++kpidx_it )
keypointsIdx.push_back(kpidx_it->first);
printf("\nClustering correspondences ");
vector<int> labels;
int nclasses = partition( keypointsIdx, labels, EqKeypoints(&dstart, &pairs) );
printf("\nOk. Total classes (i.e. 3d points) = %d\n", nclasses );
model.descriptors.create(keypointsIdx.size(), descriptorSize, CV_32F);
model.descriptors.create((int)keypointsIdx.size(), descriptorSize, CV_32F);
model.didx.resize(nclasses);
model.points.resize(nclasses);
vector<vector<Pair2i> > clusters(nclasses);
for( size_t i = 0; i < keypointsIdx.size(); i++ )
clusters[labels[i]].push_back(keypointsIdx[i]);
// 4. now compute 3D points corresponding to each cluster and fill in the model data
printf("\nComputing 3D coordinates ");
int globalDIdx = 0;
for( int k = 0; k < nclasses; k++ )
{
@@ -575,7 +576,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
int imgidx = clusters[k][i].first, ptidx = clusters[k][i].second;
Mat dstrow = model.descriptors.row(globalDIdx);
alldescriptors.row(dstart[imgidx] + ptidx).copyTo(dstrow);
model.didx[k][i] = globalDIdx++;
pts_k[i] = allkeypoints[imgidx][ptidx].pt;
Rs_k[i] = Rs[imgidx];
@@ -583,7 +584,7 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
}
Point3f objpt = triangulatePoint(pts_k, Rs_k, ts_k, cameraMatrix);
model.points[k] = objpt;
if( (i+1)*progressBarSize/nclasses > i*progressBarSize/nclasses )
{
putchar('.');
@@ -600,10 +601,10 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
{
img = imread(format("%s/frame%04d.jpg", model.name.c_str(), (int)i), 1);
projectPoints(Mat(model.points), Rs[i], ts[i], cameraMatrix, Mat(), imagePoints);
for( int k = 0; k < (int)imagePoints.size(); k++ )
circle(img, imagePoints[k], 2, Scalar(0,255,0), -1, CV_AA, 0);
imshow("Test", img);
int c = waitKey();
if( c == 'q' || c == 'Q' )
@@ -614,75 +615,73 @@ static void build3dmodel( const Ptr<FeatureDetector>& detector,
int main(int argc, char** argv)
{
triangulatePoint_test();
const char* help = "Usage: build3dmodel -i <intrinsics_filename>\n"
"\t[-d <detector>] [-de <descriptor_extractor>] -m <model_name>\n\n";
if(argc < 3)
{
puts(help);
myhelp();
return 0;
}
const char* intrinsicsFilename = 0;
const char* modelName = 0;
const char* modelName = 0;
const char* detectorName = "SURF";
const char* descriptorExtractorName = "SURF";
vector<Point3f> modelBox;
vector<string> imageList;
vector<Rect> roiList;
vector<Vec6f> poseList;
vector<string> imageList;
vector<Rect> roiList;
vector<Vec6f> poseList;
if(argc < 3)
{
help();
return -1;
}
for( int i = 1; i < argc; i++ )
{
if( strcmp(argv[i], "-i") == 0 )
intrinsicsFilename = argv[++i];
else if( strcmp(argv[i], "-m") == 0 )
modelName = argv[++i];
else if( strcmp(argv[i], "-d") == 0 )
intrinsicsFilename = argv[++i];
else if( strcmp(argv[i], "-m") == 0 )
modelName = argv[++i];
else if( strcmp(argv[i], "-d") == 0 )
detectorName = argv[++i];
else if( strcmp(argv[i], "-de") == 0 )
descriptorExtractorName = argv[++i];
else
{
printf("Incorrect option\n");
puts(help);
return 0;
}
else
{
help();
printf("Incorrect option\n");
return -1;
}
}
if( !intrinsicsFilename || !modelName )
{
printf("Some of the required parameters are missing\n");
help();
return -1;
}
if( !intrinsicsFilename || !modelName )
{
printf("Some of the required parameters are missing\n");
puts(help);
return 0;
}
triangulatePoint_test();
Mat cameraMatrix, distCoeffs;
Size calibratedImageSize;
readCameraMatrix(intrinsicsFilename, cameraMatrix, distCoeffs, calibratedImageSize);
Ptr<FeatureDetector> detector = FeatureDetector::create(detectorName);
Ptr<DescriptorExtractor> descriptorExtractor = DescriptorExtractor::create(descriptorExtractorName);
string modelIndexFilename = format("%s_segm/frame_index.yml", modelName);
if(!readModelViews( modelIndexFilename, modelBox, imageList, roiList, poseList))
{
printf("Can not read the model. Check the parameters and the working directory\n");
puts(help);
return 0;
}
help();
return -1;
}
PointModel model;
model.name = modelName;
build3dmodel( detector, descriptorExtractor, modelBox,
imageList, roiList, poseList, cameraMatrix, model );
string outputModelName = format("%s_model.yml.gz", modelName);
printf("\nDone! Now saving the model ...\n");
writeModel(outputModelName, modelName, model);
return 0;
}
@@ -9,42 +9,46 @@ using namespace std;
void help()
{
cout <<
"\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
"edge template and a query edge image.\n"
"Call:\n"
"./chamfer [<image edge map> <template edge map>]\n"
"By default\n"
"the inputs are ./chamfer logo_in_clutter.png logo.png\n"<< endl;
cout <<
"\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
"edge template and a query edge image.\n"
"Usage:\n"
"./chamfer <image edge map> <template edge map>,"
" By default the inputs are logo_in_clutter.png logo.png\n" << endl;
return;
}
int main( int argc, char** argv )
{
if( argc != 1 && argc != 3 )
if( argc != 3 )
{
help();
return 0;
}
Mat img = imread(argc == 3 ? argv[1] : "logo_in_clutter.png", 0);
Mat cimg;
cvtColor(img, cimg, CV_GRAY2BGR);
Mat tpl = imread(argc == 3 ? argv[2] : "logo.png", 0);
// if the image and the template are not edge maps but normal grayscale images,
// you might want to uncomment the lines below to produce the maps. You can also
// run Sobel instead of Canny.
// Canny(img, img, 5, 50, 3);
// Canny(tpl, tpl, 5, 50, 3);
vector<vector<Point> > results;
vector<float> costs;
int best = chamerMatching( img, tpl, results, costs );
if( best < 0 )
{
cout << "not found;\n";
cout << "matching not found\n";
return 0;
}
size_t i, n = results[best].size();
for( i = 0; i < n; i++ )
{
@@ -52,7 +56,10 @@ int main( int argc, char** argv )
if( pt.inside(Rect(0, 0, cimg.cols, cimg.rows)) )
cimg.at<Vec3b>(pt) = Vec3b(0, 255, 0);
}
imshow("result", cimg);
waitKey();
return 0;
}
@@ -8,30 +8,31 @@ using namespace std;
void help()
{
cout << "\nThis program demonstrates line finding with the Hough transform.\n"
"Call:\n"
"./houghlines [image_len -- Default is pic1.png\n" << endl;
cout << "\nThis program demonstrates line finding with the Hough transform.\n"
"Usage:\n"
"./houghlines <image_name>, Default is pic1.png\n" << endl;
}
int main(int argc, char** argv)
{
const char* filename = argc >= 2 ? argv[1] : "pic1.png";
Mat src = imread(filename, 0);
if(src.empty())
{
help();
cout << "can not open " << filename << endl;
cout << "Usage: houghlines <image_name>" << endl;
return -1;
}
help();
Mat dst, cdst;
Canny(src, dst, 50, 200, 3);
cvtColor(dst, cdst, CV_GRAY2BGR);
#if 0
vector<Vec2f> lines;
HoughLines(dst, lines, 1, CV_PI/180, 100, 0, 0 );
for( size_t i = 0; i < lines.size(); i++ )
{
float rho = lines[i][0], theta = lines[i][1];
@@ -57,6 +58,7 @@ int main(int argc, char** argv)
imshow("detected lines", cdst);
waitKey();
return 0;
}