Commit 6e38b6aa authored by Vladimir Dudnik

removed trailing whitespace, reduced the number of warnings (under MSVC2010 x64) for size_t to int conversions, and added handling for samples launched without parameters (a sample should not terminate abnormally when no parameters are supplied)
parent 092beae2
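Note: most of the warning fixes in this diff follow a single pattern, an explicit narrowing cast where a 64-bit size_t loop index is stored into a 32-bit int, which is what triggers MSVC's C4267 conversion warning on x64. A minimal sketch of the pattern, with an illustrative function and variable names that are not taken from this commit:

    #include <cstddef>
    #include <vector>

    // On 64-bit MSVC builds size_t is 64 bits wide while int is 32, so an
    // implicit conversion raises warning C4267. The explicit (int) cast marks
    // the narrowing as intentional and keeps the build warning-clean.
    static void numberKeypoints(std::vector<int>& classIds)
    {
        for (size_t i = 0; i < classIds.size(); i++)
            classIds[i] = (int)i; // explicit cast instead of implicit narrowing
    }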
......@@ -1965,7 +1965,7 @@ public:
virtual bool empty() const;
protected:
-virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
+virtual void computeImpl( const Mat& image, vector<KeyPoint>& keypoints, Mat& descriptors ) const;
RTreeClassifier classifier_;
static const int BORDER_SIZE = 16;
......
......@@ -376,13 +376,13 @@ void OpponentColorDescriptorExtractor::computeImpl( const Mat& bgrImage, vector<
channelKeypoints[ci].insert( channelKeypoints[ci].begin(), keypoints.begin(), keypoints.end() );
// Use class_id member to get indices into initial keypoints vector
for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
-channelKeypoints[ci][ki].class_id = ki;
+channelKeypoints[ci][ki].class_id = (int)ki;
descriptorExtractor->compute( opponentChannels[ci], channelKeypoints[ci], channelDescriptors[ci] );
idxs[ci].resize( channelKeypoints[ci].size() );
for( size_t ki = 0; ki < channelKeypoints[ci].size(); ki++ )
{
-idxs[ci][ki] = ki;
+idxs[ci][ki] = (int)ki;
}
std::sort( idxs[ci].begin(), idxs[ci].end(), KP_LessThan(channelKeypoints[ci]) );
}
......
......@@ -127,7 +127,7 @@ HarrisResponse::HarrisResponse(const cv::Mat& image, double k) :
dX_offsets_.resize(7 * 9);
dY_offsets_.resize(7 * 9);
std::vector<int>::iterator dX_offsets = dX_offsets_.begin(), dY_offsets = dY_offsets_.begin();
-unsigned int image_step = image.step1();
+unsigned int image_step = (unsigned int)image.step1();
for (size_t y = 0; y <= 6 * image_step; y += image_step)
{
int dX_offset = y + 2, dY_offset = y + 2 * image_step;
......
......@@ -93,7 +93,7 @@ icvFarthestNode( CvSpillTreeNode* node,
}
return result;
}
// clone a new tree node
static inline CvSpillTreeNode*
icvCloneSpillTreeNode( CvSpillTreeNode* node )
......
......@@ -20,7 +20,7 @@ void help()
"see facedetect.cmd for one call:\n"
"./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye.xml\" --scale=1.3 \n"
"Hit any key to quit.\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n" << endl;
"Using OpenCV version " << CV_VERSION << "\n" << endl;
}
void detectAndDraw( Mat& img,
......@@ -49,7 +49,7 @@ int main( int argc, const char** argv )
for( int i = 1; i < argc; i++ )
{
cout << "Processing " << i << " " << argv[i] << endl;
cout << "Processing " << i << " " << argv[i] << endl;
if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
{
cascadeName.assign( argv[i] + cascadeOptLen );
......@@ -111,7 +111,7 @@ int main( int argc, const char** argv )
if( capture )
{
cout << "In capture ..." << endl;
cout << "In capture ..." << endl;
for(;;)
{
IplImage* iplImg = cvQueryFrame( capture );
......@@ -130,12 +130,13 @@ int main( int argc, const char** argv )
}
waitKey(0);
_cleanup_:
cvReleaseCapture( &capture );
}
else
{
cout << "In image read" << endl;
cout << "In image read" << endl;
if( !image.empty() )
{
detectAndDraw( image, cascade, nestedCascade, scale );
......@@ -239,6 +240,6 @@ void detectAndDraw( Mat& img,
radius = cvRound((nr->width + nr->height)*0.25*scale);
circle( img, center, radius, color, 3, 8, 0 );
}
-}
-cv::imshow( "result", img );
+}
+cv::imshow( "result", img );
}
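One facedetect change above deserves a note beyond whitespace cleanup: the old help() text left a printf-style "%s" placeholder inside an iostream chain, so the sample printed the literal characters %s rather than the OpenCV version. A minimal illustration of the bug and the fix (hypothetical snippet, not taken from the diff):

    #include <iostream>

    int main()
    {
        const char* version = "2.3.0";
        // Bug: operator<< does no format substitution, so this prints
        // "Using OpenCV version %s" and then the version on the next line.
        std::cout << "Using OpenCV version %s\n" << version << std::endl;
        // Fix: build the line by chaining operator<< instead.
        std::cout << "Using OpenCV version " << version << std::endl;
        return 0;
    }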
......@@ -16,14 +16,13 @@
using namespace std;
void help()
{
-printf(
-"This program demonstrated the use of the SURF Detector and Descriptor using\n"
-"either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
-"on planar objects.\n"
-"Call:\n"
-"./find_obj [<object_filename default box.png> <scene_filename default box_in_scene.png>]\n\n"
-);
+printf(
+"This program demonstrated the use of the SURF Detector and Descriptor using\n"
+"either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
+"on planar objects.\n"
+"Usage:\n"
+"./find_obj <object_filename> <scene_filename>, default is box.png and box_in_scene.png\n\n");
+return;
}
// define whether to use approximate nearest-neighbor search
......@@ -214,8 +213,19 @@ int main(int argc, char** argv)
const char* object_filename = argc == 3 ? argv[1] : "box.png";
const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";
-CvMemStorage* storage = cvCreateMemStorage(0);
+help();
+IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
+IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
+if( !object || !image )
+{
+fprintf( stderr, "Can not load %s and/or %s\n",
+object_filename, scene_filename );
+exit(-1);
+}
+CvMemStorage* storage = cvCreateMemStorage(0);
cvNamedWindow("Object", 1);
cvNamedWindow("Object Correspond", 1);
......@@ -232,30 +242,24 @@ int main(int argc, char** argv)
{{255,255,255}}
};
-IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
-IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
-if( !object || !image )
-{
-fprintf( stderr, "Can not load %s and/or %s\n"
-"Usage: find_obj [<object_filename> <scene_filename>]\n",
-object_filename, scene_filename );
-exit(-1);
-}
IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);
cvCvtColor( object, object_color, CV_GRAY2BGR );
-CvSeq *objectKeypoints = 0, *objectDescriptors = 0;
-CvSeq *imageKeypoints = 0, *imageDescriptors = 0;
+CvSeq* objectKeypoints = 0, *objectDescriptors = 0;
+CvSeq* imageKeypoints = 0, *imageDescriptors = 0;
int i;
CvSURFParams params = cvSURFParams(500, 1);
double tt = (double)cvGetTickCount();
cvExtractSURF( object, 0, &objectKeypoints, &objectDescriptors, storage, params );
printf("Object Descriptors: %d\n", objectDescriptors->total);
cvExtractSURF( image, 0, &imageKeypoints, &imageDescriptors, storage, params );
printf("Image Descriptors: %d\n", imageDescriptors->total);
tt = (double)cvGetTickCount() - tt;
printf( "Extraction time = %gms\n", tt/(cvGetTickFrequency()*1000.));
CvPoint src_corners[4] = {{0,0}, {object->width,0}, {object->width, object->height}, {0, object->height}};
CvPoint dst_corners[4];
IplImage* correspond = cvCreateImage( cvSize(image->width, object->height+image->height), 8, 1 );
......
......@@ -12,14 +12,17 @@ using namespace cv;
void help()
{
cout << "This program shows the use of the Calonder point descriptor classifier"
"SURF is used to detect interest points, Calonder is used to describe/match these points\n"
"Format:" << endl <<
"SURF is used to detect interest points, Calonder is used to describe/match these points\n"
"Format:" << endl <<
" classifier_file(to write) test_image file_with_train_images_filenames(txt)" <<
" or" << endl <<
" classifier_file(to read) test_image"
"Using OpenCV version %s\n" << CV_VERSION << "\n"
<< endl;
" classifier_file(to read) test_image" << "\n" << endl <<
"Using OpenCV version " << CV_VERSION << "\n" << endl;
return;
}
/*
* Generates random perspective transform of image
*/
......@@ -131,7 +134,7 @@ void testCalonderClassifier( const string& classifierFilename, const string& img
Mat points1t; perspectiveTransform(Mat(points1), points1t, H12);
for( size_t mi = 0; mi < matches.size(); mi++ )
{
-if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>(mi,0)) < 4 ) // inlier
+if( norm(points2[matches[mi].trainIdx] - points1t.at<Point2f>((int)mi,0)) < 4 ) // inlier
matchesMask[mi] = 1;
}
......@@ -148,7 +151,7 @@ int main( int argc, char **argv )
{
if( argc != 4 && argc != 3 )
{
-help();
+help();
return -1;
}
......
......@@ -12,43 +12,46 @@ using namespace cv;
void help()
{
printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n"
"descriptor classifier"
"Usage:\n"
"./find_obj_ferns [<object_filename default: box.png> <scene_filename default:box_in_scene.png>]\n"
"\n");
"descriptor classifier\n"
"Usage:\n"
"./find_obj_ferns <object_filename> <scene_filename>, default: box.png and box_in_scene.png\n\n");
return;
}
int main(int argc, char** argv)
{
-int i;
const char* object_filename = argc > 1 ? argv[1] : "box.png";
const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";
+int i;
-help();
-cvNamedWindow("Object", 1);
-cvNamedWindow("Image", 1);
-cvNamedWindow("Object Correspondence", 1);
-Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
-Mat image;
-double imgscale = 1;
-Mat _image = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
-resize(_image, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
+help();
+Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
+Mat scene = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
-if( !object.data || !image.data )
+if( !object.data || !scene.data )
{
-fprintf( stderr, "Can not load %s and/or %s\n"
-"Usage: find_obj_ferns [<object_filename> <scene_filename>]\n",
+fprintf( stderr, "Can not load %s and/or %s\n",
object_filename, scene_filename );
exit(-1);
}
+double imgscale = 1;
+Mat image;
+resize(scene, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
+cvNamedWindow("Object", 1);
+cvNamedWindow("Image", 1);
+cvNamedWindow("Object Correspondence", 1);
Size patchSize(32, 32);
LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2);
ldetector.setVerbose(true);
PlanarObjectDetector detector;
vector<Mat> objpyr, imgpyr;
int blurKSize = 3;
double sigma = 0;
......@@ -56,10 +59,10 @@ int main(int argc, char** argv)
GaussianBlur(image, image, Size(blurKSize, blurKSize), sigma, sigma);
buildPyramid(object, objpyr, ldetector.nOctaves-1);
buildPyramid(image, imgpyr, ldetector.nOctaves-1);
vector<KeyPoint> objKeypoints, imgKeypoints;
-PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);
+PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);
string model_filename = format("%s_model.xml.gz", object_filename);
printf("Trying to load %s ...\n", model_filename.c_str());
FileStorage fs(model_filename, FileStorage::READ);
......@@ -76,7 +79,7 @@ int main(int argc, char** argv)
ldetector.getMostStable2D(object, objKeypoints, 100, gen);
printf("Done.\nStep 2. Training ferns-based planar object detector ...\n");
detector.setVerbose(true);
detector.train(objpyr, objKeypoints, patchSize.width, 100, 11, 10000, ldetector, gen);
printf("Done.\nStep 3. Saving the model to %s ...\n", model_filename.c_str());
if( fs.open(model_filename, FileStorage::WRITE) )
......@@ -84,7 +87,7 @@ int main(int argc, char** argv)
}
printf("Now find the keypoints in the image, try recognize them and compute the homography matrix\n");
fs.release();
vector<Point2f> dst_corners;
Mat correspond( object.rows + image.rows, std::max(object.cols, image.cols), CV_8UC3);
correspond = Scalar(0.);
......@@ -92,20 +95,20 @@ int main(int argc, char** argv)
cvtColor(object, part, CV_GRAY2BGR);
part = Mat(correspond, Rect(0, object.rows, image.cols, image.rows));
cvtColor(image, part, CV_GRAY2BGR);
vector<int> pairs;
Mat H;
double t = (double)getTickCount();
objKeypoints = detector.getModelPoints();
ldetector(imgpyr, imgKeypoints, 300);
std::cout << "Object keypoints: " << objKeypoints.size() << "\n";
std::cout << "Image keypoints: " << imgKeypoints.size() << "\n";
bool found = detector(imgpyr, imgKeypoints, H, dst_corners, &pairs);
t = (double)getTickCount() - t;
printf("%gms\n", t*1000/getTickFrequency());
if( found )
{
for( i = 0; i < 4; i++ )
......@@ -116,14 +119,14 @@ int main(int argc, char** argv)
Point(r2.x, r2.y+object.rows), Scalar(0,0,255) );
}
}
for( i = 0; i < (int)pairs.size(); i += 2 )
{
line( correspond, objKeypoints[pairs[i]].pt,
imgKeypoints[pairs[i+1]].pt + Point2f(0,(float)object.rows),
Scalar(0,255,0) );
}
imshow( "Object Correspondence", correspond );
Mat objectColor;
cvtColor(object, objectColor, CV_GRAY2BGR);
......@@ -139,10 +142,12 @@ int main(int argc, char** argv)
circle( imageColor, imgKeypoints[i].pt, 2, Scalar(0,0,255), -1 );
circle( imageColor, imgKeypoints[i].pt, (1 << imgKeypoints[i].octave)*15, Scalar(0,255,0), 1 );
}
imwrite("correspond.png", correspond );
imshow( "Object", objectColor );
imshow( "Image", imageColor );
waitKey(0);
return 0;
}
......@@ -9,42 +9,46 @@ using namespace std;
void help()
{
-cout <<
-"\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
-"edge template and a query edge image.\n"
-"Call:\n"
-"./chamfer [<image edge map> <template edge map>]\n"
-"By default\n"
-"the inputs are ./chamfer logo_in_clutter.png logo.png\n"<< endl;
+cout <<
+"\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
+"edge template and a query edge image.\n"
+"Usage:\n"
+"./chamfer <image edge map> <template edge map>,"
+" By default the inputs are logo_in_clutter.png logo.png\n" << endl;
+return;
}
int main( int argc, char** argv )
{
-if( argc != 1 && argc != 3 )
+if( argc != 3 )
{
help();
return 0;
}
Mat img = imread(argc == 3 ? argv[1] : "logo_in_clutter.png", 0);
Mat cimg;
cvtColor(img, cimg, CV_GRAY2BGR);
Mat tpl = imread(argc == 3 ? argv[2] : "logo.png", 0);
// if the image and the template are not edge maps but normal grayscale images,
// you might want to uncomment the lines below to produce the maps. You can also
// run Sobel instead of Canny.
// Canny(img, img, 5, 50, 3);
// Canny(tpl, tpl, 5, 50, 3);
vector<vector<Point> > results;
vector<float> costs;
int best = chamerMatching( img, tpl, results, costs );
if( best < 0 )
{
cout << "not found;\n";
cout << "matching not found\n";
return 0;
}
size_t i, n = results[best].size();
for( i = 0; i < n; i++ )
{
......@@ -52,7 +56,10 @@ int main( int argc, char** argv )
if( pt.inside(Rect(0, 0, cimg.cols, cimg.rows)) )
cimg.at<Vec3b>(pt) = Vec3b(0, 255, 0);
}
imshow("result", cimg);
waitKey();
return 0;
}
......@@ -8,30 +8,31 @@ using namespace std;
void help()
{
cout << "\nThis program demonstrates line finding with the Hough transform.\n"
"Call:\n"
"./houghlines [image_len -- Default is pic1.png\n" << endl;
cout << "\nThis program demonstrates line finding with the Hough transform.\n"
"Usage:\n"
"./houghlines <image_name>, Default is pic1.png\n" << endl;
}
int main(int argc, char** argv)
{
const char* filename = argc >= 2 ? argv[1] : "pic1.png";
Mat src = imread(filename, 0);
if(src.empty())
{
+help();
cout << "can not open " << filename << endl;
-cout << "Usage: houghlines <image_name>" << endl;
return -1;
}
+help();
Mat dst, cdst;
Canny(src, dst, 50, 200, 3);
cvtColor(dst, cdst, CV_GRAY2BGR);
#if 0
vector<Vec2f> lines;
HoughLines(dst, lines, 1, CV_PI/180, 100, 0, 0 );
for( size_t i = 0; i < lines.size(); i++ )
{
float rho = lines[i][0], theta = lines[i][1];
......@@ -57,6 +58,7 @@ int main(int argc, char** argv)
imshow("detected lines", cdst);
waitKey();
return 0;
}
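Taken together, the sample fixes in this commit handle launch without parameters the same way: fall back to the bundled default file names, verify that the inputs actually loaded, and leave through an explicit error path instead of crashing on a null image later. A minimal standalone sketch of that pattern, using the C API of this OpenCV era; it mirrors the find_obj changes above, but the function body is illustrative rather than code from the commit:

    #include <cstdio>
    #include <opencv2/highgui/highgui.hpp>

    int main(int argc, char** argv)
    {
        // Default to the bundled test images when launched without parameters.
        const char* object_filename = argc == 3 ? argv[1] : "box.png";
        const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";

        IplImage* object = cvLoadImage(object_filename, CV_LOAD_IMAGE_GRAYSCALE);
        IplImage* scene = cvLoadImage(scene_filename, CV_LOAD_IMAGE_GRAYSCALE);
        if (!object || !scene)
        {
            // Report the failure and exit cleanly instead of terminating
            // abnormally on a null image pointer further down.
            fprintf(stderr, "Can not load %s and/or %s\n", object_filename, scene_filename);
            return -1;
        }

        /* ... detection / matching would go here ... */

        cvReleaseImage(&object);
        cvReleaseImage(&scene);
        return 0;
    }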