Commit f1c549fa authored by yao

revise ocl samples, add tvl1 sample

parent 2c198f6c
......@@ -16,10 +16,13 @@ const static Scalar colors[] = { CV_RGB(0,0,255),
CV_RGB(255,128,0),
CV_RGB(255,255,0),
CV_RGB(255,0,0),
CV_RGB(255,0,255)} ;
CV_RGB(255,0,255)
} ;
int64 work_begin = 0;
int64 work_end = 0;
string outputName;
static void workBegin()
{
......@@ -29,33 +32,42 @@ static void workEnd()
{
work_end += (getTickCount() - work_begin);
}
static double getTime(){
static double getTime()
{
return work_end /((double)cvGetTickFrequency() * 1000.);
}
void detect( Mat& img, vector<Rect>& faces,
cv::ocl::OclCascadeClassifierBuf& cascade,
ocl::OclCascadeClassifierBuf& cascade,
double scale, bool calTime);
void detectCPU( Mat& img, vector<Rect>& faces,
CascadeClassifier& cascade,
double scale, bool calTime);
void Draw(Mat& img, vector<Rect>& faces, double scale);
// This function tests if gpu_rst matches cpu_rst.
// If the two vectors are not equal, it will return the difference in vector size
// Otherwise it will return (total diff of each cpu and gpu rects covered pixels)/(total cpu rects covered pixels)
double checkRectSimilarity(Size sz, std::vector<Rect>& cpu_rst, std::vector<Rect>& gpu_rst);
double checkRectSimilarity(Size sz, vector<Rect>& cpu_rst, vector<Rect>& gpu_rst);
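// Illustrative sketch only (not part of the committed sample): how the score
// returned by checkRectSimilarity is meant to be read. The helper name and the
// rectangle values below are hypothetical.
static void exampleCheckRectSimilarity()
{
    vector<Rect> cpu_rects(1, Rect(10, 10, 50, 50));  // reference rectangles from the CPU detector
    vector<Rect> gpu_rects(1, Rect(12, 12, 50, 50));  // slightly shifted rectangles from the OCL detector
    // Identical sets give 0.0; a count mismatch returns the count difference;
    // otherwise the score is 1 - (overlapping covered area / CPU covered area).
    double diff = checkRectSimilarity(Size(640, 480), cpu_rects, gpu_rects);
    cout << "cpu/gpu detection difference: " << diff << endl;
}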
int main( int argc, const char** argv )
{
const char* keys =
"{ h | help | false | print help message }"
"{ i | input | | specify input image }"
"{ t | template | ../../../data/haarcascades/haarcascade_frontalface_alt.xml | specify template file }"
"{ t | template | haarcascade_frontalface_alt.xml |"
" specify template file path }"
"{ c | scale | 1.0 | scale image }"
"{ s | use_cpu | false | use cpu or gpu to process the image }";
"{ s | use_cpu | false | use cpu or gpu to process the image }"
"{ o | output | facedetect_output.jpg |"
" specify output image save path(only works when input is images) }";
CommandLineParser cmd(argc, argv, keys);
if (cmd.get<bool>("help"))
......@@ -69,9 +81,10 @@ int main( int argc, const char** argv )
bool useCPU = cmd.get<bool>("s");
string inputName = cmd.get<string>("i");
outputName = cmd.get<string>("o");
string cascadeName = cmd.get<string>("t");
double scale = cmd.get<double>("c");
cv::ocl::OclCascadeClassifierBuf cascade;
ocl::OclCascadeClassifierBuf cascade;
CascadeClassifier cpu_cascade;
if( !cascade.load( cascadeName ) || !cpu_cascade.load(cascadeName) )
......@@ -105,9 +118,10 @@ int main( int argc, const char** argv )
return -1;
}
cvNamedWindow( "result", 1 );
std::vector<cv::ocl::Info> oclinfo;
int devnums = cv::ocl::getDevice(oclinfo);
vector<ocl::Info> oclinfo;
int devnums = ocl::getDevice(oclinfo);
if( devnums < 1 )
{
std::cout << "no device found\n";
......@@ -130,10 +144,12 @@ int main( int argc, const char** argv )
frame.copyTo( frameCopy );
else
flip( frame, frameCopy, 0 );
if(useCPU){
if(useCPU)
{
detectCPU(frameCopy, faces, cpu_cascade, scale, false);
}
else{
else
{
detect(frameCopy, faces, cascade, scale, false);
}
Draw(frameCopy, faces, scale);
......@@ -141,8 +157,10 @@ int main( int argc, const char** argv )
goto _cleanup_;
}
waitKey(0);
_cleanup_:
cvReleaseCapture( &capture );
}
......@@ -152,15 +170,18 @@ _cleanup_:
vector<Rect> faces;
vector<Rect> ref_rst;
double accuracy = 0.;
for(int i = 0; i <= LOOP_NUM;i ++)
for(int i = 0; i <= LOOP_NUM; i ++)
{
cout << "loop" << i << endl;
if(useCPU){
if(useCPU)
{
detectCPU(image, faces, cpu_cascade, scale, i==0?false:true);
}
else{
else
{
detect(image, faces, cascade, scale, i==0?false:true);
if(i == 0){
if(i == 0)
{
detectCPU(image, ref_rst, cpu_cascade, scale, false);
accuracy = checkRectSimilarity(image.size(), ref_rst, faces);
}
......@@ -180,20 +201,19 @@ _cleanup_:
}
cvDestroyWindow("result");
return 0;
}
void detect( Mat& img, vector<Rect>& faces,
cv::ocl::OclCascadeClassifierBuf& cascade,
ocl::OclCascadeClassifierBuf& cascade,
double scale, bool calTime)
{
cv::ocl::oclMat image(img);
cv::ocl::oclMat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );
ocl::oclMat image(img);
ocl::oclMat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );
if(calTime) workBegin();
cv::ocl::cvtColor( image, gray, CV_BGR2GRAY );
cv::ocl::resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
cv::ocl::equalizeHist( smallImg, smallImg );
ocl::cvtColor( image, gray, CV_BGR2GRAY );
ocl::resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
ocl::equalizeHist( smallImg, smallImg );
cascade.detectMultiScale( smallImg, faces, 1.1,
3, 0
......@@ -202,6 +222,7 @@ void detect( Mat& img, vector<Rect>& faces,
if(calTime) workEnd();
}
void detectCPU( Mat& img, vector<Rect>& faces,
CascadeClassifier& cascade,
double scale, bool calTime)
......@@ -217,6 +238,7 @@ void detectCPU( Mat& img, vector<Rect>& faces,
if(calTime) workEnd();
}
void Draw(Mat& img, vector<Rect>& faces, double scale)
{
int i = 0;
......@@ -230,31 +252,38 @@ void Draw(Mat& img, vector<Rect>& faces, double scale)
radius = cvRound((r->width + r->height)*0.25*scale);
circle( img, center, radius, color, 3, 8, 0 );
}
cv::imshow( "result", img );
imshow( "result", img );
imwrite( outputName, img );
}
double checkRectSimilarity(Size sz, std::vector<Rect>& ob1, std::vector<Rect>& ob2)
double checkRectSimilarity(Size sz, vector<Rect>& ob1, vector<Rect>& ob2)
{
double final_test_result = 0.0;
size_t sz1 = ob1.size();
size_t sz2 = ob2.size();
if(sz1 != sz2)
{
return sz1 > sz2 ? (double)(sz1 - sz2) : (double)(sz2 - sz1);
}
else
{
cv::Mat cpu_result(sz, CV_8UC1);
if(sz1==0 && sz2==0)
return 0;
Mat cpu_result(sz, CV_8UC1);
cpu_result.setTo(0);
for(vector<Rect>::const_iterator r = ob1.begin(); r != ob1.end(); r++)
{
cv::Mat cpu_result_roi(cpu_result, *r);
Mat cpu_result_roi(cpu_result, *r);
cpu_result_roi.setTo(1);
cpu_result.copyTo(cpu_result);
}
int cpu_area = cv::countNonZero(cpu_result > 0);
int cpu_area = countNonZero(cpu_result > 0);
cv::Mat gpu_result(sz, CV_8UC1);
Mat gpu_result(sz, CV_8UC1);
gpu_result.setTo(0);
for(vector<Rect>::const_iterator r2 = ob2.begin(); r2 != ob2.end(); r2++)
{
......@@ -263,11 +292,13 @@ double checkRectSimilarity(Size sz, std::vector<Rect>& ob1, std::vector<Rect>& o
gpu_result.copyTo(gpu_result);
}
cv::Mat result_;
Mat result_;
multiply(cpu_result, gpu_result, result_);
int result = cv::countNonZero(result_ > 0);
int result = countNonZero(result_ > 0);
if(cpu_area!=0 && result!=0)
final_test_result = 1.0 - (double)result/(double)cpu_area;
else if(cpu_area==0 && result!=0)
final_test_result = -1;
}
return final_test_result;
}
This diff is collapsed.
......@@ -23,7 +23,8 @@ static void workEnd()
{
work_end += (getTickCount() - work_begin);
}
static double getTime(){
static double getTime()
{
return work_end * 1000. / getTickFrequency();
}
......@@ -96,11 +97,12 @@ int main(int argc, const char* argv[])
"{ h | help | false | print help message }"
"{ l | left | | specify left image }"
"{ r | right | | specify right image }"
"{ c | camera | 0 | enable camera capturing }"
"{ c | camera | 0 | specify camera id }"
"{ s | use_cpu | false | use cpu or gpu to process the image }"
"{ v | video | | use video as input }"
"{ points | points | 1000 | specify points count [GoodFeatureToTrack] }"
"{ min_dist | min_dist | 0 | specify minimal distance between points [GoodFeatureToTrack] }";
"{ o | output | pyrlk_output.jpg| specify output save path when input is images }"
"{ p | points | 1000 | specify points count [GoodFeatureToTrack] }"
"{ m | min_dist | 0 | specify minimal distance between points [GoodFeatureToTrack] }";
CommandLineParser cmd(argc, argv, keys);
......@@ -113,13 +115,13 @@ int main(int argc, const char* argv[])
}
bool defaultPicturesFail = false;
string fname0 = cmd.get<string>("left");
string fname1 = cmd.get<string>("right");
string vdofile = cmd.get<string>("video");
int points = cmd.get<int>("points");
double minDist = cmd.get<double>("min_dist");
string fname0 = cmd.get<string>("l");
string fname1 = cmd.get<string>("r");
string vdofile = cmd.get<string>("v");
string outfile = cmd.get<string>("o");
int points = cmd.get<int>("p");
double minDist = cmd.get<double>("m");
bool useCPU = cmd.get<bool>("s");
bool useCamera = cmd.get<bool>("c");
int inputName = cmd.get<int>("c");
oclMat d_nextPts, d_status;
......@@ -132,22 +134,9 @@ int main(int argc, const char* argv[])
vector<unsigned char> status(points);
vector<float> err;
if (frame0.empty() || frame1.empty())
{
useCamera = true;
defaultPicturesFail = true;
CvCapture* capture = 0;
capture = cvCaptureFromCAM( inputName );
if (!capture)
{
cout << "Can't load input images" << endl;
return -1;
}
}
cout << "Points count : " << points << endl << endl;
if (useCamera)
if (frame0.empty() || frame1.empty())
{
CvCapture* capture = 0;
Mat frame, frameCopy;
......@@ -241,7 +230,7 @@ _cleanup_:
else
{
nocamera:
for(int i = 0; i <= LOOP_NUM;i ++)
for(int i = 0; i <= LOOP_NUM; i ++)
{
cout << "loop" << i << endl;
if (i > 0) workBegin();
......@@ -274,8 +263,8 @@ nocamera:
cout << getTime() / LOOP_NUM << " ms" << endl;
drawArrows(frame0, pts, nextPts, status, Scalar(255, 0, 0));
imshow("PyrLK [Sparse]", frame0);
imwrite(outfile, frame0);
}
}
}
......
......@@ -6,7 +6,6 @@
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ocl/ocl.hpp"
#include <iostream>
#include <math.h>
#include <string.h>
......@@ -14,23 +13,50 @@
using namespace cv;
using namespace std;
static void help()
#define ACCURACY_CHECK 1
#if ACCURACY_CHECK
// check whether two vectors of point vectors are approximately equal
// prior assumption is that they are in the same order
static bool checkPoints(
vector< vector<Point> > set1,
vector< vector<Point> > set2,
int maxDiff = 5)
{
cout <<
"\nA program using OCL module pyramid scaling, Canny, dilate functions, threshold, split; cpu contours, contour simpification and\n"
"memory storage (it's got it all folks) to find\n"
"squares in a list of images pic1-6.png\n"
"Returns sequence of squares detected on the image.\n"
"the sequence is stored in the specified memory storage\n"
"Call:\n"
"./squares\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n" << endl;
}
if(set1.size() != set2.size())
{
return false;
}
for(vector< vector<Point> >::iterator it1 = set1.begin(), it2 = set2.begin();
it1 < set1.end() && it2 < set2.end(); it1 ++, it2 ++)
{
vector<Point> pts1 = *it1;
vector<Point> pts2 = *it2;
if(pts1.size() != pts2.size())
{
return false;
}
for(size_t i = 0; i < pts1.size(); i ++)
{
Point pt1 = pts1[i], pt2 = pts2[i];
if(std::abs(pt1.x - pt2.x) > maxDiff ||
std::abs(pt1.y - pt2.y) > maxDiff)
{
return false;
}
}
}
return true;
}
#endif
int thresh = 50, N = 11;
const char* wndname = "OpenCL Square Detection Demo";
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
......@@ -43,9 +69,92 @@ static double angle( Point pt1, Point pt2, Point pt0 )
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
// try several threshold levels
for( int l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
Canny(gray0, gray, 0, thresh, 5);
// dilate canny output to remove potential
// holes between edge segments
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
cv::threshold(gray0, gray, (l+1)*255/N, 255, THRESH_BINARY);
}
// find contours and store them all as a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degrees) then write quadrangle
// vertices to resultant sequence
if( maxCosine < 0.3 )
squares.push_back(approx);
}
}
}
}
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares_ocl( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();
......@@ -91,7 +200,6 @@ static void findSquares( const Mat& image, vector<vector<Point> >& squares )
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
......@@ -110,7 +218,6 @@ static void findSquares( const Mat& image, vector<vector<Point> >& squares )
isContourConvex(Mat(approx)) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
......@@ -139,40 +246,93 @@ static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
int n = (int)squares[i].size();
polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, CV_AA);
}
}
imshow(wndname, image);
// draw both pure-C++ and ocl square results onto a single image
static Mat drawSquaresBoth( const Mat& image,
const vector<vector<Point> >& sqsCPP,
const vector<vector<Point> >& sqsOCL
)
{
Mat imgToShow(Size(image.cols * 2, image.rows), image.type());
Mat lImg = imgToShow(Rect(Point(0, 0), image.size()));
Mat rImg = imgToShow(Rect(Point(image.cols, 0), image.size()));
image.copyTo(lImg);
image.copyTo(rImg);
drawSquares(lImg, sqsCPP);
drawSquares(rImg, sqsOCL);
float fontScale = 0.8f;
Scalar white = Scalar::all(255), black = Scalar::all(0);
putText(lImg, "C++", Point(10, 20), FONT_HERSHEY_COMPLEX_SMALL, fontScale, black, 2);
putText(rImg, "OCL", Point(10, 20), FONT_HERSHEY_COMPLEX_SMALL, fontScale, black, 2);
putText(lImg, "C++", Point(10, 20), FONT_HERSHEY_COMPLEX_SMALL, fontScale, white, 1);
putText(rImg, "OCL", Point(10, 20), FONT_HERSHEY_COMPLEX_SMALL, fontScale, white, 1);
return imgToShow;
}
int main(int /*argc*/, char** /*argv*/)
int main(int argc, char** argv)
{
const char* keys =
"{ i | input | | specify input image }"
"{ o | output | squares_output.jpg | specify output save path}";
CommandLineParser cmd(argc, argv, keys);
string inputName = cmd.get<string>("i");
string outfile = cmd.get<string>("o");
if(inputName.empty())
{
cout << "Avaible options:" << endl;
cmd.printParams();
return 0;
}
//ocl::setBinpath("F:/kernel_bin");
vector<ocl::Info> info;
CV_Assert(ocl::getDevice(info));
static const char* names[] = { "pic1.png", "pic2.png", "pic3.png",
"pic4.png", "pic5.png", "pic6.png", 0 };
help();
int iterations = 10;
namedWindow( wndname, 1 );
vector<vector<Point> > squares;
vector<vector<Point> > squares_cpu, squares_ocl;
for( int i = 0; names[i] != 0; i++ )
{
Mat image = imread(names[i], 1);
Mat image = imread(inputName, 1);
if( image.empty() )
{
cout << "Couldn't load " << names[i] << endl;
continue;
cout << "Couldn't load " << inputName << endl;
return -1;
}
int j = iterations;
int64 t_ocl = 0, t_cpp = 0;
//warm-ups
cout << "warming up ..." << endl;
findSquares(image, squares_cpu);
findSquares_ocl(image, squares_ocl);
#if ACCURACY_CHECK
cout << "Checking ocl accuracy ... " << endl;
cout << (checkPoints(squares_cpu, squares_ocl) ? "Pass" : "Failed") << endl;
#endif
do
{
int64 t_start = cv::getTickCount();
findSquares(image, squares_cpu);
t_cpp += cv::getTickCount() - t_start;
findSquares(image, squares);
drawSquares(image, squares);
int c = waitKey();
if( (char)c == 27 )
break;
t_start = cv::getTickCount();
findSquares_ocl(image, squares_ocl);
t_ocl += cv::getTickCount() - t_start;
cout << "run loop: " << j << endl;
}
while(--j);
cout << "cpp average time: " << 1000.0f * (double)t_cpp / getTickFrequency() / iterations << "ms" << endl;
cout << "ocl average time: " << 1000.0f * (double)t_ocl / getTickFrequency() / iterations << "ms" << endl;
Mat result = drawSquaresBoth(image, squares_cpu, squares_ocl);
imshow(wndname, result);
imwrite(outfile, result);
cvWaitKey(0);
return 0;
}
This diff is collapsed.
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Peng Xiao, pengxiao@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other oclMaterials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <stdio.h>
#include "opencv2/core/core.hpp"
......@@ -61,14 +16,6 @@ const float GOOD_PORTION = 0.15f;
namespace
{
void help();
void help()
{
std::cout << "\nThis program demonstrates using SURF_OCL features detector and descriptor extractor" << std::endl;
std::cout << "\nUsage:\n\tsurf_matcher --left <image1> --right <image2> [-c]" << std::endl;
std::cout << "\nExample:\n\tsurf_matcher --left box.png --right box_in_scene.png" << std::endl;
}
int64 work_begin = 0;
int64 work_end = 0;
......@@ -81,7 +28,8 @@ void workEnd()
{
work_end = getTickCount() - work_begin;
}
double getTime(){
double getTime()
{
return work_end /((double)cvGetTickFrequency() * 1000.);
}
......@@ -118,7 +66,7 @@ Mat drawGoodMatches(
const vector<KeyPoint>& keypoints2,
vector<DMatch>& matches,
vector<Point2f>& scene_corners_
)
)
{
//-- Sort matches and preserve top 10% matches
std::sort(matches.begin(), matches.end());
......@@ -154,8 +102,10 @@ Mat drawGoodMatches(
}
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( cpu_img1.cols, 0 );
obj_corners[2] = cvPoint( cpu_img1.cols, cpu_img1.rows ); obj_corners[3] = cvPoint( 0, cpu_img1.rows );
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( cpu_img1.cols, 0 );
obj_corners[2] = cvPoint( cpu_img1.cols, cpu_img1.rows );
obj_corners[3] = cvPoint( 0, cpu_img1.rows );
std::vector<Point2f> scene_corners(4);
Mat H = findHomography( obj, scene, CV_RANSAC );
......@@ -185,6 +135,21 @@ Mat drawGoodMatches(
// use cpu findHomography interface to calculate the transformation matrix
int main(int argc, char* argv[])
{
const char* keys =
"{ h | help | false | print help message }"
"{ l | left | | specify left image }"
"{ r | right | | specify right image }"
"{ o | output | SURF_output.jpg | specify output save path (only works in CPU or GPU only mode) }"
"{ c | use_cpu | false | use CPU algorithms }"
"{ a | use_all | false | use both CPU and GPU algorithms}";
CommandLineParser cmd(argc, argv, keys);
if (cmd.get<bool>("help"))
{
std::cout << "Avaible options:" << std::endl;
cmd.printParams();
return 0;
}
vector<cv::ocl::Info> info;
if(cv::ocl::getDevice(info) == 0)
{
......@@ -195,48 +160,32 @@ int main(int argc, char* argv[])
Mat cpu_img1, cpu_img2, cpu_img1_grey, cpu_img2_grey;
oclMat img1, img2;
bool useCPU = false;
bool useCPU = cmd.get<bool>("c");
bool useGPU = false;
bool useALL = false;
bool useALL = cmd.get<bool>("a");
for (int i = 1; i < argc; ++i)
{
if (string(argv[i]) == "--left")
{
cpu_img1 = imread(argv[++i]);
string outpath = cmd.get<std::string>("o");
cpu_img1 = imread(cmd.get<std::string>("l"));
CV_Assert(!cpu_img1.empty());
cvtColor(cpu_img1, cpu_img1_grey, CV_BGR2GRAY);
img1 = cpu_img1_grey;
}
else if (string(argv[i]) == "--right")
{
cpu_img2 = imread(argv[++i]);
cpu_img2 = imread(cmd.get<std::string>("r"));
CV_Assert(!cpu_img2.empty());
cvtColor(cpu_img2, cpu_img2_grey, CV_BGR2GRAY);
img2 = cpu_img2_grey;
}
else if (string(argv[i]) == "-c")
{
useCPU = true;
useGPU = false;
useALL = false;
}else if(string(argv[i]) == "-g")
{
useGPU = true;
useCPU = false;
useALL = false;
}else if(string(argv[i]) == "-a")
if(useALL)
{
useALL = true;
useCPU = false;
useGPU = false;
}
else if (string(argv[i]) == "--help")
else if(useCPU==false && useALL==false)
{
help();
return -1;
}
useGPU = true;
}
if(!useCPU)
{
std::cout
......@@ -298,7 +247,8 @@ int main(int argc, char* argv[])
surf_time = getTime();
std::cout << "SURF run time: " << surf_time / LOOP_NUM << " ms" << std::endl<<"\n";
}else
}
else
{
//cpu runs
for (int i = 0; i <= LOOP_NUM; i++)
......@@ -371,12 +321,15 @@ int main(int argc, char* argv[])
{
namedWindow("cpu surf matches", 0);
imshow("cpu surf matches", img_matches);
imwrite(outpath, img_matches);
}
else if(useGPU)
{
namedWindow("ocl surf matches", 0);
imshow("ocl surf matches", img_matches);
}else
imwrite(outpath, img_matches);
}
else
{
namedWindow("cpu surf matches", 0);
imshow("cpu surf matches", img_matches);
......
#include <iostream>
#include <vector>
#include <iomanip>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/video/video.hpp"
using namespace std;
using namespace cv;
using namespace cv::ocl;
typedef unsigned char uchar;
#define LOOP_NUM 10
int64 work_begin = 0;
int64 work_end = 0;
static void workBegin()
{
work_begin = getTickCount();
}
static void workEnd()
{
work_end += (getTickCount() - work_begin);
}
static double getTime()
{
return work_end * 1000. / getTickFrequency();
}
template <typename T> inline T clamp (T x, T a, T b)
{
return ((x) > (a) ? ((x) < (b) ? (x) : (b)) : (a));
}
template <typename T> inline T mapValue(T x, T a, T b, T c, T d)
{
x = clamp(x, a, b);
return c + (d - c) * (x - a) / (b - a);
}
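// Note: clamp() restricts x to [a, b]; mapValue() then linearly rescales the
// clamped value from [a, b] into [c, d]. getFlowField() below uses this to map
// each flow component from [-maxDisplacement, maxDisplacement] into the 8-bit
// range [0, 255] for visualization.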
static void getFlowField(const Mat& u, const Mat& v, Mat& flowField)
{
float maxDisplacement = 1.0f;
for (int i = 0; i < u.rows; ++i)
{
const float* ptr_u = u.ptr<float>(i);
const float* ptr_v = v.ptr<float>(i);
for (int j = 0; j < u.cols; ++j)
{
float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j]));
if (d > maxDisplacement)
maxDisplacement = d;
}
}
flowField.create(u.size(), CV_8UC4);
for (int i = 0; i < flowField.rows; ++i)
{
const float* ptr_u = u.ptr<float>(i);
const float* ptr_v = v.ptr<float>(i);
Vec4b* row = flowField.ptr<Vec4b>(i);
for (int j = 0; j < flowField.cols; ++j)
{
row[j][0] = 0;
row[j][1] = static_cast<unsigned char> (mapValue (-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
row[j][2] = static_cast<unsigned char> (mapValue ( ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
row[j][3] = 255;
}
}
}
int main(int argc, const char* argv[])
{
static std::vector<Info> ocl_info;
ocl::getDevice(ocl_info);
// if you want to use a device other than the default, set it here
setDevice(ocl_info[0]);
// set this to save kernel compile time from the second run onwards
ocl::setBinpath("./");
const char* keys =
"{ h | help | false | print help message }"
"{ l | left | | specify left image }"
"{ r | right | | specify right image }"
"{ o | output | tvl1_output.jpg | specify output save path }"
"{ c | camera | 0 | enable camera capturing }"
"{ s | use_cpu | false | use cpu or gpu to process the image }"
"{ v | video | | use video as input }";
CommandLineParser cmd(argc, argv, keys);
if (cmd.get<bool>("help"))
{
cout << "Usage: pyrlk_optical_flow [options]" << endl;
cout << "Avaible options:" << endl;
cmd.printParams();
return 0;
}
bool defaultPicturesFail = false;
string fname0 = cmd.get<string>("l");
string fname1 = cmd.get<string>("r");
string vdofile = cmd.get<string>("v");
string outpath = cmd.get<string>("o");
bool useCPU = cmd.get<bool>("s");
bool useCamera = cmd.get<bool>("c");
int inputName = cmd.get<int>("c");
Mat frame0 = imread(fname0, cv::IMREAD_GRAYSCALE);
Mat frame1 = imread(fname1, cv::IMREAD_GRAYSCALE);
cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
cv::ocl::OpticalFlowDual_TVL1_OCL d_alg;
Mat flow, show_flow;
Mat flow_vec[2];
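// The CPU path (alg) uses the generic DenseOpticalFlow interface: calc() fills
// a single 2-channel flow Mat, which split() separates into the x/y planes of
// flow_vec. The OCL path (d_alg) computes the x and y flow components directly
// into separate oclMats, which are downloaded back to host Mats before display.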
if (frame0.empty() || frame1.empty())
{
useCamera = true;
defaultPicturesFail = true;
CvCapture* capture = 0;
capture = cvCaptureFromCAM( inputName );
if (!capture)
{
cout << "Can't load input images" << endl;
return -1;
}
}
if (useCamera)
{
CvCapture* capture = 0;
Mat frame, frameCopy;
Mat frame0Gray, frame1Gray;
Mat ptr0, ptr1;
if(vdofile == "")
capture = cvCaptureFromCAM( inputName );
else
capture = cvCreateFileCapture(vdofile.c_str());
int c = inputName ;
if(!capture)
{
if(vdofile == "")
cout << "Capture from CAM " << c << " didn't work" << endl;
else
cout << "Capture from file " << vdofile << " failed" <<endl;
if (defaultPicturesFail)
{
return -1;
}
goto nocamera;
}
cout << "In capture ..." << endl;
for(int i = 0;; i++)
{
frame = cvQueryFrame( capture );
if( frame.empty() )
break;
if (i == 0)
{
frame.copyTo( frame0 );
cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
}
else
{
if (i%2 == 1)
{
frame.copyTo(frame1);
cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);
ptr0 = frame0Gray;
ptr1 = frame1Gray;
}
else
{
frame.copyTo(frame0);
cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
ptr0 = frame1Gray;
ptr1 = frame0Gray;
}
if (useCPU)
{
alg->calc(ptr0, ptr1, flow);
split(flow, flow_vec);
}
else
{
oclMat d_flowx, d_flowy;
d_alg(oclMat(ptr0), oclMat(ptr1), d_flowx, d_flowy);
d_flowx.download(flow_vec[0]);
d_flowy.download(flow_vec[1]);
}
if (i%2 == 1)
frame1.copyTo(frameCopy);
else
frame0.copyTo(frameCopy);
getFlowField(flow_vec[0], flow_vec[1], show_flow);
imshow("PyrLK [Sparse]", show_flow);
}
if( waitKey( 10 ) >= 0 )
goto _cleanup_;
}
waitKey(0);
_cleanup_:
cvReleaseCapture( &capture );
}
else
{
nocamera:
oclMat d_flowx, d_flowy;
for(int i = 0; i <= LOOP_NUM; i ++)
{
cout << "loop" << i << endl;
if (i > 0) workBegin();
if (useCPU)
{
alg->calc(frame0, frame1, flow);
split(flow, flow_vec);
}
else
{
d_alg(oclMat(frame0), oclMat(frame1), d_flowx, d_flowy);
d_flowx.download(flow_vec[0]);
d_flowy.download(flow_vec[1]);
}
if (i > 0 && i <= LOOP_NUM)
workEnd();
if (i == LOOP_NUM)
{
if (useCPU)
cout << "average CPU time (noCamera) : ";
else
cout << "average GPU time (noCamera) : ";
cout << getTime() / LOOP_NUM << " ms" << endl;
getFlowField(flow_vec[0], flow_vec[1], show_flow);
imshow("PyrLK [Sparse]", show_flow);
imwrite(outpath, show_flow);
}
}
}
waitKey();
return 0;
}
\ No newline at end of file