Commit a1de9e9a authored by Roman Donchenko, committed by OpenCV Buildbot

Merge pull request #2329 from ilya-lavrenov:macosx_ogl

parents ecbe8430 b92a46c1
@@ -142,7 +142,7 @@ OCV_OPTION(WITH_IPP "Include Intel IPP support" OFF
OCV_OPTION(WITH_JASPER "Include JPEG2K support" ON IF (NOT IOS) )
OCV_OPTION(WITH_JPEG "Include JPEG support" ON)
OCV_OPTION(WITH_OPENEXR "Include ILM support via OpenEXR" ON IF (NOT IOS) )
OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF IF (NOT ANDROID AND NOT APPLE) )
OCV_OPTION(WITH_OPENGL "Include OpenGL support" OFF IF (NOT ANDROID) )
OCV_OPTION(WITH_OPENNI "Include OpenNI support" OFF IF (NOT ANDROID AND NOT IOS) )
OCV_OPTION(WITH_PNG "Include PNG support" ON)
OCV_OPTION(WITH_PVAPI "Include Prosilica GigE support" ON IF (NOT ANDROID AND NOT IOS) )
......
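With WITH_OPENGL no longer excluded on Apple, an OS X build configured with -DWITH_OPENGL=ON can be probed for OpenGL-backed HighGUI windows at runtime. A minimal hedged sketch, not part of the commit (window title and behaviour on builds without OpenGL are illustrative assumptions):

// --- illustrative sketch, not part of the commit ---
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

int main()
{
    try
    {
        // namedWindow with WINDOW_OPENGL is expected to throw if the build lacks OpenGL support
        cv::namedWindow("gl_check", cv::WINDOW_OPENGL);
        std::cout << "OpenGL-backed HighGUI windows are available" << std::endl;
        cv::destroyWindow("gl_check");
    }
    catch (const cv::Exception & e)
    {
        std::cout << "No OpenGL support in this build: " << e.what() << std::endl;
    }
    return 0;
}
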
@@ -47,22 +47,27 @@
#include "gl_core_3_1.hpp"
#ifdef HAVE_OPENGL
#if defined(__APPLE__)
#include <mach-o/dyld.h>
#ifdef __APPLE__
#include <dlfcn.h>
static void* AppleGLGetProcAddress (const char* name)
{
static const struct mach_header* image = 0;
if (!image)
image = NSAddImage("/System/Library/Frameworks/OpenGL.framework/Versions/Current/OpenGL", NSADDIMAGE_OPTION_RETURN_ON_ERROR);
// prepend a '_' for the Unix C symbol mangling convention
std::string symbolName = "_";
symbolName += std::string(name);
NSSymbol symbol = image ? NSLookupSymbolInImage(image, &symbolName[0], NSLOOKUPSYMBOLINIMAGE_OPTION_BIND | NSLOOKUPSYMBOLINIMAGE_OPTION_RETURN_ON_ERROR) : 0;
return symbol ? NSAddressOfSymbol(symbol) : 0;
static bool initialized = false;
static void * handle = NULL;
if (!handle)
{
if (!initialized)
{
initialized = true;
const char * const path = "/System/Library/Frameworks/OpenGL.framework/Versions/Current/OpenGL";
handle = dlopen(path, RTLD_LAZY | RTLD_GLOBAL);
}
if (!handle)
return NULL;
}
return dlsym(handle, name);
}
#endif // __APPLE__
......
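The replacement above drops the deprecated NSAddImage/NSLookupSymbolInImage dyld calls in favour of plain dlopen/dlsym; note that dlsym takes the undecorated symbol name, so the leading '_' prepended in the old code is no longer needed. A standalone sketch of the same technique, assuming the usual OS X framework path (illustrative only, not part of the commit):

// --- illustrative sketch, not part of the commit ---
#include <dlfcn.h>
#include <cstdio>

typedef void (*GlFinishFn)(void);

int main()
{
    // load the system OpenGL framework the same way AppleGLGetProcAddress does
    void * handle = dlopen("/System/Library/Frameworks/OpenGL.framework/Versions/Current/OpenGL",
                           RTLD_LAZY | RTLD_GLOBAL);
    if (!handle)
    {
        std::printf("dlopen failed: %s\n", dlerror());
        return 1;
    }

    // resolve one entry point by its plain C name
    GlFinishFn glFinishPtr = (GlFinishFn)dlsym(handle, "glFinish");
    std::printf("glFinish %s\n", glFinishPtr ? "resolved" : "not found");

    dlclose(handle);
    return 0;
}
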
@@ -7,6 +7,6 @@ FIND_PACKAGE( OpenCV REQUIRED )
find_package (OpenGL REQUIRED)
ADD_EXECUTABLE(OpenGL_Qt_Binding main.cpp)
ADD_EXECUTABLE(OpenGL_Qt_Binding qt_opengl.cpp)
TARGET_LINK_LIBRARIES(OpenGL_Qt_Binding ${OpenCV_LIBS} ${OPENGL_LIBRARIES} )
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/cube4.avi ${CMAKE_CURRENT_BINARY_DIR}/cube4.avi COPYONLY)
//Yannick Verdie 2010
//--- Please read help() below: ---
// Yannick Verdie 2010
// --- Please read help() below: ---
#include <iostream>
#include <vector>
@@ -11,18 +10,10 @@
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/legacy/compat.hpp>
#if defined WIN32 || defined _WIN32 || defined WINCE
#include <windows.h>
#undef small
#undef min
#undef max
#undef abs
#endif
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <OpenGL/gl.h>
#else
#include <GL/gl.h>
#include <GL/gl.h>
#endif
using namespace std;
@@ -30,21 +21,22 @@ using namespace cv;
static void help()
{
cout << "\nThis demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
" and dang if it doesn't throw in the use of of the POSIT 3D tracking algorithm too\n"
cout << "This demo demonstrates the use of the Qt enhanced version of the highgui GUI interface\n"
"and dang if it doesn't throw in the use of of the POSIT 3D tracking algorithm too\n"
"It works off of the video: cube4.avi\n"
"Using OpenCV version %s\n" << CV_VERSION << "\n\n"
" 1). This demo is mainly based on work from Javier Barandiaran Martirena\n"
" See this page http://code.opencv.org/projects/opencv/wiki/Posit.\n"
" 2). This is a demo to illustrate how to use **OpenGL Callback**.\n"
" 3). You need Qt binding to compile this sample with OpenGL support enabled.\n"
" 4). The features' detection is very basic and could highly be improved \n"
" (basic thresholding tuned for the specific video) but 2).\n"
" 5) THANKS TO Google Summer of Code 2010 for supporting this work!\n" << endl;
"Using OpenCV version " << CV_VERSION << "\n\n"
" 1) This demo is mainly based on work from Javier Barandiaran Martirena\n"
" See this page http://code.opencv.org/projects/opencv/wiki/Posit.\n"
" 2) This is a demo to illustrate how to use **OpenGL Callback**.\n"
" 3) You need Qt binding to compile this sample with OpenGL support enabled.\n"
" 4) The features' detection is very basic and could highly be improved\n"
" (basic thresholding tuned for the specific video) but 2).\n"
" 5) Thanks to Google Summer of Code 2010 for supporting this work!\n" << endl;
}
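As item 2) of the help text says, the core of the demo is HighGUI's OpenGL draw callback. Distilled to its essentials, the pattern used further down in main() looks roughly like this (window title and callback name are illustrative, not part of the commit):

// --- illustrative sketch, not part of the commit ---
#include <opencv2/highgui/highgui.hpp>

static void drawScene(void * userdata)
{
    // raw OpenGL calls go here; HighGUI makes the window's GL context current first
    (void)userdata;
}

int main()
{
    cv::namedWindow("gl_demo", cv::WINDOW_OPENGL);
    cv::setOpenGlDrawCallback("gl_demo", drawScene, 0);
    cv::updateWindow("gl_demo");   // forces a repaint, which invokes drawScene
    cv::waitKey();
    return 0;
}
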
#define FOCAL_LENGTH 600
#define CUBE_SIZE 10
#define CUBE_SIZE 0.5
static void renderCube(float size)
{
@@ -88,7 +80,6 @@ static void renderCube(float size)
glEnd();
}
static void on_opengl(void* param)
{
//Draw the object with the estimated pose
@@ -104,172 +95,174 @@ static void on_opengl(void* param)
glDisable( GL_LIGHTING );
}
static void initPOSIT(std::vector<CvPoint3D32f> *modelPoints)
static void initPOSIT(std::vector<CvPoint3D32f> * modelPoints)
{
//Create the model pointss
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); //The first must be (0,0,0)
// Create the model points
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, 0.0f)); // The first must be (0, 0, 0)
modelPoints->push_back(cvPoint3D32f(0.0f, 0.0f, CUBE_SIZE));
modelPoints->push_back(cvPoint3D32f(CUBE_SIZE, 0.0f, 0.0f));
modelPoints->push_back(cvPoint3D32f(0.0f, CUBE_SIZE, 0.0f));
}
static void foundCorners(vector<CvPoint2D32f> *srcImagePoints, const Mat& source, Mat& grayImage)
static void foundCorners(vector<CvPoint2D32f> * srcImagePoints, const Mat & source, Mat & grayImage)
{
cvtColor(source, grayImage, COLOR_RGB2GRAY);
GaussianBlur(grayImage, grayImage, Size(11,11), 0, 0);
GaussianBlur(grayImage, grayImage, Size(11, 11), 0, 0);
normalize(grayImage, grayImage, 0, 255, NORM_MINMAX);
threshold(grayImage, grayImage, 26, 255, THRESH_BINARY_INV); //25
Mat MgrayImage = grayImage;
//For debug
//MgrayImage = MgrayImage.clone();//deep copy
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(MgrayImage, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
Point p;
vector<CvPoint2D32f> srcImagePoints_temp(4,cvPoint2D32f(0,0));
vector<CvPoint2D32f> srcImagePoints_temp(4, cvPoint2D32f(0, 0));
if (contours.size() == srcImagePoints_temp.size())
{
for(size_t i = 0 ; i<contours.size(); i++ )
for (size_t i = 0; i < contours.size(); i++ )
{
p.x = p.y = 0;
for(size_t j = 0 ; j<contours[i].size(); j++ )
p+=contours[i][j];
for (size_t j = 0 ; j < contours[i].size(); j++)
p += contours[i][j];
srcImagePoints_temp.at(i)=cvPoint2D32f(float(p.x)/contours[i].size(),float(p.y)/contours[i].size());
srcImagePoints_temp.at(i) = cvPoint2D32f(float(p.x) / contours[i].size(), float(p.y) / contours[i].size());
}
//Need to keep the same order
//> y = 0
//> x = 1
//< x = 2
//< y = 3
// Need to keep the same order
// > y = 0
// > x = 1
// < x = 2
// < y = 3
//get point 0;
// get point 0;
size_t index = 0;
for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
if (srcImagePoints_temp.at(i).y > srcImagePoints_temp.at(index).y)
index = i;
}
srcImagePoints->at(0) = srcImagePoints_temp.at(index);
//get point 1;
// get point 1;
index = 0;
for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
if (srcImagePoints_temp.at(i).x > srcImagePoints_temp.at(index).x)
index = i;
}
srcImagePoints->at(1) = srcImagePoints_temp.at(index);
//get point 2;
// get point 2;
index = 0;
for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++)
if (srcImagePoints_temp.at(i).x < srcImagePoints_temp.at(index).x)
index = i;
}
srcImagePoints->at(2) = srcImagePoints_temp.at(index);
//get point 3;
// get point 3;
index = 0;
for(size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
{
for (size_t i = 1 ; i<srcImagePoints_temp.size(); i++ )
if (srcImagePoints_temp.at(i).y < srcImagePoints_temp.at(index).y)
index = i;
}
srcImagePoints->at(3) = srcImagePoints_temp.at(index);
Mat Msource = source;
stringstream ss;
for(size_t i = 0 ; i<srcImagePoints_temp.size(); i++ )
for (size_t i = 0; i<srcImagePoints_temp.size(); i++ )
{
ss<<i;
circle(Msource,srcImagePoints->at(i),5,Scalar(0,0,255));
putText(Msource,ss.str(),srcImagePoints->at(i),FONT_HERSHEY_SIMPLEX,1,Scalar(0,0,255));
ss << i;
circle(Msource, srcImagePoints->at(i), 5, Scalar(0, 0, 255));
putText(Msource, ss.str(), srcImagePoints->at(i), FONT_HERSHEY_SIMPLEX, 1, Scalar(0, 0, 255));
ss.str("");
//new coordinate system in the middle of the frame and reversed (camera coordinate system)
srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x-source.cols/2,source.rows/2-srcImagePoints_temp.at(i).y);
// new coordinate system in the middle of the frame and reversed (camera coordinate system)
srcImagePoints->at(i) = cvPoint2D32f(srcImagePoints_temp.at(i).x - source.cols / 2,
source.rows / 2 - srcImagePoints_temp.at(i).y);
}
}
}
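The last step of foundCorners() moves each detected corner into the frame cvPOSIT expects: origin at the image centre, y axis pointing up. A hypothetical helper equivalent to the in-loop expression above (the name toCameraCoords is an assumption, not part of the commit); for a 640x480 frame, pixel (500, 100) maps to (180, 140):

// --- illustrative helper, not part of the commit ---
static CvPoint2D32f toCameraCoords(const cv::Point2f & pixel, const cv::Size & frameSize)
{
    // origin moved to the image centre, y flipped so it grows upwards;
    // e.g. in a 640x480 frame, pixel (500, 100) becomes (180, 140)
    return cvPoint2D32f(pixel.x - frameSize.width / 2.f,
                        frameSize.height / 2.f - pixel.y);
}
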
static void createOpenGLMatrixFrom(float *posePOSIT,const CvMatr32f &rotationMatrix, const CvVect32f &translationVector)
static void createOpenGLMatrixFrom(float * posePOSIT, const CvMatr32f & rotationMatrix,
const CvVect32f & translationVector)
{
//coordinate system returned is relative to the first 3D input point
for (int f=0; f<3; f++)
{
for (int c=0; c<3; c++)
{
posePOSIT[c*4+f] = rotationMatrix[f*3+c]; //transposed
}
}
posePOSIT[3] = 0.0;
posePOSIT[7] = 0.0;
posePOSIT[11] = 0.0;
posePOSIT[12] = translationVector[0];
posePOSIT[13] = translationVector[1];
posePOSIT[14] = translationVector[2];
posePOSIT[15] = 1.0;
// coordinate system returned is relative to the first 3D input point
for (int f = 0; f < 3; f++)
for (int c = 0; c < 3; c++)
posePOSIT[c * 4 + f] = rotationMatrix[f * 3 + c]; // transposed
posePOSIT[3] = translationVector[0];
posePOSIT[7] = translationVector[1];
posePOSIT[11] = translationVector[2];
posePOSIT[12] = 0.0f;
posePOSIT[13] = 0.0f;
posePOSIT[14] = 0.0f;
posePOSIT[15] = 1.0f;
}
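The double loop transposes the flat 3x3 rotation array from cvPOSIT into the 4x4 pose array, hence the "transposed" comment. A tiny standalone check of that index mapping (values are illustrative, not part of the commit):

// --- illustrative sketch, not part of the commit ---
#include <cstdio>

int main()
{
    float R[9];                       // flat 3x3 rotation, as cvPOSIT fills it
    for (int i = 0; i < 9; i++)
        R[i] = float(i);

    float M[16] = { 0 };
    for (int f = 0; f < 3; f++)       // f indexes rows of R
        for (int c = 0; c < 3; c++)   // c indexes columns of R
            M[c * 4 + f] = R[f * 3 + c];

    // R's element at row 1, column 2 is R[5]; it lands at index 2 * 4 + 1 = 9 of M
    std::printf("M[9] = %.0f (expected 5)\n", M[9]);
    return 0;
}
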
int main(void)
{
help();
VideoCapture video("cube4.avi");
CV_Assert(video.isOpened());
Mat source, grayImage;
string fileName = "cube4.avi";
VideoCapture video(fileName);
if (!video.isOpened())
{
cerr << "Video file " << fileName << " could not be opened" << endl;
return EXIT_FAILURE;
}
Mat source, grayImage;
video >> source;
namedWindow("original", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
namedWindow("POSIT", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
namedWindow("Original", WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
namedWindow("POSIT", WINDOW_OPENGL | CV_WINDOW_FREERATIO);
resizeWindow("POSIT", source.cols, source.rows);
float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
setOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear).\n"
"This demo is only to illustrate how to use OpenGL callback.\n"
" -- Press ESC to exit.", 10000);
float OpenGLMatrix[] = { 0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0 };
setOpenGlContext("POSIT");
setOpenGlDrawCallback("POSIT", on_opengl, OpenGLMatrix);
vector<CvPoint3D32f> modelPoints;
initPOSIT(&modelPoints);
//Create the POSIT object with the model points
CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size() );
// Create the POSIT object with the model points
CvPOSITObject* positObject = cvCreatePOSITObject( &modelPoints[0], (int)modelPoints.size());
CvMatr32f rotation_matrix = new float[9];
CvVect32f translation_vector = new float[3];
CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);
vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));
CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1e-4f);
vector<CvPoint2D32f> srcImagePoints(4, cvPoint2D32f(0, 0));
while(waitKey(33) != 27)
while (waitKey(33) != 27)
{
video >> source;
imshow("original",source);
if (source.empty())
break;
imshow("Original", source);
foundCorners(&srcImagePoints,source,grayImage);
cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);
foundCorners(&srcImagePoints, source, grayImage);
cvPOSIT(positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector);
createOpenGLMatrixFrom(OpenGLMatrix, rotation_matrix, translation_vector);
imshow("POSIT",source);
updateWindow("POSIT");
if (video.get(CV_CAP_PROP_POS_AVI_RATIO) > 0.99)
video.set(CV_CAP_PROP_POS_AVI_RATIO, 0);
}
setOpenGlDrawCallback("POSIT", NULL, NULL);
destroyAllWindows();
cvReleasePOSITObject(&positObject);
return 0;
delete[]rotation_matrix;
delete[]translation_vector;
return EXIT_SUCCESS;
}