Commit 4a066317 authored by Alexander Alekhin

Merge pull request #1428 from alalek:pr1257_refactoring

parents 1f8ccc16 e310fc55
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -45,230 +20,229 @@ Mentor: Delia Passalacqua
#include "opencv2/face.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/imgproc/types_c.h"
#include <vector>
#include <string>
namespace cv {
namespace face {
//! @addtogroup face
//! @{
struct CV_EXPORTS_W CParams{
typedef bool(*FN_FaceDetector)(InputArray, OutputArray, void* userData);
struct CParams{
String cascade; //!< the face detector
double scaleFactor; //!< Parameter specifying how much the image size is reduced at each image scale.
int minNeighbors; //!< Parameter specifying how many neighbors each candidate rectangle should have to retain it.
Size minSize; //!< Minimum possible object size.
Size maxSize; //!< Maximum possible object size.
CParams(
CV_EXPORTS CParams(
String cascade_model,
double sf = 1.1,
int minN = 3,
Size minSz = Size(30, 30),
Size maxSz = Size()
);
};
/** @brief Default face detector
This function is mainly utilized by the implementation of a Facemark Algorithm.
End users are advised to use the function Facemark::getFaces, whose face detector can be user-defined and passed to the algorithm via Facemark::setFaceDetector.
@param image The input image to be processed.
@param faces Output of the function, representing the regions of interest of the detected faces.
Each face is stored in a cv::Rect container.
@param extra_params extra parameters
<B>Example of usage</B>
@code
std::vector<cv::Rect> faces;
CParams params("haarcascade_frontalface_alt.xml");
cv::face::getFaces(frame, faces, &params);
for(int j=0;j<faces.size();j++){
CascadeClassifier face_cascade;
};
/** @brief Default face detector
This function is mainly utilized by the implementation of a Facemark Algorithm.
End users are advised to use the function Facemark::getFaces, whose face detector can be user-defined and passed to the algorithm via Facemark::setFaceDetector.
@param image The input image to be processed.
@param faces Output of the function, representing the regions of interest of the detected faces.
Each face is stored in a cv::Rect container.
@param params detector parameters
<B>Example of usage</B>
@code
std::vector<cv::Rect> faces;
CParams params("haarcascade_frontalface_alt.xml");
cv::face::getFaces(frame, faces, &params);
for(int j=0;j<faces.size();j++){
cv::rectangle(frame, faces[j], cv::Scalar(255,0,255));
}
cv::imshow("detection", frame);
@endcode
*/
/*other option: move this function inside Facemark as default face detector*/
CV_EXPORTS bool getFaces( InputArray image,
OutputArray faces,
void * extra_params
);
/** @brief A utility to load the list of paths to training images and annotation files.
@param imageList A text file containing the paths to the training images.
@param annotationList A text file containing the paths to the training annotations.
@param images The loaded paths of training images.
@param annotations The loaded paths of annotation files.
Example of usage:
@code
String imageFiles = "images_path.txt";
String ptsFiles = "annotations_path.txt";
std::vector<String> images_train;
std::vector<String> landmarks_train;
loadDatasetList(imageFiles,ptsFiles,images_train,landmarks_train);
@endcode
*/
CV_EXPORTS_W bool loadDatasetList(String imageList,
}
cv::imshow("detection", frame);
@endcode
*/
CV_EXPORTS bool getFaces(InputArray image, OutputArray faces, CParams* params);
/** @brief A utility to load the list of paths to training images and annotation files.
@param imageList A text file containing the paths to the training images.
@param annotationList A text file containing the paths to the training annotations.
@param images The loaded paths of training images.
@param annotations The loaded paths of annotation files.
Example of usage:
@code
String imageFiles = "images_path.txt";
String ptsFiles = "annotations_path.txt";
std::vector<String> images_train;
std::vector<String> landmarks_train;
loadDatasetList(imageFiles,ptsFiles,images_train,landmarks_train);
@endcode
*/
CV_EXPORTS_W bool loadDatasetList(String imageList,
String annotationList,
std::vector<String> & images,
std::vector<String> & annotations);
/** @brief A utility to load a facial landmark dataset from a single file.
@param filename The filename of a file that contains the dataset information.
Each line contains the filename of an image followed by
pairs of x and y values of facial landmark points separated by a space.
Example
@code
/home/user/ibug/image_003_1.jpg 336.820955 240.864510 334.238298 260.922709 335.266918 ...
/home/user/ibug/image_005_1.jpg 376.158428 230.845712 376.736984 254.924635 383.265403 ...
@endcode
@param images A vector where each element represents the filename of an image in the dataset.
Images are not loaded by default, to save memory.
@param facePoints The loaded landmark points for all training data.
@param delim Delimiter between each element, the default value is a whitespace.
@param offset An offset value to adjust the loaded points.
<B>Example of usage</B>
@code
cv::String imageFiles = "../data/images_train.txt";
cv::String ptsFiles = "../data/points_train.txt";
std::vector<String> images;
std::vector<std::vector<Point2f> > facePoints;
loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0);
@endcode
*/
CV_EXPORTS_W bool loadTrainingData( String filename , std::vector<String> & images,
/** @brief A utility to load a facial landmark dataset from a single file.
@param filename The filename of a file that contains the dataset information.
Each line contains the filename of an image followed by
pairs of x and y values of facial landmark points separated by a space.
Example
@code
/home/user/ibug/image_003_1.jpg 336.820955 240.864510 334.238298 260.922709 335.266918 ...
/home/user/ibug/image_005_1.jpg 376.158428 230.845712 376.736984 254.924635 383.265403 ...
@endcode
@param images A vector where each element represents the filename of an image in the dataset.
Images are not loaded by default, to save memory.
@param facePoints The loaded landmark points for all training data.
@param delim Delimiter between each element, the default value is a whitespace.
@param offset An offset value to adjust the loaded points.
<B>Example of usage</B>
@code
cv::String imageFiles = "../data/images_train.txt";
cv::String ptsFiles = "../data/points_train.txt";
std::vector<String> images;
std::vector<std::vector<Point2f> > facePoints;
loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0f);
@endcode
*/
CV_EXPORTS_W bool loadTrainingData( String filename , std::vector<String> & images,
OutputArray facePoints,
char delim = ' ', float offset = 0.0);
/** @brief A utility to load facial landmark information from the dataset.
@param imageList A file containing the list of image filenames in the training dataset.
@param groundTruth A file containing the list of filenames
where the landmark point information is stored.
The content in each file should follow the standard format (see face::loadFacePoints).
@param images A vector where each element represents the filename of an image in the dataset.
Images are not loaded by default, to save memory.
@param facePoints The loaded landmark points for all training data.
@param offset An offset value to adjust the loaded points.
<B>Example of usage</B>
@code
cv::String imageFiles = "../data/images_train.txt";
cv::String ptsFiles = "../data/points_train.txt";
std::vector<String> images;
std::vector<std::vector<Point2f> > facePoints;
loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0);
@endcode
example of content in the images_train.txt
@code
/home/user/ibug/image_003_1.jpg
/home/user/ibug/image_004_1.jpg
/home/user/ibug/image_005_1.jpg
/home/user/ibug/image_006.jpg
@endcode
example of content in the points_train.txt
@code
/home/user/ibug/image_003_1.pts
/home/user/ibug/image_004_1.pts
/home/user/ibug/image_005_1.pts
/home/user/ibug/image_006.pts
@endcode
*/
CV_EXPORTS_W bool loadTrainingData( String imageList, String groundTruth,
char delim = ' ', float offset = 0.0f);
/** @brief A utility to load facial landmark information from the dataset.
@param imageList A file containing the list of image filenames in the training dataset.
@param groundTruth A file containing the list of filenames
where the landmark point information is stored.
The content in each file should follow the standard format (see face::loadFacePoints).
@param images A vector where each element represents the filename of an image in the dataset.
Images are not loaded by default, to save memory.
@param facePoints The loaded landmark points for all training data.
@param offset An offset value to adjust the loaded points.
<B>Example of usage</B>
@code
cv::String imageFiles = "../data/images_train.txt";
cv::String ptsFiles = "../data/points_train.txt";
std::vector<String> images;
std::vector<std::vector<Point2f> > facePoints;
loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0f);
@endcode
example of content in the images_train.txt
@code
/home/user/ibug/image_003_1.jpg
/home/user/ibug/image_004_1.jpg
/home/user/ibug/image_005_1.jpg
/home/user/ibug/image_006.jpg
@endcode
example of content in the points_train.txt
@code
/home/user/ibug/image_003_1.pts
/home/user/ibug/image_004_1.pts
/home/user/ibug/image_005_1.pts
/home/user/ibug/image_006.pts
@endcode
*/
CV_EXPORTS_W bool loadTrainingData( String imageList, String groundTruth,
std::vector<String> & images,
OutputArray facePoints,
float offset = 0.0);
/** @brief A utility to load facial landmark information from a given file.
@param filename The filename of the file containing the facial landmark data.
@param points The loaded facial landmark points.
@param offset An offset value to adjust the loaded points.
<B>Example of usage</B>
@code
std::vector<Point2f> points;
face::loadFacePoints("filename.txt", points, 0.0);
@endcode
The annotation file should follow the default format which is
@code
version: 1
n_points: 68
{
212.716603 499.771793
230.232816 566.290071
...
}
@endcode
where n_points is the number of points considered
and each point is represented as its position in x and y.
*/
CV_EXPORTS_W bool loadFacePoints( String filename, OutputArray points,
float offset = 0.0);
/** @brief Utility to draw the detected facial landmark points
@param image The input image to be processed.
@param points The facial landmark points to be drawn.
@param color The color of points in BGR format represented by cv::Scalar.
<B>Example of usage</B>
@code
std::vector<Rect> faces;
std::vector<std::vector<Point2f> > landmarks;
facemark->getFaces(img, faces);
facemark->fit(img, faces, landmarks);
for(size_t j=0;j<faces.size();j++){
float offset = 0.0f);
/** @brief A utility to load facial landmark information from a given file.
@param filename The filename of the file containing the facial landmark data.
@param points The loaded facial landmark points.
@param offset An offset value to adjust the loaded points.
<B>Example of usage</B>
@code
std::vector<Point2f> points;
face::loadFacePoints("filename.txt", points, 0.0f);
@endcode
The annotation file should follow the default format which is
@code
version: 1
n_points: 68
{
212.716603 499.771793
230.232816 566.290071
...
}
@endcode
where n_points is the number of points considered
and each point is represented as its position in x and y.
*/
CV_EXPORTS_W bool loadFacePoints( String filename, OutputArray points,
float offset = 0.0f);
/** @brief Utility to draw the detected facial landmark points
@param image The input image to be processed.
@param points The facial landmark points to be drawn.
@param color The color of points in BGR format represented by cv::Scalar.
<B>Example of usage</B>
@code
std::vector<Rect> faces;
std::vector<std::vector<Point2f> > landmarks;
facemark->getFaces(img, faces);
facemark->fit(img, faces, landmarks);
for(size_t j=0;j<faces.size();j++){
face::drawFacemarks(frame, landmarks[j], Scalar(0,0,255));
}
@endcode
*/
CV_EXPORTS_W void drawFacemarks( InputOutputArray image, InputArray points,
}
@endcode
*/
CV_EXPORTS_W void drawFacemarks( InputOutputArray image, InputArray points,
Scalar color = Scalar(255,0,0));
/** @brief Abstract base class for all facemark models
/** @brief Abstract base class for all facemark models
All facemark models in OpenCV are derived from the abstract base class Facemark, which
provides unified access to all facemark algorithms in OpenCV.
All facemark models in OpenCV are derived from the abstract base class Facemark, which
provides unified access to all facemark algorithms in OpenCV.
To utilize this API in your program, please take a look at the @ref tutorial_table_of_content_facemark
### Description
To utilize this API in your program, please take a look at the @ref tutorial_table_of_content_facemark
### Description
Facemark is a base class which provides universal access to any specific facemark algorithm.
Therefore, users should instantiate the desired algorithm before they can use it in their application.
Facemark is a base class which provides universal access to any specific facemark algorithm.
Therefore, users should instantiate the desired algorithm before they can use it in their application.
Here is an example of how to instantiate a facemark algorithm:
@code
// Using Facemark in your code:
Ptr<Facemark> facemark = FacemarkLBF::create();
@endcode
Here is an example of how to instantiate a facemark algorithm:
@code
// Using Facemark in your code:
Ptr<Facemark> facemark = FacemarkLBF::create();
@endcode
The typical pipeline for facemark detection is listed as follows:
- (Non-mandatory) Set a user-defined face detector using Facemark::setFaceDetector.
The typical pipeline for facemark detection is listed as follows:
- (Non-mandatory) Set a user-defined face detector using Facemark::setFaceDetector.
The facemark algorithms are designed to fit the facial points onto a detected face.
Therefore, the face information should be provided to the facemark algorithm.
Some algorithms might provide a default face detection function.
However, users might prefer to use their own face detector to obtain the best possible detection results.
- (Non-mandatory) Train the model for a specific algorithm using Facemark::training.
- (Non-mandatory) Train the model for a specific algorithm using Facemark::training.
In this case, the model should be automatically saved by the algorithm.
If the user already has a trained model, this step can be omitted.
- Load the trained model using Facemark::loadModel.
- Perform the fitting via Facemark::fit.
*/
class CV_EXPORTS_W Facemark : public virtual Algorithm
{
public:
- Load the trained model using Facemark::loadModel.
- Perform the fitting via Facemark::fit (a minimal sketch combining these steps is shown below).
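A minimal sketch of this pipeline, assuming a pre-trained model file ("lbf.model" below is a placeholder name) and the frontal-face Haar cascade shipped with OpenCV:
@code
// detect faces with the bundled getFaces() helper, then fit the landmarks
CParams detector_params("haarcascade_frontalface_alt.xml");
Ptr<Facemark> facemark = FacemarkLBF::create();
facemark->loadModel("lbf.model"); // placeholder path to a previously trained model

Mat img = imread("image.jpg");
std::vector<Rect> faces;
std::vector<std::vector<Point2f> > landmarks;
cv::face::getFaces(img, faces, &detector_params);
if (!faces.empty())
{
    facemark->fit(img, faces, landmarks);
    for (size_t j = 0; j < landmarks.size(); j++)
        drawFacemarks(img, landmarks[j], Scalar(0, 0, 255));
}
@endcode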
*/
class CV_EXPORTS_W Facemark : public virtual Algorithm
{
public:
virtual void read( const FileNode& fn )=0;
virtual void write( FileStorage& fs ) const=0;
......@@ -365,40 +339,42 @@ namespace face {
std::vector<std::vector<Point2f> > landmarks;
facemark->fit(image, faces, landmarks);
@endcode
TODO remove "config" from here
*/
virtual bool fit( InputArray image,\
InputArray faces,\
InputOutputArray landmarks,\
virtual bool fit( InputArray image,
InputArray faces,
InputOutputArray landmarks,
void * config = 0)=0;
/** @brief Set a user defined face detector for the Facemark algorithm.
@param f The user defined face detector function
@param detector The user defined face detector function
@param userData Detector parameters
<B>Example of usage</B>
@code
facemark->setFaceDetector(myDetector);
MyDetectorParameters detectorParameters(...);
facemark->setFaceDetector(myDetector, &detectorParameters);
@endcode
Example of a user defined face detector
@code
bool myDetector( InputArray image, OutputArray ROIs ){
std::vector<Rect> & faces = *(std::vector<Rect>*) ROIs.getObj();
faces.clear();
Mat img = image.getMat();
bool myDetector( InputArray image, OutputArray faces, void* userData)
{
MyDetectorParameters* params = (MyDetectorParameters*)userData;
// -------- do something --------
}
@endcode
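For reference, a complete detector of this form might wrap a cv::CascadeClassifier passed through userData; the sketch below mirrors the detectors used by the samples of this module:
@code
bool myDetector(InputArray image, OutputArray faces, void* userData)
{
    // the classifier instance is supplied by the caller of setFaceDetector() via userData
    CascadeClassifier* face_cascade = (CascadeClassifier*)userData;
    Mat gray;
    if (image.channels() > 1)
        cvtColor(image, gray, COLOR_BGR2GRAY);
    else
        gray = image.getMat().clone();
    equalizeHist(gray, gray);
    std::vector<Rect> faces_;
    face_cascade->detectMultiScale(gray, faces_, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
    Mat(faces_).copyTo(faces);
    return true;
}
@endcode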
TODO Lifetime of detector parameters is uncontrolled. Rework interface design to "Ptr<FaceDetector>".
*/
virtual bool setFaceDetector(bool(*f)(InputArray , OutputArray, void * ))=0;
virtual bool setFaceDetector(FN_FaceDetector detector, void* userData = 0)=0;
/** @brief Detect faces from a given image using default or user defined face detector.
Some algorithms might not provide a default face detector.
@param image Input image.
@param faces Output of the function, representing the regions of interest of the detected faces.
Each face is stored in a cv::Rect container.
@param extra_params Optional extra parameters for the face detector function.
@param faces Output of the function, representing the regions of interest of the detected faces. Each face is stored in a cv::Rect container.
<B>Example of usage</B>
@code
......@@ -409,7 +385,7 @@ namespace face {
}
@endcode
*/
virtual bool getFaces( InputArray image , OutputArray faces, void * extra_params=0)=0;
virtual bool getFaces(InputArray image, OutputArray faces)=0;
/** @brief Get data from an algorithm
......@@ -427,13 +403,10 @@ namespace face {
cout<<s0<<endl;
@endcode
*/
virtual bool getData(void * items=0)=0;
}; /* Facemark*/
virtual bool getData(void * items=0)=0; // FIXIT
}; /* Facemark*/
//! @}
} /* namespace face */
} /* namespace cv */
#endif //__OPENCV_FACELANDMARK_HPP__
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -44,9 +19,9 @@ namespace face {
//! @addtogroup face
//! @{
class CV_EXPORTS_W FacemarkAAM : public Facemark
{
public:
class CV_EXPORTS_W FacemarkAAM : public Facemark
{
public:
struct CV_EXPORTS Params
{
/**
......@@ -80,8 +55,8 @@ namespace face {
struct CV_EXPORTS Config
{
Config( Mat rot = Mat::eye(2,2,CV_32F),
Point2f trans = Point2f(0.0,0.0),
float scaling = 1.0,
Point2f trans = Point2f(0.0f,0.0f),
float scaling = 1.0f,
int scale_id=0
);
......@@ -151,7 +126,7 @@ namespace face {
static Ptr<FacemarkAAM> create(const FacemarkAAM::Params &parameters = FacemarkAAM::Params() );
virtual ~FacemarkAAM() {}
}; /* AAM */
}; /* AAM */
//! @}
......
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -45,9 +20,9 @@ namespace face {
//! @addtogroup face
//! @{
class CV_EXPORTS_W FacemarkLBF : public Facemark
{
public:
class CV_EXPORTS_W FacemarkLBF : public Facemark
{
public:
struct CV_EXPORTS Params
{
/**
......@@ -110,7 +85,7 @@ namespace face {
static Ptr<FacemarkLBF> create(const FacemarkLBF::Params &parameters = FacemarkLBF::Params() );
virtual ~FacemarkLBF(){};
}; /* LBF */
}; /* LBF */
//! @}
......
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
......@@ -63,31 +34,31 @@ Mentor: Delia Passalacqua
* example of the dataset is available at https://ibug.doc.ic.ac.uk/download/annotations/lfpw.zip
*--------------------------------------------------*/
#include <stdio.h>
#include <fstream>
#include <sstream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/face.hpp"
#include <stdio.h>
#include <fstream>
#include <sstream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/face.hpp"
#include <iostream>
#include <string>
#include <ctime>
#include <iostream>
#include <string>
#include <ctime>
using namespace std;
using namespace cv;
using namespace cv::face;
using namespace std;
using namespace cv;
using namespace cv::face;
bool myDetector( InputArray image, OutputArray ROIs, CascadeClassifier face_cascade);
bool getInitialFitting(Mat image, Rect face, std::vector<Point2f> s0,
bool myDetector( InputArray image, OutputArray ROIs, CascadeClassifier *face_cascade);
bool getInitialFitting(Mat image, Rect face, std::vector<Point2f> s0,
CascadeClassifier eyes_cascade, Mat & R, Point2f & Trans, float & scale);
bool parseArguments(int argc, char** argv, CommandLineParser & , String & cascade,
bool parseArguments(int argc, char** argv, CommandLineParser & , String & cascade,
String & model, String & images, String & annotations, String & testImages
);
);
int main(int argc, char** argv )
{
int main(int argc, char** argv )
{
CommandLineParser parser(argc, argv,"");
String cascade_path,eyes_cascade_path,images_path, annotations_path, test_images_path;
if(!parseArguments(argc, argv, parser,cascade_path,eyes_cascade_path,images_path, annotations_path, test_images_path))
......@@ -159,7 +130,7 @@ Mentor: Delia Passalacqua
printf("image #%i ", i);
//! [detect_face]
image = imread(images[i]);
myDetector(image, faces, face_cascade);
myDetector(image, faces, &face_cascade);
//! [detect_face]
if(faces.size()>0){
//! [get_initialization]
......@@ -194,25 +165,26 @@ Mentor: Delia Passalacqua
} //for
//! [fitting]
}
}
bool myDetector( InputArray image, OutputArray ROIs, CascadeClassifier face_cascade){
bool myDetector(InputArray image, OutputArray faces, CascadeClassifier *face_cascade)
{
Mat gray;
std::vector<Rect> & faces = *(std::vector<Rect>*) ROIs.getObj();
faces.clear();
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
}else{
if (image.channels() > 1)
cvtColor(image, gray, COLOR_BGR2GRAY);
else
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
face_cascade.detectMultiScale( gray, faces, 1.2, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
equalizeHist(gray, gray);
std::vector<Rect> faces_;
face_cascade->detectMultiScale(gray, faces_, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
Mat(faces_).copyTo(faces);
return true;
}
}
bool getInitialFitting(Mat image, Rect face, std::vector<Point2f> s0 ,CascadeClassifier eyes_cascade, Mat & R, Point2f & Trans, float & scale){
bool getInitialFitting(Mat image, Rect face, std::vector<Point2f> s0 ,CascadeClassifier eyes_cascade, Mat & R, Point2f & Trans, float & scale){
std::vector<Point2f> mybase;
std::vector<Point2f> T;
std::vector<Point2f> base = Mat(Mat(s0)+Scalar(image.cols/2,image.rows/2)).reshape(2);
......@@ -230,7 +202,7 @@ Mentor: Delia Passalacqua
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(20, 20) );
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, CASCADE_SCALE_IMAGE, Size(20, 20) );
if(eyes.size()==2){
found = true;
int j=0;
......@@ -273,15 +245,15 @@ Mentor: Delia Passalacqua
Trans = Point2f( (float)(face.x + face.width*0.5),(float)(face.y + face.height*0.5));
}
return found;
}
}
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
String & cascade,
String & model,
String & images,
String & annotations,
String & test_images
){
){
const String keys =
"{ @f face-cascade | | (required) path to the cascade model file for the face detector }"
"{ @e eyes-cascade | | (required) path to the cascade model file for the eyes detector }"
......@@ -315,4 +287,4 @@ Mentor: Delia Passalacqua
return false;
}
return true;
}
}
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
......@@ -63,27 +34,26 @@ Mentor: Delia Passalacqua
* example of the dataset is available at https://ibug.doc.ic.ac.uk/download/annotations/ibug.zip
*--------------------------------------------------*/
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/face.hpp"
using namespace std;
using namespace cv;
using namespace cv::face;
CascadeClassifier face_cascade;
bool myDetector( InputArray image, OutputArray roi, void * config=0 );
bool parseArguments(int argc, char** argv, CommandLineParser & , String & cascade,
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/face.hpp"
using namespace std;
using namespace cv;
using namespace cv::face;
static bool myDetector( InputArray image, OutputArray roi, CascadeClassifier *face_detector);
static bool parseArguments(int argc, char** argv, CommandLineParser & , String & cascade,
String & model, String & images, String & annotations, String & testImages
);
);
int main(int argc, char** argv)
{
int main(int argc, char** argv)
{
CommandLineParser parser(argc, argv,"");
String cascade_path,model_path,images_path, annotations_path, test_images_path;
if(!parseArguments(argc, argv, parser,cascade_path,model_path,images_path, annotations_path, test_images_path))
......@@ -95,8 +65,9 @@ Mentor: Delia Passalacqua
params.cascade_face = cascade_path;
Ptr<Facemark> facemark = FacemarkLBF::create(params);
CascadeClassifier face_cascade;
face_cascade.load(params.cascade_face.c_str());
facemark->setFaceDetector(myDetector);
facemark->setFaceDetector((FN_FaceDetector)myDetector, &face_cascade);
/*Loads the dataset*/
std::vector<String> images_train;
......@@ -147,37 +118,32 @@ Mentor: Delia Passalacqua
cout<<"face not found"<<endl;
}
}
}
}
bool myDetector( InputArray image, OutputArray roi, void * config ){
bool myDetector(InputArray image, OutputArray faces, CascadeClassifier *face_cascade)
{
Mat gray;
std::vector<Rect> & faces = *(std::vector<Rect>*) roi.getObj();
faces.clear();
if(config!=0){
//do nothing
}
if(image.channels()>1){
cvtColor(image,gray,CV_BGR2GRAY);
}else{
if (image.channels() > 1)
cvtColor(image, gray, COLOR_BGR2GRAY);
else
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
face_cascade.detectMultiScale( gray, faces, 1.4, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
equalizeHist(gray, gray);
std::vector<Rect> faces_;
face_cascade->detectMultiScale(gray, faces_, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
Mat(faces_).copyTo(faces);
return true;
}
}
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
String & cascade,
String & model,
String & images,
String & annotations,
String & test_images
){
){
const String keys =
"{ @c cascade | | (required) path to the face cascade xml file fo the face detector }"
"{ @i images | | (required) path of a text file contains the list of paths to all training images}"
......@@ -214,4 +180,4 @@ Mentor: Delia Passalacqua
}
return true;
}
}
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -47,7 +18,7 @@ Mentor: Delia Passalacqua
#include <stdio.h>
#include <ctime>
#include <iostream>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
......@@ -57,9 +28,8 @@ using namespace std;
using namespace cv;
using namespace cv::face;
CascadeClassifier face_cascade;
bool myDetector( InputArray image, OutputArray ROIs, void * config = 0);
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
static bool myDetector(InputArray image, OutputArray ROIs, CascadeClassifier *face_cascade);
static bool parseArguments(int argc, char** argv, CommandLineParser & parser,
String & cascade, String & model,String & video);
int main(int argc, char** argv ){
......@@ -68,6 +38,7 @@ int main(int argc, char** argv ){
if(!parseArguments(argc, argv, parser,cascade_path,model_path,video_path))
return -1;
CascadeClassifier face_cascade;
face_cascade.load(cascade_path);
FacemarkLBF::Params params;
......@@ -75,7 +46,7 @@ int main(int argc, char** argv ){
params.cascade_face = cascade_path;
Ptr<Facemark> facemark = FacemarkLBF::create(params);
facemark->setFaceDetector(myDetector);
facemark->setFaceDetector((FN_FaceDetector)myDetector, &face_cascade);
facemark->loadModel(params.model_filename.c_str());
VideoCapture capture(video_path);
......@@ -144,23 +115,20 @@ int main(int argc, char** argv ){
waitKey(0); // key press to close window
}
bool myDetector( InputArray image, OutputArray ROIs, void * config ){
bool myDetector(InputArray image, OutputArray faces, CascadeClassifier *face_cascade)
{
Mat gray;
std::vector<Rect> & faces = *(std::vector<Rect>*) ROIs.getObj();
faces.clear();
if(config!=0){
//do nothing
}
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
}else{
if (image.channels() > 1)
cvtColor(image, gray, COLOR_BGR2GRAY);
else
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
face_cascade.detectMultiScale( gray, faces, 1.4, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
equalizeHist(gray, gray);
std::vector<Rect> faces_;
face_cascade->detectMultiScale(gray, faces_, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
Mat(faces_).copyTo(faces);
return true;
}
......
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
*/
#include "precomp.hpp"
#include "opencv2/face.hpp"
#include "opencv2/core.hpp"
#include "precomp.hpp"
/*dataset parser*/
#include <fstream>
......@@ -46,36 +22,38 @@ Mentor: Delia Passalacqua
namespace cv {
namespace face {
CParams::CParams(String s, double sf, int minN, Size minSz, Size maxSz){
using namespace std;
CParams::CParams(String s, double sf, int minN, Size minSz, Size maxSz){
cascade = s;
scaleFactor = sf;
minNeighbors = minN;
minSize = minSz;
maxSize = maxSz;
if (!face_cascade.load(cascade))
{
CV_Error_(Error::StsBadArg, ("Error loading face_cascade: %s", cascade.c_str()));
}
}
bool getFaces(InputArray image, OutputArray faces, void * parameters){
bool getFaces(InputArray image, OutputArray faces, CParams* params)
{
CV_Assert(params);
Mat gray;
std::vector<Rect> roi;
if(parameters!=0){
CParams * params = (CParams *)parameters;
cvtColor( image.getMat(), gray, CV_BGR2GRAY );
equalizeHist( gray, gray );
cvtColor(image.getMat(), gray, COLOR_BGR2GRAY);
equalizeHist(gray, gray);
CascadeClassifier face_cascade;
if( !face_cascade.load( params->cascade ) ){ printf("--(!)Error loading face_cascade\n"); return false; };
face_cascade.detectMultiScale( gray, roi, params->scaleFactor, params->minNeighbors, 0|CV_HAAR_SCALE_IMAGE, params->minSize, params->maxSize);
params->face_cascade.detectMultiScale( gray, roi, params->scaleFactor, params->minNeighbors, CASCADE_SCALE_IMAGE, params->minSize, params->maxSize);
Mat(roi).copyTo(faces);
return true;
}else{
return false;
}
}
}
bool loadDatasetList(String imageList, String groundTruth, std::vector<String> & images, std::vector<String> & landmarks){
bool loadDatasetList(String imageList, String groundTruth, std::vector<String> & images, std::vector<String> & landmarks){
std::string line;
/*clear the output containers*/
......@@ -103,22 +81,22 @@ namespace face {
}
return true;
}
}
bool loadTrainingData(String filename, std::vector<String> & images, OutputArray _facePoints, char delim, float offset){
bool loadTrainingData(String filename, std::vector<String> & images, OutputArray _facePoints, char delim, float offset){
std::string line;
std::string item;
std::vector<Point2f> pts;
std::vector<float> raw;
// FIXIT
std::vector<std::vector<Point2f> > & facePoints =
*(std::vector<std::vector<Point2f> >*) _facePoints.getObj();
std::ifstream infile;
infile.open(filename.c_str(), std::ios::in);
if (!infile) {
std::string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
CV_Error_(Error::StsBadArg, ("No valid input file was given, please check the given filename: %s", filename.c_str()));
}
/*clear the output containers*/
......@@ -148,12 +126,13 @@ namespace face {
} // main loading process
return true;
}
}
bool loadTrainingData(String imageList, String groundTruth, std::vector<String> & images, OutputArray _facePoints, float offset){
bool loadTrainingData(String imageList, String groundTruth, std::vector<String> & images, OutputArray _facePoints, float offset){
std::string line;
std::vector<Point2f> facePts;
// FIXIT
std::vector<std::vector<Point2f> > & facePoints =
*(std::vector<std::vector<Point2f> >*) _facePoints.getObj();
......@@ -165,8 +144,7 @@ namespace face {
std::ifstream infile;
infile.open(imageList.c_str(), std::ios::in);
if (!infile) {
std::string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
CV_Error_(Error::StsBadArg, ("No valid input file was given, please check the given filename: %s", imageList.c_str()));
}
while (getline (infile, line)){
......@@ -182,10 +160,10 @@ namespace face {
}
return true;
}
}
bool loadFacePoints(String filename, OutputArray points, float offset){
std::vector<Point2f> & pts = *(std::vector<Point2f> *)points.getObj();
bool loadFacePoints(String filename, OutputArray points, float offset){
vector<Point2f> pts;
std::string line, item;
std::ifstream infile(filename.c_str());
......@@ -222,16 +200,16 @@ namespace face {
}
Mat(pts).copyTo(points);
return true;
}
}
void drawFacemarks(InputOutputArray image, InputArray points, Scalar color){
void drawFacemarks(InputOutputArray image, InputArray points, Scalar color){
Mat img = image.getMat();
std::vector<Point2f> pts = *(std::vector<Point2f>*)points.getObj();
vector<Point2f> pts = points.getMat();
for(size_t i=0;i<pts.size();i++){
circle(img, pts[i],3, color,-1);
}
} //drawPoints
}
} /* namespace face */
} /* namespace cv */
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
*/
#include "opencv2/face.hpp"
#include "precomp.hpp"
#include "opencv2/face.hpp"
namespace cv {
namespace face {
/*
* Parameters
*/
FacemarkAAM::Params::Params(){
/*
* Parameters
*/
FacemarkAAM::Params::Params(){
model_filename = "";
m = 200;
n = 10;
......@@ -53,16 +28,16 @@ namespace face {
max_m = 550;
max_n = 136;
texture_max_m = 145;
}
}
FacemarkAAM::Config::Config(Mat rot, Point2f trans, float scaling,int scale_id){
FacemarkAAM::Config::Config(Mat rot, Point2f trans, float scaling,int scale_id){
R = rot.clone();
t = trans;
scale = scaling;
model_scale_idx = scale_id;
}
}
void FacemarkAAM::Params::read( const cv::FileNode& fn ){
void FacemarkAAM::Params::read( const cv::FileNode& fn ){
*this = FacemarkAAM::Params();
if (!fn["model_filename"].empty()) fn["model_filename"] >> model_filename;
......@@ -75,9 +50,9 @@ namespace face {
if (!fn["max_n"].empty()) fn["max_n"] >> m;
if (!fn["texture_max_m"].empty()) fn["texture_max_m"] >> m;
if (!fn["scales"].empty()) fn["scales"] >> m;
}
}
void FacemarkAAM::Params::write( cv::FileStorage& fs ) const{
void FacemarkAAM::Params::write( cv::FileStorage& fs ) const{
fs << "model_filename" << model_filename;
fs << "m" << m;
fs << "n" << n;
......@@ -87,10 +62,10 @@ namespace face {
fs << "max_n" << verbose;
fs << "texture_max_m" << verbose;
fs << "scales" << verbose;
}
}
class FacemarkAAMImpl : public FacemarkAAM {
public:
class FacemarkAAMImpl : public FacemarkAAM {
public:
FacemarkAAMImpl( const FacemarkAAM::Params &parameters = FacemarkAAM::Params() );
void read( const FileNode& /*fn*/ );
void write( FileStorage& /*fs*/ ) const;
......@@ -98,12 +73,12 @@ namespace face {
void saveModel(String fs);
void loadModel(String fs);
bool setFaceDetector(bool(*f)(InputArray , OutputArray, void * ));
bool getFaces( InputArray image ,OutputArray faces, void * extra_params);
bool setFaceDetector(bool(*f)(InputArray , OutputArray, void * ), void* userData);
bool getFaces(InputArray image, OutputArray faces);
bool getData(void * items);
protected:
protected:
bool fit( InputArray image, InputArray faces, InputOutputArray landmarks, void * runtime_params);//!< from many ROIs
bool fitImpl( const Mat image, std::vector<Point2f>& landmarks,const Mat R,const Point2f T,const float scale, const int sclIdx=0 );
......@@ -139,90 +114,75 @@ namespace face {
std::vector<std::vector<Point2f> > facePoints;
FacemarkAAM::Params params;
FacemarkAAM::Model AAM;
bool(*faceDetector)(InputArray , OutputArray, void *);
bool isSetDetector;
FN_FaceDetector faceDetector;
void* faceDetectorData;
private:
private:
bool isModelTrained;
};
};
/*
* Constructor
*/
Ptr<FacemarkAAM> FacemarkAAM::create(const FacemarkAAM::Params &parameters){
/*
* Constructor
*/
Ptr<FacemarkAAM> FacemarkAAM::create(const FacemarkAAM::Params &parameters){
return Ptr<FacemarkAAMImpl>(new FacemarkAAMImpl(parameters));
}
}
FacemarkAAMImpl::FacemarkAAMImpl( const FacemarkAAM::Params &parameters ) :
params( parameters )
{
isSetDetector =false;
FacemarkAAMImpl::FacemarkAAMImpl( const FacemarkAAM::Params &parameters ) :
params( parameters ),
faceDetector(NULL), faceDetectorData(NULL)
{
isModelTrained = false;
}
}
void FacemarkAAMImpl::read( const cv::FileNode& fn ){
void FacemarkAAMImpl::read( const cv::FileNode& fn ){
params.read( fn );
}
}
void FacemarkAAMImpl::write( cv::FileStorage& fs ) const {
void FacemarkAAMImpl::write( cv::FileStorage& fs ) const {
params.write( fs );
}
}
bool FacemarkAAMImpl::setFaceDetector(bool(*f)(InputArray , OutputArray, void *)){
bool FacemarkAAMImpl::setFaceDetector(bool(*f)(InputArray , OutputArray, void *), void* userData){
faceDetector = f;
isSetDetector = true;
faceDetectorData = userData;
return true;
}
}
bool FacemarkAAMImpl::getFaces( InputArray image , OutputArray roi, void * extra_params){
if(!isSetDetector){
bool FacemarkAAMImpl::getFaces(InputArray image, OutputArray faces)
{
if (!faceDetector)
return false;
}
if(extra_params!=0){
//do nothing
}
return faceDetector(image, faces, faceDetectorData);
}
std::vector<Rect> faces;
faces.clear();
bool FacemarkAAMImpl::getData(void * items){
CV_Assert(items);
faceDetector(image.getMat(), faces, extra_params);
Mat(faces).copyTo(roi);
return true;
}
bool FacemarkAAMImpl::getData(void * items){
if(items==0){
return true;
}else{
Data * data = (Data*)items;
Data* data = (Data*)items;
data->s0 = AAM.s0;
return true;
}
}
}
bool FacemarkAAMImpl::addTrainingSample(InputArray image, InputArray landmarks){
bool FacemarkAAMImpl::addTrainingSample(InputArray image, InputArray landmarks){
// FIXIT
std::vector<Point2f> & _landmarks = *(std::vector<Point2f>*)landmarks.getObj();
images.push_back(image.getMat());
facePoints.push_back(_landmarks);
return true;
}
}
void FacemarkAAMImpl::training(void* parameters){
void FacemarkAAMImpl::training(void* parameters){
if(parameters!=0){/*do nothing*/}
if (images.size()<1) {
std::string error_message =
"Training data is not provided. Consider to add using addTrainingSample() function!";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, "Training data is not provided. Consider to add using addTrainingSample() function!");
}
if(strcmp(params.model_filename.c_str(),"")==0 && params.save_model){
std::string error_message = "The model_filename parameter should be set!";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, "The model_filename parameter should be set!");
}
std::vector<std::vector<Point2f> > normalized;
......@@ -297,7 +257,7 @@ namespace face {
Mat T= texture_feats.t();
/* -------------- E. Create the texture model -----------------*/
reduce(T,AAM.textures[scale].A0,1, CV_REDUCE_AVG);
reduce(T,AAM.textures[scale].A0,1, REDUCE_AVG);
if(params.verbose) printf("(2/4) Compute the feature average ...\n");
Mat A0_mtx = repeat(AAM.textures[scale].A0,1,T.cols);
......@@ -325,10 +285,10 @@ namespace face {
}
isModelTrained = true;
if(params.verbose) printf("Training is completed\n");
}
}
bool FacemarkAAMImpl::fit( InputArray image, InputArray roi, InputOutputArray _landmarks, void * runtime_params)
{
bool FacemarkAAMImpl::fit( InputArray image, InputArray roi, InputOutputArray _landmarks, void * runtime_params)
{
std::vector<Rect> & faces = *(std::vector<Rect> *)roi.getObj();
if(faces.size()<1) return false;
......@@ -341,9 +301,7 @@ namespace face {
std::vector<Config> conf = *(std::vector<Config>*)runtime_params;
if (conf.size()!=faces.size()) {
std::string error_message =
"Number of faces and extra_parameters are different!";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, "Number of faces and extra_parameters are different!");
}
for(size_t i=0; i<conf.size();i++){
fitImpl(img, landmarks[i], conf[i].R,conf[i].t, conf[i].scale, conf[i].model_scale_idx);
......@@ -359,9 +317,9 @@ namespace face {
}
return true;
}
}
bool FacemarkAAMImpl::fitImpl( const Mat image, std::vector<Point2f>& landmarks, const Mat R, const Point2f T, const float scale, int _scl){
bool FacemarkAAMImpl::fitImpl( const Mat image, std::vector<Point2f>& landmarks, const Mat R, const Point2f T, const float scale, int _scl){
if (landmarks.size()>0)
landmarks.clear();
......@@ -386,7 +344,7 @@ namespace face {
Mat imgray;
Mat img;
if(image.channels()>1){
cvtColor(image,imgray,CV_BGR2GRAY);
cvtColor(image,imgray,COLOR_BGR2GRAY);
}else{
imgray = image;
}
......@@ -452,9 +410,9 @@ namespace face {
}
landmarks = Mat(scale*Mat(curr_shape)).reshape(2);
return true;
}
}
void FacemarkAAMImpl::saveModel(String s){
void FacemarkAAMImpl::saveModel(String s){
FileStorage fs(s.c_str(),FileStorage::WRITE_BASE64);
fs << "AAM_tri" << AAM.triangles;
fs << "scales" << AAM.scales;
......@@ -497,9 +455,9 @@ namespace face {
}
fs.release();
if(params.verbose) printf("The model is successfully saved! \n");
}
}
void FacemarkAAMImpl::loadModel(String s){
void FacemarkAAMImpl::loadModel(String s){
FileStorage fs(s.c_str(),FileStorage::READ);
String x;
fs["AAM_tri"] >> AAM.triangles;
......@@ -545,9 +503,9 @@ namespace face {
fs.release();
isModelTrained = true;
if(params.verbose) printf("the model has been loaded\n");
}
}
Mat FacemarkAAMImpl::procrustes(std::vector<Point2f> P, std::vector<Point2f> Q, Mat & rot, Scalar & trans, float & scale){
Mat FacemarkAAMImpl::procrustes(std::vector<Point2f> P, std::vector<Point2f> Q, Mat & rot, Scalar & trans, float & scale){
// calculate average
Scalar mx = mean(P);
......@@ -564,8 +522,8 @@ namespace face {
// calculate the sum
Mat sumXs, sumYs;
reduce(Xs,sumXs, 0, CV_REDUCE_SUM);
reduce(Ys,sumYs, 0, CV_REDUCE_SUM);
reduce(Xs,sumXs, 0, REDUCE_SUM);
reduce(Ys,sumYs, 0, REDUCE_SUM);
//calculate the normrnd
double normX = sqrt(Mat(sumXs.reshape(1)).at<float>(0)+Mat(sumXs.reshape(1)).at<float>(1));
......@@ -603,9 +561,9 @@ namespace face {
Mat Qmat = Mat(Q).reshape(1);
return Mat(scale*Qmat*rot+trans).clone();
}
}
void FacemarkAAMImpl::procrustesAnalysis(std::vector<std::vector<Point2f> > shapes, std::vector<std::vector<Point2f> > & normalized, std::vector<Point2f> & new_mean){
void FacemarkAAMImpl::procrustesAnalysis(std::vector<std::vector<Point2f> > shapes, std::vector<std::vector<Point2f> > & normalized, std::vector<Point2f> & new_mean){
std::vector<Scalar> mean_every_shape;
mean_every_shape.resize(shapes.size());
......@@ -648,9 +606,9 @@ namespace face {
// update
aligned.reshape(2).copyTo(mean_shape);
}
}
}
void FacemarkAAMImpl::calcMeanShape(std::vector<std::vector<Point2f> > shapes,std::vector<Point2f> & mean){
void FacemarkAAMImpl::calcMeanShape(std::vector<std::vector<Point2f> > shapes,std::vector<Point2f> & mean){
mean.resize(shapes[0].size());
Point2f tmp;
for(unsigned i=0;i<shapes[0].size();i++){
......@@ -664,9 +622,9 @@ namespace face {
tmp.y/=shapes.size();
mean[i] = tmp;
}
}
}
void FacemarkAAMImpl::getProjection(const Mat M, Mat & P, int n){
void FacemarkAAMImpl::getProjection(const Mat M, Mat & P, int n){
Mat U,S,Vt,S1, Ut;
int k;
if(M.rows < M.cols){
......@@ -703,9 +661,9 @@ namespace face {
P = Mat(M*U.colRange(0,k)*D).clone();
}
}
}
Mat FacemarkAAMImpl::orthonormal(Mat Mo){
Mat FacemarkAAMImpl::orthonormal(Mat Mo){
Mat M;
Mo.convertTo(M,CV_32FC1);
......@@ -741,9 +699,9 @@ namespace face {
}
return O.colRange(0,k).clone();
}
}
void FacemarkAAMImpl::calcSimilarityEig(std::vector<Point2f> s0,Mat S, Mat & Q_orth, Mat & S_orth){
void FacemarkAAMImpl::calcSimilarityEig(std::vector<Point2f> s0,Mat S, Mat & Q_orth, Mat & S_orth){
int npts = (int)s0.size();
Mat Q = Mat::zeros(2*npts,4,CV_32FC1);
......@@ -793,16 +751,16 @@ namespace face {
Q_orth = allOrth.colRange(0,4).clone();
S_orth = allOrth.colRange(4,allOrth.cols).clone();
}
}
inline Mat FacemarkAAMImpl::linearize(Mat s){ // all x values and then all y values
    return Mat(s.reshape(1).t()).reshape(1,2*s.rows);
}

inline Mat FacemarkAAMImpl::linearize(std::vector<Point2f> s){ // all x values and then all y values
    return linearize(Mat(s));
}
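To make the comment above concrete, a small standalone sketch (toy coordinates chosen purely for illustration) that evaluates the same expression as linearize():

#include <opencv2/core.hpp>
#include <vector>
#include <cstdio>
using namespace cv;

int main()
{
    std::vector<Point2f> pts;
    pts.push_back(Point2f(1, 2));
    pts.push_back(Point2f(3, 4));
    Mat s(pts);                                            // 2x1, CV_32FC2
    Mat v = Mat(s.reshape(1).t()).reshape(1, 2 * s.rows);  // same expression as linearize()
    // v is the 4x1 column [1, 3, 2, 4]^T: all x values first, then all y values
    for (int i = 0; i < v.rows; i++) printf("%.0f ", v.at<float>(i));
    printf("\n");
    return 0;
}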
void FacemarkAAMImpl::delaunay(std::vector<Point2f> s, std::vector<Vec3i> & triangles){
void FacemarkAAMImpl::delaunay(std::vector<Point2f> s, std::vector<Vec3i> & triangles){
triangles.clear();
......@@ -847,9 +805,9 @@ namespace face {
triangles.push_back(Vec3i(idx[v1],idx[v2],idx[v3]));
} //if
} // for
}
}
Mat FacemarkAAMImpl::createMask(std::vector<Point2f> base_shape, Rect res){
Mat FacemarkAAMImpl::createMask(std::vector<Point2f> base_shape, Rect res){
Mat mask = Mat::zeros(res.height, res.width, CV_8U);
std::vector<Point> hull;
std::vector<Point> shape;
......@@ -857,9 +815,9 @@ namespace face {
convexHull(shape,hull);
fillConvexPoly(mask, &hull[0], (int)hull.size(), 255, 8 ,0);
return mask.clone();
}
}
Mat FacemarkAAMImpl::createTextureBase(std::vector<Point2f> shape, std::vector<Vec3i> triangles, Rect res, std::vector<std::vector<Point> > & textureIdx){
Mat FacemarkAAMImpl::createTextureBase(std::vector<Point2f> shape, std::vector<Vec3i> triangles, Rect res, std::vector<std::vector<Point> > & textureIdx){
// maximum supported number of triangles is 255 (triangle indices are stored in an 8-bit mask)
Mat mask = Mat::zeros(res.height, res.width, CV_8U);
......@@ -888,20 +846,20 @@ namespace face {
}
return mask.clone();
}
}
Mat FacemarkAAMImpl::warpImage(
Mat FacemarkAAMImpl::warpImage(
const Mat img, const std::vector<Point2f> target_shape,
const std::vector<Point2f> curr_shape, const std::vector<Vec3i> triangles,
const Rect res, const std::vector<std::vector<Point> > textureIdx)
{
{
// TODO: this part can be optimized: collect the transformation pairs from all triangles first, then do the remapping in one pass
Mat warped = Mat::zeros(res.height, res.width, CV_8U);
Mat warped2 = Mat::zeros(res.height, res.width, CV_8U);
Mat image,part, warped_part;
if(img.channels()>1){
cvtColor(img,image,CV_BGR2GRAY);
cvtColor(img,image,COLOR_BGR2GRAY);
}else{
image = img;
}
......@@ -976,19 +934,19 @@ namespace face {
}
return warped2.clone();
}
}
template <class T>
Mat FacemarkAAMImpl::getFeature(const Mat m, std::vector<int> map){
    std::vector<float> feat;
    Mat M = m.t(); // transpose so that the linear indices in 'map' follow MATLAB's column-major order
for(size_t i=0;i<map.size();i++){
feat.push_back((float)M.at<T>(map[i]));
}
return Mat(feat).clone();
}
}
void FacemarkAAMImpl::createMaskMapping(const Mat m1, const Mat m2, std::vector<int> & ind1, std::vector<int> & ind2, std::vector<int> & ind3){
void FacemarkAAMImpl::createMaskMapping(const Mat m1, const Mat m2, std::vector<int> & ind1, std::vector<int> & ind2, std::vector<int> & ind3){
int cnt = 0, idx=0;
......@@ -1015,9 +973,9 @@ namespace face {
} // j
} // i
}
}
void FacemarkAAMImpl::image_jacobian(const Mat gx, const Mat gy, const Mat Jx, const Mat Jy, Mat & G){
void FacemarkAAMImpl::image_jacobian(const Mat gx, const Mat gy, const Mat Jx, const Mat Jy, Mat & G){
Mat Gx = repeat(gx,1,Jx.cols);
Mat Gy = repeat(gy,1,Jx.cols);
......@@ -1027,9 +985,9 @@ namespace face {
multiply(Gy,Jy,G2);
G=G1+G2;
}
}
void FacemarkAAMImpl::warpUpdate(std::vector<Point2f> & shape, Mat delta, std::vector<Point2f> s0, Mat S, Mat Q, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp){
void FacemarkAAMImpl::warpUpdate(std::vector<Point2f> & shape, Mat delta, std::vector<Point2f> s0, Mat S, Mat Q, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp){
std::vector<Point2f> new_shape;
int nSimEig = 4;
......@@ -1054,9 +1012,9 @@ namespace face {
Mat s = linearize(s0) +S*p + Q*r;
Mat(Mat(s.t()).reshape(0,2).t()).reshape(2).copyTo(shape);
}
}
Mat FacemarkAAMImpl::computeWarpParts(std::vector<Point2f> curr_shape,std::vector<Point2f> s0, Mat ds0, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp){
Mat FacemarkAAMImpl::computeWarpParts(std::vector<Point2f> curr_shape,std::vector<Point2f> s0, Mat ds0, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp){
std::vector<Point2f> new_shape;
std::vector<Point2f> ds = ds0.reshape(2);
......@@ -1100,9 +1058,9 @@ namespace face {
} // s0.size()
return Mat(new_shape).reshape(1).clone();
}
}
void FacemarkAAMImpl::gradient(const Mat M, Mat & gx, Mat & gy){
void FacemarkAAMImpl::gradient(const Mat M, Mat & gx, Mat & gy){
gx = Mat::zeros(M.size(),CV_32FC1);
gy = Mat::zeros(M.size(),CV_32FC1);
......@@ -1134,9 +1092,9 @@ namespace face {
}
}
}
}
void FacemarkAAMImpl::createWarpJacobian(Mat S, Mat Q, std::vector<Vec3i> triangles, Model::Texture & T, Mat & Wx_dp, Mat & Wy_dp, std::vector<std::vector<int> > & Tp){
void FacemarkAAMImpl::createWarpJacobian(Mat S, Mat Q, std::vector<Vec3i> triangles, Model::Texture & T, Mat & Wx_dp, Mat & Wy_dp, std::vector<std::vector<int> > & Tp){
std::vector<Point2f> base_shape = T.base_shape;
Rect resolution = T.resolution;
......@@ -1216,7 +1174,7 @@ namespace face {
Wx_dp = dW_dxdy* Mat(dx_dp,Range(0,npts));
Wy_dp = dW_dxdy* Mat(dx_dp,Range(npts,2*npts));
} //createWarpJacobian
} //createWarpJacobian
} /* namespace face */
} /* namespace cv */
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "opencv2/face.hpp"
#include "precomp.hpp"
#include "opencv2/face.hpp"
#include <fstream>
#include <cmath>
#include <ctime>
#include <cstdio>
#include <cassert>
#include <cstdarg>
namespace cv {
namespace face {
#define TIMER_BEGIN { double __time__ = (double)getTickCount();
#define TIMER_NOW ((getTickCount() - __time__) / getTickFrequency())
#define TIMER_END }
#define TIMER_BEGIN { double __time__ = (double)getTickCount();
#define TIMER_NOW ((getTickCount() - __time__) / getTickFrequency())
#define TIMER_END }
#define SIMILARITY_TRANSFORM(x, y, scale, rotate) do { \
#define SIMILARITY_TRANSFORM(x, y, scale, rotate) do { \
double x_tmp = scale * (rotate(0, 0)*x + rotate(0, 1)*y); \
double y_tmp = scale * (rotate(1, 0)*x + rotate(1, 1)*y); \
x = x_tmp; y = y_tmp; \
} while(0)
} while(0)
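As a small illustrative helper that could sit in this translation unit right after the macro (the function name and the toy values are assumptions), showing that the macro overwrites x and y in place with the scaled, rotated coordinates:

static void similarity_transform_demo()
{
    // Illustrative only: rotate the point (1, 0) by 90 degrees and scale it by 2.
    Mat_<double> rotate(2, 2);
    rotate << 0, -1,
              1,  0;                            // 90-degree counter-clockwise rotation
    double x = 1.0, y = 0.0, scale = 2.0;
    SIMILARITY_TRANSFORM(x, y, scale, rotate);  // x becomes 0, y becomes 2
    printf("(%.1f, %.1f)\n", x, y);
}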
FacemarkLBF::Params::Params(){
FacemarkLBF::Params::Params(){
cascade_face = "";
shape_offset = 0.0;
......@@ -85,22 +59,22 @@ namespace face {
}
detectROI = Rect(-1,-1,-1,-1);
}
}
void FacemarkLBF::Params::read( const cv::FileNode& fn ){
void FacemarkLBF::Params::read( const cv::FileNode& fn ){
*this = FacemarkLBF::Params();
if (!fn["verbose"].empty())
fn["verbose"] >> verbose;
}
}
void FacemarkLBF::Params::write( cv::FileStorage& fs ) const{
void FacemarkLBF::Params::write( cv::FileStorage& fs ) const{
fs << "verbose" << verbose;
}
}
class FacemarkLBFImpl : public FacemarkLBF {
public:
class FacemarkLBFImpl : public FacemarkLBF {
public:
FacemarkLBFImpl( const FacemarkLBF::Params &parameters = FacemarkLBF::Params() );
void read( const FileNode& /*fn*/ );
......@@ -108,13 +82,13 @@ namespace face {
void loadModel(String fs);
bool setFaceDetector(bool(*f)(InputArray , OutputArray, void * extra_params ));
bool getFaces( InputArray image , OutputArray faces, void * extra_params);
bool setFaceDetector(bool(*f)(InputArray , OutputArray, void * extra_params ), void* userData);
bool getFaces(InputArray image, OutputArray faces);
bool getData(void * items);
Params params;
protected:
protected:
bool fit( InputArray image, InputArray faces, InputOutputArray landmarks, void * runtime_params );//!< from many ROIs
bool fitImpl( const Mat image, std::vector<Point2f> & landmarks );//!< from a face
......@@ -128,12 +102,11 @@ namespace face {
void data_augmentation(std::vector<Mat> &imgs, std::vector<Mat> &gt_shapes, std::vector<BBox> &bboxes);
Mat getMeanShape(std::vector<Mat> &gt_shapes, std::vector<BBox> &bboxes);
bool configFaceDetector();
bool defaultFaceDetector(const Mat image, std::vector<Rect> & faces);
bool defaultFaceDetector(const Mat& image, std::vector<Rect>& faces);
CascadeClassifier face_cascade;
bool(*faceDetector)(InputArray , OutputArray, void * );
bool isSetDetector;
FN_FaceDetector faceDetector;
void* faceDetectorData;
/*training data*/
std::vector<std::vector<Point2f> > data_facemarks; //original position
......@@ -141,7 +114,7 @@ namespace face {
std::vector<BBox> data_boxes;
std::vector<Mat> data_shapes; //position in the face ROI
private:
private:
bool isModelTrained;
/*---------------LBF Class---------------------*/
......@@ -242,107 +215,94 @@ namespace face {
}; // LBF
Regressor regressor;
}; // class
}; // class
/*
* Constructor
*/
Ptr<FacemarkLBF> FacemarkLBF::create(const FacemarkLBF::Params &parameters){
/*
* Constructor
*/
Ptr<FacemarkLBF> FacemarkLBF::create(const FacemarkLBF::Params &parameters){
return Ptr<FacemarkLBFImpl>(new FacemarkLBFImpl(parameters));
}
}
FacemarkLBFImpl::FacemarkLBFImpl( const FacemarkLBF::Params &parameters )
{
    isSetDetector =false;
    isModelTrained = false;
    params = parameters;
}

FacemarkLBFImpl::FacemarkLBFImpl( const FacemarkLBF::Params &parameters ) :
    faceDetector(NULL), faceDetectorData(NULL)
{
    isModelTrained = false;
    params = parameters;
}

bool FacemarkLBFImpl::setFaceDetector(bool(*f)(InputArray , OutputArray, void * extra_params )){
    faceDetector = f;
    isSetDetector = true;
    return true;
}

bool FacemarkLBFImpl::setFaceDetector(bool(*f)(InputArray , OutputArray, void * extra_params ), void* userData){
    faceDetector = f;
    faceDetectorData = userData;
    return true;
}

bool FacemarkLBFImpl::getFaces( InputArray image , OutputArray roi, void * extra_params){
    if(!isSetDetector){
        return false;
    }
    if(extra_params!=0){
        //do nothing
    }
    std::vector<Rect> & faces = *(std::vector<Rect>*)roi.getObj();
    faces.clear();
    faceDetector(image.getMat(), faces, extra_params);
    return true;
}
bool FacemarkLBFImpl::configFaceDetector(){
if(!isSetDetector){
/*check the cascade classifier file*/
std::ifstream infile;
infile.open(params.cascade_face.c_str(), std::ios::in);
if (!infile) {
std::string error_message = "The cascade classifier model is not found.";
CV_Error(CV_StsBadArg, error_message);
return false;
}
}
face_cascade.load(params.cascade_face.c_str());
}
bool FacemarkLBFImpl::getFaces(InputArray image, OutputArray faces_)
{
if (!faceDetector)
{
std::vector<Rect> faces;
defaultFaceDetector(image.getMat(), faces);
Mat(faces).copyTo(faces_);
return true;
}
return faceDetector(image, faces_, faceDetectorData);
}
bool FacemarkLBFImpl::defaultFaceDetector(const Mat image, std::vector<Rect> & faces){
    Mat gray;
    faces.clear();

    if(image.channels()>1){
        cvtColor(image,gray,CV_BGR2GRAY);
    }else{
        gray = image;
    }
    equalizeHist( gray, gray );
    face_cascade.detectMultiScale( gray, faces, 1.05, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
    return true;
}

bool FacemarkLBFImpl::defaultFaceDetector(const Mat& image, std::vector<Rect>& faces){
    Mat gray;
    faces.clear();

    if (image.channels() > 1)
    {
        cvtColor(image, gray, COLOR_BGR2GRAY);
    }
    else
    {
        gray = image;
    }
    equalizeHist(gray, gray);

    if (face_cascade.empty())
    {
        { /* check the cascade classifier file */
            std::ifstream infile;
            infile.open(params.cascade_face.c_str(), std::ios::in);
            if (!infile)
                CV_Error_(Error::StsBadArg, ("The cascade classifier model is not found: %s", params.cascade_face.c_str()));
        }
        face_cascade.load(params.cascade_face.c_str());
        CV_Assert(!face_cascade.empty());
    }
    face_cascade.detectMultiScale(gray, faces, 1.05, 2, CASCADE_SCALE_IMAGE, Size(30, 30) );
    return true;
}

bool FacemarkLBFImpl::getData(void * items){
    if(items!=0){
        // do nothing
    }
    return true;
}

bool FacemarkLBFImpl::getData(void * items){
    CV_UNUSED(items);
    return false;
}
bool FacemarkLBFImpl::addTrainingSample(InputArray image, InputArray landmarks){
bool FacemarkLBFImpl::addTrainingSample(InputArray image, InputArray landmarks){
// FIXIT
std::vector<Point2f> & _landmarks = *(std::vector<Point2f>*)landmarks.getObj();
configFaceDetector();
prepareTrainingData(image.getMat(), _landmarks, data_faces, data_shapes, data_boxes);
return true;
}
}
void FacemarkLBFImpl::training(void* parameters){
CV_UNUSED(parameters);
void FacemarkLBFImpl::training(void* parameters){
if(parameters!=0){/*do nothing*/}
if (data_faces.size()<1) {
std::string error_message =
"Training data is not provided. Consider to add using addTrainingSample() function!";
CV_Error(CV_StsBadArg, error_message);
if (data_faces.empty())
{
CV_Error(Error::StsBadArg, "Training data is not provided. Consider to add using addTrainingSample() function!");
}
if(strcmp(params.cascade_face.c_str(),"")==0
||(strcmp(params.model_filename.c_str(),"")==0 && params.save_model)
){
std::string error_message = "The parameter cascade_face and model_filename should be set!";
CV_Error(CV_StsBadArg, error_message);
if (params.cascade_face.empty() || (params.model_filename.empty() && params.save_model))
{
CV_Error(Error::StsBadArg, "The parameter cascade_face and model_filename should be set!");
}
// flip the image and swap the landmark position
......@@ -358,10 +318,8 @@ namespace face {
for (int i = 0; i < N; i++) {
for (int j = 0; j < params.initShape_n; j++) {
int idx = i*params.initShape_n + j;
int k = 0;
do {
k = rng.uniform(0, N);
} while (k == i);
int k = rng.uniform(0, N - 1);
k = (k >= i) ? k + 1 : k; // require k != i
imgs[idx] = data_faces[i];
gt_shapes[idx] = data_shapes[i];
bboxes[idx] = data_boxes[i];
......@@ -378,16 +336,15 @@ namespace face {
}
isModelTrained = true;
}
}
bool FacemarkLBFImpl::fit( InputArray image, InputArray roi, InputOutputArray _landmarks, void * runtime_params )
{
if(runtime_params!=0){
// do nothing
}
bool FacemarkLBFImpl::fit( InputArray image, InputArray roi, InputOutputArray _landmarks, void * runtime_params )
{
CV_UNUSED(runtime_params);
// FIXIT
std::vector<Rect> & faces = *(std::vector<Rect> *)roi.getObj();
if(faces.size()<1) return false;
if (faces.empty()) return false;
std::vector<std::vector<Point2f> > & landmarks =
*(std::vector<std::vector<Point2f> >*) _landmarks.getObj();
......@@ -400,20 +357,19 @@ namespace face {
}
return true;
}
}
bool FacemarkLBFImpl::fitImpl( const Mat image, std::vector<Point2f>& landmarks){
bool FacemarkLBFImpl::fitImpl( const Mat image, std::vector<Point2f>& landmarks){
if (landmarks.size()>0)
landmarks.clear();
if (!isModelTrained) {
std::string error_message = "The LBF model is not trained yet. Please provide a trained model.";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, "The LBF model is not trained yet. Please provide a trained model.");
}
Mat img;
if(image.channels()>1){
cvtColor(image,img,CV_BGR2GRAY);
cvtColor(image,img,COLOR_BGR2GRAY);
}else{
img = image;
}
......@@ -424,13 +380,8 @@ namespace face {
}else{
std::vector<Rect> rects;
if(!isSetDetector){
defaultFaceDetector(img, rects);
}else{
faceDetector(img, rects,0);
}
if (rects.size() == 0) return 0; //failed to get face
if (!getFaces(img, rects)) return 0;
if (rects.empty()) return 0; //failed to get face
box = rects[0];
}
......@@ -455,35 +406,34 @@ namespace face {
}
return 1;
}
}
void FacemarkLBFImpl::read( const cv::FileNode& fn ){
void FacemarkLBFImpl::read( const cv::FileNode& fn ){
params.read( fn );
}
}
void FacemarkLBFImpl::write( cv::FileStorage& fs ) const {
void FacemarkLBFImpl::write( cv::FileStorage& fs ) const {
params.write( fs );
}
}
void FacemarkLBFImpl::loadModel(String s){
void FacemarkLBFImpl::loadModel(String s){
if(params.verbose) printf("loading data from : %s\n", s.c_str());
std::ifstream infile;
infile.open(s.c_str(), std::ios::in);
if (!infile) {
std::string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
CV_Error(Error::StsBadArg, "No valid input file was given, please check the given filename.");
}
FileStorage fs(s.c_str(),FileStorage::READ);
regressor.read(fs, params);
isModelTrained = true;
}
}
Rect FacemarkLBFImpl::getBBox(Mat &img, const Mat_<double> shape) {
Rect FacemarkLBFImpl::getBBox(Mat &img, const Mat_<double> shape) {
std::vector<Rect> rects;
if(!isSetDetector){
if(!faceDetector){
defaultFaceDetector(img, rects);
}else{
faceDetector(img, rects,0);
......@@ -517,13 +467,13 @@ namespace face {
return r;
}
return Rect(-1, -1, -1, -1);
}
}
void FacemarkLBFImpl::prepareTrainingData(Mat img, std::vector<Point2f> facePoints,
void FacemarkLBFImpl::prepareTrainingData(Mat img, std::vector<Point2f> facePoints,
std::vector<Mat> & cropped, std::vector<Mat> & shapes, std::vector<BBox> &boxes)
{
{
if(img.channels()>1){
cvtColor(img,img,CV_BGR2GRAY);
cvtColor(img,img,COLOR_BGR2GRAY);
}
Mat shape;
......@@ -553,9 +503,9 @@ namespace face {
shapes.push_back(shape);
} // if box is valid
} // prepareTrainingData
} // prepareTrainingData
void FacemarkLBFImpl::data_augmentation(std::vector<Mat> &imgs, std::vector<Mat> &gt_shapes, std::vector<BBox> &bboxes) {
void FacemarkLBFImpl::data_augmentation(std::vector<Mat> &imgs, std::vector<Mat> &gt_shapes, std::vector<BBox> &bboxes) {
int N = (int)imgs.size();
imgs.reserve(2 * N);
gt_shapes.reserve(2 * N);
......@@ -582,7 +532,7 @@ namespace face {
bboxes.push_back(bbox_flipped);
}
#define SWAP(shape, i, j) do { \
#define SWAP(shape, i, j) do { \
double tmp = shape.at<double>(i-1, 0); \
shape.at<double>(i-1, 0) = shape.at<double>(j-1, 0); \
shape.at<double>(j-1, 0) = tmp; \
......@@ -627,14 +577,14 @@ namespace face {
printf("Wrong n_landmarks, currently only 29 and 68 landmark points are supported");
}
#undef SWAP
#undef SWAP
}
}
FacemarkLBFImpl::BBox::BBox() {}
FacemarkLBFImpl::BBox::~BBox() {}
FacemarkLBFImpl::BBox::BBox() {}
FacemarkLBFImpl::BBox::~BBox() {}
FacemarkLBFImpl::BBox::BBox(double _x, double _y, double w, double h) {
FacemarkLBFImpl::BBox::BBox(double _x, double _y, double w, double h) {
x = _x;
y = _y;
width = w;
......@@ -643,10 +593,10 @@ namespace face {
y_center = y + h / 2.;
x_scale = w / 2.;
y_scale = h / 2.;
}
}
// Project absolute shape to relative shape binding to this bbox
Mat FacemarkLBFImpl::BBox::project(const Mat &shape) const {
// Project absolute shape to relative shape binding to this bbox
Mat FacemarkLBFImpl::BBox::project(const Mat &shape) const {
Mat_<double> res(shape.rows, shape.cols);
const Mat_<double> &shape_ = (Mat_<double>)shape;
for (int i = 0; i < shape.rows; i++) {
......@@ -654,10 +604,10 @@ namespace face {
res(i, 1) = (shape_(i, 1) - y_center) / y_scale;
}
return res;
}
}
// Project relative shape to absolute shape binding to this bbox
Mat FacemarkLBFImpl::BBox::reproject(const Mat &shape) const {
// Project relative shape to absolute shape binding to this bbox
Mat FacemarkLBFImpl::BBox::reproject(const Mat &shape) const {
Mat_<double> res(shape.rows, shape.cols);
const Mat_<double> &shape_ = (Mat_<double>)shape;
for (int i = 0; i < shape.rows; i++) {
......@@ -665,9 +615,9 @@ namespace face {
res(i, 1) = shape_(i, 1)*y_scale + y_center;
}
return res;
}
}
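In other words, project() maps a landmark into box-centered coordinates of roughly [-1, 1], and reproject() is its inverse. A standalone sketch of the same arithmetic with toy numbers (it does not touch the private BBox class):

#include <opencv2/core.hpp>
#include <cstdio>
using namespace cv;

int main()
{
    // A 100x60 box with its top-left corner at (40, 30).
    double x = 40, y = 30, w = 100, h = 60;
    double x_center = x + w / 2., y_center = y + h / 2.;   // (90, 60)
    double x_scale  = w / 2.,     y_scale  = h / 2.;       // (50, 30)

    Point2d p(115, 45);                                    // a landmark in image coordinates
    Point2d rel((p.x - x_center) / x_scale,                // project:   ( 0.5, -0.5)
                (p.y - y_center) / y_scale);
    Point2d back(rel.x * x_scale + x_center,               // reproject: (115, 45) again
                 rel.y * y_scale + y_center);
    printf("rel=(%.2f, %.2f)  back=(%.2f, %.2f)\n", rel.x, rel.y, back.x, back.y);
    return 0;
}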
Mat FacemarkLBFImpl::getMeanShape(std::vector<Mat> &gt_shapes, std::vector<BBox> &bboxes) {
Mat FacemarkLBFImpl::getMeanShape(std::vector<Mat> &gt_shapes, std::vector<BBox> &bboxes) {
int N = (int)gt_shapes.size();
Mat mean_shape = Mat::zeros(gt_shapes[0].rows, 2, CV_64FC1);
......@@ -676,11 +626,11 @@ namespace face {
}
mean_shape /= N;
return mean_shape;
}
}
// Similarity Transform, project shape2 to shape1
// p1 ~= scale * rotate * p2, p1 and p2 are vector in math
void FacemarkLBFImpl::LBF::calcSimilarityTransform(const Mat &shape1, const Mat &shape2, double &scale, Mat &rotate) {
// Similarity Transform, project shape2 to shape1
// p1 ~= scale * rotate * p2, p1 and p2 are vector in math
void FacemarkLBFImpl::LBF::calcSimilarityTransform(const Mat &shape1, const Mat &shape2, double &scale, Mat &rotate) {
Mat_<double> rotate_(2, 2);
double x1_center, y1_center, x2_center, y2_center;
x1_center = cv::mean(shape1.col(0))[0];
......@@ -697,8 +647,8 @@ namespace face {
Mat_<double> covar1, covar2;
Mat_<double> mean1, mean2;
calcCovarMatrix(temp1, covar1, mean1, CV_COVAR_COLS);
calcCovarMatrix(temp2, covar2, mean2, CV_COVAR_COLS);
calcCovarMatrix(temp1, covar1, mean1, COVAR_COLS);
calcCovarMatrix(temp2, covar2, mean2, COVAR_COLS);
double s1 = sqrt(cv::norm(covar1));
double s2 = sqrt(cv::norm(covar2));
......@@ -714,10 +664,10 @@ namespace face {
rotate_(0, 0) = cos_theta; rotate_(0, 1) = -sin_theta;
rotate_(1, 0) = sin_theta; rotate_(1, 1) = cos_theta;
rotate = rotate_;
}
}
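For comparison only, not something this implementation uses: OpenCV's estimateAffinePartial2D fits the same kind of scale-plus-rotation (plus translation) model between two point sets and can serve as a sanity check for the scale and rotation computed here. A minimal sketch with toy points:

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>
#include <cmath>
#include <cstdio>
using namespace cv;

int main()
{
    // shape1 is shape2 rotated by 90 degrees (counter-clockwise) and scaled by 2.
    std::vector<Point2f> shape2, shape1;
    shape2.push_back(Point2f(0, 0)); shape1.push_back(Point2f(0, 0));
    shape2.push_back(Point2f(1, 0)); shape1.push_back(Point2f(0, 2));
    shape2.push_back(Point2f(0, 1)); shape1.push_back(Point2f(-2, 0));

    Mat M = estimateAffinePartial2D(shape2, shape1);       // 2x3 matrix [s*R | t]
    if (!M.empty())
    {
        double s = std::sqrt(M.at<double>(0, 0) * M.at<double>(0, 0) +
                             M.at<double>(1, 0) * M.at<double>(1, 0));
        printf("estimated scale = %.2f\n", s);             // prints ~2.00
    }
    return 0;
}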
// Get relative delta_shapes for predicting target
std::vector<Mat> FacemarkLBFImpl::LBF::getDeltaShapes(std::vector<Mat> &gt_shapes, std::vector<Mat> &current_shapes,
// Get relative delta_shapes for predicting target
std::vector<Mat> FacemarkLBFImpl::LBF::getDeltaShapes(std::vector<Mat> &gt_shapes, std::vector<Mat> &current_shapes,
std::vector<BBox> &bboxes, Mat &mean_shape) {
std::vector<Mat> delta_shapes;
int N = (int)gt_shapes.size();
......@@ -730,25 +680,25 @@ namespace face {
// delta_shapes[i] = scale * delta_shapes[i] * rotate.t(); // the result is better without this part
}
return delta_shapes;
}
}
double FacemarkLBFImpl::LBF::calcVariance(const Mat &vec) {
double FacemarkLBFImpl::LBF::calcVariance(const Mat &vec) {
double m1 = cv::mean(vec)[0];
double m2 = cv::mean(vec.mul(vec))[0];
double variance = m2 - m1*m1;
return variance;
}
}
double FacemarkLBFImpl::LBF::calcVariance(const std::vector<double> &vec) {
double FacemarkLBFImpl::LBF::calcVariance(const std::vector<double> &vec) {
if (vec.size() == 0) return 0.;
Mat_<double> vec_(vec);
double m1 = cv::mean(vec_)[0];
double m2 = cv::mean(vec_.mul(vec_))[0];
double variance = m2 - m1*m1;
return variance;
}
}
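A quick standalone check of the identity used above, variance = E[x^2] - (E[x])^2, on a toy vector:

#include <opencv2/core.hpp>
#include <cstdio>
using namespace cv;

int main()
{
    // Population variance of {1, 2, 3}: 14/3 - 2*2 = 2/3.
    Mat_<double> v(3, 1);
    v << 1, 2, 3;
    double m1 = mean(v)[0];
    double m2 = mean(v.mul(v))[0];
    printf("variance = %.4f\n", m2 - m1 * m1);   // prints 0.6667
    return 0;
}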
double FacemarkLBFImpl::LBF::calcMeanError(std::vector<Mat> &gt_shapes, std::vector<Mat> &current_shapes, int landmark_n , std::vector<int> &left, std::vector<int> &right ) {
double FacemarkLBFImpl::LBF::calcMeanError(std::vector<Mat> &gt_shapes, std::vector<Mat> &current_shapes, int landmark_n , std::vector<int> &left, std::vector<int> &right ) {
int N = (int)gt_shapes.size();
double e = 0;
......@@ -778,10 +728,10 @@ namespace face {
}
e /= N*landmark_n;
return e;
}
}
/*---------------RandomTree Implementation---------------------*/
void FacemarkLBFImpl::RandomTree::initTree(int _landmark_id, int _depth, std::vector<int> feats_m, std::vector<double> radius_m) {
/*---------------RandomTree Implementation---------------------*/
void FacemarkLBFImpl::RandomTree::initTree(int _landmark_id, int _depth, std::vector<int> feats_m, std::vector<double> radius_m) {
landmark_id = _landmark_id;
depth = _depth;
nodes_n = 1 << depth;
......@@ -790,9 +740,9 @@ namespace face {
params_feats_m = feats_m;
params_radius_m = radius_m;
}
}
void FacemarkLBFImpl::RandomTree::train(std::vector<Mat> &imgs, std::vector<Mat> &current_shapes, std::vector<BBox> &bboxes,
void FacemarkLBFImpl::RandomTree::train(std::vector<Mat> &imgs, std::vector<Mat> &current_shapes, std::vector<BBox> &bboxes,
std::vector<Mat> &delta_shapes, Mat &mean_shape, std::vector<int> &index, int stage) {
Mat_<double> delta_shapes_((int)delta_shapes.size(), 2);
for (int i = 0; i < (int)delta_shapes.size(); i++) {
......@@ -800,9 +750,9 @@ namespace face {
delta_shapes_(i, 1) = delta_shapes[i].at<double>(landmark_id, 1);
}
splitNode(imgs, current_shapes, bboxes, delta_shapes_, mean_shape, index, 1, stage);
}
}
void FacemarkLBFImpl::RandomTree::splitNode(std::vector<Mat> &imgs, std::vector<Mat> &current_shapes, std::vector<BBox> &bboxes,
void FacemarkLBFImpl::RandomTree::splitNode(std::vector<Mat> &imgs, std::vector<Mat> &current_shapes, std::vector<BBox> &bboxes,
Mat &delta_shapes, Mat &mean_shape, std::vector<int> &root, int idx, int stage) {
int N = (int)root.size();
......@@ -911,28 +861,28 @@ namespace face {
splitNode(imgs, current_shapes, bboxes, delta_shapes, mean_shape, left, 2 * idx, stage);
if (2 * idx + 1 < feats.rows / 2)
splitNode(imgs, current_shapes, bboxes, delta_shapes, mean_shape, right, 2 * idx + 1, stage);
}
}
void FacemarkLBFImpl::RandomTree::write(FileStorage fs, int k, int i, int j) {
void FacemarkLBFImpl::RandomTree::write(FileStorage fs, int k, int i, int j) {
String x;
x = cv::format("tree_%i_%i_%i",k,i,j);
fs << x << feats;
x = cv::format("thresholds_%i_%i_%i",k,i,j);
fs << x << thresholds;
}
}
void FacemarkLBFImpl::RandomTree::read(FileStorage fs, int k, int i, int j) {
void FacemarkLBFImpl::RandomTree::read(FileStorage fs, int k, int i, int j) {
String x;
x = cv::format("tree_%i_%i_%i",k,i,j);
fs[x] >> feats;
x = cv::format("thresholds_%i_%i_%i",k,i,j);
fs[x] >> thresholds;
}
}
/*---------------RandomForest Implementation---------------------*/
void FacemarkLBFImpl::RandomForest::initForest(
/*---------------RandomForest Implementation---------------------*/
void FacemarkLBFImpl::RandomForest::initForest(
int _landmark_n,
int _trees_n,
int _tree_depth,
......@@ -940,7 +890,7 @@ namespace face {
std::vector<int>_feats_m,
std::vector<double>_radius_m,
bool verbose_mode
) {
) {
trees_n = _trees_n;
landmark_n = _landmark_n;
tree_depth = _tree_depth;
......@@ -956,9 +906,9 @@ namespace face {
random_trees[i].resize(trees_n);
for (int j = 0; j < trees_n; j++) random_trees[i][j].initTree(i, tree_depth, feats_m, radius_m);
}
}
}
void FacemarkLBFImpl::RandomForest::train(std::vector<Mat> &imgs, std::vector<Mat> &current_shapes, \
void FacemarkLBFImpl::RandomForest::train(std::vector<Mat> &imgs, std::vector<Mat> &current_shapes, \
std::vector<BBox> &bboxes, std::vector<Mat> &delta_shapes, Mat &mean_shape, int stage) {
int N = (int)imgs.size();
int Q = int(N / ((1. - overlap_ratio) * trees_n));
......@@ -980,9 +930,9 @@ namespace face {
if(verbose) printf("Train %2dth of %d landmark Done, it costs %.4lf s\n", i+1, landmark_n, TIMER_NOW);
TIMER_END
}
}
}
Mat FacemarkLBFImpl::RandomForest::generateLBF(Mat &img, Mat &current_shape, BBox &bbox, Mat &mean_shape) {
Mat FacemarkLBFImpl::RandomForest::generateLBF(Mat &img, Mat &current_shape, BBox &bbox, Mat &mean_shape) {
Mat_<int> lbf_feat(1, landmark_n*trees_n);
double scale;
Mat_<double> rotate;
......@@ -1026,28 +976,28 @@ namespace face {
}
}
return lbf_feat;
}
}
void FacemarkLBFImpl::RandomForest::write(FileStorage fs, int k) {
void FacemarkLBFImpl::RandomForest::write(FileStorage fs, int k) {
for (int i = 0; i < landmark_n; i++) {
for (int j = 0; j < trees_n; j++) {
random_trees[i][j].write(fs,k,i,j);
}
}
}
}
void FacemarkLBFImpl::RandomForest::read(FileStorage fs,int k)
{
void FacemarkLBFImpl::RandomForest::read(FileStorage fs,int k)
{
for (int i = 0; i < landmark_n; i++) {
for (int j = 0; j < trees_n; j++) {
random_trees[i][j].initTree(i, tree_depth, feats_m, radius_m);
random_trees[i][j].read(fs,k,i,j);
}
}
}
}
/*---------------Regressor Implementation---------------------*/
void FacemarkLBFImpl::Regressor::initRegressor(Params config) {
/*---------------Regressor Implementation---------------------*/
void FacemarkLBFImpl::Regressor::initRegressor(Params config) {
stages_n = config.stages_n;
landmark_n = config.n_landmarks;
......@@ -1071,11 +1021,11 @@ namespace face {
for (int i = 0; i < stages_n; i++) {
gl_regression_weights[i].create(2 * config.n_landmarks, F, CV_64FC1);
}
}
}
void FacemarkLBFImpl::Regressor::trainRegressor(std::vector<Mat> &imgs, std::vector<Mat> &gt_shapes, std::vector<Mat> &current_shapes,
void FacemarkLBFImpl::Regressor::trainRegressor(std::vector<Mat> &imgs, std::vector<Mat> &gt_shapes, std::vector<Mat> &current_shapes,
std::vector<BBox> &bboxes, Mat &mean_shape_, int start_from, Params config) {
assert(start_from >= 0 && start_from < stages_n);
CV_Assert(start_from >= 0 && start_from < stages_n);
mean_shape = mean_shape_;
int N = (int)imgs.size();
......@@ -1117,12 +1067,12 @@ namespace face {
if(config.verbose) printf("Train %dth stage Done with Error = %lf\n", k, e);
} // for int k
}//Regressor::training
}//Regressor::training
void FacemarkLBFImpl::Regressor::globalRegressionTrain(
void FacemarkLBFImpl::Regressor::globalRegressionTrain(
std::vector<Mat> &lbfs, std::vector<Mat> &delta_shapes,
int stage, Params config
) {
) {
int N = (int)lbfs.size();
int M = lbfs[0].cols;
......@@ -1167,15 +1117,15 @@ namespace face {
for (int i = 0; i < 2 * landmark_n_; i++) free(Y[i]);
free(X);
free(Y);
} // Regressor:globalRegressionTrain
} // Regressor:globalRegressionTrain
/*adapted from the liblinear library*/
/* TODO: change feature_node to MAT
* as the index in feature_node is only used for "counter"
*/
Mat FacemarkLBFImpl::Regressor::supportVectorRegression(
/*adapted from the liblinear library*/
/* TODO: change feature_node to MAT
* as the index in feature_node is only used for "counter"
*/
Mat FacemarkLBFImpl::Regressor::supportVectorRegression(
feature_node **x, double *y, int nsamples, int feat_size, bool verbose
){
){
#define GETI(i) ((int) y[i])
std::vector<double> w;
......@@ -1347,9 +1297,9 @@ namespace face {
return Mat(Mat(w).t()).clone();
}//end
}//end
Mat FacemarkLBFImpl::Regressor::globalRegressionPredict(const Mat &lbf, int stage) {
Mat FacemarkLBFImpl::Regressor::globalRegressionPredict(const Mat &lbf, int stage) {
const Mat_<double> &weight = (Mat_<double>)gl_regression_weights[stage];
Mat_<double> delta_shape(weight.rows / 2, 2);
const double *w_ptr = NULL;
......@@ -1368,9 +1318,9 @@ namespace face {
delta_shape(i, 1) = y;
}
return delta_shape;
} // Regressor::globalRegressionPredict
} // Regressor::globalRegressionPredict
Mat FacemarkLBFImpl::Regressor::predict(Mat &img, BBox &bbox) {
Mat FacemarkLBFImpl::Regressor::predict(Mat &img, BBox &bbox) {
Mat current_shape = bbox.reproject(mean_shape);
double scale;
Mat rotate;
......@@ -1385,9 +1335,9 @@ namespace face {
current_shape = bbox.reproject(bbox.project(current_shape) + scale * delta_shape * rotate.t());
}
return current_shape;
} // Regressor::predict
} // Regressor::predict
void FacemarkLBFImpl::Regressor::write(FileStorage fs, Params config) {
void FacemarkLBFImpl::Regressor::write(FileStorage fs, Params config) {
fs << "stages_n" << config.stages_n;
fs << "tree_n" << config.tree_n;
......@@ -1404,9 +1354,9 @@ namespace face {
x = cv::format("weights_%i",k);
fs << x << gl_regression_weights[k];
}
}
}
void FacemarkLBFImpl::Regressor::read(FileStorage fs, Params & config){
void FacemarkLBFImpl::Regressor::read(FileStorage fs, Params & config){
fs["stages_n"] >> config.stages_n;
fs["tree_n"] >> config.tree_n;
fs["tree_depth"] >> config.tree_depth;
......@@ -1436,11 +1386,11 @@ namespace face {
x = cv::format("weights_%i",k);
fs[x] >> gl_regression_weights[k];
}
}
}
#undef TIMER_BEGIN
#undef TIMER_NOW
#undef TIMER_END
#undef SIMILARITY_TRANSFORM
#undef TIMER_BEGIN
#undef TIMER_NOW
#undef TIMER_END
#undef SIMILARITY_TRANSFORM
} /* namespace face */
} /* namespace cv */
......@@ -52,5 +52,7 @@
#include <set>
#include <limits>
#include <iostream>
#endif
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
......
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
......@@ -50,24 +25,19 @@ using namespace std;
using namespace cv;
using namespace cv::face;
CascadeClassifier face_detector;
static bool customDetector( InputArray image, OutputArray ROIs, void * config = 0 ){
static bool customDetector( InputArray image, OutputArray ROIs, CascadeClassifier *face_detector){
Mat gray;
std::vector<Rect> & faces = *(std::vector<Rect>*) ROIs.getObj();
faces.clear();
if(config!=0){
//do nothing
}
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
cvtColor(image.getMat(),gray, COLOR_BGR2GRAY);
}else{
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
face_detector.detectMultiScale( gray, faces, 1.4, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
face_detector->detectMultiScale( gray, faces, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30) );
return true;
}
......@@ -82,11 +52,11 @@ TEST(CV_Face_FacemarkAAM, can_create_default) {
TEST(CV_Face_FacemarkAAM, can_set_custom_detector) {
string cascade_filename =
cvtest::findDataFile("cascadeandhog/cascades/lbpcascade_frontalface.xml", true);
CascadeClassifier face_detector;
EXPECT_TRUE(face_detector.load(cascade_filename));
Ptr<Facemark> facemark = FacemarkAAM::create();
EXPECT_TRUE(facemark->setFaceDetector(customDetector));
EXPECT_TRUE(facemark->setFaceDetector((cv::face::FN_FaceDetector)customDetector, &face_detector));
}
TEST(CV_Face_FacemarkAAM, test_workflow) {
......@@ -106,6 +76,9 @@ TEST(CV_Face_FacemarkAAM, test_workflow) {
string cascade_filename =
cvtest::findDataFile("cascadeandhog/cascades/lbpcascade_frontalface.xml", true);
CascadeClassifier face_detector;
EXPECT_TRUE(face_detector.load(cascade_filename));
FacemarkAAM::Params params;
params.n = 1;
params.m = 1;
......@@ -115,7 +88,8 @@ TEST(CV_Face_FacemarkAAM, test_workflow) {
Mat image;
std::vector<Point2f> landmarks;
for(size_t i=0;i<images_train.size();i++){
for(size_t i=0;i<images_train.size();i++)
{
image = imread(images_train[i].c_str());
EXPECT_TRUE(loadFacePoints(points_train[i].c_str(),landmarks));
EXPECT_TRUE(landmarks.size()>0);
......@@ -125,7 +99,7 @@ TEST(CV_Face_FacemarkAAM, test_workflow) {
EXPECT_NO_THROW(facemark->training());
/*------------ Fitting Part ---------------*/
facemark->setFaceDetector(customDetector);
EXPECT_TRUE(facemark->setFaceDetector((cv::face::FN_FaceDetector)customDetector, &face_detector));
string image_filename = cvtest::findDataFile("face/david1.jpg", true);
image = imread(image_filename.c_str());
EXPECT_TRUE(!image.empty());
......
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
......@@ -61,13 +36,13 @@ static bool myCustomDetector( InputArray image, OutputArray ROIs, void * config
}
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
}else{
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
cascade_detector.detectMultiScale( gray, faces, 1.4, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
cascade_detector.detectMultiScale( gray, faces, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30) );
return true;
}
......@@ -141,8 +116,3 @@ TEST(CV_Face_FacemarkLBF, test_workflow) {
EXPECT_TRUE(facemark->fit(image, rects, facial_points));
EXPECT_TRUE(facial_points[0].size()>0);
}
TEST(CV_Face_FacemarkLBF, get_data) {
Ptr<Facemark> facemark = FacemarkLBF::create();
EXPECT_TRUE(facemark->getData());
}
......@@ -69,31 +69,25 @@ struct Conf {
Conf(cv::String s, double d){
model_path = s;
scaleFactor = d;
face_detector.load(model_path);
};
CascadeClassifier face_detector;
};
bool myDetector( InputArray image, OutputArray roi, void * config ){
    Mat gray;
    std::vector<Rect> & faces = *(std::vector<Rect>*) roi.getObj();
    faces.clear();

    if(config!=0){
        Conf* conf = (Conf*)config;

        if(image.channels()>1){
            cvtColor(image,gray,CV_BGR2GRAY);
        }else{
            gray = image.getMat().clone();
        }
        equalizeHist( gray, gray );

        CascadeClassifier face_cascade(conf->model_path);
        face_cascade.detectMultiScale( gray, faces, conf->scaleFactor, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
        return true;
    }else{
        return false;
    }
}

bool myDetector(InputArray image, OutputArray faces, Conf *conf){
    Mat gray;

    if (image.channels() > 1)
        cvtColor(image, gray, COLOR_BGR2GRAY);
    else
        gray = image.getMat().clone();
    equalizeHist(gray, gray);

    std::vector<Rect> faces_;
    conf->face_detector.detectMultiScale(gray, faces_, conf->scaleFactor, 2, CASCADE_SCALE_IMAGE, Size(30, 30) );
    Mat(faces_).copyTo(faces);
    return true;
}
@endcode
......@@ -101,8 +95,8 @@ bool myDetector( InputArray image, OutputArray roi, void * config ){
The following snippet demonstrates how to set the custom detector on the facemark object and use it to detect faces. Keep in mind that some facemark objects may also use the face detector during the training process.
@code
Conf* config = new Conf("../data/lbpcascade_frontalface.xml",1.4);
facemark->setFaceDetector(myDetector);
Conf config("../data/lbpcascade_frontalface.xml", 1.4);
facemark->setFaceDetector(myDetector, &config); // we must guarantee proper lifetime of "config" object
@endcode
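Once the detector is registered, running it through the facemark object is a single getFaces call; in the illustrative fragment below the `img` variable and its source image are assumptions made for this sketch.
@code
Mat img = imread("image.jpg");          // assumed input image
std::vector<Rect> faces;
if (facemark->getFaces(img, faces))     // invokes myDetector through the stored pointer
    std::cout << faces.size() << " faces detected" << std::endl;
@endcode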
Here is the snippet for detecting face using the user defined face detector function.
......