Commit 4a066317 authored by Alexander Alekhin

Merge pull request #1428 from alalek:pr1257_refactoring

parents 1f8ccc16 e310fc55
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -45,127 +20,259 @@ Mentor: Delia Passalacqua
#include "opencv2/face.hpp"
#include "opencv2/objdetect.hpp"
#include "opencv2/objdetect/objdetect_c.h"
#include "opencv2/imgproc/types_c.h"
#include <vector>
#include <string>
namespace cv {
namespace face {
//! @addtogroup face
//! @{
/** @brief Parameters for the default cascade-based face detector used by cv::face::getFaces.
 *
 * The fields mirror the arguments of CascadeClassifier::detectMultiScale — presumably they
 * are forwarded to it by the implementation; confirm against the source.
 */
struct CV_EXPORTS_W CParams{
String cascade; //!< the face detector
double scaleFactor; //!< Parameter specifying how much the image size is reduced at each image scale.
int minNeighbors; //!< Parameter specifying how many neighbors each candidate rectangle should have to retain it.
Size minSize; //!< Minimum possible object size.
Size maxSize; //!< Maximum possible object size.
/** @brief Constructor.
 * @param cascade_model Path to the cascade classifier model file (e.g. a Haar cascade XML).
 * @param sf Image pyramid scale factor (how much the image size is reduced at each scale).
 * @param minN Minimum number of neighbor detections required to retain a candidate rectangle.
 * @param minSz Minimum possible object size.
 * @param maxSz Maximum possible object size; the default Size() means no upper limit.
 */
CParams(
String cascade_model,
double sf = 1.1,
int minN = 3,
Size minSz = Size(30, 30),
Size maxSz = Size()
);
};
/** @brief Default face detector
This function is mainly utilized by the implementation of a Facemark Algorithm.
End users are advised to use function Facemark::getFaces which can be manually defined
and circumvented to the algorithm by Facemark::setFaceDetector.
@param image The input image to be processed.
@param faces Output of the function which represent region of interest of the detected faces.
Each face is stored in cv::Rect container.
@param extra_params extra parameters
<B>Example of usage</B>
@code
std::vector<cv::Rect> faces;
CParams params("haarcascade_frontalface_alt.xml");
cv::face::getFaces(frame, faces, &params);
for(int j=0;j<faces.size();j++){
cv::rectangle(frame, faces[j], cv::Scalar(255,0,255));
}
cv::imshow("detection", frame);
@endcode
*/
/*other option: move this function inside Facemark as default face detector*/
/** @brief Default face detector (usage documented in the block above).
 * @param image Input image to be processed.
 * @param faces Output: region of interest of each detected face, stored as cv::Rect.
 * @param extra_params Expected to point to a CParams instance (see the example above) —
 *        NOTE(review): the void* interface does not enforce this; confirm in the implementation.
 */
CV_EXPORTS bool getFaces( InputArray image,
OutputArray faces,
void * extra_params
);
/** @brief A utility to load list of paths to training image and annotation file.
@param imageList The specified file contains paths to the training images.
@param annotationList The specified file contains paths to the training annotations.
@param images The loaded paths of training images.
@param annotations The loaded paths of annotation files.
Example of usage:
@code
String imageFiles = "images_path.txt";
String ptsFiles = "annotations_path.txt";
std::vector<String> images_train;
std::vector<String> landmarks_train;
loadDatasetList(imageFiles,ptsFiles,images_train,landmarks_train);
@endcode
typedef bool(*FN_FaceDetector)(InputArray, OutputArray, void* userData);
/** @brief Parameters for the default cascade-based face detector used by cv::face::getFaces.
 *
 * The fields mirror the arguments of CascadeClassifier::detectMultiScale — presumably they
 * are forwarded to it by the implementation; confirm against the source.
 */
struct CParams{
String cascade; //!< the face detector
double scaleFactor; //!< Parameter specifying how much the image size is reduced at each image scale.
int minNeighbors; //!< Parameter specifying how many neighbors each candidate rectangle should have to retain it.
Size minSize; //!< Minimum possible object size.
Size maxSize; //!< Maximum possible object size.
/** @brief Constructor.
 * @param cascade_model Path to the cascade classifier model file (e.g. a Haar cascade XML).
 * @param sf Image pyramid scale factor (how much the image size is reduced at each scale).
 * @param minN Minimum number of neighbor detections required to retain a candidate rectangle.
 * @param minSz Minimum possible object size.
 * @param maxSz Maximum possible object size; the default Size() means no upper limit.
 */
CV_EXPORTS CParams(
String cascade_model,
double sf = 1.1,
int minN = 3,
Size minSz = Size(30, 30),
Size maxSz = Size()
);
CascadeClassifier face_cascade; //!< cached classifier instance — presumably loaded from `cascade` by the constructor; confirm in the implementation
};
/** @brief Default face detector
This function is mainly utilized by the implementation of a Facemark Algorithm.
End users are advised to use function Facemark::getFaces which can be manually defined
and circumvented to the algorithm by Facemark::setFaceDetector.
@param image The input image to be processed.
@param faces Output of the function which represent region of interest of the detected faces.
Each face is stored in cv::Rect container.
@param params detector parameters
<B>Example of usage</B>
@code
std::vector<cv::Rect> faces;
CParams params("haarcascade_frontalface_alt.xml");
cv::face::getFaces(frame, faces, &params);
for(int j=0;j<faces.size();j++){
cv::rectangle(frame, faces[j], cv::Scalar(255,0,255));
}
cv::imshow("detection", frame);
@endcode
*/
// Default face detector (usage documented above); detection settings come from `params`.
CV_EXPORTS bool getFaces(InputArray image, OutputArray faces, CParams* params);
/** @brief A utility to load list of paths to training image and annotation file.
@param imageList The specified file contains paths to the training images.
@param annotationList The specified file contains paths to the training annotations.
@param images The loaded paths of training images.
@param annotations The loaded paths of annotation files.
Example of usage:
@code
String imageFiles = "images_path.txt";
String ptsFiles = "annotations_path.txt";
std::vector<String> images_train;
std::vector<String> landmarks_train;
loadDatasetList(imageFiles,ptsFiles,images_train,landmarks_train);
@endcode
*/
/** @brief Loads two parallel path lists (see the example above): training images and annotations.
 * @param imageList Text file containing paths to the training images, one per line.
 * @param annotationList Text file containing paths to the training annotations, one per line.
 * @param images Output: the image paths read from imageList.
 * @param annotations Output: the annotation paths read from annotationList.
 * @return presumably true on success — confirm against the implementation.
 */
CV_EXPORTS_W bool loadDatasetList(String imageList,
String annotationList,
std::vector<String> & images,
std::vector<String> & annotations);
/** @brief A utility to load facial landmark dataset from a single file.
@param filename The filename of a file that contains the dataset information.
Each line contains the filename of an image followed by
pairs of x and y values of facial landmarks points separated by a space.
Example
@code
/home/user/ibug/image_003_1.jpg 336.820955 240.864510 334.238298 260.922709 335.266918 ...
/home/user/ibug/image_005_1.jpg 376.158428 230.845712 376.736984 254.924635 383.265403 ...
@endcode
@param images A vector where each element represent the filename of image in the dataset.
Images are not loaded by default to save the memory.
@param facePoints The loaded landmark points for all training data.
@param delim Delimiter between each element, the default value is a whitespace.
@param offset An offset value to adjust the loaded points.
<B>Example of usage</B>
@code
cv::String imageFiles = "../data/images_train.txt";
cv::String ptsFiles = "../data/points_train.txt";
std::vector<String> images;
std::vector<std::vector<Point2f> > facePoints;
loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0f);
@endcode
*/
/** @brief Loads a facial landmark dataset from a single file (format documented above:
 *  each line is an image path followed by x y landmark pairs).
 * @param filename The dataset file to parse.
 * @param images Output: image filenames from the dataset (images themselves are not loaded,
 *        to save memory).
 * @param facePoints Output: the landmark points for all training data.
 * @param delim Delimiter between elements on a line; defaults to a whitespace.
 * @param offset Value added to each loaded point coordinate.
 */
CV_EXPORTS_W bool loadTrainingData( String filename , std::vector<String> & images,
OutputArray facePoints,
char delim = ' ', float offset = 0.0f);
/** @brief A utility to load facial landmark information from the dataset.
@param imageList A file contains the list of image filenames in the training dataset.
@param groundTruth A file contains the list of filenames
where the landmarks points information are stored.
The content in each file should follow the standard format (see face::loadFacePoints).
@param images A vector where each element represent the filename of image in the dataset.
Images are not loaded by default to save the memory.
@param facePoints The loaded landmark points for all training data.
@param offset An offset value to adjust the loaded points.
<B>Example of usage</B>
@code
cv::String imageFiles = "../data/images_train.txt";
cv::String ptsFiles = "../data/points_train.txt";
std::vector<String> images;
std::vector<std::vector<Point2f> > facePoints;
loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0f);
@endcode
example of content in the images_train.txt
@code
/home/user/ibug/image_003_1.jpg
/home/user/ibug/image_004_1.jpg
/home/user/ibug/image_005_1.jpg
/home/user/ibug/image_006.jpg
@endcode
example of content in the points_train.txt
@code
/home/user/ibug/image_003_1.pts
/home/user/ibug/image_004_1.pts
/home/user/ibug/image_005_1.pts
/home/user/ibug/image_006.pts
@endcode
*/
/** @brief Loads facial landmark information from a dataset described by two list files
 *  (formats documented above).
 * @param imageList Text file listing the image filenames of the training dataset.
 * @param groundTruth Text file listing the landmark annotation filenames
 *        (each in the format accepted by face::loadFacePoints).
 * @param images Output: image filenames from the dataset (images themselves are not loaded,
 *        to save memory).
 * @param facePoints Output: the landmark points for all training data.
 * @param offset Value added to each loaded point coordinate.
 */
CV_EXPORTS_W bool loadTrainingData( String imageList, String groundTruth,
std::vector<String> & images,
OutputArray facePoints,
float offset = 0.0f);
/** @brief A utility to load facial landmark information from a given file.
@param filename The filename of file contains the facial landmarks data.
@param points The loaded facial landmark points.
@param offset An offset value to adjust the loaded points.
<B>Example of usage</B>
@code
std::vector<Point2f> points;
face::loadFacePoints("filename.txt", points, 0.0f);
@endcode
The annotation file should follow the default format which is
@code
version: 1
n_points: 68
{
212.716603 499.771793
230.232816 566.290071
...
}
@endcode
where n_points is the number of points considered
and each point is represented as its position in x and y.
*/
/** @brief Loads facial landmark points from a single annotation file
 *  (".pts"-style format documented above: version / n_points header, then x y pairs).
 * @param filename The annotation file to parse.
 * @param points Output: the loaded facial landmark points.
 * @param offset Value added to each loaded point coordinate.
 */
CV_EXPORTS_W bool loadFacePoints( String filename, OutputArray points,
float offset = 0.0f);
/** @brief Utility to draw the detected facial landmark points
@param image The input image to be processed.
@param points Contains the data of points which will be drawn.
@param color The color of points in BGR format represented by cv::Scalar.
<B>Example of usage</B>
@code
std::vector<Rect> faces;
std::vector<std::vector<Point2f> > landmarks;
facemark->getFaces(img, faces);
facemark->fit(img, faces, landmarks);
for(int j=0;j<rects.size();j++){
face::drawFacemarks(frame, landmarks[j], Scalar(0,0,255));
}
@endcode
*/
/** @brief Draws detected facial landmark points onto an image (see the example above).
 * @param image Image that is drawn on in place.
 * @param points The landmark points to draw.
 * @param color Point color as a BGR cv::Scalar; defaults to blue.
 */
CV_EXPORTS_W void drawFacemarks( InputOutputArray image, InputArray points,
Scalar color = Scalar(255,0,0));
/** @brief Abstract base class for all facemark models
All facemark models in OpenCV are derived from the abstract base class Facemark, which
provides a unified access to all facemark algorithms in OpenCV.
To utilize this API in your program, please take a look at the @ref tutorial_table_of_content_facemark
### Description
Facemark is a base class which provides universal access to any specific facemark algorithm.
Therefore, the users should declare a desired algorithm before they can use it in their application.
Here is an example on how to declare facemark algorithm:
@code
// Using Facemark in your code:
Ptr<Facemark> facemark = FacemarkLBF::create();
@endcode
The typical pipeline for facemark detection is listed as follows:
- (Non-mandatory) Set a user defined face detection using Facemark::setFaceDetector.
The facemark algorithms are designed to fit the facial points into a face.
Therefore, the face information should be provided to the facemark algorithm.
Some algorithms might provide a default face recognition function.
However, the users might prefer to use their own face detector to obtain the best possible detection result.
- (Non-mandatory) Training the model for a specific algorithm using Facemark::training.
In this case, the model should be automatically saved by the algorithm.
If the user already have a trained model, then this part can be omitted.
- Load the trained model using Facemark::loadModel.
- Perform the fitting via the Facemark::fit.
*/
class CV_EXPORTS_W Facemark : public virtual Algorithm
{
public:
*/
CV_EXPORTS_W bool loadDatasetList(String imageList,
String annotationList,
std::vector<String> & images,
std::vector<String> & annotations);
// Serialization hooks (OpenCV FileStorage/FileNode format); implemented by each concrete algorithm.
virtual void read( const FileNode& fn )=0;
virtual void write( FileStorage& fs ) const=0;
/** @brief A utility to load facial landmark dataset from a single file.
/** @brief Add one training sample to the trainer.
@param filename The filename of a file that contains the dataset information.
Each line contains the filename of an image followed by
pairs of x and y values of facial landmarks points separated by a space.
Example
@code
/home/user/ibug/image_003_1.jpg 336.820955 240.864510 334.238298 260.922709 335.266918 ...
/home/user/ibug/image_005_1.jpg 376.158428 230.845712 376.736984 254.924635 383.265403 ...
@endcode
@param images A vector where each element represent the filename of image in the dataset.
Images are not loaded by default to save the memory.
@param facePoints The loaded landmark points for all training data.
@param delim Delimiter between each element, the default value is a whitespace.
@param offset An offset value to adjust the loaded points.
@param image Input image.
@param landmarks The ground-truth of facial landmarks points corresponds to the image.
<B>Example of usage</B>
@code
cv::String imageFiles = "../data/images_train.txt";
cv::String ptsFiles = "../data/points_train.txt";
std::vector<String> images;
std::vector<std::vector<Point2f> > facePoints;
loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0);
@endcode
*/
CV_EXPORTS_W bool loadTrainingData( String filename , std::vector<String> & images,
OutputArray facePoints,
char delim = ' ', float offset = 0.0);
/** @brief A utility to load facial landmark information from the dataset.
String imageFiles = "../data/images_train.txt";
String ptsFiles = "../data/points_train.txt";
std::vector<String> images_train;
std::vector<String> landmarks_train;
@param imageList A file contains the list of image filenames in the training dataset.
@param groundTruth A file contains the list of filenames
where the landmarks points information are stored.
The content in each file should follow the standard format (see face::loadFacePoints).
@param images A vector where each element represent the filename of image in the dataset.
Images are not loaded by default to save the memory.
@param facePoints The loaded landmark points for all training data.
@param offset An offset value to adjust the loaded points.
// load the list of dataset: image paths and landmark file paths
loadDatasetList(imageFiles,ptsFiles,images_train,landmarks_train);
<B>Example of usage</B>
@code
cv::String imageFiles = "../data/images_train.txt";
cv::String ptsFiles = "../data/points_train.txt";
std::vector<String> images;
std::vector<std::vector<Point2f> > facePoints;
loadTrainingData(imageFiles, ptsFiles, images, facePoints, 0.0);
Mat image;
std::vector<Point2f> facial_points;
for(size_t i=0;i<images_train.size();i++){
image = imread(images_train[i].c_str());
loadFacePoints(landmarks_train[i],facial_points);
facemark->addTrainingSample(image, facial_points);
}
@endcode
The contents in the training files should follows the standard format.
Here are examples for the contents in these files.
example of content in the images_train.txt
@code
/home/user/ibug/image_003_1.jpg
......@@ -181,259 +288,125 @@ namespace face {
/home/user/ibug/image_005_1.pts
/home/user/ibug/image_006.pts
@endcode
*/
// Adds one (image, ground-truth landmarks) pair to the trainer; usage documented above.
virtual bool addTrainingSample(InputArray image, InputArray landmarks)=0;
/** @brief Trains a Facemark algorithm using the given dataset.
Before the training process, training samples should be added to the trainer
using face::addTrainingSample function.
@param parameters Optional extra parameters (algorithm dependent).
<B>Example of usage</B>
@code
FacemarkLBF::Params params;
params.model_filename = "ibug68.model"; // filename to save the trained model
Ptr<Facemark> facemark = FacemarkLBF::create(params);
// add training samples (see Facemark::addTrainingSample)
facemark->training();
@endcode
*/
CV_EXPORTS_W bool loadTrainingData( String imageList, String groundTruth,
std::vector<String> & images,
OutputArray facePoints,
float offset = 0.0);
// Trains the model from samples previously added via addTrainingSample (see docs above).
// `parameters` is algorithm dependent and may be null.
virtual void training(void* parameters=0)=0;
/** @brief A utility to load facial landmark information from a given file.
/** @brief A function to load the trained model before the fitting process.
@param filename The filename of file contains the facial landmarks data.
@param points The loaded facial landmark points.
@param offset An offset value to adjust the loaded points.
@param model A string represent the filename of a trained model.
<B>Example of usage</B>
@code
std::vector<Point2f> points;
face::loadFacePoints("filename.txt", points, 0.0);
facemark->loadModel("../data/lbf.model");
@endcode
*/
// Loads a trained model from the given filename; must be called before fit (see docs above).
virtual void loadModel(String model)=0;
// virtual void saveModel(String fs)=0;
/** @brief Trains a Facemark algorithm using the given dataset.
@param image Input image.
@param faces Output of the function which represent region of interest of the detected faces.
Each face is stored in cv::Rect container.
@param landmarks The detected landmark points for each faces.
@param config Algorithm specific for running time parameters.
The annotation file should follow the default format which is
<B>Example of usage</B>
@code
version: 1
n_points: 68
Mat image = imread("image.jpg");
std::vector<Rect> faces;
std::vector<std::vector<Point2f> > landmarks;
facemark->fit(image, faces, landmarks);
@endcode
TODO remove "config" from here
*/
// Fits landmarks for each given face region (see documentation above).
// NOTE(review): `config` carries algorithm-specific runtime parameters; the TODO above
// proposes removing it from this interface.
virtual bool fit( InputArray image,
InputArray faces,
InputOutputArray landmarks,
void * config = 0)=0;
/** @brief Set a user defined face detector for the Facemark algorithm.
@param detector The user defined face detector function
@param userData Detector parameters
<B>Example of usage</B>
@code
MyDetectorParameters detectorParameters(...);
facemark->setFaceDetector(myDetector, &detectorParameters);
@endcode
Example of a user defined face detector
@code
bool myDetector( InputArray image, OutputArray faces, void* userData)
{
212.716603 499.771793
230.232816 566.290071
...
MyDetectorParameters* params = (MyDetectorParameters*)userData;
// -------- do something --------
}
@endcode
where n_points is the number of points considered
and each point is represented as its position in x and y.
*/
CV_EXPORTS_W bool loadFacePoints( String filename, OutputArray points,
float offset = 0.0);
TODO Lifetime of detector parameters is uncontrolled. Rework interface design to "Ptr<FaceDetector>".
*/
// Registers a user-defined face detector; `userData` is passed to each `detector` call.
// NOTE(review): lifetime of `userData` is uncontrolled (see the TODO above).
virtual bool setFaceDetector(FN_FaceDetector detector, void* userData = 0)=0;
/** @brief Utility to draw the detected facial landmark points
/** @brief Detect faces from a given image using default or user defined face detector.
Some Algorithm might not provide a default face detector.
@param image The input image to be processed.
@param points Contains the data of points which will be drawn.
@param color The color of points in BGR format represented by cv::Scalar.
@param image Input image.
@param faces Output of the function which represent region of interest of the detected faces. Each face is stored in cv::Rect container.
<B>Example of usage</B>
@code
std::vector<Rect> faces;
std::vector<std::vector<Point2f> > landmarks;
std::vector<cv::Rect> faces;
facemark->getFaces(img, faces);
facemark->fit(img, faces, landmarks);
for(int j=0;j<rects.size();j++){
face::drawFacemarks(frame, landmarks[j], Scalar(0,0,255));
for(int j=0;j<faces.size();j++){
cv::rectangle(img, faces[j], cv::Scalar(255,0,255));
}
@endcode
*/
CV_EXPORTS_W void drawFacemarks( InputOutputArray image, InputArray points,
Scalar color = Scalar(255,0,0));
// Detects faces using the default or the user-defined detector (see documentation above).
virtual bool getFaces(InputArray image, OutputArray faces)=0;
/** @brief Abstract base class for all facemark models
/** @brief Get data from an algorithm
All facemark models in OpenCV are derived from the abstract base class Facemark, which
provides a unified access to all facemark algorithms in OpenCV.
@param items The obtained data, algorithm dependent.
To utilize this API in your program, please take a look at the @ref tutorial_table_of_content_facemark
### Description
<B>Example of usage</B>
@code
Ptr<FacemarkAAM> facemark = FacemarkAAM::create();
facemark->loadModel("AAM.yml");
Facemark is a base class which provides universal access to any specific facemark algorithm.
Therefore, the users should declare a desired algorithm before they can use it in their application.
FacemarkAAM::Data data;
facemark->getData(&data);
std::vector<Point2f> s0 = data.s0;
Here is an example on how to declare facemark algorithm:
@code
// Using Facemark in your code:
Ptr<Facemark> facemark = FacemarkLBF::create();
cout<<s0<<endl;
@endcode
The typical pipeline for facemark detection is listed as follows:
- (Non-mandatory) Set a user defined face detection using Facemark::setFaceDetector.
The facemark algorithms are designed to fit the facial points into a face.
Therefore, the face information should be provided to the facemark algorithm.
Some algorithms might provide a default face recognition function.
However, the users might prefer to use their own face detector to obtain the best possible detection result.
- (Non-mandatory) Training the model for a specific algorithm using Facemark::training.
In this case, the model should be automatically saved by the algorithm.
If the user already have a trained model, then this part can be omitted.
- Load the trained model using Facemark::loadModel.
- Perform the fitting via the Facemark::fit.
*/
class CV_EXPORTS_W Facemark : public virtual Algorithm
{
public:
virtual void read( const FileNode& fn )=0;
virtual void write( FileStorage& fs ) const=0;
/** @brief Add one training sample to the trainer.
@param image Input image.
@param landmarks The ground-truth of facial landmarks points corresponds to the image.
<B>Example of usage</B>
@code
String imageFiles = "../data/images_train.txt";
String ptsFiles = "../data/points_train.txt";
std::vector<String> images_train;
std::vector<String> landmarks_train;
// load the list of dataset: image paths and landmark file paths
loadDatasetList(imageFiles,ptsFiles,images_train,landmarks_train);
Mat image;
std::vector<Point2f> facial_points;
for(size_t i=0;i<images_train.size();i++){
image = imread(images_train[i].c_str());
loadFacePoints(landmarks_train[i],facial_points);
facemark->addTrainingSample(image, facial_points);
}
@endcode
The contents in the training files should follows the standard format.
Here are examples for the contents in these files.
example of content in the images_train.txt
@code
/home/user/ibug/image_003_1.jpg
/home/user/ibug/image_004_1.jpg
/home/user/ibug/image_005_1.jpg
/home/user/ibug/image_006.jpg
@endcode
example of content in the points_train.txt
@code
/home/user/ibug/image_003_1.pts
/home/user/ibug/image_004_1.pts
/home/user/ibug/image_005_1.pts
/home/user/ibug/image_006.pts
@endcode
*/
virtual bool addTrainingSample(InputArray image, InputArray landmarks)=0;
/** @brief Trains a Facemark algorithm using the given dataset.
Before the training process, training samples should be added to the trainer
using face::addTrainingSample function.
@param parameters Optional extra parameters (algorithm dependent).
<B>Example of usage</B>
@code
FacemarkLBF::Params params;
params.model_filename = "ibug68.model"; // filename to save the trained model
Ptr<Facemark> facemark = FacemarkLBF::create(params);
// add training samples (see Facemark::addTrainingSample)
facemark->training();
@endcode
*/
virtual void training(void* parameters=0)=0;
/** @brief A function to load the trained model before the fitting process.
@param model A string represent the filename of a trained model.
<B>Example of usage</B>
@code
facemark->loadModel("../data/lbf.model");
@endcode
*/
virtual void loadModel(String model)=0;
// virtual void saveModel(String fs)=0;
/** @brief Trains a Facemark algorithm using the given dataset.
@param image Input image.
@param faces Output of the function which represent region of interest of the detected faces.
Each face is stored in cv::Rect container.
@param landmarks The detected landmark points for each faces.
@param config Algorithm specific for running time parameters.
<B>Example of usage</B>
@code
Mat image = imread("image.jpg");
std::vector<Rect> faces;
std::vector<std::vector<Point2f> > landmarks;
facemark->fit(image, faces, landmarks);
@endcode
*/
virtual bool fit( InputArray image,\
InputArray faces,\
InputOutputArray landmarks,\
void * config = 0)=0;
/** @brief Set a user defined face detector for the Facemark algorithm.
@param f The user defined face detector function
<B>Example of usage</B>
@code
facemark->setFaceDetector(myDetector);
@endcode
Example of a user defined face detector
@code
bool myDetector( InputArray image, OutputArray ROIs ){
std::vector<Rect> & faces = *(std::vector<Rect>*) ROIs.getObj();
faces.clear();
Mat img = image.getMat();
// -------- do something --------
}
@endcode
*/
virtual bool setFaceDetector(bool(*f)(InputArray , OutputArray, void * ))=0;
/** @brief Detect faces from a given image using default or user defined face detector.
Some Algorithm might not provide a default face detector.
@param image Input image.
@param faces Output of the function which represent region of interest of the detected faces.
Each face is stored in cv::Rect container.
@param extra_params Optional extra-parameters for the face detector function.
<B>Example of usage</B>
@code
std::vector<cv::Rect> faces;
facemark->getFaces(img, faces);
for(int j=0;j<faces.size();j++){
cv::rectangle(img, faces[j], cv::Scalar(255,0,255));
}
@endcode
*/
virtual bool getFaces( InputArray image , OutputArray faces, void * extra_params=0)=0;
/** @brief Get data from an algorithm
@param items The obtained data, algorithm dependent.
<B>Example of usage</B>
@code
Ptr<FacemarkAAM> facemark = FacemarkAAM::create();
facemark->loadModel("AAM.yml");
FacemarkAAM::Data data;
facemark->getData(&data);
std::vector<Point2f> s0 = data.s0;
cout<<s0<<endl;
@endcode
*/
virtual bool getData(void * items=0)=0;
}; /* Facemark*/
// Retrieves algorithm-dependent data (see documentation above); `items` type depends on the algorithm.
virtual bool getData(void * items=0)=0; // FIXIT
}; /* Facemark*/
//! @}
} /* namespace face */
} /* namespace cv */
#endif //__OPENCV_FACELANDMARK_HPP__
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -44,114 +19,114 @@ namespace face {
//! @addtogroup face
//! @{
class CV_EXPORTS_W FacemarkAAM : public Facemark
class CV_EXPORTS_W FacemarkAAM : public Facemark
{
public:
struct CV_EXPORTS Params
{
public:
struct CV_EXPORTS Params
{
/**
* \brief Constructor
*/
Params();
/**
* \brief Read parameters from file, currently unused
*/
void read(const FileNode& /*fn*/);
/**
* \brief Read parameters from file, currently unused
*/
void write(FileStorage& /*fs*/) const;
std::string model_filename;
int m;
int n;
int n_iter;
bool verbose;
bool save_model;
int max_m, max_n, texture_max_m;
std::vector<float>scales;
};
/**
* \brief Optional parameter for fitting process.
* \brief Constructor
*/
struct CV_EXPORTS Config
{
Config( Mat rot = Mat::eye(2,2,CV_32F),
Point2f trans = Point2f(0.0,0.0),
float scaling = 1.0,
int scale_id=0
);
Mat R;
Point2f t;
float scale;
int model_scale_idx;
};
Params();
/**
* \brief Data container for the facemark::getData function
* \brief Read parameters from file, currently unused
*/
struct CV_EXPORTS Data
{
std::vector<Point2f> s0;
};
void read(const FileNode& /*fn*/);
/**
* \brief The model of AAM Algorithm
* \brief Read parameters from file, currently unused
*/
struct CV_EXPORTS Model
{
int npts; //!< unused delete
int max_n; //!< unused delete
std::vector<float>scales;
//!< defines the scales considered to build the model
/*warping*/
std::vector<Vec3i> triangles;
//!< each element contains 3 values, represent index of facemarks that construct one triangle (obtained using delaunay triangulation)
struct Texture{
int max_m; //!< unused delete
Rect resolution;
//!< resolution of the current scale
Mat A;
//!< gray values from all face region in the dataset, projected in PCA space
Mat A0;
//!< average of gray values from all face region in the dataset
Mat AA;
//!< gray values from all eroded face region in the dataset, projected in PCA space
Mat AA0;
//!< average of gray values from all eroded face region in the dataset
std::vector<std::vector<Point> > textureIdx;
//!< index for warping of each delaunay triangle region constructed by 3 facemarks
std::vector<Point2f> base_shape;
//!< basic shape, normalized to be fit in an image with current detection resolution
std::vector<int> ind1;
//!< index of pixels for mapping process to obtains the grays values of face region
std::vector<int> ind2;
//!< index of pixels for mapping process to obtains the grays values of eroded face region
};
std::vector<Texture> textures;
//!< a container to holds the texture data for each scale of fitting
/*shape*/
std::vector<Point2f> s0;
//!< the basic shape obtained from training dataset
Mat S,Q;
//!< the encoded shapes from training data
void write(FileStorage& /*fs*/) const;
std::string model_filename;
int m;
int n;
int n_iter;
bool verbose;
bool save_model;
int max_m, max_n, texture_max_m;
std::vector<float>scales;
};
/**
* \brief Optional parameter for fitting process.
*
* Carries an initial similarity transform (rotation, translation, scale)
* plus the index of the model scale to fit with; the AAM sample builds one
* per face from its eye-based initialization and passes a vector of them
* through the fit() extra-parameter pointer.
*/
struct CV_EXPORTS Config
{
// rot: initial 2x2 rotation (defaults to identity); trans: initial
// translation; scaling: initial scale factor; scale_id: index into
// Params::scales selecting the model scale.
Config( Mat rot = Mat::eye(2,2,CV_32F),
Point2f trans = Point2f(0.0f,0.0f),
float scaling = 1.0f,
int scale_id=0
);
Mat R; //!< initial 2x2 rotation estimate
Point2f t; //!< initial translation estimate
float scale; //!< initial scale estimate
int model_scale_idx; //!< index of the model scale to use (see Params::scales)
};
/**
* \brief Data container for the facemark::getData function
*
* Exposes the trained model's base shape so callers can seed the fitting
* initialization (see the AAM sample's getInitialFitting()).
*/
struct CV_EXPORTS Data
{
// base shape of the trained AAM model (same data as Model::s0)
std::vector<Point2f> s0;
};
/**
* \brief The model of AAM Algorithm
*
* Holds the trained shape component (base shape s0 plus encoded shapes S, Q)
* and one Texture entry per configured scale.
*/
struct CV_EXPORTS Model
{
int npts; //!< unused (marked for removal)
int max_n; //!< unused (marked for removal)
std::vector<float>scales;
//!< defines the scales considered to build the model
/*warping*/
std::vector<Vec3i> triangles;
//!< each element contains 3 values, represent index of facemarks that construct one triangle (obtained using delaunay triangulation)
// Per-scale texture (appearance) component of the model.
struct Texture{
int max_m; //!< unused (marked for removal)
Rect resolution;
//!< resolution of the current scale
Mat A;
//!< gray values from all face region in the dataset, projected in PCA space
Mat A0;
//!< average of gray values from all face region in the dataset
Mat AA;
//!< gray values from all eroded face region in the dataset, projected in PCA space
Mat AA0;
//!< average of gray values from all eroded face region in the dataset
std::vector<std::vector<Point> > textureIdx;
//!< index for warping of each delaunay triangle region constructed by 3 facemarks
std::vector<Point2f> base_shape;
//!< basic shape, normalized to be fit in an image with current detection resolution
std::vector<int> ind1;
//!< index of pixels for mapping process to obtains the grays values of face region
std::vector<int> ind2;
//!< index of pixels for mapping process to obtains the grays values of eroded face region
};
std::vector<Texture> textures;
//!< a container to holds the texture data for each scale of fitting
/*shape*/
std::vector<Point2f> s0;
//!< the basic shape obtained from training dataset
Mat S,Q;
//!< the encoded shapes from training data
};
//!< initializer
static Ptr<FacemarkAAM> create(const FacemarkAAM::Params &parameters = FacemarkAAM::Params() );
virtual ~FacemarkAAM() {}
//!< initializer
static Ptr<FacemarkAAM> create(const FacemarkAAM::Params &parameters = FacemarkAAM::Params() );
virtual ~FacemarkAAM() {}
}; /* AAM */
}; /* AAM */
//! @}
......
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -45,72 +20,72 @@ namespace face {
//! @addtogroup face
//! @{
class CV_EXPORTS_W FacemarkLBF : public Facemark
class CV_EXPORTS_W FacemarkLBF : public Facemark
{
public:
// Configuration for FacemarkLBF training and fitting.
struct CV_EXPORTS Params
{
/**
* \brief Constructor
*/
Params();
double shape_offset;
//!< offset for the loaded face landmark points
String cascade_face;
//!< filename of the face detector model
bool verbose;
//!< show the training print-out
int n_landmarks;
//!< number of landmark points
int initShape_n;
//!< multiplier for augment the training data
int stages_n;
//!< number of refinement stages
int tree_n;
//!< number of tree in the model for each landmark point refinement
int tree_depth;
//!< the depth of decision tree, defines the size of feature
double bagging_overlap;
//!< overlap ratio for training the LBF feature
std::string model_filename;
//!< filename where the trained model will be saved
bool save_model; //!< flag to save the trained model or not
unsigned int seed; //!< seed for shuffling the training data
// NOTE(review): feats_m/radius_m are undocumented upstream — presumably
// per-stage feature counts and sampling radii for the LBF descriptor;
// confirm against the trainer implementation.
std::vector<int> feats_m;
std::vector<double> radius_m;
std::vector<int> pupils[2];
//!< index of facemark points on pupils of left and right eye
// NOTE(review): undocumented detection region of interest — confirm usage.
Rect detectROI;
void read(const FileNode& /*fn*/); //!< load these parameters from a file node
void write(FileStorage& /*fs*/) const; //!< save these parameters to storage
};
class BBox {
public:
// Configuration for FacemarkLBF training and fitting.
struct CV_EXPORTS Params
{
/**
* \brief Constructor
*/
Params();
double shape_offset;
//!< offset for the loaded face landmark points
String cascade_face;
//!< filename of the face detector model
bool verbose;
//!< show the training print-out
int n_landmarks;
//!< number of landmark points
int initShape_n;
//!< multiplier for augment the training data
int stages_n;
//!< number of refinement stages
int tree_n;
//!< number of tree in the model for each landmark point refinement
int tree_depth;
//!< the depth of decision tree, defines the size of feature
double bagging_overlap;
//!< overlap ratio for training the LBF feature
std::string model_filename;
//!< filename where the trained model will be saved
bool save_model; //!< flag to save the trained model or not
unsigned int seed; //!< seed for shuffling the training data
// NOTE(review): feats_m/radius_m are undocumented upstream — presumably
// per-stage feature counts and sampling radii for the LBF descriptor;
// confirm against the trainer implementation.
std::vector<int> feats_m;
std::vector<double> radius_m;
std::vector<int> pupils[2];
//!< index of facemark points on pupils of left and right eye
// NOTE(review): undocumented detection region of interest — confirm usage.
Rect detectROI;
void read(const FileNode& /*fn*/); //!< load these parameters from a file node
void write(FileStorage& /*fs*/) const; //!< save these parameters to storage
};
// Axis-aligned bounding box used internally by the LBF implementation.
// project()/reproject() map a landmark shape between image coordinates and
// the box's own frame.
// NOTE(review): the direction of each mapping is inferred from the names —
// confirm against the implementation before relying on it.
class BBox {
public:
BBox();
~BBox();
// x/y: top-left corner; w/h: box dimensions (also derives center and scale).
BBox(double x, double y, double w, double h);
cv::Mat project(const cv::Mat &shape) const;
cv::Mat reproject(const cv::Mat &shape) const;
double x, y; //!< top-left corner
double x_center, y_center; //!< box center
double x_scale, y_scale; //!< per-axis scale factors
double width, height; //!< box dimensions
};
static Ptr<FacemarkLBF> create(const FacemarkLBF::Params &parameters = FacemarkLBF::Params() );
virtual ~FacemarkLBF(){};
}; /* LBF */
BBox();
~BBox();
BBox(double x, double y, double w, double h);
cv::Mat project(const cv::Mat &shape) const;
cv::Mat reproject(const cv::Mat &shape) const;
double x, y;
double x_center, y_center;
double x_scale, y_scale;
double width, height;
};
static Ptr<FacemarkLBF> create(const FacemarkLBF::Params &parameters = FacemarkLBF::Params() );
virtual ~FacemarkLBF(){};
}; /* LBF */
//! @}
......
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
......@@ -63,225 +34,226 @@ Mentor: Delia Passalacqua
* example of the dataset is available at https://ibug.doc.ic.ac.uk/download/annotations/lfpw.zip
*--------------------------------------------------*/
#include <stdio.h>
#include <fstream>
#include <sstream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/face.hpp"
#include <iostream>
#include <string>
#include <ctime>
using namespace std;
using namespace cv;
using namespace cv::face;
bool myDetector( InputArray image, OutputArray ROIs, CascadeClassifier face_cascade);
bool getInitialFitting(Mat image, Rect face, std::vector<Point2f> s0,
CascadeClassifier eyes_cascade, Mat & R, Point2f & Trans, float & scale);
bool parseArguments(int argc, char** argv, CommandLineParser & , String & cascade,
String & model, String & images, String & annotations, String & testImages
);
// AAM demo entry point: trains a FacemarkAAM model on the supplied dataset,
// then fits it on the test images using an eye-based initialization and
// displays the result. Returns -1 on bad arguments; otherwise main's
// implicit return 0 applies (no explicit return at the end).
int main(int argc, char** argv )
{
CommandLineParser parser(argc, argv,"");
String cascade_path,eyes_cascade_path,images_path, annotations_path, test_images_path;
if(!parseArguments(argc, argv, parser,cascade_path,eyes_cascade_path,images_path, annotations_path, test_images_path))
return -1;
//! [instance_creation]
/*create the facemark instance*/
FacemarkAAM::Params params;
params.scales.push_back(2.0);
params.scales.push_back(4.0);
params.model_filename = "AAM.yaml";
Ptr<FacemarkAAM> facemark = FacemarkAAM::create(params);
//! [instance_creation]
//! [load_dataset]
/*Loads the dataset*/
std::vector<String> images_train;
std::vector<String> landmarks_train;
loadDatasetList(images_path,annotations_path,images_train,landmarks_train);
//! [load_dataset]
//! [add_samples]
Mat image;
std::vector<Point2f> facial_points;
for(size_t i=0;i<images_train.size();i++){
image = imread(images_train[i].c_str());
loadFacePoints(landmarks_train[i],facial_points);
facemark->addTrainingSample(image, facial_points);
}
//! [add_samples]
//! [training]
/* trained model will be saved to AAM.yml */
facemark->training();
//! [training]
//! [load_test_images]
/*test using some images*/
String testFiles(images_path), testPts(annotations_path);
if(!test_images_path.empty()){
testFiles = test_images_path;
testPts = test_images_path; //unused
}
std::vector<String> images;
std::vector<String> facePoints;
loadDatasetList(testFiles, testPts, images, facePoints);
//! [load_test_images]
//! [trainsformation_variables]
// Per-face similarity transform filled in by getInitialFitting().
float scale ;
Point2f T;
Mat R;
//! [trainsformation_variables]
//! [base_shape]
// Base shape of the freshly trained model, needed for initialization.
FacemarkAAM::Data data;
facemark->getData(&data);
std::vector<Point2f> s0 = data.s0;
//! [base_shape]
//! [fitting]
/*fitting process*/
std::vector<Rect> faces;
//! [load_cascade_models]
CascadeClassifier face_cascade(cascade_path);
CascadeClassifier eyes_cascade(eyes_cascade_path);
//! [load_cascade_models]
for(int i=0;i<(int)images.size();i++){
printf("image #%i ", i);
//! [detect_face]
image = imread(images[i]);
myDetector(image, faces, face_cascade);
//! [detect_face]
if(faces.size()>0){
//! [get_initialization]
// Keep only faces whose eyes were found; one Config per kept face.
std::vector<FacemarkAAM::Config> conf;
std::vector<Rect> faces_eyes;
for(unsigned j=0;j<faces.size();j++){
if(getInitialFitting(image,faces[j],s0,eyes_cascade, R,T,scale)){
conf.push_back(FacemarkAAM::Config(R,T,scale,(int)params.scales.size()-1));
faces_eyes.push_back(faces[j]);
}
}
//! [get_initialization]
//! [fitting_process]
if(conf.size()>0){
printf(" - face with eyes found %i ", (int)conf.size());
std::vector<std::vector<Point2f> > landmarks;
double newtime = (double)getTickCount();
facemark->fit(image, faces_eyes, landmarks, (void*)&conf);
double fittime = ((getTickCount() - newtime)/getTickFrequency());
for(unsigned j=0;j<landmarks.size();j++){
drawFacemarks(image, landmarks[j],Scalar(0,255,0));
}
printf("%f ms\n",fittime*1000);
imshow("fitting", image);
waitKey(0);
}else{
printf("initialization cannot be computed - skipping\n");
}
//! [fitting_process]
}
} //for
//! [fitting]
}
// Face detector callback: equalized-grayscale preprocessing followed by a
// cascade detection; results are written into the caller's vector<Rect>.
// NOTE(review): the cast of ROIs.getObj() assumes the caller really passed a
// std::vector<Rect>; the cascade is taken by value (copied on every call);
// CV_BGR2GRAY / CV_HAAR_SCALE_IMAGE are the deprecated C-API constants —
// the updated copy of this function later in the file fixes all three.
bool myDetector( InputArray image, OutputArray ROIs, CascadeClassifier face_cascade){
Mat gray;
std::vector<Rect> & faces = *(std::vector<Rect>*) ROIs.getObj();
faces.clear();
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
}else{
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
face_cascade.detectMultiScale( gray, faces, 1.2, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
return true;
}
// Estimate an initial similarity transform (R, Trans, scale) aligning the
// AAM base shape s0 with the detected face, using the two eye centers found
// by eyes_cascade as anchors. Returns true only when exactly two eyes are
// detected; otherwise Trans falls back to the face center and false is
// returned.
// NOTE(review): landmark indices 36/39 and 42/45 are assumed to be the
// eye-corner points of a 68-point annotation scheme — confirm the dataset.
bool getInitialFitting(Mat image, Rect face, std::vector<Point2f> s0 ,CascadeClassifier eyes_cascade, Mat & R, Point2f & Trans, float & scale){
std::vector<Point2f> mybase;
// NOTE(review): T, eye and base_shape2 below are never read — dead locals.
std::vector<Point2f> T;
std::vector<Point2f> base = Mat(Mat(s0)+Scalar(image.cols/2,image.rows/2)).reshape(2);
std::vector<Point2f> base_shape,base_shape2 ;
Point2f e1 = Point2f((float)((base[39].x+base[36].x)/2.0),(float)((base[39].y+base[36].y)/2.0)); //eye1
Point2f e2 = Point2f((float)((base[45].x+base[42].x)/2.0),(float)((base[45].y+base[42].y)/2.0)); //eye2
if(face.width==0 || face.height==0) return false;
std::vector<Point2f> eye;
bool found=false;
Mat faceROI = image( face);
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(20, 20) );
if(eyes.size()==2){
found = true;
int j=0;
// Detected eye centers in image coordinates.
Point2f c1( (float)(face.x + eyes[j].x + eyes[j].width*0.5), (float)(face.y + eyes[j].y + eyes[j].height*0.5));
j=1;
Point2f c2( (float)(face.x + eyes[j].x + eyes[j].width*0.5), (float)(face.y + eyes[j].y + eyes[j].height*0.5));
// Pivot on the left-most eye; a0 = eye-line angle in the image.
Point2f pivot;
double a0,a1;
if(c1.x<c2.x){
pivot = c1;
a0 = atan2(c2.y-c1.y, c2.x-c1.x);
}else{
pivot = c2;
a0 = atan2(c1.y-c2.y, c1.x-c2.x);
}
// Scale = detected eye distance / model eye distance.
scale = (float)(norm(Mat(c1)-Mat(c2))/norm(Mat(e1)-Mat(e2)));
mybase= Mat(Mat(s0)*scale).reshape(2);
Point2f ey1 = Point2f((float)((mybase[39].x+mybase[36].x)/2.0),(float)((mybase[39].y+mybase[36].y)/2.0));
Point2f ey2 = Point2f((float)((mybase[45].x+mybase[42].x)/2.0),(float)((mybase[45].y+mybase[42].y)/2.0));
// NOTE(review): unparenthesized macro — harmless in the single use below
// but fragile if reused in another expression.
#define TO_DEGREE 180.0/3.14159265
a1 = atan2(ey2.y-ey1.y, ey2.x-ey1.x);
Mat rot = getRotationMatrix2D(Point2f(0,0), (a1-a0)*TO_DEGREE, 1.0);
rot(Rect(0,0,2,2)).convertTo(R, CV_32F);
// Translation moves the transformed model eye onto the pivot eye.
base_shape = Mat(Mat(R*scale*Mat(Mat(s0).reshape(1)).t()).t()).reshape(2);
ey1 = Point2f((float)((base_shape[39].x+base_shape[36].x)/2.0),(float)((base_shape[39].y+base_shape[36].y)/2.0));
ey2 = Point2f((float)((base_shape[45].x+base_shape[42].x)/2.0),(float)((base_shape[45].y+base_shape[42].y)/2.0));
T.push_back(Point2f(pivot.x-ey1.x,pivot.y-ey1.y));
Trans = Point2f(pivot.x-ey1.x,pivot.y-ey1.y);
return true;
}else{
Trans = Point2f( (float)(face.x + face.width*0.5),(float)(face.y + face.height*0.5));
}
return found;
}
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
String & cascade,
String & model,
String & images,
String & annotations,
String & test_images
){
#include <stdio.h>
#include <fstream>
#include <sstream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/face.hpp"
#include <iostream>
#include <string>
#include <ctime>
using namespace std;
using namespace cv;
using namespace cv::face;
bool myDetector( InputArray image, OutputArray ROIs, CascadeClassifier *face_cascade);
bool getInitialFitting(Mat image, Rect face, std::vector<Point2f> s0,
CascadeClassifier eyes_cascade, Mat & R, Point2f & Trans, float & scale);
bool parseArguments(int argc, char** argv, CommandLineParser & , String & cascade,
String & model, String & images, String & annotations, String & testImages
);
// AAM demo entry point: trains a FacemarkAAM model on the supplied dataset,
// then fits it on the test images using an eye-based initialization and
// displays the result. Returns -1 on bad arguments; otherwise main's
// implicit return 0 applies (no explicit return at the end).
int main(int argc, char** argv )
{
CommandLineParser parser(argc, argv,"");
String cascade_path,eyes_cascade_path,images_path, annotations_path, test_images_path;
if(!parseArguments(argc, argv, parser,cascade_path,eyes_cascade_path,images_path, annotations_path, test_images_path))
return -1;
//! [instance_creation]
/*create the facemark instance*/
FacemarkAAM::Params params;
params.scales.push_back(2.0);
params.scales.push_back(4.0);
params.model_filename = "AAM.yaml";
Ptr<FacemarkAAM> facemark = FacemarkAAM::create(params);
//! [instance_creation]
//! [load_dataset]
/*Loads the dataset*/
std::vector<String> images_train;
std::vector<String> landmarks_train;
loadDatasetList(images_path,annotations_path,images_train,landmarks_train);
//! [load_dataset]
//! [add_samples]
Mat image;
std::vector<Point2f> facial_points;
for(size_t i=0;i<images_train.size();i++){
image = imread(images_train[i].c_str());
loadFacePoints(landmarks_train[i],facial_points);
facemark->addTrainingSample(image, facial_points);
}
//! [add_samples]
//! [training]
/* trained model will be saved to AAM.yml */
facemark->training();
//! [training]
//! [load_test_images]
/*test using some images*/
String testFiles(images_path), testPts(annotations_path);
if(!test_images_path.empty()){
testFiles = test_images_path;
testPts = test_images_path; //unused
}
std::vector<String> images;
std::vector<String> facePoints;
loadDatasetList(testFiles, testPts, images, facePoints);
//! [load_test_images]
//! [trainsformation_variables]
// Per-face similarity transform filled in by getInitialFitting().
float scale ;
Point2f T;
Mat R;
//! [trainsformation_variables]
//! [base_shape]
// Base shape of the freshly trained model, needed for initialization.
FacemarkAAM::Data data;
facemark->getData(&data);
std::vector<Point2f> s0 = data.s0;
//! [base_shape]
//! [fitting]
/*fitting process*/
std::vector<Rect> faces;
//! [load_cascade_models]
CascadeClassifier face_cascade(cascade_path);
CascadeClassifier eyes_cascade(eyes_cascade_path);
//! [load_cascade_models]
for(int i=0;i<(int)images.size();i++){
printf("image #%i ", i);
//! [detect_face]
image = imread(images[i]);
myDetector(image, faces, &face_cascade);
//! [detect_face]
if(faces.size()>0){
//! [get_initialization]
// Keep only faces whose eyes were found; one Config per kept face.
std::vector<FacemarkAAM::Config> conf;
std::vector<Rect> faces_eyes;
for(unsigned j=0;j<faces.size();j++){
if(getInitialFitting(image,faces[j],s0,eyes_cascade, R,T,scale)){
conf.push_back(FacemarkAAM::Config(R,T,scale,(int)params.scales.size()-1));
faces_eyes.push_back(faces[j]);
}
}
//! [get_initialization]
//! [fitting_process]
if(conf.size()>0){
printf(" - face with eyes found %i ", (int)conf.size());
std::vector<std::vector<Point2f> > landmarks;
double newtime = (double)getTickCount();
facemark->fit(image, faces_eyes, landmarks, (void*)&conf);
double fittime = ((getTickCount() - newtime)/getTickFrequency());
for(unsigned j=0;j<landmarks.size();j++){
drawFacemarks(image, landmarks[j],Scalar(0,255,0));
}
printf("%f ms\n",fittime*1000);
imshow("fitting", image);
waitKey(0);
}else{
printf("initialization cannot be computed - skipping\n");
}
//! [fitting_process]
}
} //for
//! [fitting]
}
// Face detector callback used by the AAM demo: normalizes the input to an
// equalized single-channel image, runs the cascade classifier, and copies
// the detected rectangles into `faces`. Always reports success.
bool myDetector(InputArray image, OutputArray faces, CascadeClassifier *face_cascade)
{
    // Convert to grayscale unless the input already is single-channel.
    Mat grayscale;
    if (image.channels() == 1)
        grayscale = image.getMat().clone();
    else
        cvtColor(image, grayscale, COLOR_BGR2GRAY);

    // Boost contrast before detection.
    equalizeHist(grayscale, grayscale);

    // Run the cascade and hand the rectangles back through OutputArray.
    std::vector<Rect> detections;
    face_cascade->detectMultiScale(grayscale, detections, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
    Mat(detections).copyTo(faces);
    return true;
}
/**
 * @brief Estimate an initial similarity transform for AAM fitting from a
 *        detected face and its two eyes.
 *
 * @param image        input image; eyes are searched inside `face`
 * @param face         face rectangle produced by the face detector
 * @param s0           base shape of the trained AAM model
 * @param eyes_cascade cascade classifier used to locate the eyes
 * @param R            output 2x2 rotation matrix (CV_32F)
 * @param Trans        output translation; when eye detection fails on a valid
 *                     face rectangle it is set to the face center instead
 * @param scale        output scale factor
 * @return true only when exactly two eyes were found and the transform was
 *         computed.
 *
 * Changes vs. the previous revision: the degenerate-rectangle guard now runs
 * before s0 is indexed; the fragile unparenthesized TO_DEGREE macro was
 * replaced by the inline radian->degree conversion (same arithmetic order,
 * bit-identical result); dead locals (T, eye, base_shape2, the `found` flag
 * and an unused recomputed eye center) were removed.
 *
 * NOTE(review): landmark indices 36/39 and 42/45 are assumed to be the
 * eye-corner points of a 68-point annotation scheme - confirm the dataset.
 */
bool getInitialFitting(Mat image, Rect face, std::vector<Point2f> s0 ,CascadeClassifier eyes_cascade, Mat & R, Point2f & Trans, float & scale){
    // Reject degenerate detections before touching the base shape.
    if(face.width==0 || face.height==0) return false;

    // Base shape centered on the image; model eye centers e1/e2 come from
    // the eye-corner landmarks.
    std::vector<Point2f> base = Mat(Mat(s0)+Scalar(image.cols/2,image.rows/2)).reshape(2);
    Point2f e1 = Point2f((float)((base[39].x+base[36].x)/2.0),(float)((base[39].y+base[36].y)/2.0)); //eye1
    Point2f e2 = Point2f((float)((base[45].x+base[42].x)/2.0),(float)((base[45].y+base[42].y)/2.0)); //eye2

    //-- In each face, detect eyes
    Mat faceROI = image( face);
    std::vector<Rect> eyes;
    eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, CASCADE_SCALE_IMAGE, Size(20, 20) );

    if(eyes.size()!=2){
        // Initialization failed: report the face center as a fallback.
        Trans = Point2f( (float)(face.x + face.width*0.5),(float)(face.y + face.height*0.5));
        return false;
    }

    // Detected eye centers in image coordinates.
    Point2f c1( (float)(face.x + eyes[0].x + eyes[0].width*0.5), (float)(face.y + eyes[0].y + eyes[0].height*0.5));
    Point2f c2( (float)(face.x + eyes[1].x + eyes[1].width*0.5), (float)(face.y + eyes[1].y + eyes[1].height*0.5));

    // Pivot on the left-most eye; a0 = eye-line angle in the image.
    Point2f pivot;
    double a0,a1;
    if(c1.x<c2.x){
        pivot = c1;
        a0 = atan2(c2.y-c1.y, c2.x-c1.x);
    }else{
        pivot = c2;
        a0 = atan2(c1.y-c2.y, c1.x-c2.x);
    }

    // Scale = detected eye distance / model eye distance.
    scale = (float)(norm(Mat(c1)-Mat(c2))/norm(Mat(e1)-Mat(e2)));

    // a1 = eye-line angle of the scaled base shape.
    std::vector<Point2f> mybase = Mat(Mat(s0)*scale).reshape(2);
    Point2f ey1 = Point2f((float)((mybase[39].x+mybase[36].x)/2.0),(float)((mybase[39].y+mybase[36].y)/2.0));
    Point2f ey2 = Point2f((float)((mybase[45].x+mybase[42].x)/2.0),(float)((mybase[45].y+mybase[42].y)/2.0));
    a1 = atan2(ey2.y-ey1.y, ey2.x-ey1.x);

    // Rotation aligning the model eye-line with the detected one
    // (angle converted from radians to degrees inline).
    Mat rot = getRotationMatrix2D(Point2f(0,0), (a1-a0)*180.0/3.14159265, 1.0);
    rot(Rect(0,0,2,2)).convertTo(R, CV_32F);

    // Translation that moves the transformed model eye onto the pivot eye.
    std::vector<Point2f> base_shape = Mat(Mat(R*scale*Mat(Mat(s0).reshape(1)).t()).t()).reshape(2);
    ey1 = Point2f((float)((base_shape[39].x+base_shape[36].x)/2.0),(float)((base_shape[39].y+base_shape[36].y)/2.0));
    Trans = Point2f(pivot.x-ey1.x,pivot.y-ey1.y);
    return true;
}
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
String & cascade,
String & model,
String & images,
String & annotations,
String & test_images
){
const String keys =
"{ @f face-cascade | | (required) path to the cascade model file for the face detector }"
"{ @e eyes-cascade | | (required) path to the cascade model file for the eyes detector }"
......@@ -315,4 +287,4 @@ Mentor: Delia Passalacqua
return false;
}
return true;
}
}
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
......@@ -63,155 +34,150 @@ Mentor: Delia Passalacqua
* example of the dataset is available at https://ibug.doc.ic.ac.uk/download/annotations/ibug.zip
*--------------------------------------------------*/
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/face.hpp"
using namespace std;
using namespace cv;
using namespace cv::face;
CascadeClassifier face_cascade;
bool myDetector( InputArray image, OutputArray roi, void * config=0 );
bool parseArguments(int argc, char** argv, CommandLineParser & , String & cascade,
String & model, String & images, String & annotations, String & testImages
);
// LBF demo entry point: trains a FacemarkLBF model on the supplied dataset,
// then detects faces on the test images, fits landmarks and displays them.
// Returns -1 on bad arguments; otherwise main's implicit return 0 applies.
int main(int argc, char** argv)
{
CommandLineParser parser(argc, argv,"");
String cascade_path,model_path,images_path, annotations_path, test_images_path;
if(!parseArguments(argc, argv, parser,cascade_path,model_path,images_path, annotations_path, test_images_path))
return -1;
/*create the facemark instance*/
FacemarkLBF::Params params;
params.model_filename = model_path;
params.cascade_face = cascade_path;
Ptr<Facemark> facemark = FacemarkLBF::create(params);
// Uses the global face_cascade shared with myDetector().
face_cascade.load(params.cascade_face.c_str());
facemark->setFaceDetector(myDetector);
/*Loads the dataset*/
std::vector<String> images_train;
std::vector<String> landmarks_train;
loadDatasetList(images_path,annotations_path,images_train,landmarks_train);
Mat image;
std::vector<Point2f> facial_points;
for(size_t i=0;i<images_train.size();i++){
printf("%i/%i :: %s\n", (int)(i+1), (int)images_train.size(),images_train[i].c_str());
image = imread(images_train[i].c_str());
loadFacePoints(landmarks_train[i],facial_points);
facemark->addTrainingSample(image, facial_points);
}
/*train the Algorithm*/
facemark->training();
/*test using some images*/
String testFiles(images_path), testPts(annotations_path);
if(!test_images_path.empty()){
testFiles = test_images_path;
testPts = test_images_path; //unused
}
std::vector<String> images;
std::vector<String> facePoints;
loadDatasetList(testFiles, testPts, images, facePoints);
std::vector<Rect> rects;
// NOTE(review): 'cc' is constructed but never used afterwards.
CascadeClassifier cc(params.cascade_face.c_str());
for(size_t i=0;i<images.size();i++){
std::vector<std::vector<Point2f> > landmarks;
cout<<images[i];
Mat img = imread(images[i]);
facemark->getFaces(img, rects);
facemark->fit(img, rects, landmarks);
// NOTE(review): indexes landmarks[j] by rects' size — assumes fit()
// produced one landmark set per detected face; confirm the contract.
for(size_t j=0;j<rects.size();j++){
drawFacemarks(img, landmarks[j], Scalar(0,0,255));
rectangle(img, rects[j], Scalar(255,0,255));
}
if(rects.size()>0){
cout<<endl;
imshow("result", img);
waitKey(0);
}else{
cout<<"face not found"<<endl;
}
}
}
// Face detector callback: equalized-grayscale preprocessing followed by a
// cascade detection using the file-scope face_cascade; results go into the
// caller's vector<Rect> behind the OutputArray.
// NOTE(review): the cast of roi.getObj() assumes the caller really passed a
// std::vector<Rect>; CV_BGR2GRAY / CV_HAAR_SCALE_IMAGE are the deprecated
// C-API constants — the updated copy later in the file fixes both.
bool myDetector( InputArray image, OutputArray roi, void * config ){
Mat gray;
std::vector<Rect> & faces = *(std::vector<Rect>*) roi.getObj();
faces.clear();
// config is unused; this branch only silences the unused-parameter warning.
if(config!=0){
//do nothing
}
if(image.channels()>1){
cvtColor(image,gray,CV_BGR2GRAY);
}else{
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
face_cascade.detectMultiScale( gray, faces, 1.4, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
return true;
}
// Parse and validate the LBF demo's command-line arguments, filling the
// output parameters. Returns false (after printing usage) when help was
// requested or a required argument is missing.
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
String & cascade,
String & model,
String & images,
String & annotations,
String & test_images
){
const String keys =
"{ @c cascade | | (required) path to the face cascade xml file fo the face detector }"
"{ @i images | | (required) path of a text file contains the list of paths to all training images}"
"{ @a annotations | | (required) Path of a text file contains the list of paths to all annotations files}"
"{ @m model | | (required) path to save the trained model }"
"{ t test-images | | Path of a text file contains the list of paths to the test images}"
"{ help h usage ? | | facemark_demo_lbf -cascade -images -annotations -model [-t] \n"
" example: facemark_demo_lbf ../face_cascade.xml ../images_train.txt ../points_train.txt ../lbf.model}"
;
// Rebuild the caller's parser with the real key specification.
parser = CommandLineParser(argc, argv,keys);
parser.about("hello");
if (parser.has("help")){
parser.printMessage();
return false;
}
cascade = String(parser.get<String>("cascade"));
model = String(parser.get<string>("model"));
images = String(parser.get<string>("images"));
annotations = String(parser.get<string>("annotations"));
test_images = String(parser.get<string>("t"));
// Echo the parsed values for the user.
cout<<"cascade : "<<cascade.c_str()<<endl;
cout<<"model : "<<model.c_str()<<endl;
cout<<"images : "<<images.c_str()<<endl;
cout<<"annotations : "<<annotations.c_str()<<endl;
if(cascade.empty() || model.empty() || images.empty() || annotations.empty()){
std::cerr << "one or more required arguments are not found" << '\n';
parser.printMessage();
return false;
}
return true;
}
#include <stdio.h>
#include <fstream>
#include <sstream>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/face.hpp"
using namespace std;
using namespace cv;
using namespace cv::face;
static bool myDetector( InputArray image, OutputArray roi, CascadeClassifier *face_detector);
static bool parseArguments(int argc, char** argv, CommandLineParser & , String & cascade,
String & model, String & images, String & annotations, String & testImages
);
// LBF demo entry point: trains a FacemarkLBF model on the supplied dataset,
// then detects faces on the test images, fits landmarks and displays them.
// Returns -1 on bad arguments; otherwise main's implicit return 0 applies.
int main(int argc, char** argv)
{
CommandLineParser parser(argc, argv,"");
String cascade_path,model_path,images_path, annotations_path, test_images_path;
if(!parseArguments(argc, argv, parser,cascade_path,model_path,images_path, annotations_path, test_images_path))
return -1;
/*create the facemark instance*/
FacemarkLBF::Params params;
params.model_filename = model_path;
params.cascade_face = cascade_path;
Ptr<Facemark> facemark = FacemarkLBF::create(params);
// The cascade is passed to the detector callback as user data; it must
// outlive every getFaces() call below (it does: both live in main's scope).
CascadeClassifier face_cascade;
face_cascade.load(params.cascade_face.c_str());
facemark->setFaceDetector((FN_FaceDetector)myDetector, &face_cascade);
/*Loads the dataset*/
std::vector<String> images_train;
std::vector<String> landmarks_train;
loadDatasetList(images_path,annotations_path,images_train,landmarks_train);
Mat image;
std::vector<Point2f> facial_points;
for(size_t i=0;i<images_train.size();i++){
printf("%i/%i :: %s\n", (int)(i+1), (int)images_train.size(),images_train[i].c_str());
image = imread(images_train[i].c_str());
loadFacePoints(landmarks_train[i],facial_points);
facemark->addTrainingSample(image, facial_points);
}
/*train the Algorithm*/
facemark->training();
/*test using some images*/
String testFiles(images_path), testPts(annotations_path);
if(!test_images_path.empty()){
testFiles = test_images_path;
testPts = test_images_path; //unused
}
std::vector<String> images;
std::vector<String> facePoints;
loadDatasetList(testFiles, testPts, images, facePoints);
std::vector<Rect> rects;
// NOTE(review): 'cc' is constructed but never used afterwards.
CascadeClassifier cc(params.cascade_face.c_str());
for(size_t i=0;i<images.size();i++){
std::vector<std::vector<Point2f> > landmarks;
cout<<images[i];
Mat img = imread(images[i]);
facemark->getFaces(img, rects);
facemark->fit(img, rects, landmarks);
// NOTE(review): indexes landmarks[j] by rects' size — assumes fit()
// produced one landmark set per detected face; confirm the contract.
for(size_t j=0;j<rects.size();j++){
drawFacemarks(img, landmarks[j], Scalar(0,0,255));
rectangle(img, rects[j], Scalar(255,0,255));
}
if(rects.size()>0){
cout<<endl;
imshow("result", img);
waitKey(0);
}else{
cout<<"face not found"<<endl;
}
}
}
/*
 * Face detector callback handed to the facemark instance.
 * Builds a single-channel, histogram-equalized copy of the input, runs the
 * supplied cascade classifier on it and writes the detected rectangles into
 * the output array. Always reports success.
 */
bool myDetector(InputArray image, OutputArray faces, CascadeClassifier *face_cascade)
{
    // normalise the input to equalized grayscale
    Mat work;
    if (image.channels() == 1)
        work = image.getMat().clone();
    else
        cvtColor(image, work, COLOR_BGR2GRAY);
    equalizeHist(work, work);

    // run the cascade and hand the rectangles back through the OutputArray
    std::vector<Rect> detections;
    face_cascade->detectMultiScale(work, detections, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
    Mat(detections).copyTo(faces);
    return true;
}
/*
 * Parse and validate the command line arguments of the training demo.
 *
 * Outputs (by reference):
 *   cascade     - face cascade xml path (required)
 *   model       - output path of the trained model (required)
 *   images      - text file listing the training images (required)
 *   annotations - text file listing the annotation files (required)
 *   test_images - optional text file listing the test images
 *
 * Returns false when help was requested or a required argument is missing.
 * (Fixes the "fo the face detector" typo in the help text.)
 */
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
    String & cascade,
    String & model,
    String & images,
    String & annotations,
    String & test_images
){
    const String keys =
        "{ @c cascade | | (required) path to the face cascade xml file for the face detector }"
        "{ @i images | | (required) path of a text file contains the list of paths to all training images}"
        "{ @a annotations | | (required) Path of a text file contains the list of paths to all annotations files}"
        "{ @m model | | (required) path to save the trained model }"
        "{ t test-images | | Path of a text file contains the list of paths to the test images}"
        "{ help h usage ? | | facemark_demo_lbf -cascade -images -annotations -model [-t] \n"
        " example: facemark_demo_lbf ../face_cascade.xml ../images_train.txt ../points_train.txt ../lbf.model}"
    ;
    parser = CommandLineParser(argc, argv,keys);
    parser.about("hello");

    if (parser.has("help")){
        parser.printMessage();
        return false;
    }

    cascade = String(parser.get<String>("cascade"));
    model = String(parser.get<string>("model"));
    images = String(parser.get<string>("images"));
    annotations = String(parser.get<string>("annotations"));
    test_images = String(parser.get<string>("t"));

    // echo the parsed values so the user can spot a misplaced argument
    cout<<"cascade : "<<cascade.c_str()<<endl;
    cout<<"model : "<<model.c_str()<<endl;
    cout<<"images : "<<images.c_str()<<endl;
    cout<<"annotations : "<<annotations.c_str()<<endl;

    if(cascade.empty() || model.empty() || images.empty() || annotations.empty()){
        std::cerr << "one or more required arguments are not found" << '\n';
        parser.printMessage();
        return false;
    }
    return true;
}
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -47,7 +18,7 @@ Mentor: Delia Passalacqua
#include <stdio.h>
#include <ctime>
#include <iostream>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
......@@ -57,9 +28,8 @@ using namespace std;
using namespace cv;
using namespace cv::face;
CascadeClassifier face_cascade;
bool myDetector( InputArray image, OutputArray ROIs, void * config = 0);
bool parseArguments(int argc, char** argv, CommandLineParser & parser,
static bool myDetector(InputArray image, OutputArray ROIs, CascadeClassifier *face_cascade);
static bool parseArguments(int argc, char** argv, CommandLineParser & parser,
String & cascade, String & model,String & video);
int main(int argc, char** argv ){
......@@ -68,6 +38,7 @@ int main(int argc, char** argv ){
if(!parseArguments(argc, argv, parser,cascade_path,model_path,video_path))
return -1;
CascadeClassifier face_cascade;
face_cascade.load(cascade_path);
FacemarkLBF::Params params;
......@@ -75,7 +46,7 @@ int main(int argc, char** argv ){
params.cascade_face = cascade_path;
Ptr<Facemark> facemark = FacemarkLBF::create(params);
facemark->setFaceDetector(myDetector);
facemark->setFaceDetector((FN_FaceDetector)myDetector, &face_cascade);
facemark->loadModel(params.model_filename.c_str());
VideoCapture capture(video_path);
......@@ -144,23 +115,20 @@ int main(int argc, char** argv ){
waitKey(0); // key press to close window
}
bool myDetector( InputArray image, OutputArray ROIs, void * config ){
bool myDetector(InputArray image, OutputArray faces, CascadeClassifier *face_cascade)
{
Mat gray;
std::vector<Rect> & faces = *(std::vector<Rect>*) ROIs.getObj();
faces.clear();
if(config!=0){
//do nothing
}
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
}else{
if (image.channels() > 1)
cvtColor(image, gray, COLOR_BGR2GRAY);
else
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
face_cascade.detectMultiScale( gray, faces, 1.4, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
equalizeHist(gray, gray);
std::vector<Rect> faces_;
face_cascade->detectMultiScale(gray, faces_, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
Mat(faces_).copyTo(faces);
return true;
}
......@@ -169,34 +137,34 @@ bool parseArguments(int argc, char** argv, CommandLineParser & parser,
String & model,
String & video
){
const String keys =
"{ @c cascade | | (required) path to the cascade model file for the face detector }"
"{ @m model | | (required) path to the trained model }"
"{ @v video | | (required) path input video}"
"{ help h usage ? | | facemark_lbf_fitting -cascade -model -video [-t]\n"
" example: facemark_lbf_fitting ../face_cascade.xml ../LBF.model ../video.mp4}"
;
parser = CommandLineParser(argc, argv,keys);
parser.about("hello");
if (parser.has("help")){
parser.printMessage();
return false;
}
cascade = String(parser.get<String>("cascade"));
model = String(parser.get<string>("model"));
video = String(parser.get<string>("video"));
if(cascade.empty() || model.empty() || video.empty() ){
std::cerr << "one or more required arguments are not found" << '\n';
cout<<"cascade : "<<cascade.c_str()<<endl;
cout<<"model : "<<model.c_str()<<endl;
cout<<"video : "<<video.c_str()<<endl;
parser.printMessage();
return false;
}
return true;
const String keys =
"{ @c cascade | | (required) path to the cascade model file for the face detector }"
"{ @m model | | (required) path to the trained model }"
"{ @v video | | (required) path input video}"
"{ help h usage ? | | facemark_lbf_fitting -cascade -model -video [-t]\n"
" example: facemark_lbf_fitting ../face_cascade.xml ../LBF.model ../video.mp4}"
;
parser = CommandLineParser(argc, argv,keys);
parser.about("hello");
if (parser.has("help")){
parser.printMessage();
return false;
}
cascade = String(parser.get<String>("cascade"));
model = String(parser.get<string>("model"));
video = String(parser.get<string>("video"));
if(cascade.empty() || model.empty() || video.empty() ){
std::cerr << "one or more required arguments are not found" << '\n';
cout<<"cascade : "<<cascade.c_str()<<endl;
cout<<"model : "<<model.c_str()<<endl;
cout<<"video : "<<video.c_str()<<endl;
parser.printMessage();
return false;
}
return true;
}
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
*/
#include "precomp.hpp"
#include "opencv2/face.hpp"
#include "opencv2/core.hpp"
#include "precomp.hpp"
/*dataset parser*/
#include <fstream>
......@@ -46,192 +22,194 @@ Mentor: Delia Passalacqua
namespace cv {
namespace face {
/*
 * Face-detection parameter bundle: the cascade path plus the
 * detectMultiScale() settings used by getFaces().
 */
CParams::CParams(String s, double sf, int minN, Size minSz, Size maxSz) :
    cascade(s),
    scaleFactor(sf),
    minNeighbors(minN),
    minSize(minSz),
    maxSize(maxSz)
{
}
bool getFaces(InputArray image, OutputArray faces, void * parameters){
Mat gray;
std::vector<Rect> roi;
using namespace std;
if(parameters!=0){
CParams * params = (CParams *)parameters;
cvtColor( image.getMat(), gray, CV_BGR2GRAY );
equalizeHist( gray, gray );
CParams::CParams(String s, double sf, int minN, Size minSz, Size maxSz){
cascade = s;
scaleFactor = sf;
minNeighbors = minN;
minSize = minSz;
maxSize = maxSz;
CascadeClassifier face_cascade;
if( !face_cascade.load( params->cascade ) ){ printf("--(!)Error loading face_cascade\n"); return false; };
face_cascade.detectMultiScale( gray, roi, params->scaleFactor, params->minNeighbors, 0|CV_HAAR_SCALE_IMAGE, params->minSize, params->maxSize);
if (!face_cascade.load(cascade))
{
CV_Error_(Error::StsBadArg, ("Error loading face_cascade: %s", cascade.c_str()));
}
}
bool getFaces(InputArray image, OutputArray faces, CParams* params)
{
CV_Assert(params);
Mat gray;
std::vector<Rect> roi;
cvtColor(image.getMat(), gray, COLOR_BGR2GRAY);
equalizeHist(gray, gray);
params->face_cascade.detectMultiScale( gray, roi, params->scaleFactor, params->minNeighbors, CASCADE_SCALE_IMAGE, params->minSize, params->maxSize);
Mat(roi).copyTo(faces);
return true;
}
bool loadDatasetList(String imageList, String groundTruth, std::vector<String> & images, std::vector<String> & landmarks){
std::string line;
/*clear the output containers*/
images.clear();
landmarks.clear();
/*open the files*/
std::ifstream infile;
infile.open(imageList.c_str(), std::ios::in);
std::ifstream ss_gt;
ss_gt.open(groundTruth.c_str(), std::ios::in);
if ((!infile) || !(ss_gt)) {
printf("No valid input file was given, please check the given filename.\n");
return false;
}
Mat(roi).copyTo(faces);
return true;
}else{
return false;
}
/*load the images path*/
while (getline (infile, line)){
images.push_back(line);
}
/*load the points*/
while (getline (ss_gt, line)){
landmarks.push_back(line);
}
bool loadDatasetList(String imageList, String groundTruth, std::vector<String> & images, std::vector<String> & landmarks){
std::string line;
/*clear the output containers*/
images.clear();
landmarks.clear();
/*open the files*/
std::ifstream infile;
infile.open(imageList.c_str(), std::ios::in);
std::ifstream ss_gt;
ss_gt.open(groundTruth.c_str(), std::ios::in);
if ((!infile) || !(ss_gt)) {
printf("No valid input file was given, please check the given filename.\n");
return false;
}
return true;
}
/*load the images path*/
while (getline (infile, line)){
images.push_back(line);
}
bool loadTrainingData(String filename, std::vector<String> & images, OutputArray _facePoints, char delim, float offset){
std::string line;
std::string item;
std::vector<Point2f> pts;
std::vector<float> raw;
/*load the points*/
while (getline (ss_gt, line)){
landmarks.push_back(line);
}
// FIXIT
std::vector<std::vector<Point2f> > & facePoints =
*(std::vector<std::vector<Point2f> >*) _facePoints.getObj();
return true;
std::ifstream infile;
infile.open(filename.c_str(), std::ios::in);
if (!infile) {
CV_Error_(Error::StsBadArg, ("No valid input file was given, please check the given filename: %s", filename.c_str()));
}
bool loadTrainingData(String filename, std::vector<String> & images, OutputArray _facePoints, char delim, float offset){
std::string line;
std::string item;
std::vector<Point2f> pts;
std::vector<float> raw;
/*clear the output containers*/
images.clear();
facePoints.clear();
std::vector<std::vector<Point2f> > & facePoints =
*(std::vector<std::vector<Point2f> >*) _facePoints.getObj();
/*the main loading process*/
while (getline (infile, line)){
std::istringstream ss(line); // string stream for the current line
/*pop the image path*/
getline (ss, item, delim);
images.push_back(item);
std::ifstream infile;
infile.open(filename.c_str(), std::ios::in);
if (!infile) {
std::string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
/*load all numbers*/
raw.clear();
while (getline (ss, item, delim)){
raw.push_back((float)atof(item.c_str()));
}
/*clear the output containers*/
images.clear();
facePoints.clear();
/*the main loading process*/
while (getline (infile, line)){
std::istringstream ss(line); // string stream for the current line
/*pop the image path*/
getline (ss, item, delim);
images.push_back(item);
/*load all numbers*/
raw.clear();
while (getline (ss, item, delim)){
raw.push_back((float)atof(item.c_str()));
}
/*convert to opencv points*/
pts.clear();
for(unsigned i = 0;i< raw.size();i+=2){
pts.push_back(Point2f(raw[i]+offset,raw[i+1]+offset));
}
facePoints.push_back(pts);
} // main loading process
return true;
}
/*convert to opencv points*/
pts.clear();
for(unsigned i = 0;i< raw.size();i+=2){
pts.push_back(Point2f(raw[i]+offset,raw[i+1]+offset));
}
facePoints.push_back(pts);
} // main loading process
bool loadTrainingData(String imageList, String groundTruth, std::vector<String> & images, OutputArray _facePoints, float offset){
std::string line;
std::vector<Point2f> facePts;
return true;
}
std::vector<std::vector<Point2f> > & facePoints =
*(std::vector<std::vector<Point2f> >*) _facePoints.getObj();
bool loadTrainingData(String imageList, String groundTruth, std::vector<String> & images, OutputArray _facePoints, float offset){
std::string line;
std::vector<Point2f> facePts;
/*clear the output containers*/
images.clear();
facePoints.clear();
// FIXIT
std::vector<std::vector<Point2f> > & facePoints =
*(std::vector<std::vector<Point2f> >*) _facePoints.getObj();
/*load the images path*/
std::ifstream infile;
infile.open(imageList.c_str(), std::ios::in);
if (!infile) {
std::string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
/*clear the output containers*/
images.clear();
facePoints.clear();
while (getline (infile, line)){
images.push_back(line);
}
/*load the images path*/
std::ifstream infile;
infile.open(imageList.c_str(), std::ios::in);
if (!infile) {
CV_Error_(Error::StsBadArg, ("No valid input file was given, please check the given filename: %s", imageList.c_str()));
}
/*load the points*/
std::ifstream ss_gt(groundTruth.c_str());
while (getline (ss_gt, line)){
facePts.clear();
loadFacePoints(line, facePts, offset);
facePoints.push_back(facePts);
}
while (getline (infile, line)){
images.push_back(line);
}
return true;
/*load the points*/
std::ifstream ss_gt(groundTruth.c_str());
while (getline (ss_gt, line)){
facePts.clear();
loadFacePoints(line, facePts, offset);
facePoints.push_back(facePts);
}
bool loadFacePoints(String filename, OutputArray points, float offset){
std::vector<Point2f> & pts = *(std::vector<Point2f> *)points.getObj();
return true;
}
std::string line, item;
std::ifstream infile(filename.c_str());
bool loadFacePoints(String filename, OutputArray points, float offset){
vector<Point2f> pts;
/*pop the version*/
std::getline(infile, line);
CV_Assert(line.compare(0,7,"version")==0);
std::string line, item;
std::ifstream infile(filename.c_str());
/*pop the number of points*/
std::getline(infile, line);
CV_Assert(line.compare(0,8,"n_points")==0);
/*pop the version*/
std::getline(infile, line);
CV_Assert(line.compare(0,7,"version")==0);
/*get the number of points*/
std::string item_npts;
int npts;
/*pop the number of points*/
std::getline(infile, line);
CV_Assert(line.compare(0,8,"n_points")==0);
std::istringstream linestream(line);
linestream>>item_npts>>npts;
/*get the number of points*/
std::string item_npts;
int npts;
/*pop out '{' character*/
std::getline(infile, line);
std::istringstream linestream(line);
linestream>>item_npts>>npts;
/*main process*/
int cnt = 0;
std::string x, y;
pts.clear();
while (std::getline(infile, line) && cnt<npts )
{
cnt+=1;
/*pop out '{' character*/
std::getline(infile, line);
std::istringstream ss(line);
ss>>x>>y;
pts.push_back(Point2f((float)atof(x.c_str())+offset,(float)atof(y.c_str())+offset));
/*main process*/
int cnt = 0;
std::string x, y;
pts.clear();
while (std::getline(infile, line) && cnt<npts )
{
cnt+=1;
}
std::istringstream ss(line);
ss>>x>>y;
pts.push_back(Point2f((float)atof(x.c_str())+offset,(float)atof(y.c_str())+offset));
return true;
}
void drawFacemarks(InputOutputArray image, InputArray points, Scalar color){
Mat img = image.getMat();
std::vector<Point2f> pts = *(std::vector<Point2f>*)points.getObj();
for(size_t i=0;i<pts.size();i++){
circle(img, pts[i],3, color,-1);
}
} //drawPoints
Mat(pts).copyTo(points);
return true;
}
/*
 * Draw every landmark point as a small filled circle (radius 3) of the
 * requested color directly onto the image.
 */
void drawFacemarks(InputOutputArray image, InputArray points, Scalar color){
    Mat canvas = image.getMat();
    vector<Point2f> landmarks = points.getMat();
    for (vector<Point2f>::const_iterator it = landmarks.begin(); it != landmarks.end(); ++it)
        circle(canvas, *it, 3, color, -1);
}
} /* namespace face */
} /* namespace cv */
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
*/
#include "opencv2/face.hpp"
#include "precomp.hpp"
#include "opencv2/face.hpp"
namespace cv {
namespace face {
/*
* Parameters
*/
/*
 * Default AAM training parameters: shape/texture PCA sizes, iteration
 * count, verbosity and a single 1.0 pyramid scale.
 */
FacemarkAAM::Params::Params() :
    model_filename(""),
    m(200),
    n(10),
    n_iter(50),
    verbose(true),
    save_model(true),
    max_m(550),
    max_n(136),
    texture_max_m(145)
{
    scales.push_back(1.0);
}
/*
* Parameters
*/
FacemarkAAM::Params::Params(){
model_filename = "";
m = 200;
n = 10;
n_iter = 50;
verbose = true;
save_model = true;
scales.push_back(1.0);
max_m = 550;
max_n = 136;
texture_max_m = 145;
}
/*
 * Optional fitting configuration: rotation (deep-copied), translation,
 * scaling factor and the index of the model scale to use.
 */
FacemarkAAM::Config::Config(Mat rot, Point2f trans, float scaling,int scale_id) :
    R(rot.clone()),
    t(trans),
    scale(scaling),
    model_scale_idx(scale_id)
{
}
/*
 * Load the AAM parameters from a FileNode.
 * The struct is first reset to its defaults so missing entries keep their
 * default value. Fixes a copy/paste bug where every entry was read into
 * the 'm' field.
 */
void FacemarkAAM::Params::read( const cv::FileNode& fn ){
    *this = FacemarkAAM::Params();
    if (!fn["model_filename"].empty()) fn["model_filename"] >> model_filename;
    if (!fn["m"].empty()) fn["m"] >> m;
    if (!fn["n"].empty()) fn["n"] >> n;
    if (!fn["n_iter"].empty()) fn["n_iter"] >> n_iter;
    if (!fn["verbose"].empty()) fn["verbose"] >> verbose;
    if (!fn["max_m"].empty()) fn["max_m"] >> max_m;
    if (!fn["max_n"].empty()) fn["max_n"] >> max_n;
    if (!fn["texture_max_m"].empty()) fn["texture_max_m"] >> texture_max_m;
    if (!fn["scales"].empty()) fn["scales"] >> scales;
}
/*
 * Serialize the AAM parameters to a FileStorage.
 * Fixes a copy/paste bug where the "max_m", "max_n", "texture_max_m" and
 * "scales" keys all wrote the 'verbose' flag instead of their own member.
 */
void FacemarkAAM::Params::write( cv::FileStorage& fs ) const{
    fs << "model_filename" << model_filename;
    fs << "m" << m;
    fs << "n" << n;
    fs << "n_iter" << n_iter;
    fs << "verbose" << verbose;
    fs << "max_m" << max_m;
    fs << "max_n" << max_n;
    fs << "texture_max_m" << texture_max_m;
    fs << "scales" << scales;
}
/*
 * Concrete implementation of the FacemarkAAM algorithm.
 * Public section implements the Facemark interface; protected section
 * holds the AAM training/fitting machinery and the stored training data.
 */
class FacemarkAAMImpl : public FacemarkAAM {
public:
    FacemarkAAMImpl( const FacemarkAAM::Params &parameters = FacemarkAAM::Params() );
    void read( const FileNode& /*fn*/ );
    void write( FileStorage& /*fs*/ ) const;

    void saveModel(String fs);
    void loadModel(String fs);

    // face detection is delegated to a user callback + opaque user data
    bool setFaceDetector(bool(*f)(InputArray , OutputArray, void * ), void* userData);
    bool getFaces(InputArray image, OutputArray faces);
    bool getData(void * items);

protected:
    bool fit( InputArray image, InputArray faces, InputOutputArray landmarks, void * runtime_params);//!< from many ROIs
    bool fitImpl( const Mat image, std::vector<Point2f>& landmarks,const Mat R,const Point2f T,const float scale, const int sclIdx=0 );

    bool addTrainingSample(InputArray image, InputArray landmarks);
    void training(void* parameters);

    // shape-model helpers (Procrustes alignment, PCA projection, etc.)
    Mat procrustes(std::vector<Point2f> , std::vector<Point2f> , Mat & , Scalar & , float & );
    void calcMeanShape(std::vector<std::vector<Point2f> > ,std::vector<Point2f> & );
    void procrustesAnalysis(std::vector<std::vector<Point2f> > , std::vector<std::vector<Point2f> > & , std::vector<Point2f> & );

    inline Mat linearize(Mat );
    inline Mat linearize(std::vector<Point2f> );
    void getProjection(const Mat , Mat &, int );
    void calcSimilarityEig(std::vector<Point2f> ,Mat , Mat & , Mat & );
    Mat orthonormal(Mat );

    // texture-model helpers (triangulation, masking, warping)
    void delaunay(std::vector<Point2f> , std::vector<Vec3i> & );
    Mat createMask(std::vector<Point2f> , Rect );
    Mat createTextureBase(std::vector<Point2f> , std::vector<Vec3i> , Rect , std::vector<std::vector<Point> > & );
    Mat warpImage(const Mat ,const std::vector<Point2f> ,const std::vector<Point2f> ,
                  const std::vector<Vec3i> , const Rect , const std::vector<std::vector<Point> > );
    template <class T>
    Mat getFeature(const Mat , std::vector<int> map);
    void createMaskMapping(const Mat mask, const Mat mask2, std::vector<int> & , std::vector<int> &, std::vector<int> &);

    // fitting-time warp update and Jacobian computation
    void warpUpdate(std::vector<Point2f> & shape, Mat delta, std::vector<Point2f> s0, Mat S, Mat Q, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp);
    Mat computeWarpParts(std::vector<Point2f> curr_shape,std::vector<Point2f> s0, Mat ds0, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp);
    void image_jacobian(const Mat gx, const Mat gy, const Mat Jx, const Mat Jy, Mat & G);
    void gradient(const Mat M, Mat & gx, Mat & gy);
    void createWarpJacobian(Mat S, Mat Q, std::vector<Vec3i> , Model::Texture & T, Mat & Wx_dp, Mat & Wy_dp, std::vector<std::vector<int> > & Tp);

    // training data collected via addTrainingSample()
    std::vector<Mat> images;
    std::vector<std::vector<Point2f> > facePoints;

    FacemarkAAM::Params params;
    FacemarkAAM::Model AAM;
    FN_FaceDetector faceDetector;       // user-supplied detector callback
    void* faceDetectorData;             // opaque data forwarded to the callback

private:
    bool isModelTrained;
};
FacemarkAAM::Config::Config(Mat rot, Point2f trans, float scaling,int scale_id){
R = rot.clone();
t = trans;
scale = scaling;
model_scale_idx = scale_id;
/*
* Constructor
*/
/*
 * Factory: build a FacemarkAAM instance configured with the given
 * parameters and return it through the base-class smart pointer.
 */
Ptr<FacemarkAAM> FacemarkAAM::create(const FacemarkAAM::Params &parameters){
    Ptr<FacemarkAAMImpl> impl(new FacemarkAAMImpl(parameters));
    return impl;
}
/*
 * Construct with the given parameters; no detector is registered and no
 * model is trained yet.
 */
FacemarkAAMImpl::FacemarkAAMImpl( const FacemarkAAM::Params &parameters ) :
    params( parameters ),
    faceDetector(NULL),
    faceDetectorData(NULL),
    isModelTrained(false)
{
}
// Deserialize the algorithm parameters from a FileStorage node.
void FacemarkAAMImpl::read( const cv::FileNode& fn ){
    params.read( fn );
}
// Serialize the algorithm parameters to a FileStorage.
void FacemarkAAMImpl::write( cv::FileStorage& fs ) const {
    params.write( fs );
}
// Register an external face detector callback together with an opaque
// user-data pointer that is forwarded to it on every getFaces() call.
// Always succeeds.
bool FacemarkAAMImpl::setFaceDetector(bool(*f)(InputArray , OutputArray, void *), void* userData){
    faceDetector = f;
    faceDetectorData = userData;
    return true;
}
// Run the user-supplied face detector (see setFaceDetector) on the image.
// Returns false when no detector has been registered.
bool FacemarkAAMImpl::getFaces(InputArray image, OutputArray faces)
{
    if (!faceDetector)
        return false;
    return faceDetector(image, faces, faceDetectorData);
}
// Export internal model data. 'items' must point to a FacemarkAAM::Data
// struct; only the base shape s0 is copied out. Asserts on a null pointer.
bool FacemarkAAMImpl::getData(void * items){
    CV_Assert(items);
    Data* data = (Data*)items;
    data->s0 = AAM.s0;
    return true;
}
// Store one training image and its landmark set for a later training() call.
// Always succeeds.
bool FacemarkAAMImpl::addTrainingSample(InputArray image, InputArray landmarks){
    // FIXIT: raw cast of the InputArray's backing object instead of the
    // InputArray API — works only when the caller passed a vector<Point2f>
    std::vector<Point2f> & _landmarks = *(std::vector<Point2f>*)landmarks.getObj();
    images.push_back(image.getMat());
    facePoints.push_back(_landmarks);
    return true;
}
void FacemarkAAMImpl::training(void* parameters){
if(parameters!=0){/*do nothing*/}
if (images.size()<1) {
CV_Error(Error::StsBadArg, "Training data is not provided. Consider to add using addTrainingSample() function!");
}
void FacemarkAAM::Params::read( const cv::FileNode& fn ){
*this = FacemarkAAM::Params();
if (!fn["model_filename"].empty()) fn["model_filename"] >> model_filename;
if (!fn["m"].empty()) fn["m"] >> m;
if (!fn["n"].empty()) fn["n"] >> m;
if (!fn["n_iter"].empty()) fn["n_iter"] >> m;
if (!fn["verbose"].empty()) fn["verbose"] >> m;
if (!fn["max_m"].empty()) fn["max_m"] >> m;
if (!fn["max_n"].empty()) fn["max_n"] >> m;
if (!fn["texture_max_m"].empty()) fn["texture_max_m"] >> m;
if (!fn["scales"].empty()) fn["scales"] >> m;
if(strcmp(params.model_filename.c_str(),"")==0 && params.save_model){
CV_Error(Error::StsBadArg, "The model_filename parameter should be set!");
}
void FacemarkAAM::Params::write( cv::FileStorage& fs ) const{
fs << "model_filename" << model_filename;
fs << "m" << m;
fs << "n" << n;
fs << "n_iter" << n_iter;
fs << "verbose" << verbose;
fs << "max_m" << verbose;
fs << "max_n" << verbose;
fs << "texture_max_m" << verbose;
fs << "scales" << verbose;
}
std::vector<std::vector<Point2f> > normalized;
Mat erode_kernel = getStructuringElement(MORPH_RECT, Size(3,3), Point(1,1));
Mat image;
class FacemarkAAMImpl : public FacemarkAAM {
public:
FacemarkAAMImpl( const FacemarkAAM::Params &parameters = FacemarkAAM::Params() );
void read( const FileNode& /*fn*/ );
void write( FileStorage& /*fs*/ ) const;
void saveModel(String fs);
void loadModel(String fs);
bool setFaceDetector(bool(*f)(InputArray , OutputArray, void * ));
bool getFaces( InputArray image ,OutputArray faces, void * extra_params);
bool getData(void * items);
protected:
bool fit( InputArray image, InputArray faces, InputOutputArray landmarks, void * runtime_params);//!< from many ROIs
bool fitImpl( const Mat image, std::vector<Point2f>& landmarks,const Mat R,const Point2f T,const float scale, const int sclIdx=0 );
bool addTrainingSample(InputArray image, InputArray landmarks);
void training(void* parameters);
Mat procrustes(std::vector<Point2f> , std::vector<Point2f> , Mat & , Scalar & , float & );
void calcMeanShape(std::vector<std::vector<Point2f> > ,std::vector<Point2f> & );
void procrustesAnalysis(std::vector<std::vector<Point2f> > , std::vector<std::vector<Point2f> > & , std::vector<Point2f> & );
inline Mat linearize(Mat );
inline Mat linearize(std::vector<Point2f> );
void getProjection(const Mat , Mat &, int );
void calcSimilarityEig(std::vector<Point2f> ,Mat , Mat & , Mat & );
Mat orthonormal(Mat );
void delaunay(std::vector<Point2f> , std::vector<Vec3i> & );
Mat createMask(std::vector<Point2f> , Rect );
Mat createTextureBase(std::vector<Point2f> , std::vector<Vec3i> , Rect , std::vector<std::vector<Point> > & );
Mat warpImage(const Mat ,const std::vector<Point2f> ,const std::vector<Point2f> ,
const std::vector<Vec3i> , const Rect , const std::vector<std::vector<Point> > );
template <class T>
Mat getFeature(const Mat , std::vector<int> map);
void createMaskMapping(const Mat mask, const Mat mask2, std::vector<int> & , std::vector<int> &, std::vector<int> &);
void warpUpdate(std::vector<Point2f> & shape, Mat delta, std::vector<Point2f> s0, Mat S, Mat Q, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp);
Mat computeWarpParts(std::vector<Point2f> curr_shape,std::vector<Point2f> s0, Mat ds0, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp);
void image_jacobian(const Mat gx, const Mat gy, const Mat Jx, const Mat Jy, Mat & G);
void gradient(const Mat M, Mat & gx, Mat & gy);
void createWarpJacobian(Mat S, Mat Q, std::vector<Vec3i> , Model::Texture & T, Mat & Wx_dp, Mat & Wy_dp, std::vector<std::vector<int> > & Tp);
std::vector<Mat> images;
std::vector<std::vector<Point2f> > facePoints;
FacemarkAAM::Params params;
FacemarkAAM::Model AAM;
bool(*faceDetector)(InputArray , OutputArray, void *);
bool isSetDetector;
private:
bool isModelTrained;
};
/*
* Constructor
*/
Ptr<FacemarkAAM> FacemarkAAM::create(const FacemarkAAM::Params &parameters){
return Ptr<FacemarkAAMImpl>(new FacemarkAAMImpl(parameters));
}
int param_max_m = params.max_m;//550;
int param_max_n = params.max_n;//136;
FacemarkAAMImpl::FacemarkAAMImpl( const FacemarkAAM::Params &parameters ) :
params( parameters )
{
isSetDetector =false;
isModelTrained = false;
}
AAM.scales = params.scales;
AAM.textures.resize(AAM.scales.size());
void FacemarkAAMImpl::read( const cv::FileNode& fn ){
params.read( fn );
}
/*-------------- A. Load the training data---------*/
procrustesAnalysis(facePoints, normalized,AAM.s0);
void FacemarkAAMImpl::write( cv::FileStorage& fs ) const {
params.write( fs );
/*-------------- B. Create the shape model---------*/
Mat s0_lin = linearize(AAM.s0).t() ;
// linearize all shapes data, all x and then all y for each shape
Mat M;
for(unsigned i=0;i<normalized.size();i++){
M.push_back(linearize(normalized[i]).t()-s0_lin);
}
/*
 * Register a user-supplied face detector callback.
 * The callback receives an input image, an output array for the detected
 * face regions, and an opaque user-data pointer.
 * Always returns true.
 */
bool FacemarkAAMImpl::setFaceDetector(bool(*f)(InputArray , OutputArray, void *)){
    // Mark the detector as available, then store the callback.
    isSetDetector = true;
    faceDetector  = f;
    return true;
}
/* get PCA Projection vectors */
Mat S;
getProjection(M.t(),S,param_max_n);
/* Create similarity eig*/
Mat shape_S,shape_Q;
calcSimilarityEig(AAM.s0,S,AAM.Q,AAM.S);
/* ----------C. Create the coordinate frame ------------*/
delaunay(AAM.s0,AAM.triangles);
bool FacemarkAAMImpl::getFaces( InputArray image , OutputArray roi, void * extra_params){
for(size_t scale=0; scale<AAM.scales.size();scale++){
AAM.textures[scale].max_m = params.texture_max_m;//145;
if(params.verbose) printf("Training for scale %f ...\n", AAM.scales[scale]);
Mat s0_scaled_m = Mat(AAM.s0)/AAM.scales[scale]; // scale the shape
std::vector<Point2f> s0_scaled = s0_scaled_m.reshape(2); //convert to points
if(!isSetDetector){
return false;
/*get the min and max of x and y coordinate*/
double min_x, max_x, min_y, max_y;
s0_scaled_m = s0_scaled_m.reshape(1);
Mat s0_scaled_x = s0_scaled_m.col(0);
Mat s0_scaled_y = s0_scaled_m.col(1);
minMaxIdx(s0_scaled_x, &min_x, &max_x);
minMaxIdx(s0_scaled_y, &min_y, &max_y);
std::vector<Point2f> base_shape = Mat(Mat(s0_scaled)-Scalar(min_x-2.0,min_y-2.0)).reshape(2);
AAM.textures[scale].base_shape = base_shape;
AAM.textures[scale].resolution = Rect(0,0,(int)ceil(max_x-min_x+3),(int)ceil(max_y-min_y+3));
Mat base_texture = createTextureBase(base_shape, AAM.triangles, AAM.textures[scale].resolution, AAM.textures[scale].textureIdx);
Mat mask1 = base_texture>0;
Mat mask2;
erode(mask1, mask1, erode_kernel);
erode(mask1, mask2, erode_kernel);
Mat warped;
std::vector<int> fe_map;
createMaskMapping(mask1,mask2, AAM.textures[scale].ind1, AAM.textures[scale].ind2,fe_map);//ok
/* ------------ Part D. Get textures -------------*/
Mat texture_feats, feat;
if(params.verbose) printf("(1/4) Feature extraction ...\n");
for(size_t i=0; i<images.size();i++){
if(params.verbose) printf("extract features from image #%i/%i\n", (int)(i+1), (int)images.size());
warped = warpImage(images[i],base_shape, facePoints[i], AAM.triangles, AAM.textures[scale].resolution,AAM.textures[scale].textureIdx);
feat = getFeature<uchar>(warped, AAM.textures[scale].ind1);
texture_feats.push_back(feat.t());
}
Mat T= texture_feats.t();
if(extra_params!=0){
//do nothing
}
/* -------------- E. Create the texture model -----------------*/
reduce(T,AAM.textures[scale].A0,1, REDUCE_AVG);
std::vector<Rect> faces;
faces.clear();
if(params.verbose) printf("(2/4) Compute the feature average ...\n");
Mat A0_mtx = repeat(AAM.textures[scale].A0,1,T.cols);
Mat textures_normalized = T - A0_mtx;
faceDetector(image.getMat(), faces, extra_params);
Mat(faces).copyTo(roi);
return true;
}
if(params.verbose) printf("(3/4) Projecting the features ...\n");
getProjection(textures_normalized, AAM.textures[scale].A ,param_max_m);
AAM.textures[scale].AA0 = getFeature<float>(AAM.textures[scale].A0, fe_map);
bool FacemarkAAMImpl::getData(void * items){
if(items==0){
return true;
}else{
Data * data = (Data*)items;
data->s0 = AAM.s0;
return true;
if(params.verbose) printf("(4/4) Extraction of the eroded face features ...\n");
Mat U_data, ud;
for(int i =0;i<AAM.textures[scale].A.cols;i++){
Mat c = AAM.textures[scale].A.col(i);
ud = getFeature<float>(c,fe_map);
U_data.push_back(ud.t());
}
Mat U = U_data.t();
AAM.textures[scale].AA = orthonormal(U);
} // scale
images.clear();
if(params.save_model){
if(params.verbose) printf("Saving the model\n");
saveModel(params.model_filename);
}
isModelTrained = true;
if(params.verbose) printf("Training is completed\n");
}
bool FacemarkAAMImpl::addTrainingSample(InputArray image, InputArray landmarks){
std::vector<Point2f> & _landmarks = *(std::vector<Point2f>*)landmarks.getObj();
bool FacemarkAAMImpl::fit( InputArray image, InputArray roi, InputOutputArray _landmarks, void * runtime_params)
{
std::vector<Rect> & faces = *(std::vector<Rect> *)roi.getObj();
if(faces.size()<1) return false;
images.push_back(image.getMat());
facePoints.push_back(_landmarks);
std::vector<std::vector<Point2f> > & landmarks =
*(std::vector<std::vector<Point2f> >*) _landmarks.getObj();
landmarks.resize(faces.size());
return true;
}
Mat img = image.getMat();
if(runtime_params!=0){
void FacemarkAAMImpl::training(void* parameters){
if(parameters!=0){/*do nothing*/}
if (images.size()<1) {
std::string error_message =
"Training data is not provided. Consider to add using addTrainingSample() function!";
CV_Error(CV_StsBadArg, error_message);
std::vector<Config> conf = *(std::vector<Config>*)runtime_params;
if (conf.size()!=faces.size()) {
CV_Error(Error::StsBadArg, "Number of faces and extra_parameters are different!");
}
if(strcmp(params.model_filename.c_str(),"")==0 && params.save_model){
std::string error_message = "The model_filename parameter should be set!";
CV_Error(CV_StsBadArg, error_message);
for(size_t i=0; i<conf.size();i++){
fitImpl(img, landmarks[i], conf[i].R,conf[i].t, conf[i].scale, conf[i].model_scale_idx);
}
}else{
Mat R = Mat::eye(2, 2, CV_32F);
Point2f t = Point2f((float)(img.cols/2.0),(float)(img.rows/2.0));
float scale = 1.0;
std::vector<std::vector<Point2f> > normalized;
Mat erode_kernel = getStructuringElement(MORPH_RECT, Size(3,3), Point(1,1));
Mat image;
int param_max_m = params.max_m;//550;
int param_max_n = params.max_n;//136;
AAM.scales = params.scales;
AAM.textures.resize(AAM.scales.size());
/*-------------- A. Load the training data---------*/
procrustesAnalysis(facePoints, normalized,AAM.s0);
/*-------------- B. Create the shape model---------*/
Mat s0_lin = linearize(AAM.s0).t() ;
// linearize all shapes data, all x and then all y for each shape
Mat M;
for(unsigned i=0;i<normalized.size();i++){
M.push_back(linearize(normalized[i]).t()-s0_lin);
for(unsigned i=0; i<faces.size();i++){
fitImpl(img, landmarks[i], R,t, scale);
}
}
/* get PCA Projection vectors */
Mat S;
getProjection(M.t(),S,param_max_n);
/* Create similarity eig*/
Mat shape_S,shape_Q;
calcSimilarityEig(AAM.s0,S,AAM.Q,AAM.S);
/* ----------C. Create the coordinate frame ------------*/
delaunay(AAM.s0,AAM.triangles);
for(size_t scale=0; scale<AAM.scales.size();scale++){
AAM.textures[scale].max_m = params.texture_max_m;//145;
if(params.verbose) printf("Training for scale %f ...\n", AAM.scales[scale]);
Mat s0_scaled_m = Mat(AAM.s0)/AAM.scales[scale]; // scale the shape
std::vector<Point2f> s0_scaled = s0_scaled_m.reshape(2); //convert to points
/*get the min and max of x and y coordinate*/
double min_x, max_x, min_y, max_y;
s0_scaled_m = s0_scaled_m.reshape(1);
Mat s0_scaled_x = s0_scaled_m.col(0);
Mat s0_scaled_y = s0_scaled_m.col(1);
minMaxIdx(s0_scaled_x, &min_x, &max_x);
minMaxIdx(s0_scaled_y, &min_y, &max_y);
std::vector<Point2f> base_shape = Mat(Mat(s0_scaled)-Scalar(min_x-2.0,min_y-2.0)).reshape(2);
AAM.textures[scale].base_shape = base_shape;
AAM.textures[scale].resolution = Rect(0,0,(int)ceil(max_x-min_x+3),(int)ceil(max_y-min_y+3));
Mat base_texture = createTextureBase(base_shape, AAM.triangles, AAM.textures[scale].resolution, AAM.textures[scale].textureIdx);
Mat mask1 = base_texture>0;
Mat mask2;
erode(mask1, mask1, erode_kernel);
erode(mask1, mask2, erode_kernel);
Mat warped;
std::vector<int> fe_map;
createMaskMapping(mask1,mask2, AAM.textures[scale].ind1, AAM.textures[scale].ind2,fe_map);//ok
/* ------------ Part D. Get textures -------------*/
Mat texture_feats, feat;
if(params.verbose) printf("(1/4) Feature extraction ...\n");
for(size_t i=0; i<images.size();i++){
if(params.verbose) printf("extract features from image #%i/%i\n", (int)(i+1), (int)images.size());
warped = warpImage(images[i],base_shape, facePoints[i], AAM.triangles, AAM.textures[scale].resolution,AAM.textures[scale].textureIdx);
feat = getFeature<uchar>(warped, AAM.textures[scale].ind1);
texture_feats.push_back(feat.t());
}
Mat T= texture_feats.t();
/* -------------- E. Create the texture model -----------------*/
reduce(T,AAM.textures[scale].A0,1, CV_REDUCE_AVG);
if(params.verbose) printf("(2/4) Compute the feature average ...\n");
Mat A0_mtx = repeat(AAM.textures[scale].A0,1,T.cols);
Mat textures_normalized = T - A0_mtx;
if(params.verbose) printf("(3/4) Projecting the features ...\n");
getProjection(textures_normalized, AAM.textures[scale].A ,param_max_m);
AAM.textures[scale].AA0 = getFeature<float>(AAM.textures[scale].A0, fe_map);
return true;
}
if(params.verbose) printf("(4/4) Extraction of the eroded face features ...\n");
Mat U_data, ud;
for(int i =0;i<AAM.textures[scale].A.cols;i++){
Mat c = AAM.textures[scale].A.col(i);
ud = getFeature<float>(c,fe_map);
U_data.push_back(ud.t());
}
Mat U = U_data.t();
AAM.textures[scale].AA = orthonormal(U);
} // scale
images.clear();
if(params.save_model){
if(params.verbose) printf("Saving the model\n");
saveModel(params.model_filename);
}
isModelTrained = true;
if(params.verbose) printf("Training is completed\n");
}
bool FacemarkAAMImpl::fitImpl( const Mat image, std::vector<Point2f>& landmarks, const Mat R, const Point2f T, const float scale, int _scl){
if (landmarks.size()>0)
landmarks.clear();
bool FacemarkAAMImpl::fit( InputArray image, InputArray roi, InputOutputArray _landmarks, void * runtime_params)
{
std::vector<Rect> & faces = *(std::vector<Rect> *)roi.getObj();
if(faces.size()<1) return false;
CV_Assert(isModelTrained);
std::vector<std::vector<Point2f> > & landmarks =
*(std::vector<std::vector<Point2f> >*) _landmarks.getObj();
landmarks.resize(faces.size());
int param_n = params.n, param_m = params.m;
int scl = _scl<(int)AAM.scales.size()?_scl:(int)AAM.scales.size();
Mat img = image.getMat();
if(runtime_params!=0){
/*variables*/
std::vector<Point2f> s0 = Mat(Mat(AAM.s0)/AAM.scales[scl]).reshape(2);
std::vector<Config> conf = *(std::vector<Config>*)runtime_params;
if (conf.size()!=faces.size()) {
std::string error_message =
"Number of faces and extra_parameters are different!";
CV_Error(CV_StsBadArg, error_message);
}
for(size_t i=0; i<conf.size();i++){
fitImpl(img, landmarks[i], conf[i].R,conf[i].t, conf[i].scale, conf[i].model_scale_idx);
}
}else{
Mat R = Mat::eye(2, 2, CV_32F);
Point2f t = Point2f((float)(img.cols/2.0),(float)(img.rows/2.0));
float scale = 1.0;
/*pre-computation*/
Mat S = Mat(AAM.S, Range::all(), Range(0,param_n>AAM.S.cols?AAM.S.cols:param_n)).clone(); // chop the shape data
std::vector<std::vector<int> > Tp;
Mat Wx_dp, Wy_dp;
createWarpJacobian(S, AAM.Q, AAM.triangles, AAM.textures[scl],Wx_dp, Wy_dp, Tp);
for(unsigned i=0; i<faces.size();i++){
fitImpl(img, landmarks[i], R,t, scale);
}
}
std::vector<Point2f> s0_init = Mat(Mat(R*scale*AAM.scales[scl]*Mat(Mat(s0).reshape(1)).t()).t()).reshape(2);
std::vector<Point2f> curr_shape = Mat(Mat(s0_init)+Scalar(T.x,T.y));
curr_shape = Mat(1.0/scale*Mat(curr_shape)).reshape(2);
return true;
Mat imgray;
Mat img;
if(image.channels()>1){
cvtColor(image,imgray,COLOR_BGR2GRAY);
}else{
imgray = image;
}
bool FacemarkAAMImpl::fitImpl( const Mat image, std::vector<Point2f>& landmarks, const Mat R, const Point2f T, const float scale, int _scl){
if (landmarks.size()>0)
landmarks.clear();
resize(imgray,img,Size(int(image.cols/scale),int(image.rows/scale)));// matlab use bicubic interpolation, the result is float numbers
CV_Assert(isModelTrained);
/*chop the textures model*/
int maxCol = param_m;
if(AAM.textures[scl].A.cols<param_m)maxCol = AAM.textures[scl].A.cols;
if(AAM.textures[scl].AA.cols<maxCol)maxCol = AAM.textures[scl].AA.cols;
int param_n = params.n, param_m = params.m;
int scl = _scl<(int)AAM.scales.size()?_scl:(int)AAM.scales.size();
Mat A = Mat(AAM.textures[scl].A,Range(0,AAM.textures[scl].A.rows), Range(0,maxCol)).clone();
Mat AA = Mat(AAM.textures[scl].AA,Range(0,AAM.textures[scl].AA.rows), Range(0,maxCol)).clone();
/*variables*/
std::vector<Point2f> s0 = Mat(Mat(AAM.s0)/AAM.scales[scl]).reshape(2);
/*iteratively update the fitting*/
Mat I, II, warped, c, gx, gy, Irec, Irec_feat, dc;
Mat refI, refII, refWarped, ref_c, ref_gx, ref_gy, refIrec, refIrec_feat, ref_dc ;
for(int t=0;t<params.n_iter;t++){
warped = warpImage(img,AAM.textures[scl].base_shape, curr_shape,
AAM.triangles,
AAM.textures[scl].resolution ,
AAM.textures[scl].textureIdx);
/*pre-computation*/
Mat S = Mat(AAM.S, Range::all(), Range(0,param_n>AAM.S.cols?AAM.S.cols:param_n)).clone(); // chop the shape data
std::vector<std::vector<int> > Tp;
Mat Wx_dp, Wy_dp;
createWarpJacobian(S, AAM.Q, AAM.triangles, AAM.textures[scl],Wx_dp, Wy_dp, Tp);
I = getFeature<uchar>(warped, AAM.textures[scl].ind1);
II = getFeature<uchar>(warped, AAM.textures[scl].ind2);
std::vector<Point2f> s0_init = Mat(Mat(R*scale*AAM.scales[scl]*Mat(Mat(s0).reshape(1)).t()).t()).reshape(2);
std::vector<Point2f> curr_shape = Mat(Mat(s0_init)+Scalar(T.x,T.y));
curr_shape = Mat(1.0/scale*Mat(curr_shape)).reshape(2);
Mat imgray;
Mat img;
if(image.channels()>1){
cvtColor(image,imgray,CV_BGR2GRAY);
if(t==0){
c = A.t()*(I-AAM.textures[scl].A0); //little bit different to matlab, probably due to datatype
}else{
imgray = image;
c = c+dc;
}
resize(imgray,img,Size(int(image.cols/scale),int(image.rows/scale)));// matlab use bicubic interpolation, the result is float numbers
/*chop the textures model*/
int maxCol = param_m;
if(AAM.textures[scl].A.cols<param_m)maxCol = AAM.textures[scl].A.cols;
if(AAM.textures[scl].AA.cols<maxCol)maxCol = AAM.textures[scl].AA.cols;
Mat A = Mat(AAM.textures[scl].A,Range(0,AAM.textures[scl].A.rows), Range(0,maxCol)).clone();
Mat AA = Mat(AAM.textures[scl].AA,Range(0,AAM.textures[scl].AA.rows), Range(0,maxCol)).clone();
/*iteratively update the fitting*/
Mat I, II, warped, c, gx, gy, Irec, Irec_feat, dc;
Mat refI, refII, refWarped, ref_c, ref_gx, ref_gy, refIrec, refIrec_feat, ref_dc ;
for(int t=0;t<params.n_iter;t++){
warped = warpImage(img,AAM.textures[scl].base_shape, curr_shape,
AAM.triangles,
AAM.textures[scl].resolution ,
AAM.textures[scl].textureIdx);
I = getFeature<uchar>(warped, AAM.textures[scl].ind1);
II = getFeature<uchar>(warped, AAM.textures[scl].ind2);
if(t==0){
c = A.t()*(I-AAM.textures[scl].A0); //little bit different to matlab, probably due to datatype
}else{
c = c+dc;
}
Irec_feat = (AAM.textures[scl].A0+A*c);
Irec = Mat::zeros(AAM.textures[scl].resolution.width, AAM.textures[scl].resolution.height, CV_32FC1);
Irec_feat = (AAM.textures[scl].A0+A*c);
Irec = Mat::zeros(AAM.textures[scl].resolution.width, AAM.textures[scl].resolution.height, CV_32FC1);
for(int j=0;j<(int)AAM.textures[scl].ind1.size();j++){
Irec.at<float>(AAM.textures[scl].ind1[j]) = Irec_feat.at<float>(j);
}
Mat irec = Irec.t();
for(int j=0;j<(int)AAM.textures[scl].ind1.size();j++){
Irec.at<float>(AAM.textures[scl].ind1[j]) = Irec_feat.at<float>(j);
}
Mat irec = Irec.t();
gradient(irec, gx, gy);
gradient(irec, gx, gy);
Mat Jc;
image_jacobian(Mat(gx.t()).reshape(0,1).t(),Mat(gy.t()).reshape(0,1).t(),Wx_dp, Wy_dp,Jc);
Mat Jc;
image_jacobian(Mat(gx.t()).reshape(0,1).t(),Mat(gy.t()).reshape(0,1).t(),Wx_dp, Wy_dp,Jc);
Mat J;
std::vector<float> Irec_vec;
for(size_t j=0;j<AAM.textures[scl].ind2.size();j++){
J.push_back(Jc.row(AAM.textures[scl].ind2[j]));
Irec_vec.push_back(Irec.at<float>(AAM.textures[scl].ind2[j]));
}
Mat J;
std::vector<float> Irec_vec;
for(size_t j=0;j<AAM.textures[scl].ind2.size();j++){
J.push_back(Jc.row(AAM.textures[scl].ind2[j]));
Irec_vec.push_back(Irec.at<float>(AAM.textures[scl].ind2[j]));
}
/*compute Jfsic and Hfsic*/
Mat Jfsic = J - AA*(AA.t()*J);
Mat Hfsic = Jfsic.t()*Jfsic;
Mat iHfsic;
invert(Hfsic, iHfsic);
/*compute Jfsic and Hfsic*/
Mat Jfsic = J - AA*(AA.t()*J);
Mat Hfsic = Jfsic.t()*Jfsic;
Mat iHfsic;
invert(Hfsic, iHfsic);
/*compute dp dq and dc*/
Mat dqp = iHfsic*Jfsic.t()*(II-AAM.textures[scl].AA0);
dc = AA.t()*(II-Mat(Irec_vec)-J*dqp);
warpUpdate(curr_shape, dqp, s0,S, AAM.Q, AAM.triangles,Tp);
}
landmarks = Mat(scale*Mat(curr_shape)).reshape(2);
return true;
/*compute dp dq and dc*/
Mat dqp = iHfsic*Jfsic.t()*(II-AAM.textures[scl].AA0);
dc = AA.t()*(II-Mat(Irec_vec)-J*dqp);
warpUpdate(curr_shape, dqp, s0,S, AAM.Q, AAM.triangles,Tp);
}
landmarks = Mat(scale*Mat(curr_shape)).reshape(2);
return true;
}
void FacemarkAAMImpl::saveModel(String s){
FileStorage fs(s.c_str(),FileStorage::WRITE_BASE64);
fs << "AAM_tri" << AAM.triangles;
fs << "scales" << AAM.scales;
fs << "s0" << AAM.s0;
fs << "S" << AAM.S;
fs << "Q" << AAM.Q;
void FacemarkAAMImpl::saveModel(String s){
FileStorage fs(s.c_str(),FileStorage::WRITE_BASE64);
fs << "AAM_tri" << AAM.triangles;
fs << "scales" << AAM.scales;
fs << "s0" << AAM.s0;
fs << "S" << AAM.S;
fs << "Q" << AAM.Q;
String x;
for(int i=0;i< (int)AAM.scales.size();i++){
x = cv::format("scale%i_max_m",i);
fs << x << AAM.textures[i].max_m;
String x;
for(int i=0;i< (int)AAM.scales.size();i++){
x = cv::format("scale%i_max_m",i);
fs << x << AAM.textures[i].max_m;
x = cv::format("scale%i_resolution",i);
fs << x << AAM.textures[i].resolution;
x = cv::format("scale%i_resolution",i);
fs << x << AAM.textures[i].resolution;
x = cv::format("scale%i_textureIdx",i);
fs << x << AAM.textures[i].textureIdx;
x = cv::format("scale%i_textureIdx",i);
fs << x << AAM.textures[i].textureIdx;
x = cv::format("scale%i_base_shape",i);
fs << x << AAM.textures[i].base_shape;
x = cv::format("scale%i_base_shape",i);
fs << x << AAM.textures[i].base_shape;
x = cv::format("scale%i_A",i);
fs << x << AAM.textures[i].A;
x = cv::format("scale%i_A",i);
fs << x << AAM.textures[i].A;
x = cv::format("scale%i_A0",i);
fs << x << AAM.textures[i].A0;
x = cv::format("scale%i_A0",i);
fs << x << AAM.textures[i].A0;
x = cv::format("scale%i_AA",i);
fs << x << AAM.textures[i].AA;
x = cv::format("scale%i_AA",i);
fs << x << AAM.textures[i].AA;
x = cv::format("scale%i_AA0",i);
fs << x << AAM.textures[i].AA0;
x = cv::format("scale%i_AA0",i);
fs << x << AAM.textures[i].AA0;
x = cv::format("scale%i_ind1",i);
fs << x << AAM.textures[i].ind1;
x = cv::format("scale%i_ind1",i);
fs << x << AAM.textures[i].ind1;
x = cv::format("scale%i_ind2",i);
fs << x << AAM.textures[i].ind2;
x = cv::format("scale%i_ind2",i);
fs << x << AAM.textures[i].ind2;
}
fs.release();
if(params.verbose) printf("The model is successfully saved! \n");
}
fs.release();
if(params.verbose) printf("The model is successfully saved! \n");
}
void FacemarkAAMImpl::loadModel(String s){
FileStorage fs(s.c_str(),FileStorage::READ);
String x;
fs["AAM_tri"] >> AAM.triangles;
fs["scales"] >> AAM.scales;
fs["s0"] >> AAM.s0;
fs["S"] >> AAM.S;
fs["Q"] >> AAM.Q;
void FacemarkAAMImpl::loadModel(String s){
FileStorage fs(s.c_str(),FileStorage::READ);
String x;
fs["AAM_tri"] >> AAM.triangles;
fs["scales"] >> AAM.scales;
fs["s0"] >> AAM.s0;
fs["S"] >> AAM.S;
fs["Q"] >> AAM.Q;
AAM.textures.resize(AAM.scales.size());
for(int i=0;i< (int)AAM.scales.size();i++){
x = cv::format("scale%i_max_m",i);
fs[x] >> AAM.textures[i].max_m;
AAM.textures.resize(AAM.scales.size());
for(int i=0;i< (int)AAM.scales.size();i++){
x = cv::format("scale%i_max_m",i);
fs[x] >> AAM.textures[i].max_m;
x = cv::format("scale%i_resolution",i);
fs[x] >> AAM.textures[i].resolution;
x = cv::format("scale%i_resolution",i);
fs[x] >> AAM.textures[i].resolution;
x = cv::format("scale%i_textureIdx",i);
fs[x] >> AAM.textures[i].textureIdx;
x = cv::format("scale%i_textureIdx",i);
fs[x] >> AAM.textures[i].textureIdx;
x = cv::format("scale%i_base_shape",i);
fs[x] >> AAM.textures[i].base_shape;
x = cv::format("scale%i_base_shape",i);
fs[x] >> AAM.textures[i].base_shape;
x = cv::format("scale%i_A",i);
fs[x] >> AAM.textures[i].A;
x = cv::format("scale%i_A",i);
fs[x] >> AAM.textures[i].A;
x = cv::format("scale%i_A0",i);
fs[x] >> AAM.textures[i].A0;
x = cv::format("scale%i_A0",i);
fs[x] >> AAM.textures[i].A0;
x = cv::format("scale%i_AA",i);
fs[x] >> AAM.textures[i].AA;
x = cv::format("scale%i_AA",i);
fs[x] >> AAM.textures[i].AA;
x = cv::format("scale%i_AA0",i);
fs[x] >> AAM.textures[i].AA0;
x = cv::format("scale%i_AA0",i);
fs[x] >> AAM.textures[i].AA0;
x = cv::format("scale%i_ind1",i);
fs[x] >> AAM.textures[i].ind1;
x = cv::format("scale%i_ind1",i);
fs[x] >> AAM.textures[i].ind1;
x = cv::format("scale%i_ind2",i);
fs[x] >> AAM.textures[i].ind2;
}
fs.release();
isModelTrained = true;
if(params.verbose) printf("the model has been loaded\n");
x = cv::format("scale%i_ind2",i);
fs[x] >> AAM.textures[i].ind2;
}
Mat FacemarkAAMImpl::procrustes(std::vector<Point2f> P, std::vector<Point2f> Q, Mat & rot, Scalar & trans, float & scale){
// calculate average
Scalar mx = mean(P);
Scalar my = mean(Q);
fs.release();
isModelTrained = true;
if(params.verbose) printf("the model has been loaded\n");
}
// zero centered data
Mat X0 = Mat(P) - mx;
Mat Y0 = Mat(Q) - my;
Mat FacemarkAAMImpl::procrustes(std::vector<Point2f> P, std::vector<Point2f> Q, Mat & rot, Scalar & trans, float & scale){
// calculate magnitude
Mat Xs, Ys;
multiply(X0,X0,Xs);
multiply(Y0,Y0,Ys);
// calculate average
Scalar mx = mean(P);
Scalar my = mean(Q);
// calculate the sum
Mat sumXs, sumYs;
reduce(Xs,sumXs, 0, CV_REDUCE_SUM);
reduce(Ys,sumYs, 0, CV_REDUCE_SUM);
// zero centered data
Mat X0 = Mat(P) - mx;
Mat Y0 = Mat(Q) - my;
//calculate the normrnd
double normX = sqrt(Mat(sumXs.reshape(1)).at<float>(0)+Mat(sumXs.reshape(1)).at<float>(1));
double normY = sqrt(Mat(sumYs.reshape(1)).at<float>(0)+Mat(sumYs.reshape(1)).at<float>(1));
// calculate magnitude
Mat Xs, Ys;
multiply(X0,X0,Xs);
multiply(Y0,Y0,Ys);
//normalization
X0 = X0/normX;
Y0 = Y0/normY;
// calculate the sum
Mat sumXs, sumYs;
reduce(Xs,sumXs, 0, REDUCE_SUM);
reduce(Ys,sumYs, 0, REDUCE_SUM);
//reshape, convert to 2D Matrix
Mat Xn=X0.reshape(1);
Mat Yn=Y0.reshape(1);
//calculate the normrnd
double normX = sqrt(Mat(sumXs.reshape(1)).at<float>(0)+Mat(sumXs.reshape(1)).at<float>(1));
double normY = sqrt(Mat(sumYs.reshape(1)).at<float>(0)+Mat(sumYs.reshape(1)).at<float>(1));
//calculate the covariance matrix
Mat M = Xn.t()*Yn;
//normalization
X0 = X0/normX;
Y0 = Y0/normY;
// decompose
Mat U,S,Vt;
SVD::compute(M, S, U, Vt);
//reshape, convert to 2D Matrix
Mat Xn=X0.reshape(1);
Mat Yn=Y0.reshape(1);
// extract the transformations
scale = (S.at<float>(0)+S.at<float>(1))*(float)normX/(float)normY;
rot = Vt.t()*U.t();
//calculate the covariance matrix
Mat M = Xn.t()*Yn;
Mat muX(mx),mX; muX.pop_back();muX.pop_back();
Mat muY(my),mY; muY.pop_back();muY.pop_back();
muX.convertTo(mX,CV_32FC1);
muY.convertTo(mY,CV_32FC1);
// decompose
Mat U,S,Vt;
SVD::compute(M, S, U, Vt);
Mat t = mX.t()-scale*mY.t()*rot;
trans[0] = t.at<float>(0);
trans[1] = t.at<float>(1);
// extract the transformations
scale = (S.at<float>(0)+S.at<float>(1))*(float)normX/(float)normY;
rot = Vt.t()*U.t();
// calculate the recovered form
Mat Qmat = Mat(Q).reshape(1);
Mat muX(mx),mX; muX.pop_back();muX.pop_back();
Mat muY(my),mY; muY.pop_back();muY.pop_back();
muX.convertTo(mX,CV_32FC1);
muY.convertTo(mY,CV_32FC1);
return Mat(scale*Qmat*rot+trans).clone();
}
Mat t = mX.t()-scale*mY.t()*rot;
trans[0] = t.at<float>(0);
trans[1] = t.at<float>(1);
void FacemarkAAMImpl::procrustesAnalysis(std::vector<std::vector<Point2f> > shapes, std::vector<std::vector<Point2f> > & normalized, std::vector<Point2f> & new_mean){
// calculate the recovered form
Mat Qmat = Mat(Q).reshape(1);
std::vector<Scalar> mean_every_shape;
mean_every_shape.resize(shapes.size());
return Mat(scale*Qmat*rot+trans).clone();
}
Point2f temp;
void FacemarkAAMImpl::procrustesAnalysis(std::vector<std::vector<Point2f> > shapes, std::vector<std::vector<Point2f> > & normalized, std::vector<Point2f> & new_mean){
// calculate the mean of every shape
for(size_t i=0; i< shapes.size();i++){
mean_every_shape[i] = mean(shapes[i]);
}
std::vector<Scalar> mean_every_shape;
mean_every_shape.resize(shapes.size());
//normalize every shapes
Mat tShape;
normalized.clear();
for(size_t i=0; i< shapes.size();i++){
normalized.push_back((Mat)(Mat(shapes[i]) - mean_every_shape[i]));
}
// calculate the mean shape
std::vector<Point2f> mean_shape;
calcMeanShape(normalized, mean_shape);
// update the mean shape and normalized shapes iteratively
int maxIter = 100;
Mat R;
Scalar t;
float s;
Mat aligned;
for(int i=0;i<maxIter;i++){
// align
for(unsigned k=0;k< normalized.size();k++){
aligned=procrustes(mean_shape, normalized[k], R, t, s);
aligned.reshape(2).copyTo(normalized[k]);
}
Point2f temp;
//calc new mean
calcMeanShape(normalized, new_mean);
// align the new mean
aligned=procrustes(mean_shape, new_mean, R, t, s);
// update
aligned.reshape(2).copyTo(mean_shape);
}
// calculate the mean of every shape
for(size_t i=0; i< shapes.size();i++){
mean_every_shape[i] = mean(shapes[i]);
}
void FacemarkAAMImpl::calcMeanShape(std::vector<std::vector<Point2f> > shapes,std::vector<Point2f> & mean){
mean.resize(shapes[0].size());
Point2f tmp;
for(unsigned i=0;i<shapes[0].size();i++){
tmp.x=0;
tmp.y=0;
for(unsigned k=0;k< shapes.size();k++){
tmp.x+= shapes[k][i].x;
tmp.y+= shapes[k][i].y;
}
tmp.x/=shapes.size();
tmp.y/=shapes.size();
mean[i] = tmp;
}
//normalize every shapes
Mat tShape;
normalized.clear();
for(size_t i=0; i< shapes.size();i++){
normalized.push_back((Mat)(Mat(shapes[i]) - mean_every_shape[i]));
}
void FacemarkAAMImpl::getProjection(const Mat M, Mat & P, int n){
Mat U,S,Vt,S1, Ut;
int k;
if(M.rows < M.cols){
// SVD::compute(M*M.t(), S, U, Vt);
eigen(M*M.t(), S, Ut); U=Ut.t();
// find the minimum between number of non-zero eigval,
// compressed dim, row, and column
// threshold(S,S1,0.00001,1,THRESH_BINARY);
k= S.rows; //countNonZero(S1);
if(k>n)k=n;
if(k>M.rows)k=M.rows;
if(k>M.cols)k=M.cols;
// cut the column of eigen vector
U.colRange(0,k).copyTo(P);
}else{
// SVD::compute(M.t()*M, S, U, Vt);
eigen(M.t()*M, S, Ut);U=Ut.t();
// threshold(S,S1,0.00001,1,THRESH_BINARY);
k= S.rows; //countNonZero(S1);
if(k>n)k=n;
if(k>M.rows)k=M.rows;
if(k>M.cols)k=M.cols;
// cut the eigen values to k-amount
Mat D = Mat::zeros(k,k,CV_32FC1);
Mat diag = D.diag();
Mat s; pow(S,-0.5,s);
s(Range(0,k), Range::all()).copyTo(diag);
// cut the eigen vector to k-column,
P = Mat(M*U.colRange(0,k)*D).clone();
// calculate the mean shape
std::vector<Point2f> mean_shape;
calcMeanShape(normalized, mean_shape);
// update the mean shape and normalized shapes iteratively
int maxIter = 100;
Mat R;
Scalar t;
float s;
Mat aligned;
for(int i=0;i<maxIter;i++){
// align
for(unsigned k=0;k< normalized.size();k++){
aligned=procrustes(mean_shape, normalized[k], R, t, s);
aligned.reshape(2).copyTo(normalized[k]);
}
//calc new mean
calcMeanShape(normalized, new_mean);
// align the new mean
aligned=procrustes(mean_shape, new_mean, R, t, s);
// update
aligned.reshape(2).copyTo(mean_shape);
}
}
/*
 * Compute the per-landmark mean over a set of shapes.
 * Output point i is the average of landmark i across all input shapes.
 * Assumes every shape has the same number of points as shapes[0].
 */
void FacemarkAAMImpl::calcMeanShape(std::vector<std::vector<Point2f> > shapes,std::vector<Point2f> & mean){
    const size_t n_points = shapes[0].size();
    mean.resize(n_points);
    for(size_t j=0;j<n_points;j++){
        float sum_x = 0.0f;
        float sum_y = 0.0f;
        for(size_t s=0;s<shapes.size();s++){
            sum_x += shapes[s][j].x;
            sum_y += shapes[s][j].y;
        }
        // float division by the (size_t) shape count, as in the original
        mean[j].x = sum_x/shapes.size();
        mean[j].y = sum_y/shapes.size();
    }
}
void FacemarkAAMImpl::getProjection(const Mat M, Mat & P, int n){
Mat U,S,Vt,S1, Ut;
int k;
if(M.rows < M.cols){
// SVD::compute(M*M.t(), S, U, Vt);
eigen(M*M.t(), S, Ut); U=Ut.t();
// find the minimum between number of non-zero eigval,
// compressed dim, row, and column
// threshold(S,S1,0.00001,1,THRESH_BINARY);
k= S.rows; //countNonZero(S1);
if(k>n)k=n;
if(k>M.rows)k=M.rows;
if(k>M.cols)k=M.cols;
// cut the column of eigen vector
U.colRange(0,k).copyTo(P);
}else{
// SVD::compute(M.t()*M, S, U, Vt);
eigen(M.t()*M, S, Ut);U=Ut.t();
// threshold(S,S1,0.00001,1,THRESH_BINARY);
k= S.rows; //countNonZero(S1);
if(k>n)k=n;
if(k>M.rows)k=M.rows;
if(k>M.cols)k=M.cols;
// cut the eigen values to k-amount
Mat D = Mat::zeros(k,k,CV_32FC1);
Mat diag = D.diag();
Mat s; pow(S,-0.5,s);
s(Range(0,k), Range::all()).copyTo(diag);
// cut the eigen vector to k-column,
P = Mat(M*U.colRange(0,k)*D).clone();
Mat FacemarkAAMImpl::orthonormal(Mat Mo){
Mat M;
Mo.convertTo(M,CV_32FC1);
}
}
// TODO: float precission is only 1e-7, but MATLAB version use thresh=2.2204e-16
float thresh = (float)2.2204e-6;
Mat FacemarkAAMImpl::orthonormal(Mat Mo){
Mat M;
Mo.convertTo(M,CV_32FC1);
Mat O = Mat::zeros(M.rows, M.cols, CV_32FC1);
// TODO: float precission is only 1e-7, but MATLAB version use thresh=2.2204e-16
float thresh = (float)2.2204e-6;
int k = 0; //storing index
Mat O = Mat::zeros(M.rows, M.cols, CV_32FC1);
Mat w,nv;
float n;
for(int i=0;i<M.cols;i++){
Mat v = M.col(i); // processed column to orthogonalize
int k = 0; //storing index
// subtract projection over previous vectors
for(int j=0;j<k;j++){
Mat o=O.col(j);
w = v-o*(o.t()*v);
w.copyTo(v);
}
Mat w,nv;
float n;
for(int i=0;i<M.cols;i++){
Mat v = M.col(i); // processed column to orthogonalize
// only keep non zero vector
n = (float)norm(v);
if(n>thresh){
Mat ok=O.col(k);
// nv=v/n;
normalize(v,nv);
nv.copyTo(ok);
k+=1;
}
// subtract projection over previous vectors
for(int j=0;j<k;j++){
Mat o=O.col(j);
w = v-o*(o.t()*v);
w.copyTo(v);
}
// only keep non zero vector
n = (float)norm(v);
if(n>thresh){
Mat ok=O.col(k);
// nv=v/n;
normalize(v,nv);
nv.copyTo(ok);
k+=1;
}
return O.colRange(0,k).clone();
}
void FacemarkAAMImpl::calcSimilarityEig(std::vector<Point2f> s0,Mat S, Mat & Q_orth, Mat & S_orth){
int npts = (int)s0.size();
Mat Q = Mat::zeros(2*npts,4,CV_32FC1);
Mat c0 = Q.col(0);
Mat c1 = Q.col(1);
Mat c2 = Q.col(2);
Mat c3 = Q.col(3);
/*c0 = s0(:)*/
Mat w = linearize(s0);
// w.convertTo(w, CV_64FC1);
w.copyTo(c0);
/*c1 = [-s0(npts:2*npts); s0(0:npts-1)]*/
Mat s0_mat = Mat(s0).reshape(1);
// s0_mat.convertTo(s0_mat, CV_64FC1);
Mat swapper = Mat::zeros(2,npts,CV_32FC1);
Mat s00 = s0_mat.col(0);
Mat s01 = s0_mat.col(1);
Mat sw0 = swapper.row(0);
Mat sw1 = swapper.row(1);
Mat(s00.t()).copyTo(sw1);
s01 = -s01;
Mat(s01.t()).copyTo(sw0);
Mat(swapper.reshape(1,2*npts)).copyTo(c1);
/*c2 - [ones(npts); zeros(npts)]*/
Mat ones = Mat::ones(1,npts,CV_32FC1);
Mat c2_mat = Mat::zeros(2,npts,CV_32FC1);
Mat c20 = c2_mat.row(0);
ones.copyTo(c20);
Mat(c2_mat.reshape(1,2*npts)).copyTo(c2);
/*c3 - [zeros(npts); ones(npts)]*/
Mat c3_mat = Mat::zeros(2,npts,CV_32FC1);
Mat c31 = c3_mat.row(1);
ones.copyTo(c31);
Mat(c3_mat.reshape(1,2*npts)).copyTo(c3);
Mat Qo = orthonormal(Q);
Mat all = Qo.t();
all.push_back(S.t());
Mat allOrth = orthonormal(all.t());
Q_orth = allOrth.colRange(0,4).clone();
S_orth = allOrth.colRange(4,allOrth.cols).clone();
return O.colRange(0,k).clone();
}
/*
 * Build the 4-column similarity-transform basis Q (translation x/y,
 * scale, rotation components derived from the mean shape s0), then
 * orthonormalize it jointly with the shape basis S.
 * Outputs:
 *   Q_orth - orthonormal similarity basis (first 4 columns)
 *   S_orth - shape basis re-orthonormalized against Q (remaining columns)
 * NOTE: many Mat locals below (c0..c3, sw0, sw1, c20, c31) are headers
 * aliasing rows/columns of their parent matrices, so copyTo writes
 * through into Q / swapper / c2_mat / c3_mat in place.
 */
void FacemarkAAMImpl::calcSimilarityEig(std::vector<Point2f> s0,Mat S, Mat & Q_orth, Mat & S_orth){
int npts = (int)s0.size();
Mat Q = Mat::zeros(2*npts,4,CV_32FC1);
// Column headers aliasing Q's storage.
Mat c0 = Q.col(0);
Mat c1 = Q.col(1);
Mat c2 = Q.col(2);
Mat c3 = Q.col(3);
/*c0 = s0(:)*/
Mat w = linearize(s0);
// w.convertTo(w, CV_64FC1);
w.copyTo(c0);
/*c1 = [-s0(npts:2*npts); s0(0:npts-1)]*/
// Rotation component: swap x/y and negate y, i.e. a 90-degree rotation of s0.
Mat s0_mat = Mat(s0).reshape(1);
// s0_mat.convertTo(s0_mat, CV_64FC1);
Mat swapper = Mat::zeros(2,npts,CV_32FC1);
Mat s00 = s0_mat.col(0);
Mat s01 = s0_mat.col(1);
Mat sw0 = swapper.row(0);
Mat sw1 = swapper.row(1);
Mat(s00.t()).copyTo(sw1);
s01 = -s01;
Mat(s01.t()).copyTo(sw0);
Mat(swapper.reshape(1,2*npts)).copyTo(c1);
/*c2 - [ones(npts); zeros(npts)]*/
// Translation in x: ones over the x half, zeros over the y half.
Mat ones = Mat::ones(1,npts,CV_32FC1);
Mat c2_mat = Mat::zeros(2,npts,CV_32FC1);
Mat c20 = c2_mat.row(0);
ones.copyTo(c20);
Mat(c2_mat.reshape(1,2*npts)).copyTo(c2);
/*c3 - [zeros(npts); ones(npts)]*/
// Translation in y: zeros over the x half, ones over the y half.
Mat c3_mat = Mat::zeros(2,npts,CV_32FC1);
Mat c31 = c3_mat.row(1);
ones.copyTo(c31);
Mat(c3_mat.reshape(1,2*npts)).copyTo(c3);
// Orthonormalize Q alone, then stack S behind it and orthonormalize the
// combined basis so the shape modes become orthogonal to the similarity modes.
Mat Qo = orthonormal(Q);
Mat all = Qo.t();
all.push_back(S.t());
Mat allOrth = orthonormal(all.t());
Q_orth = allOrth.colRange(0,4).clone();
S_orth = allOrth.colRange(4,allOrth.cols).clone();
}
// Flatten an Nx1 2-channel point matrix into a single 2N-row column vector,
// ordered as all x values first, then all y values.
inline Mat FacemarkAAMImpl::linearize(Mat s){ // all x values and then all y values
return Mat(s.reshape(1).t()).reshape(1,2*s.rows);
}
// Convenience overload: wrap the point vector in a Mat and delegate to linearize(Mat).
inline Mat FacemarkAAMImpl::linearize(std::vector<Point2f> s){ // all x values and then all y values
return linearize(Mat(s));
}
void FacemarkAAMImpl::delaunay(std::vector<Point2f> s, std::vector<Vec3i> & triangles){
    // Compute the Delaunay triangulation of the landmark set `s`.
    // Each output element holds the three indices (into `s`) of one triangle's
    // vertices.
    // Fix: a botched merge had spliced duplicate linearize() definitions into
    // this function body (illegal nested definitions); they are removed here.
    triangles.clear();
    std::vector<int> idx;
    std::vector<Vec6f> tp;

    // Bounding box of the input points, used below to reject triangles that
    // touch the virtual outer vertices Subdiv2D adds around the point set.
    double min_x, max_x, min_y, max_y;
    Mat S = Mat(s).reshape(1);
    Mat s_x = S.col(0);
    Mat s_y = S.col(1);
    minMaxIdx(s_x, &min_x, &max_x);
    minMaxIdx(s_y, &min_y, &max_y);

    // TODO: set the rectangle as configurable parameter
    Subdiv2D subdiv(Rect(-500,-500,1000,1000));
    subdiv.insert(s);

    // Build a map from Subdiv2D vertex ids back to indices into `s`.
    int a,b;
    subdiv.locate(s.back(),a,b);
    idx.resize(b+1);

    for(unsigned i=0;i<s.size();i++){
        subdiv.locate(s[i],a,b);
        idx[b] = (int)i;
    }

    int v1,v2,v3;
    subdiv.getTriangleList(tp);

    for(unsigned i=0;i<tp.size();i++){
        Vec6f t = tp[i];

        // accept only triangles whose three vertices are real input points
        if(t[0]>=min_x && t[0]<=max_x && t[1]>=min_y && t[1]<=max_y
            && t[2]>=min_x && t[2]<=max_x && t[3]>=min_y && t[3]<=max_y
            && t[4]>=min_x && t[4]<=max_x && t[5]>=min_y && t[5]<=max_y
        ){
            subdiv.locate(Point2f(t[0],t[1]),a,v1);
            subdiv.locate(Point2f(t[2],t[3]),a,v2);
            subdiv.locate(Point2f(t[4],t[5]),a,v3);
            triangles.push_back(Vec3i(idx[v1],idx[v2],idx[v3]));
        } //if
    } // for
}
Mat FacemarkAAMImpl::createMask(std::vector<Point2f> base_shape, Rect res){
    // Rasterize the convex hull of the base shape into a binary mask
    // (255 inside the hull, 0 outside) of the requested resolution.
    Mat region = Mat::zeros(res.height, res.width, CV_8U);
    std::vector<Point> pts;
    Mat(base_shape).convertTo(pts, CV_32S); // hull routines need integer points
    std::vector<Point> outline;
    convexHull(pts,outline);
    fillConvexPoly(region, &outline[0], (int)outline.size(), 255, 8 ,0);
    return region.clone();
}
Mat FacemarkAAMImpl::createTextureBase(std::vector<Point2f> shape, std::vector<Vec3i> triangles, Rect res, std::vector<std::vector<Point> > & textureIdx){
    // Rasterize every triangle of the base shape into an index mask and
    // collect, per triangle, the list of pixel coordinates it covers.
    // Returns the mask (pixel value = triangle index + 1; 0 = background).
    // Fix: a botched merge had spliced a duplicate delaunay() body into this
    // function; the original single-copy implementation is restored here.
    // max supported amount of triangles only 255
    Mat mask = Mat::zeros(res.height, res.width, CV_8U);

    std::vector<Point2f> p(3);
    textureIdx.clear();
    for(size_t i=0;i<triangles.size();i++){
        p[0] = shape[triangles[i][0]];
        p[1] = shape[triangles[i][1]];
        p[2] = shape[triangles[i][2]];

        // paint triangle i with label i+1
        std::vector<Point> polygon;
        approxPolyDP(p,polygon, 1.0, true);
        fillConvexPoly(mask, &polygon[0], (int)polygon.size(), (double)i+1,8,0 );

        // gather all pixels labelled i+1 for this triangle
        std::vector<Point> list;
        for(int y=0;y<res.height;y++){
            for(int x=0;x<res.width;x++){
                if(mask.at<uchar>(y,x)==(uchar)(i+1)){
                    list.push_back(Point(x,y));
                }
            }
        }
        textureIdx.push_back(list);
    }

    return mask.clone();
}
Mat FacemarkAAMImpl::createMask(std::vector<Point2f> base_shape, Rect res){
    // Build a binary mask (255 inside, 0 outside) covering the convex hull
    // of the base shape at the given resolution.
    // Fix: removed a duplicated, unreachable `return mask.clone();` left
    // behind by a botched merge.
    Mat mask = Mat::zeros(res.height, res.width, CV_8U);
    std::vector<Point> hull;
    std::vector<Point> shape;
    Mat(base_shape).convertTo(shape, CV_32S);
    convexHull(shape,hull);
    fillConvexPoly(mask, &hull[0], (int)hull.size(), 255, 8 ,0);
    return mask.clone();
}
Mat FacemarkAAMImpl::warpImage(
    const Mat img, const std::vector<Point2f> target_shape,
    const std::vector<Point2f> curr_shape, const std::vector<Vec3i> triangles,
    const Rect res, const std::vector<std::vector<Point> > textureIdx)
{
    // Piece-wise affine warp: for each triangle, map the pixels listed in
    // textureIdx from the current-shape frame of `img` into the target
    // (base) frame, producing a grayscale image of size `res`.
    // Fix: a botched merge had spliced createTextureBase() fragments into
    // this function body; the original implementation is restored here.
    // TODO: this part can be optimized, collect transformation pairs from all
    // triangles first, then do one remapping pass.
    Mat warped = Mat::zeros(res.height, res.width, CV_8U);
    Mat warped2 = Mat::zeros(res.height, res.width, CV_8U);
    Mat image,part, warped_part;

    if(img.channels()>1){
        cvtColor(img,image,COLOR_BGR2GRAY);
    }else{
        image = img;
    }

    Mat A,R,t;
    A = Mat::zeros(2,3,CV_64F);
    std::vector<Point2f> target(3),source(3);
    std::vector<Point> polygon;
    for(size_t i=0;i<triangles.size();i++){
        target[0] = target_shape[triangles[i][0]];
        target[1] = target_shape[triangles[i][1]];
        target[2] = target_shape[triangles[i][2]];
        source[0] = curr_shape[triangles[i][0]];
        source[1] = curr_shape[triangles[i][1]];
        source[2] = curr_shape[triangles[i][2]];

        // -1.0: convert to Matlab's 1-based coordinates used by the formulas below
        Mat target_mtx = Mat(target).reshape(1)-1.0;
        Mat source_mtx = Mat(source).reshape(1)-1.0;
        Mat U = target_mtx.col(0);
        Mat V = target_mtx.col(1);
        Mat X = source_mtx.col(0);
        Mat Y = source_mtx.col(1);

        // Closed-form affine transform mapping the target triangle onto the
        // source triangle (equivalent to, but cheaper than, getAffineTransform).
        double denominator = (target[1].x-target[0].x)*(target[2].y-target[0].y)-
                            (target[1].y-target[0].y)*(target[2].x-target[0].x);
        A.at<double>(0) = ((target[2].y-target[0].y)*(source[1].x-source[0].x)-
                           (target[1].y-target[0].y)*(source[2].x-source[0].x))/denominator;
        A.at<double>(1) = ((target[1].x-target[0].x)*(source[2].x-source[0].x)-
                           (target[2].x-target[0].x)*(source[1].x-source[0].x))/denominator;
        A.at<double>(2) =X.at<float>(0) + ((V.at<float>(0) * (U.at<float>(2) - U.at<float>(0)) - U.at<float>(0)*(V.at<float>(2) - V.at<float>(0))) * (X.at<float>(1) - X.at<float>(0)) + (U.at<float>(0) * (V.at<float>(1) - V.at<float>(0)) - V.at<float>(0)*(U.at<float>(1) - U.at<float>(0))) * (X.at<float>(2) - X.at<float>(0))) / denominator;
        A.at<double>(3) =((V.at<float>(2) - V.at<float>(0)) * (Y.at<float>(1) - Y.at<float>(0)) - (V.at<float>(1) - V.at<float>(0)) * (Y.at<float>(2) - Y.at<float>(0))) / denominator;
        A.at<double>(4) = ((U.at<float>(1) - U.at<float>(0)) * (Y.at<float>(2) - Y.at<float>(0)) - (U.at<float>(2) - U.at<float>(0)) * (Y.at<float>(1) - Y.at<float>(0))) / denominator;
        A.at<double>(5) = Y.at<float>(0) + ((V.at<float>(0) * (U.at<float>(2) - U.at<float>(0)) - U.at<float>(0) * (V.at<float>(2) - V.at<float>(0))) * (Y.at<float>(1) - Y.at<float>(0)) + (U.at<float>(0) * (V.at<float>(1) - V.at<float>(0)) - V.at<float>(0)*(U.at<float>(1) - U.at<float>(0))) * (Y.at<float>(2) - Y.at<float>(0))) / denominator;
        // A = getAffineTransform(target,source);
        R=A.colRange(0,2);
        t=A.colRange(2,3);

        // Linear indices of the triangle's pixels in the base (destination) frame.
        Mat pts_ori = Mat(textureIdx[i]).reshape(1);
        Mat pts = pts_ori.t(); //matlab
        Mat bx = pts_ori.col(0);
        Mat by = pts_ori.col(1);
        Mat base_ind = (by-1)*res.width+bx;

        // Homogeneous pixel coordinates, transformed into the source frame.
        Mat pts_f;
        pts.convertTo(pts_f,CV_64FC1);
        pts_f.push_back(Mat::ones(1,(int)textureIdx[i].size(),CV_64FC1));

        Mat trans = (A*pts_f).t();

        Mat T; trans.convertTo(T, CV_32S); // this rounding makes the result slightly different to matlab
        Mat mx = T.col(0);
        Mat my = T.col(1);
        Mat ind = (my-1)*image.cols+mx;
        int maxIdx = image.rows*image.cols;
        int idx;

        for(int k=0;k<ind.rows;k++){
            idx=ind.at<int>(k);
            if(idx>=0 && idx<maxIdx){ // skip samples that fall outside the image
                warped.at<uchar>(base_ind.at<int>(k)) = (uchar)(image.at<uchar>(idx));
            }
        }
        warped.copyTo(warped2);
    }

    return warped2.clone();
}
template <class T>
Mat FacemarkAAMImpl::getFeature(const Mat m, std::vector<int> map){
    // Gather the elements of m (transposed first, to follow Matlab's
    // column-major linear indexing) at the given indices and return them
    // as a single-column float Mat.
    Mat transposed = m.t();//matlab
    std::vector<float> values;
    values.reserve(map.size());
    for(size_t k=0;k<map.size();k++){
        values.push_back((float)transposed.at<T>(map[k]));
    }
    return Mat(values).clone();
}
std::vector<Point> list;
for(int y=0;y<res.height;y++){
for(int x=0;x<res.width;x++){
if(mask.at<uchar>(y,x)==(uchar)(i+1)){
list.push_back(Point(x,y));
}
}
}
textureIdx.push_back(list);
void FacemarkAAMImpl::createMaskMapping(const Mat m1, const Mat m2, std::vector<int> & ind1, std::vector<int> & ind2, std::vector<int> & ind3){
}
int cnt = 0, idx=0;
return mask.clone();
}
ind1.clear();
ind2.clear();
ind3.clear();
Mat FacemarkAAMImpl::warpImage(
const Mat img, const std::vector<Point2f> target_shape,
const std::vector<Point2f> curr_shape, const std::vector<Vec3i> triangles,
const Rect res, const std::vector<std::vector<Point> > textureIdx)
{
// TODO: this part can be optimized, collect tranformation pair form all triangles first, then do one time remapping
Mat warped = Mat::zeros(res.height, res.width, CV_8U);
Mat warped2 = Mat::zeros(res.height, res.width, CV_8U);
Mat image,part, warped_part;
if(img.channels()>1){
cvtColor(img,image,CV_BGR2GRAY);
}else{
image = img;
}
Mat mask = m1.t();//matlab
Mat mask2 = m2.t();//matlab
Mat A,R,t;
A = Mat::zeros(2,3,CV_64F);
std::vector<Point2f> target(3),source(3);
std::vector<Point> polygon;
for(size_t i=0;i<triangles.size();i++){
target[0] = target_shape[triangles[i][0]];
target[1] = target_shape[triangles[i][1]];
target[2] = target_shape[triangles[i][2]];
source[0] = curr_shape[triangles[i][0]];
source[1] = curr_shape[triangles[i][1]];
source[2] = curr_shape[triangles[i][2]];
Mat target_mtx = Mat(target).reshape(1)-1.0;
Mat source_mtx = Mat(source).reshape(1)-1.0;
Mat U = target_mtx.col(0);
Mat V = target_mtx.col(1);
Mat X = source_mtx.col(0);
Mat Y = source_mtx.col(1);
double denominator = (target[1].x-target[0].x)*(target[2].y-target[0].y)-
(target[1].y-target[0].y)*(target[2].x-target[0].x);
// denominator = 1.0/denominator;
A.at<double>(0) = ((target[2].y-target[0].y)*(source[1].x-source[0].x)-
(target[1].y-target[0].y)*(source[2].x-source[0].x))/denominator;
A.at<double>(1) = ((target[1].x-target[0].x)*(source[2].x-source[0].x)-
(target[2].x-target[0].x)*(source[1].x-source[0].x))/denominator;
A.at<double>(2) =X.at<float>(0) + ((V.at<float>(0) * (U.at<float>(2) - U.at<float>(0)) - U.at<float>(0)*(V.at<float>(2) - V.at<float>(0))) * (X.at<float>(1) - X.at<float>(0)) + (U.at<float>(0) * (V.at<float>(1) - V.at<float>(0)) - V.at<float>(0)*(U.at<float>(1) - U.at<float>(0))) * (X.at<float>(2) - X.at<float>(0))) / denominator;
A.at<double>(3) =((V.at<float>(2) - V.at<float>(0)) * (Y.at<float>(1) - Y.at<float>(0)) - (V.at<float>(1) - V.at<float>(0)) * (Y.at<float>(2) - Y.at<float>(0))) / denominator;
A.at<double>(4) = ((U.at<float>(1) - U.at<float>(0)) * (Y.at<float>(2) - Y.at<float>(0)) - (U.at<float>(2) - U.at<float>(0)) * (Y.at<float>(1) - Y.at<float>(0))) / denominator;
A.at<double>(5) = Y.at<float>(0) + ((V.at<float>(0) * (U.at<float>(2) - U.at<float>(0)) - U.at<float>(0) * (V.at<float>(2) - V.at<float>(0))) * (Y.at<float>(1) - Y.at<float>(0)) + (U.at<float>(0) * (V.at<float>(1) - V.at<float>(0)) - V.at<float>(0)*(U.at<float>(1) - U.at<float>(0))) * (Y.at<float>(2) - Y.at<float>(0))) / denominator;
// A = getAffineTransform(target,source);
R=A.colRange(0,2);
t=A.colRange(2,3);
Mat pts_ori = Mat(textureIdx[i]).reshape(1);
Mat pts = pts_ori.t(); //matlab
Mat bx = pts_ori.col(0);
Mat by = pts_ori.col(1);
Mat base_ind = (by-1)*res.width+bx;
Mat pts_f;
pts.convertTo(pts_f,CV_64FC1);
pts_f.push_back(Mat::ones(1,(int)textureIdx[i].size(),CV_64FC1));
Mat trans = (A*pts_f).t();
Mat T; trans.convertTo(T, CV_32S); // this rounding make the result a little bit different to matlab
Mat mx = T.col(0);
Mat my = T.col(1);
Mat ind = (my-1)*image.cols+mx;
int maxIdx = image.rows*image.cols;
int idx;
for(int k=0;k<ind.rows;k++){
idx=ind.at<int>(k);
if(idx>=0 && idx<maxIdx){
warped.at<uchar>(base_ind.at<int>(k)) = (uchar)(image.at<uchar>(idx));
for(int i=0;i<mask.rows;i++){
for(int j=0;j<mask.cols;j++){
if(mask.at<uchar>(i,j)>0){
if(mask2.at<uchar>(i,j)>0){
ind2.push_back(idx);
ind3.push_back(cnt);
}
ind1.push_back(idx);
cnt +=1;
}
warped.copyTo(warped2);
}
idx+=1;
} // j
} // i
return warped2.clone();
}
}
// Gather the elements of m (transposed, to follow Matlab's column-major
// linear indexing) at the given indices; returns them as a one-column
// float Mat. T is the element type stored in m.
template <class T>
Mat FacemarkAAMImpl::getFeature(const Mat m, std::vector<int> map){
std::vector<float> feat;
Mat M = m.t();//matlab
for(size_t i=0;i<map.size();i++){
feat.push_back((float)M.at<T>(map[i]));
}
return Mat(feat).clone();
}
void FacemarkAAMImpl::image_jacobian(const Mat gx, const Mat gy, const Mat Jx, const Mat Jy, Mat & G){
void FacemarkAAMImpl::createMaskMapping(const Mat m1, const Mat m2, std::vector<int> & ind1, std::vector<int> & ind2, std::vector<int> & ind3){
Mat Gx = repeat(gx,1,Jx.cols);
Mat Gy = repeat(gy,1,Jx.cols);
int cnt = 0, idx=0;
Mat G1,G2;
multiply(Gx,Jx,G1);
multiply(Gy,Jy,G2);
ind1.clear();
ind2.clear();
ind3.clear();
G=G1+G2;
}
Mat mask = m1.t();//matlab
Mat mask2 = m2.t();//matlab
void FacemarkAAMImpl::warpUpdate(std::vector<Point2f> & shape, Mat delta, std::vector<Point2f> s0, Mat S, Mat Q, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp){
std::vector<Point2f> new_shape;
int nSimEig = 4;
for(int i=0;i<mask.rows;i++){
for(int j=0;j<mask.cols;j++){
if(mask.at<uchar>(i,j)>0){
if(mask2.at<uchar>(i,j)>0){
ind2.push_back(idx);
ind3.push_back(cnt);
}
/*get dr, dp and compute ds0*/
Mat dr = -Mat(delta, Range(0,nSimEig));
Mat dp = -Mat(delta, Range(nSimEig, delta.rows));
ind1.push_back(idx);
cnt +=1;
}
idx+=1;
} // j
} // i
Mat ds0 = S*dp + Q*dr;
Mat ds0_mat = Mat::zeros((int)s0.size(),2, CV_32FC1);
Mat c0 = ds0_mat.col(0);
Mat c1 = ds0_mat.col(1);
Mat(ds0, Range(0,(int)s0.size())).copyTo(c0);
Mat(ds0, Range((int)s0.size(),(int)s0.size()*2)).copyTo(c1);
}
Mat s_new = computeWarpParts(shape,s0,ds0_mat, triangles, Tp);
void FacemarkAAMImpl::image_jacobian(const Mat gx, const Mat gy, const Mat Jx, const Mat Jy, Mat & G){
Mat diff =linearize(Mat(s_new - Mat(s0).reshape(1)));
Mat Gx = repeat(gx,1,Jx.cols);
Mat Gy = repeat(gy,1,Jx.cols);
Mat r = Q.t()*diff;
Mat p = S.t()*diff;
Mat G1,G2;
multiply(Gx,Jx,G1);
multiply(Gy,Jy,G2);
Mat s = linearize(s0) +S*p + Q*r;
Mat(Mat(s.t()).reshape(0,2).t()).reshape(2).copyTo(shape);
}
G=G1+G2;
}
Mat FacemarkAAMImpl::computeWarpParts(std::vector<Point2f> curr_shape,std::vector<Point2f> s0, Mat ds0, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp){
    // For each base point, push its displaced position (s0 + ds0) through the
    // affine warp of every triangle incident on that point (Tp lists them),
    // then take the per-coordinate median of the warped candidates — robust
    // against outlier triangles. Returns the new shape as an (npts x 2) Mat.
    // Fix: a botched merge had spliced warpUpdate() fragments into this
    // function body; the original implementation is restored here.
    std::vector<Point2f> new_shape;
    std::vector<Point2f> ds = ds0.reshape(2);

    float mx,my;
    Mat A;
    std::vector<Point2f> target(3),source(3);
    std::vector<double> p(3);
    p[2] = 1; // homogeneous coordinate
    for(size_t i=0;i<s0.size();i++){
        p[0] = s0[i].x + ds[i].x;
        p[1] = s0[i].y + ds[i].y;

        std::vector<Point2f> v;
        std::vector<float>vx, vy;
        for(size_t j=0;j<Tp[i].size();j++){
            int idx = Tp[i][j];
            target[0] = s0[triangles[idx][0]];
            target[1] = s0[triangles[idx][1]];
            target[2] = s0[triangles[idx][2]];

            source[0] = curr_shape[triangles[idx][0]];
            source[1] = curr_shape[triangles[idx][1]];
            source[2] = curr_shape[triangles[idx][2]];

            A = getAffineTransform(target,source);

            Mat(A*Mat(p)).reshape(2).copyTo(v);
            vx.push_back(v[0].x);
            vy.push_back(v[0].y);
        }// j

        /*find the median of the warped candidates*/
        size_t n = vx.size()/2;
        nth_element(vx.begin(), vx.begin()+n, vx.end());
        mx = vx[n];
        nth_element(vy.begin(), vy.begin()+n, vy.end());
        my = vy[n];

        new_shape.push_back(Point2f(mx,my));
    } // s0.size()

    return Mat(new_shape).reshape(1).clone();
}
Mat FacemarkAAMImpl::computeWarpParts(std::vector<Point2f> curr_shape,std::vector<Point2f> s0, Mat ds0, std::vector<Vec3i> triangles,std::vector<std::vector<int> > Tp){
    // For each base point, push its displaced position (s0 + ds0) through the
    // affine warp of every triangle incident on that point (Tp lists them),
    // then take the per-coordinate median of the warped candidates — robust
    // against outlier triangles. Returns the new shape as an (npts x 2) Mat.
    // Fix: removed a duplicated trailing `return ...; }` left at namespace
    // scope by a botched merge.
    std::vector<Point2f> new_shape;
    std::vector<Point2f> ds = ds0.reshape(2);

    float mx,my;
    Mat A;
    std::vector<Point2f> target(3),source(3);
    std::vector<double> p(3);
    p[2] = 1; // homogeneous coordinate
    for(size_t i=0;i<s0.size();i++){
        p[0] = s0[i].x + ds[i].x;
        p[1] = s0[i].y + ds[i].y;

        std::vector<Point2f> v;
        std::vector<float>vx, vy;
        for(size_t j=0;j<Tp[i].size();j++){
            int idx = Tp[i][j];
            target[0] = s0[triangles[idx][0]];
            target[1] = s0[triangles[idx][1]];
            target[2] = s0[triangles[idx][2]];

            source[0] = curr_shape[triangles[idx][0]];
            source[1] = curr_shape[triangles[idx][1]];
            source[2] = curr_shape[triangles[idx][2]];

            A = getAffineTransform(target,source);

            Mat(A*Mat(p)).reshape(2).copyTo(v);
            vx.push_back(v[0].x);
            vy.push_back(v[0].y);
        }// j

        /*find the median of the warped candidates*/
        size_t n = vx.size()/2;
        nth_element(vx.begin(), vx.begin()+n, vx.end());
        mx = vx[n];
        nth_element(vy.begin(), vy.begin()+n, vy.end());
        my = vy[n];

        new_shape.push_back(Point2f(mx,my));
    } // s0.size()

    return Mat(new_shape).reshape(1).clone();
}
void FacemarkAAMImpl::gradient(const Mat M, Mat & gx, Mat & gy){
gx = Mat::zeros(M.size(),CV_32FC1);
gy = Mat::zeros(M.size(),CV_32FC1);
/*gx*/
for(int i=0;i<M.rows;i++){
for(int j=0;j<M.cols;j++){
if(j>0 && j<M.cols-1){
gx.at<float>(i,j) = ((float)0.5)*(M.at<float>(i,j+1)-M.at<float>(i,j-1));
}else if (j==0){
gx.at<float>(i,j) = M.at<float>(i,j+1)-M.at<float>(i,j);
}else{
gx.at<float>(i,j) = M.at<float>(i,j)-M.at<float>(i,j-1);
}
void FacemarkAAMImpl::gradient(const Mat M, Mat & gx, Mat & gy){
gx = Mat::zeros(M.size(),CV_32FC1);
gy = Mat::zeros(M.size(),CV_32FC1);
/*gx*/
for(int i=0;i<M.rows;i++){
for(int j=0;j<M.cols;j++){
if(j>0 && j<M.cols-1){
gx.at<float>(i,j) = ((float)0.5)*(M.at<float>(i,j+1)-M.at<float>(i,j-1));
}else if (j==0){
gx.at<float>(i,j) = M.at<float>(i,j+1)-M.at<float>(i,j);
}else{
gx.at<float>(i,j) = M.at<float>(i,j)-M.at<float>(i,j-1);
}
}
/*gy*/
for(int i=0;i<M.rows;i++){
for(int j=0;j<M.cols;j++){
if(i>0 && i<M.rows-1){
gy.at<float>(i,j) = ((float)0.5)*(M.at<float>(i+1,j)-M.at<float>(i-1,j));
}else if (i==0){
gy.at<float>(i,j) = M.at<float>(i+1,j)-M.at<float>(i,j);
}else{
gy.at<float>(i,j) = M.at<float>(i,j)-M.at<float>(i-1,j);
}
}
}
/*gy*/
for(int i=0;i<M.rows;i++){
for(int j=0;j<M.cols;j++){
if(i>0 && i<M.rows-1){
gy.at<float>(i,j) = ((float)0.5)*(M.at<float>(i+1,j)-M.at<float>(i-1,j));
}else if (i==0){
gy.at<float>(i,j) = M.at<float>(i+1,j)-M.at<float>(i,j);
}else{
gy.at<float>(i,j) = M.at<float>(i,j)-M.at<float>(i-1,j);
}
}
}
}
void FacemarkAAMImpl::createWarpJacobian(Mat S, Mat Q, std::vector<Vec3i> triangles, Model::Texture & T, Mat & Wx_dp, Mat & Wy_dp, std::vector<std::vector<int> > & Tp){
    // Precompute the warp Jacobian of the piece-wise affine warp w.r.t. the
    // shape/similarity parameters:
    //   Wx_dp, Wy_dp : per-pixel derivatives of the warp's x and y outputs.
    //   Tp           : for each landmark, the list of triangles touching it.
    // Fix: a botched merge had interleaved two copies of this function;
    // the single correct copy is restored here.
    std::vector<Point2f> base_shape = T.base_shape;
    Rect resolution = T.resolution;

    std::vector<std::vector<int> >triangles_on_a_point;

    int npts = (int)base_shape.size();

    Mat dW_dxdyt ;
    /*get triangles for each point*/
    triangles_on_a_point.resize(npts);
    for(int i=0;i<(int)triangles.size();i++){
        triangles_on_a_point[triangles[i][0]].push_back(i);
        triangles_on_a_point[triangles[i][1]].push_back(i);
        triangles_on_a_point[triangles[i][2]].push_back(i);
    }
    Tp = triangles_on_a_point;

    /*calculate dW_dxdy*/
    float v0x,v0y,v1x,v1y,v2x,v2y, denominator;
    for(int k=0;k<npts;k++){
        Mat acc = Mat::zeros(resolution.height, resolution.width, CV_32F);

        /*for each triangle on k-th point*/
        for(size_t i=0;i<triangles_on_a_point[k].size();i++){
            int tId = triangles_on_a_point[k][i];

            // reorder so the k-th point is always the triangle's first vertex
            Vec3i v;
            if(triangles[tId][0]==k ){
                v=Vec3i(triangles[tId][0],triangles[tId][1],triangles[tId][2]);
            }else if(triangles[tId][1]==k){
                v=Vec3i(triangles[tId][1],triangles[tId][0],triangles[tId][2]);
            }else{
                v=Vec3i(triangles[tId][2],triangles[tId][0],triangles[tId][1]);
            }

            v0x = base_shape[v[0]].x;
            v0y = base_shape[v[0]].y;
            v1x = base_shape[v[1]].x;
            v1y = base_shape[v[1]].y;
            v2x = base_shape[v[2]].x;
            v2y = base_shape[v[2]].y;

            denominator = (v1x-v0x)*(v2y-v0y)-(v1y-v0y)*(v2x-v0x);

            Mat pixels = Mat(T.textureIdx[tId]).reshape(1); // same, just different order
            Mat p;

            pixels.convertTo(p,CV_32F, 1.0,1.0); //matlab use offset
            Mat x = p.col(0);
            Mat y = p.col(1);

            // barycentric weight of the k-th vertex for every pixel
            Mat alpha = (x-v0x)*(v2y-v0y)-(y-v0y)*(v2x-v0x);
            Mat beta = (v1x-v0x)*(y-v0y)-(v1y-v0y)*(x-v0x);

            Mat res = 1.0 - alpha/denominator - beta/denominator; // same just different order

            /*remap to image form*/
            Mat dx = Mat::zeros(resolution.height, resolution.width, CV_32F);
            for(int j=0;j<res.rows;j++){
                dx.at<float>((int)(y.at<float>(j)-1.0), (int)(x.at<float>(j)-1.0)) = res.at<float>(j); // matlab use offset
            };

            acc = acc+dx;
        }

        Mat vectorized = Mat(acc.t()).reshape(0,1);
        dW_dxdyt.push_back(vectorized.clone());
    }// k

    Mat dx_dp;
    hconcat(Q, S, dx_dp);

    Mat dW_dxdy = dW_dxdyt.t();
    Wx_dp = dW_dxdy* Mat(dx_dp,Range(0,npts));
    Wy_dp = dW_dxdy* Mat(dx_dp,Range(npts,2*npts));
} //createWarpJacobian
} /* namespace face */
} /* namespace cv */
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -52,5 +52,7 @@
#include <set>
#include <limits>
#include <iostream>
#endif
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
This file was part of GSoC Project: Facemark API for OpenCV
/*
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -50,24 +25,19 @@ using namespace std;
using namespace cv;
using namespace cv::face;
CascadeClassifier face_detector;
static bool customDetector( InputArray image, OutputArray ROIs, void * config = 0 ){
static bool customDetector( InputArray image, OutputArray ROIs, CascadeClassifier *face_detector){
Mat gray;
std::vector<Rect> & faces = *(std::vector<Rect>*) ROIs.getObj();
faces.clear();
if(config!=0){
//do nothing
}
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
cvtColor(image.getMat(),gray, COLOR_BGR2GRAY);
}else{
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
face_detector.detectMultiScale( gray, faces, 1.4, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
face_detector->detectMultiScale( gray, faces, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30) );
return true;
}
......@@ -82,11 +52,11 @@ TEST(CV_Face_FacemarkAAM, can_create_default) {
TEST(CV_Face_FacemarkAAM, can_set_custom_detector) {
string cascade_filename =
cvtest::findDataFile("cascadeandhog/cascades/lbpcascade_frontalface.xml", true);
CascadeClassifier face_detector;
EXPECT_TRUE(face_detector.load(cascade_filename));
Ptr<Facemark> facemark = FacemarkAAM::create();
EXPECT_TRUE(facemark->setFaceDetector(customDetector));
EXPECT_TRUE(facemark->setFaceDetector((cv::face::FN_FaceDetector)customDetector, &face_detector));
}
TEST(CV_Face_FacemarkAAM, test_workflow) {
......@@ -106,6 +76,9 @@ TEST(CV_Face_FacemarkAAM, test_workflow) {
string cascade_filename =
cvtest::findDataFile("cascadeandhog/cascades/lbpcascade_frontalface.xml", true);
CascadeClassifier face_detector;
EXPECT_TRUE(face_detector.load(cascade_filename));
FacemarkAAM::Params params;
params.n = 1;
params.m = 1;
......@@ -115,7 +88,8 @@ TEST(CV_Face_FacemarkAAM, test_workflow) {
Mat image;
std::vector<Point2f> landmarks;
for(size_t i=0;i<images_train.size();i++){
for(size_t i=0;i<images_train.size();i++)
{
image = imread(images_train[i].c_str());
EXPECT_TRUE(loadFacePoints(points_train[i].c_str(),landmarks));
EXPECT_TRUE(landmarks.size()>0);
......@@ -125,7 +99,7 @@ TEST(CV_Face_FacemarkAAM, test_workflow) {
EXPECT_NO_THROW(facemark->training());
/*------------ Fitting Part ---------------*/
facemark->setFaceDetector(customDetector);
EXPECT_TRUE(facemark->setFaceDetector((cv::face::FN_FaceDetector)customDetector, &face_detector));
string image_filename = cvtest::findDataFile("face/david1.jpg", true);
image = imread(image_filename.c_str());
EXPECT_TRUE(!image.empty());
......
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.
License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)
Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
This file was part of GSoC Project: Facemark API for OpenCV
This file contains results of GSoC Project: Facemark API for OpenCV
Final report: https://gist.github.com/kurnianggoro/74de9121e122ad0bd825176751d47ecc
Student: Laksono Kurnianggoro
Mentor: Delia Passalacqua
......@@ -61,13 +36,13 @@ static bool myCustomDetector( InputArray image, OutputArray ROIs, void * config
}
if(image.channels()>1){
cvtColor(image.getMat(),gray,CV_BGR2GRAY);
cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
}else{
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
cascade_detector.detectMultiScale( gray, faces, 1.4, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
cascade_detector.detectMultiScale( gray, faces, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30) );
return true;
}
......@@ -141,8 +116,3 @@ TEST(CV_Face_FacemarkLBF, test_workflow) {
EXPECT_TRUE(facemark->fit(image, rects, facial_points));
EXPECT_TRUE(facial_points[0].size()>0);
}
// Smoke test: a freshly created (untrained) FacemarkLBF instance should
// report success from getData(). NOTE(review): the exact payload returned
// through getData() is not visible here — this only pins that the call
// succeeds on a default-constructed model; confirm contract against the
// Facemark API docs.
TEST(CV_Face_FacemarkLBF, get_data) {
Ptr<Facemark> facemark = FacemarkLBF::create();
EXPECT_TRUE(facemark->getData());
}
......@@ -69,31 +69,25 @@ struct Conf {
Conf(cv::String s, double d){
model_path = s;
scaleFactor = d;
face_detector.load(model_path);
};
CascadeClassifier face_detector;
};
bool myDetector( InputArray image, OutputArray roi, void * config ){
bool myDetector(InputArray image, OutputArray faces, Conf *conf){
Mat gray;
std::vector<Rect> & faces = *(std::vector<Rect>*) roi.getObj();
faces.clear();
if(config!=0){
Conf* conf = (Conf*)config;
if(image.channels()>1){
cvtColor(image,gray,CV_BGR2GRAY);
}else{
gray = image.getMat().clone();
}
equalizeHist( gray, gray );
if (image.channels() > 1)
cvtColor(image, gray, COLOR_BGR2GRAY);
else
gray = image.getMat().clone();
CascadeClassifier face_cascade(conf->model_path);
face_cascade.detectMultiScale( gray, faces, conf->scaleFactor, 2, CV_HAAR_SCALE_IMAGE, Size(30, 30) );
return true;
}else{
return false;
}
equalizeHist(gray, gray);
std::vector<Rect> faces_;
conf->face_cascade.detectMultiScale(gray, faces_, conf->scaleFactor, 2, CASCADE_SCALE_IMAGE, Size(30, 30) );
Mat(faces_).copyTo(faces);
return true;
}
@endcode
......@@ -101,8 +95,8 @@ bool myDetector( InputArray image, OutputArray roi, void * config ){
The following snippet demonstrates how to set the custom detector on the facemark object and use it to detect faces. Keep in mind that some facemark objects might use the face detector during the training process.
@code
Conf* config = new Conf("../data/lbpcascade_frontalface.xml",1.4);
facemark->setFaceDetector(myDetector);
Conf config("../data/lbpcascade_frontalface.xml", 1.4);
facemark->setFaceDetector(myDetector, &config); // we must guarantee proper lifetime of "config" object
@endcode
Here is the snippet for detecting face using the user defined face detector function.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.