Commit a113dc61 authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents 2f516f1a fae2d927
@@ -103,13 +103,12 @@ Ptr<DetectorParameters> DetectorParameters::create() {
  */
 static void _convertToGrey(InputArray _in, OutputArray _out) {
-    CV_Assert(_in.getMat().channels() == 1 || _in.getMat().channels() == 3);
-    _out.create(_in.getMat().size(), CV_8UC1);
-    if(_in.getMat().type() == CV_8UC3)
-        cvtColor(_in.getMat(), _out.getMat(), COLOR_BGR2GRAY);
+    CV_Assert(_in.type() == CV_8UC1 || _in.type() == CV_8UC3);
+    if(_in.type() == CV_8UC3)
+        cvtColor(_in, _out, COLOR_BGR2GRAY);
     else
-        _in.getMat().copyTo(_out);
+        _in.copyTo(_out);
 }
...
@@ -345,10 +345,10 @@ static int _selectAndRefineChessboardCorners(InputArray _allCorners, InputArray
     // corner refinement, first convert input image to grey
     Mat grey;
-    if(_image.getMat().type() == CV_8UC3)
-        cvtColor(_image.getMat(), grey, COLOR_BGR2GRAY);
+    if(_image.type() == CV_8UC3)
+        cvtColor(_image, grey, COLOR_BGR2GRAY);
     else
-        _image.getMat().copyTo(grey);
+        _image.copyTo(grey);
     const Ptr<DetectorParameters> params = DetectorParameters::create(); // use default params for corner refinement
@@ -754,10 +754,10 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners,
     // convert input image to grey
     Mat grey;
-    if(_image.getMat().type() == CV_8UC3)
-        cvtColor(_image.getMat(), grey, COLOR_BGR2GRAY);
+    if(_image.type() == CV_8UC3)
+        cvtColor(_image, grey, COLOR_BGR2GRAY);
     else
-        _image.getMat().copyTo(grey);
+        _image.copyTo(grey);
     // for each of the detected markers, try to find a diamond
     for(unsigned int i = 0; i < _markerIds.total(); i++) {
...
@@ -292,7 +292,7 @@ static void drawPlot(const cv::Mat curve, const std::string figureTitle, const i
      */
         cv::waitKey(10);
     }
-}catch(cv::Exception e)
+}catch(const cv::Exception& e)
 {
     std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
 }
...
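The recurring fix in the retina samples above and below is the same one-line change: cv::Exception used to be caught by value and is now caught by const reference, which avoids copying the exception object. A minimal, self-contained sketch of the pattern (the CV_Error call is only there to produce something to catch, it is not part of this commit):

    #include <iostream>
    #include <opencv2/core.hpp>

    int main()
    {
        try
        {
            // Any OpenCV call may throw cv::Exception; CV_Error just forces one here.
            CV_Error(cv::Error::StsError, "demonstration error");
        }
        catch (const cv::Exception& e)  // catch by const reference, not by value
        {
            std::cerr << "Error using Retina : " << e.what() << std::endl;
        }
        return 0;
    }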
@@ -353,7 +353,7 @@ static void loadNewFrame(const std::string filenamePrototype, const int currentF
         // jump to next frame
         ++currentFrameIndex;
     }
-}catch(cv::Exception e)
+}catch(const cv::Exception& e)
 {
     std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
 }
...
@@ -146,7 +146,7 @@ int main(int argc, char* argv[]) {
         cv::waitKey(5);
     }
-}catch(cv::Exception e)
+}catch(const cv::Exception& e)
 {
     std::cerr<<"Error using Retina : "<<e.what()<<std::endl;
 }
...
@@ -137,7 +137,7 @@ int main(int argc, char* argv[]) {
         cv::imshow("Retina Magno", retinaOutput_magno);
         cv::waitKey(10);
     }
-}catch(cv::Exception e)
+}catch(const cv::Exception& e)
 {
     std::cerr<<"Error using Retina or end of video sequence reached : "<<e.what()<<std::endl;
 }
...
@@ -109,7 +109,7 @@ int main(int argc, char* argv[])
         }
         printf("Average: %.4fms\n", (double)total_time / total_loop_count / cv::getTickFrequency() * 1000.0);
     }
-    catch(cv::Exception e)
+    catch(const cv::Exception& e)
     {
         std::cerr << "Error using Retina : " << e.what() << std::endl;
     }
...
@@ -368,7 +368,7 @@ void RetinaImpl::setup(String retinaParameterFile, const bool applyDefaultSetupO
        cv::FileStorage fs(retinaParameterFile, cv::FileStorage::READ);
        setup(fs, applyDefaultSetupOnFailure);
    }
-   catch(Exception &e)
+   catch(const Exception &e)
    {
        printf("Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>%s\n", e.what());
        if (applyDefaultSetupOnFailure)
@@ -422,7 +422,7 @@ void RetinaImpl::setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailur
        setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency,_retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k);
    }
-   catch(Exception &e)
+   catch(const Exception &e)
    {
        printf("RetinaImpl::setup: resetting retina with default parameters\n");
        if (applyDefaultSetupOnFailure)
...
@@ -127,7 +127,7 @@ void RetinaOCLImpl::setup(String retinaParameterFile, const bool applyDefaultSet
        cv::FileStorage fs(retinaParameterFile, cv::FileStorage::READ);
        setup(fs, applyDefaultSetupOnFailure);
    }
-   catch(Exception &e)
+   catch(const Exception &e)
    {
        std::cout << "RetinaOCLImpl::setup: wrong/inappropriate xml parameter file : error report :`n=>" << e.what() << std::endl;
        if (applyDefaultSetupOnFailure)
@@ -181,7 +181,7 @@ void RetinaOCLImpl::setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFai
        setupIPLMagnoChannel(_retinaParameters.IplMagno.normaliseOutput, _retinaParameters.IplMagno.parasolCells_beta, _retinaParameters.IplMagno.parasolCells_tau, _retinaParameters.IplMagno.parasolCells_k, _retinaParameters.IplMagno.amacrinCellsTemporalCutFrequency, _retinaParameters.IplMagno.V0CompressionParameter, _retinaParameters.IplMagno.localAdaptintegration_tau, _retinaParameters.IplMagno.localAdaptintegration_k);
    }
-   catch(Exception &e)
+   catch(const Exception &e)
    {
        std::cout << "RetinaOCLImpl::setup: resetting retina with default parameters" << std::endl;
        if (applyDefaultSetupOnFailure)
...
@@ -298,7 +298,7 @@ void TransientAreasSegmentationModuleImpl::setup(String segmentationParameterFil
        // opening retinaParameterFile in read mode
        cv::FileStorage fs(segmentationParameterFile, cv::FileStorage::READ);
        setup(fs, applyDefaultSetupOnFailure);
-   }catch(cv::Exception &e)
+   }catch(const cv::Exception &e)
    {
        printf("Retina::setup: wrong/unappropriate xml parameter file : error report :`n=>%s\n", e.what());
        if (applyDefaultSetupOnFailure)
@@ -338,7 +338,7 @@ void TransientAreasSegmentationModuleImpl::setup(cv::FileStorage &fs, const bool
        currFn["contextEnergy_spatialConstant"]>>_segmentationParameters.contextEnergy_spatialConstant;
        setup(_segmentationParameters);
-   }catch(cv::Exception &e)
+   }catch(const cv::Exception &e)
    {
        std::cout<<"Retina::setup: resetting retina with default parameters"<<std::endl;
        if (applyDefaultSetupOnFailure)
...
@@ -222,26 +222,24 @@ namespace cnn_3dobj
     {
       /* Convert the input image to the input image format of the network. */
       cv::Mat sample;
-      if (img.channels() == 3 && num_channels == 1)
-        cv::cvtColor(img, sample, CV_BGR2GRAY);
-      else if (img.channels() == 4 && num_channels == 1)
-        cv::cvtColor(img, sample, CV_BGRA2GRAY);
+      if (num_channels == 1)
+        cv::cvtColor(img, sample, COLOR_BGR2GRAY);
       else if (img.channels() == 4 && num_channels == 3)
-        cv::cvtColor(img, sample, CV_BGRA2BGR);
+        cv::cvtColor(img, sample, COLOR_BGRA2BGR);
       else if (img.channels() == 1 && num_channels == 3)
-        cv::cvtColor(img, sample, CV_GRAY2BGR);
+        cv::cvtColor(img, sample, COLOR_GRAY2BGR);
       else
         sample = img;
       cv::Mat sample_resized;
       if (sample.size() != input_geometry)
         cv::resize(sample, sample_resized, input_geometry);
       else
         sample_resized = sample;
       cv::Mat sample_float;
-      if (num_channels == 3)
-        sample_resized.convertTo(sample_float, CV_32FC3);
-      else
-        sample_resized.convertTo(sample_float, CV_32FC1);
+      sample_resized.convertTo(sample_float, CV_32F);
       cv::Mat sample_normalized;
       if (net_ready == 2)
         cv::subtract(sample_float, mean_, sample_normalized);
...
@@ -84,7 +84,7 @@ main(int argc, char** argv)
     // convert to grayscale
     cv::Mat imgGray;
-    cv::cvtColor(imgRead, imgGray, CV_BGR2GRAY);
+    cv::cvtColor(imgRead, imgGray, COLOR_BGR2GRAY);
     cvv::debugFilter(imgRead, imgGray, CVVISUAL_LOCATION, "to gray");
     // detect ORB features
...
@@ -69,8 +69,8 @@ void DiffFilterFunction::applyFilter(InputArray in, OutputArray out) const
     }
     cv::Mat originalHSV, filteredHSV;
-    cv::cvtColor(in.at(0).get(), originalHSV, CV_BGR2HSV);
-    cv::cvtColor(in.at(1).get(), filteredHSV, CV_BGR2HSV);
+    cv::cvtColor(in.at(0).get(), originalHSV, COLOR_BGR2HSV);
+    cv::cvtColor(in.at(1).get(), filteredHSV, COLOR_BGR2HSV);
     auto diffHSV = cv::abs(originalHSV - filteredHSV);
     std::array<cv::Mat, 3> splitVector;
...
@@ -93,7 +93,7 @@ int main(int argc, const char *argv[]) {
     // input filename is given.
     try {
         read_csv(fn_csv, images, labels, labelsInfo);
-    } catch (cv::Exception& e) {
+    } catch (const cv::Exception& e) {
         cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
         // nothing more we can do
         exit(1);
...
@@ -85,7 +85,7 @@ int main(int argc, const char *argv[]) {
     // input filename is given.
     try {
         read_csv(fn_csv, images, labels);
-    } catch (cv::Exception& e) {
+    } catch (const cv::Exception& e) {
         cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
         // nothing more we can do
         exit(1);
...
@@ -85,7 +85,7 @@ int main(int argc, const char *argv[]) {
     // input filename is given.
     try {
         read_csv(fn_csv, images, labels);
-    } catch (cv::Exception& e) {
+    } catch (const cv::Exception& e) {
         cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
         // nothing more we can do
         exit(1);
...
@@ -62,7 +62,7 @@ int main(int argc, const char *argv[]) {
     // input filename is given.
     try {
         read_csv(fn_csv, images, labels);
-    } catch (cv::Exception& e) {
+    } catch (const cv::Exception& e) {
         cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
         // nothing more we can do
         exit(1);
...
@@ -85,7 +85,7 @@ int main(int argc, const char *argv[]) {
     // input filename is given.
     try {
         read_csv(fn_csv, images, labels);
-    } catch (cv::Exception& e) {
+    } catch (const cv::Exception& e) {
         cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
         // nothing more we can do
         exit(1);
...
@@ -68,7 +68,7 @@ int main(int argc, const char *argv[]) {
     // Read in the data (fails if no valid input filename is given, but you'll get an error message):
     try {
         read_csv(fn_csv, images, labels);
-    } catch (cv::Exception& e) {
+    } catch (const cv::Exception& e) {
         cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
         // nothing more we can do
         exit(1);
...
@@ -46,7 +46,7 @@ resize(img,img,Size(460,460),0,0,INTER_LINEAR_EXACT);
 Mat gray;
 std::vector<Rect> faces;
 if(img.channels()>1){
-    cvtColor(img.getMat(),gray,CV_BGR2GRAY);
+    cvtColor(img.getMat(),gray,COLOR_BGR2GRAY);
 }
 else{
     gray = img.getMat().clone();
...
@@ -65,7 +65,7 @@ bool myDetector( InputArray image, OutputArray ROIs ){
     Mat gray;
     std::vector<Rect> faces;
     if(image.channels()>1){
-        cvtColor(image.getMat(),gray,CV_BGR2GRAY);
+        cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
     }
     else{
         gray = image.getMat().clone();
@@ -174,4 +174,4 @@ filename specified.As the training starts successfully you will see something li
 **The error rate on test images depends on the number of images used for training used as follows :**
-![](images/test.png)
\ No newline at end of file
+![](images/test.png)
@@ -26,7 +26,7 @@ bool myDetector( InputArray image, OutputArray ROIs ){
     Mat gray;
     std::vector<Rect> faces;
     if(image.channels()>1){
-        cvtColor(image.getMat(),gray,CV_BGR2GRAY);
+        cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
     }
     else{
         gray = image.getMat().clone();
@@ -107,4 +107,4 @@ Sample video:
 @htmlonly
 <iframe width="560" height="315" src="https://www.youtube.com/embed/ZtaV07T90D8" frameborder="0" allowfullscreen></iframe>
-@endhtmlonly
\ No newline at end of file
+@endhtmlonly
@@ -25,7 +25,7 @@ bool myDetector( InputArray image, OutputArray ROIs ){
     Mat gray;
     std::vector<Rect> faces;
     if(image.channels()>1){
-        cvtColor(image.getMat(),gray,CV_BGR2GRAY);
+        cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
     }
     else{
         gray = image.getMat().clone();
@@ -144,4 +144,4 @@ Second image
 Results after swapping
 ----------------------
-![](images/face_swapped.jpg)
\ No newline at end of file
+![](images/face_swapped.jpg)
@@ -27,18 +27,10 @@ public:
                   input.type() == CV_8U);
         cv::resize(input, resizeImg, cv::Size(8,8), 0, 0, INTER_LINEAR_EXACT);
-        if(input.type() == CV_8UC3)
-        {
-            cv::cvtColor(resizeImg, grayImg, CV_BGR2GRAY);
-        }
-        else if(input.type() == CV_8UC4)
-        {
-            cv::cvtColor(resizeImg, grayImg, CV_BGRA2GRAY);
-        }
+        if(input.channels() > 1)
+            cv::cvtColor(resizeImg, grayImg, COLOR_BGR2GRAY);
         else
-        {
             grayImg = resizeImg;
-        }
         uchar const imgMean = static_cast<uchar>(cvRound(cv::mean(grayImg)[0]));
         cv::compare(grayImg, imgMean, bitsImg, CMP_GT);
...
@@ -40,18 +40,10 @@ public:
                   input.type() == CV_8U);
         cv::resize(input, resizeImg_, cv::Size(imgWidth,imgHeight), 0, 0, INTER_LINEAR_EXACT);
-        if(input.type() == CV_8UC3)
-        {
-            cv::cvtColor(resizeImg_, grayImg_, CV_BGR2GRAY);
-        }
-        else if(input.type() == CV_8UC4)
-        {
-            cv::cvtColor(resizeImg_, grayImg_, CV_BGRA2GRAY);
-        }
+        if(input.channels() > 1)
+            cv::cvtColor(resizeImg_, grayImg_, COLOR_BGR2GRAY);
         else
-        {
             grayImg_ = resizeImg_;
-        }
         int pixColStep = blockWidth;
         int pixRowStep = blockHeigth;
...
@@ -28,25 +28,24 @@ public:
         }
         else if(input.type() == CV_8UC4)
         {
-            cv::cvtColor(input, colorImg_, CV_BGRA2BGR);
+            cv::cvtColor(input, colorImg_, COLOR_BGRA2BGR);
         }
         else
         {
-            cv::cvtColor(input, colorImg_, CV_GRAY2BGR);
+            cv::cvtColor(input, colorImg_, COLOR_GRAY2BGR);
         }
-        cv::resize(colorImg_, resizeImg_, cv::Size(512,512), 0, 0,
-                   INTER_CUBIC);
+        cv::resize(colorImg_, resizeImg_, cv::Size(512,512), 0, 0, INTER_CUBIC);
         cv::GaussianBlur(resizeImg_, blurImg_, cv::Size(3,3), 0, 0);
-        cv::cvtColor(blurImg_, colorSpace_, CV_BGR2HSV);
+        cv::cvtColor(blurImg_, colorSpace_, COLOR_BGR2HSV);
         cv::split(colorSpace_, channels_);
         outputArr.create(1, 42, CV_64F);
         cv::Mat hash = outputArr.getMat();
         hash.setTo(0);
         computeMoments(hash.ptr<double>(0));
-        cv::cvtColor(blurImg_, colorSpace_, CV_BGR2YCrCb);
+        cv::cvtColor(blurImg_, colorSpace_, COLOR_BGR2YCrCb);
         cv::split(colorSpace_, channels_);
         computeMoments(hash.ptr<double>(0) + 21);
     }
...
@@ -105,18 +105,11 @@ public:
                   input.type() == CV_8UC3 ||
                   input.type() == CV_8U);
-        if(input.type() == CV_8UC3)
-        {
-            cv::cvtColor(input, grayImg, CV_BGR2GRAY);
-        }
-        else if(input.type() == CV_8UC4)
-        {
-            cv::cvtColor(input, grayImg, CV_BGRA2GRAY);
-        }
+        if(input.channels() > 1)
+            cv::cvtColor(input, grayImg, COLOR_BGR2GRAY);
         else
-        {
             grayImg = input;
-        }
         //pHash use Canny-deritch filter to blur the image
         cv::GaussianBlur(grayImg, blurImg, cv::Size(7, 7), 0);
         cv::resize(blurImg, resizeImg, cv::Size(512, 512), 0, 0, INTER_CUBIC);
...
@@ -21,18 +21,10 @@ public:
                   input.type() == CV_8U);
         cv::resize(input, resizeImg, cv::Size(32,32), 0, 0, INTER_LINEAR_EXACT);
-        if(input.type() == CV_8UC3)
-        {
-            cv::cvtColor(resizeImg, grayImg, CV_BGR2GRAY);
-        }
-        else if(input.type() == CV_8UC4)
-        {
-            cv::cvtColor(resizeImg, grayImg, CV_BGRA2GRAY);
-        }
+        if(input.channels() > 1)
+            cv::cvtColor(resizeImg, grayImg, COLOR_BGR2GRAY);
         else
-        {
             grayImg = resizeImg;
-        }
         grayImg.convertTo(grayFImg, CV_32F);
         cv::dct(grayFImg, dctImg);
...
@@ -53,11 +53,11 @@ public:
         if(input.type() == CV_8UC3)
         {
-            cv::cvtColor(input, grayImg_, CV_BGR2GRAY);
+            cv::cvtColor(input, grayImg_, COLOR_BGR2GRAY);
         }
         else if(input.type() == CV_8UC4)
         {
-            cv::cvtColor(input, grayImg_, CV_BGRA2GRAY);
+            cv::cvtColor(input, grayImg_, COLOR_BGRA2GRAY);
         }
         else
         {
...
@@ -548,7 +548,7 @@ void BinaryDescriptor::computeImpl( const Mat& imageSrc, std::vector<KeyLine>& k
   if( imageSrc.channels() != 1 )
     cvtColor( imageSrc, image, COLOR_BGR2GRAY );
   else
-    image = imageSrc.clone();
+    image = imageSrc;
   /*check whether image's depth is different from 0 */
   if( image.depth() != 0 )
@@ -627,7 +627,6 @@ void BinaryDescriptor::computeImpl( const Mat& imageSrc, std::vector<KeyLine>& k
   {
     for ( size_t j = 0; j < sl[i].size(); )
     {
-      //if( (int) ( sl[i][j] ).octaveCount > params.numOfOctave_ )
       if( (int) ( sl[i][j] ).octaveCount > octaveIndex )
         ( sl[i] ).erase( ( sl[i] ).begin() + j );
       else j++;
...
@@ -85,9 +85,9 @@ addVariant("{{ fun.name }}", {{ fun.req|inputs|length }}, {{ fun.opt|inputs|leng
   // [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
   try {
     {{ compose(fun) }}
-  } catch(cv::Exception& e) {
+  } catch(const cv::Exception& e) {
     error(std::string("cv::exception caught: ").append(e.what()).c_str());
-  } catch(std::exception& e) {
+  } catch(const std::exception& e) {
     error(std::string("std::exception caught: ").append(e.what()).c_str());
   } catch(...) {
     error("Uncaught exception occurred in {{fun.name}}");
...
@@ -25,7 +25,7 @@ void mexFunction(int nlhs, mxArray* plhs[],
   // [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
   try {
     throw cv::Exception(-1, "OpenCV exception thrown", __func__, __FILE__, __LINE__);
-  } catch(cv::Exception& e) {
+  } catch(const cv::Exception& e) {
     mexErrMsgTxt(e.what());
   } catch(...) {
     mexErrMsgTxt("Incorrect exception caught!");
...
@@ -24,7 +24,7 @@ void mexFunction(int nlhs, mxArray* plhs[],
   // [out =] namespace.fun(src1, ..., srcn, dst1, ..., dstn, opt1, ..., optn);
   try {
     throw std::exception();
-  } catch(std::exception& e) {
+  } catch(const std::exception& e) {
     mexErrMsgTxt(e.what());
   } catch(...) {
     mexErrMsgTxt("Incorrect exception caught!");
...
@@ -149,7 +149,7 @@ static SceneNode& _getSceneNode(SceneManager* sceneMgr, const String& name)
         if(mo)
             return *mo->getParentSceneNode()->getParentSceneNode();
     }
-    catch (ItemIdentityException&)
+    catch (const ItemIdentityException&)
     {
         // ignore
     }
@@ -159,7 +159,7 @@ static SceneNode& _getSceneNode(SceneManager* sceneMgr, const String& name)
         if (!mo)
             mo = sceneMgr->getMovableObject(name, "Light");
     }
-    catch (ItemIdentityException&)
+    catch (const ItemIdentityException&)
     {
         // ignore
     }
...
@@ -70,9 +70,9 @@ static void showDifference(const Mat& image1, const Mat& image2, const char* tit
     image1.convertTo(img1, CV_32FC3);
     image2.convertTo(img2, CV_32FC3);
     if(img1.channels() != 1)
-        cvtColor(img1, img1, COLOR_RGB2GRAY);
+        cvtColor(img1, img1, COLOR_BGR2GRAY);
     if(img2.channels() != 1)
-        cvtColor(img2, img2, COLOR_RGB2GRAY);
+        cvtColor(img2, img2, COLOR_BGR2GRAY);
     Mat imgDiff;
     img1.copyTo(imgDiff);
@@ -270,11 +270,11 @@ static void calcHomographyFeature(const Mat& image1, const Mat& image2)
     Mat gray_image2;
     // Convert to Grayscale
     if(image1.channels() != 1)
-        cvtColor(image1, gray_image1, COLOR_RGB2GRAY);
+        cvtColor(image1, gray_image1, COLOR_BGR2GRAY);
     else
         image1.copyTo(gray_image1);
     if(image2.channels() != 1)
-        cvtColor(image2, gray_image2, COLOR_RGB2GRAY);
+        cvtColor(image2, gray_image2, COLOR_BGR2GRAY);
     else
         image2.copyTo(gray_image2);
@@ -332,7 +332,7 @@ static void calcHomographyFeature(const Mat& image1, const Mat& image2)
     }
     // Find the Homography Matrix
-    Mat H = findHomography( obj, scene, CV_RANSAC );
+    Mat H = findHomography( obj, scene, RANSAC );
     // Use the Homography Matrix to warp the images
     Mat result;
     Mat Hinv = H.inv();
@@ -390,7 +390,7 @@ static void comparePixelVsFeature(const Mat& img1_8b, const Mat& img2_8b)
 int main(void)
 {
     Mat img1;
-    img1 = imread("home.png", CV_LOAD_IMAGE_UNCHANGED);
+    img1 = imread("home.png", IMREAD_UNCHANGED);
     if(!img1.data) {
         cout << "Could not open or find file" << endl;
         return -1;
@@ -405,13 +405,13 @@ int main(void)
     testProjective(img1);
 #ifdef COMPARE_FEATURES
-    Mat imgcmp1 = imread("LR_05.png", CV_LOAD_IMAGE_UNCHANGED);
+    Mat imgcmp1 = imread("LR_05.png", IMREAD_UNCHANGED);
     if(!imgcmp1.data) {
         cout << "Could not open or find file" << endl;
         return -1;
     }
-    Mat imgcmp2 = imread("LR_06.png", CV_LOAD_IMAGE_UNCHANGED);
+    Mat imgcmp2 = imread("LR_06.png", IMREAD_UNCHANGED);
     if(!imgcmp2.data) {
         cout << "Could not open or find file" << endl;
         return -1;
...
@@ -102,7 +102,7 @@ void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
     //! [insideimage]
     patch=img(region).clone();
-    cvtColor(patch,patch, CV_BGR2GRAY);
+    cvtColor(patch,patch, COLOR_BGR2GRAY);
     //! [padding]
     // add some padding to compensate when the patch is outside image border
...
@@ -31,7 +31,7 @@ namespace cv{
     Mat hsv;
     img.convertTo(hsv,CV_32F,1.0/255.0);
-    cvtColor(hsv,hsv,CV_BGR2HSV);
+    cvtColor(hsv,hsv,COLOR_BGR2HSV);
     HShist=Mat_<double>(nh,ns,0.0);
     Vhist=Mat_<double>(1,nv,0.0);
...
@@ -126,7 +126,7 @@ bool TrackerBoostingImpl::initImpl( const Mat& image, const Rect2d& boundingBox
   Mat_<int> intImage;
   Mat_<double> intSqImage;
   Mat image_;
-  cvtColor( image, image_, CV_RGB2GRAY );
+  cvtColor( image, image_, COLOR_BGR2GRAY );
   integral( image_, intImage, intSqImage, CV_32S );
   TrackerSamplerCS::Params CSparameters;
   CSparameters.overlap = params.samplerOverlap;
@@ -208,7 +208,7 @@ bool TrackerBoostingImpl::updateImpl( const Mat& image, Rect2d& boundingBox )
   Mat_<int> intImage;
   Mat_<double> intSqImage;
   Mat image_;
-  cvtColor( image, image_, CV_RGB2GRAY );
+  cvtColor( image, image_, COLOR_BGR2GRAY );
   integral( image_, intImage, intSqImage, CV_32S );
   //get the last location [AAM] X(k-1)
   Ptr<TrackerTargetState> lastLocation = model->getLastTargetState();
...
@@ -201,7 +201,7 @@ std::vector<Mat> TrackerCSRTImpl::get_features(const Mat &patch, const Size2i &f
     }
     if(params.use_gray) {
         Mat gray_m;
-        cvtColor(patch, gray_m, CV_BGR2GRAY);
+        cvtColor(patch, gray_m, COLOR_BGR2GRAY);
         resize(gray_m, gray_m, feature_size, 0, 0, INTER_CUBIC);
         gray_m.convertTo(gray_m, CV_32FC1, 1.0/255.0, -0.5);
         features.push_back(gray_m);
@@ -465,15 +465,11 @@ Point2f TrackerCSRTImpl::estimate_new_position(const Mat &image)
 // *********************************************************************
 bool TrackerCSRTImpl::updateImpl(const Mat& image_, Rect2d& boundingBox)
 {
-    //treat gray image as color image
     Mat image;
-    if(image_.channels() == 1) {
-        std::vector<Mat> channels(3);
-        channels[0] = channels[1] = channels[2] = image_;
-        merge(channels, image);
-    } else {
+    if(image_.channels() == 1) //treat gray image as color image
+        cvtColor(image_, image, COLOR_GRAY2BGR);
+    else
         image = image_;
-    }
     object_center = estimate_new_position(image);
     if (object_center.x < 0 && object_center.y < 0)
@@ -512,15 +508,11 @@ bool TrackerCSRTImpl::updateImpl(const Mat& image_, Rect2d& boundingBox)
 // *********************************************************************
 bool TrackerCSRTImpl::initImpl(const Mat& image_, const Rect2d& boundingBox)
 {
-    //treat gray image as color image
     Mat image;
-    if(image_.channels() == 1) {
-        std::vector<Mat> channels(3);
-        channels[0] = channels[1] = channels[2] = image_;
-        merge(channels, image);
-    } else {
+    if(image_.channels() == 1) //treat gray image as color image
+        cvtColor(image_, image, COLOR_GRAY2BGR);
+    else
         image = image_;
-    }
     current_scale_factor = 1.0;
     image_size = image.size();
...
@@ -552,7 +552,7 @@ double get_min(const Mat &m)
 Mat bgr2hsv(const Mat &img)
 {
     Mat hsv_img;
-    cvtColor(img, hsv_img, CV_BGR2HSV);
+    cvtColor(img, hsv_img, COLOR_BGR2HSV);
     std::vector<Mat> hsv_img_channels;
     split(hsv_img, hsv_img_channels);
     hsv_img_channels.at(0).convertTo(hsv_img_channels.at(0), CV_8UC1, 255.0 / 180.0);
...
@@ -700,7 +700,7 @@ namespace cv{
             break;
         default: // GRAY
             if(img.channels()>1)
-                cvtColor(patch,feat, CV_BGR2GRAY);
+                cvtColor(patch,feat, COLOR_BGR2GRAY);
             else
                 feat=patch;
             //feat.convertTo(feat,CV_32F);
...
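The TrackerCSRT hunks above replace a hand-rolled channel replication (copy the gray plane three times and merge) with a single cvtColor call using COLOR_GRAY2BGR, which produces the same 3-channel result. A small standalone sketch, only to check that equivalence (not part of the commit):

    #include <iostream>
    #include <vector>
    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    int main()
    {
        cv::Mat gray(4, 4, CV_8UC1);
        cv::randu(gray, 0, 256);

        // Old approach: replicate the single channel manually.
        std::vector<cv::Mat> channels(3, gray);
        cv::Mat merged;
        cv::merge(channels, merged);

        // New approach: let cvtColor do the replication.
        cv::Mat converted;
        cv::cvtColor(gray, converted, cv::COLOR_GRAY2BGR);

        // Expected to print 0: both paths yield identical pixels.
        std::cout << cv::norm(merged, converted, cv::NORM_INF) << std::endl;
        return 0;
    }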
@@ -319,3 +319,12 @@ year={2016},
 publisher={Springer International Publishing},
 pages={617--632},
 }
+@inproceedings{BarronPoole2016,
+  author = {Jonathan T Barron and Ben Poole},
+  title={The Fast Bilateral Solver},
+  booktitle={European Conference on Computer Vision (ECCV)},
+  year={2016},
+  publisher={Springer International Publishing},
+  pages={617--632},
+}
@@ -51,25 +51,25 @@ namespace ximgproc {
 *
 * For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf
 *
-* @param _op Source 8-bit or 16bit image, 1-channel or 3-channel image.
-* @param _dst result CV_32FC image with same number of channel than _op.
-* @param alphaDerive double see paper
-* @param alphaMean double see paper
+* @param op Source 8-bit or 16bit image, 1-channel or 3-channel image.
+* @param dst result CV_32FC image with same number of channel than _op.
+* @param alpha double see paper
+* @param omega double see paper
 *
 */
-CV_EXPORTS void GradientDericheY(InputArray _op, OutputArray _dst, double alphaDerive,double alphaMean);
+CV_EXPORTS_W void GradientDericheY(InputArray op, OutputArray dst, double alpha,double omega);
 /**
 * @brief Applies X Deriche filter to an image.
 *
 * For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf
 *
-* @param _op Source 8-bit or 16bit image, 1-channel or 3-channel image.
-* @param _dst result CV_32FC image with same number of channel than _op.
-* @param alphaDerive double see paper
-* @param alphaMean double see paper
+* @param op Source 8-bit or 16bit image, 1-channel or 3-channel image.
+* @param dst result CV_32FC image with same number of channel than _op.
+* @param alpha double see paper
+* @param omega double see paper
 *
 */
-CV_EXPORTS void GradientDericheX(InputArray _op, OutputArray _dst, double alphaDerive,double alphaMean);
+CV_EXPORTS_W void GradientDericheX(InputArray op, OutputArray dst, double alpha,double omega);
 }
 }
...
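The two declarations above are now wrapped for the bindings (CV_EXPORTS_W) and take alpha/omega parameters. A minimal C++ sketch of how they might be called from user code; the input path and the alpha/omega values are placeholders, not taken from this commit (the new Python sample further below does the same thing through cv.ximgproc):

    #include <opencv2/core.hpp>
    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/ximgproc.hpp>

    int main()
    {
        // Illustrative input; any 8-bit image works.
        cv::Mat img = cv::imread("corridor_fld.jpg", cv::IMREAD_GRAYSCALE);
        if (img.empty())
            return -1;

        cv::Mat dx, dy, mag;
        cv::ximgproc::GradientDericheX(img, dx, 1.0 /*alpha*/, 0.1 /*omega*/);
        cv::ximgproc::GradientDericheY(img, dy, 1.0 /*alpha*/, 0.1 /*omega*/);

        // Gradient magnitude from the two CV_32F responses, rescaled to [0, 1].
        cv::magnitude(dx, dy, mag);
        cv::normalize(mag, mag, 0.0, 1.0, cv::NORM_MINMAX);
        return 0;
    }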
@@ -437,8 +437,7 @@ guide then use FastBilateralSolverFilter interface to avoid extra computations.
 @note Confidence images with CV_8U depth are expected to in [0, 255] and CV_32F in [0, 1] range.
 */
 CV_EXPORTS_W void fastBilateralSolverFilter(InputArray guide, InputArray src, InputArray confidence, OutputArray dst, double sigma_spatial = 8, double sigma_luma = 8, double sigma_chroma = 8, int num_iter = 25, double max_tol = 1e-5);
-//////////////////////////////////////////////////////////////////////////
 //////////////////////////////////////////////////////////////////////////
 /** @brief Interface for implementations of Fast Global Smoother filter.
...
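For the fastBilateralSolverFilter declaration above, a minimal usage sketch; the file name and the sigma values are placeholders. It follows the note about confidence ranges by passing a full-confidence CV_8U mask (255 everywhere):

    #include <opencv2/imgcodecs.hpp>
    #include <opencv2/ximgproc.hpp>

    int main()
    {
        // Filter an image against itself as the guide.
        cv::Mat guide = cv::imread("kodim23.png");
        if (guide.empty())
            return -1;
        cv::Mat src = guide.clone();

        // Full confidence everywhere; CV_8U confidence is expected in [0, 255].
        cv::Mat confidence(src.size(), CV_8UC1, cv::Scalar(255));

        cv::Mat dst;
        cv::ximgproc::fastBilateralSolverFilter(guide, src, confidence, dst,
                                                16.0 /*sigma_spatial*/,
                                                16.0 /*sigma_luma*/,
                                                16.0 /*sigma_chroma*/);
        return 0;
    }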
import sys
import numpy as np
import cv2 as cv

def AddSlider(sliderName, windowName, minSlider, maxSlider, valDefault, update=[]):
    if update is None:
        cv.createTrackbar(sliderName, windowName, valDefault, maxSlider - minSlider + 1)
    else:
        cv.createTrackbar(sliderName, windowName, valDefault, maxSlider - minSlider + 1, update)
    cv.setTrackbarMin(sliderName, windowName, minSlider)
    cv.setTrackbarMax(sliderName, windowName, maxSlider)
    cv.setTrackbarPos(sliderName, windowName, valDefault)

class Filtrage:
    def __init__(self):
        self.s = 0
        self.alpha = 100
        self.omega = 100
        self.updateFiltre = True
        self.img = []
        self.dximg = []
        self.dyimg = []
        self.module = []

    def DericheFilter(self):
        self.dximg = cv.ximgproc.GradientDericheX(self.img, self.alpha / 100., self.omega / 1000.)
        self.dyimg = cv.ximgproc.GradientDericheY(self.img, self.alpha / 100., self.omega / 1000.)
        dx2 = self.dximg * self.dximg
        dy2 = self.dyimg * self.dyimg
        self.module = np.sqrt(dx2 + dy2)
        cv.normalize(src=self.module, dst=self.module, norm_type=cv.NORM_MINMAX)

    def SlideBarDeriche(self):
        cv.destroyWindow(self.filename)
        cv.namedWindow(self.filename)
        AddSlider("alpha", self.filename, 1, 400, self.alpha, self.UpdateAlpha)
        AddSlider("omega", self.filename, 1, 1000, self.omega, self.UpdateOmega)

    def UpdateOmega(self, x):
        self.updateFiltre = True
        self.omega = x

    def UpdateAlpha(self, x):
        self.updateFiltre = True
        self.alpha = x

    def run(self, argv):
        # Load the source image
        self.filename = argv[0] if len(argv) > 0 else "../doc/pics/corridor_fld.jpg"
        self.img = cv.imread(self.filename, cv.IMREAD_GRAYSCALE)
        if self.img is None:
            print('cannot read file')
            return
        self.SlideBarDeriche()
        while True:
            cv.imshow(self.filename, self.img)
            if self.updateFiltre:
                self.DericheFilter()
                cv.imshow("module", self.module)
                self.updateFiltre = False
            code = cv.waitKey(10)
            if code == 27:
                break

if __name__ == '__main__':
    Filtrage().run(sys.argv[1:])
@@ -305,7 +305,6 @@ int main(int argc, char** argv)
         (void)fbs_luma;
         (void)fbs_chroma;
 #endif
-
     }
     else if(filter=="wls_no_conf")
     {
...
(The diff of one additional file is collapsed and not shown here.)
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"

namespace opencv_test { namespace {

TEST(ximgproc_DericheFilter, regression)
{
    Mat img = Mat::zeros(64, 64, CV_8UC3);
    Mat res = Mat::zeros(64, 64, CV_32FC3);
    img.at<Vec3b>(31, 31) = Vec3b(1, 2, 4);
    double a = 0.5;
    double w = 0.0005;
    Mat dst;
    ximgproc::GradientDericheX(img, dst, a, w);
    double c = pow(1 - exp(-a), 2.0) * exp(a);
    double k = pow(a*(1 - exp(-a)), 2.0) / (1 + 2 * a*exp(-a) - exp(-2 * a));
    for (int i = 0; i < img.rows; i++)
    {
        double n = -31 + i;
        for (int j = 0; j < img.cols; j++)
        {
            double m = -31 + j;
            double x = -c * exp(-a * fabs(m))*sin(w*m);
            x = x * (k*(a*sin(w*fabs(n)) + w * cos(w*fabs(n)))*exp(-a * fabs(n))) / (a*a + w * w);
            x = x / (w*w);
            float xx = static_cast<float>(x);
            res.at<Vec3f>(i, j) = Vec3f(xx, 2 * xx, 4 * xx);
        }
    }
    EXPECT_LE(cv::norm(res, dst, NORM_INF), 1e-5);

    Mat dst2;
    ximgproc::GradientDericheY(img, dst2, a, w);
    cv::transpose(dst2, dst2);
    EXPECT_LE(cv::norm(dst2, dst, NORM_INF), 1e-5);
}

}
} // namespace
@@ -82,7 +82,7 @@ TEST(FastBilateralSolverTest, SplatSurfaceAccuracy)
         // When filtering a constant image we should get the same image:
         double normL1 = cvtest::norm(src, res, NORM_L1)/src.total()/src.channels();
-        EXPECT_LE(normL1, 1.0);
+        EXPECT_LE(normL1, 1.0/64);
     }
 }
@@ -91,7 +91,8 @@ TEST(FastBilateralSolverTest, ReferenceAccuracy)
     string dir = getDataDir() + "cv/edgefilter";
     Mat src = imread(dir + "/kodim23.png");
-    Mat ref = imread(dir + "/fgs/kodim23_lambda=1000_sigma=10.png");
+    Mat ref = imread(dir + "/fbs/kodim23_spatial=16_luma=16_chroma=16.png");
+    Mat confidence(src.size(), CV_MAKE_TYPE(CV_8U, 1), 255);
     ASSERT_FALSE(src.empty());
@@ -103,7 +104,7 @@ TEST(FastBilateralSolverTest, ReferenceAccuracy)
     double totalMaxError = 1.0/64.0*src.total()*src.channels();
     EXPECT_LE(cvtest::norm(res, ref, NORM_L2), totalMaxError);
-    EXPECT_LE(cvtest::norm(res, ref, NORM_INF), 100);
+    EXPECT_LE(cvtest::norm(res, ref, NORM_INF), 1);
 }
 INSTANTIATE_TEST_CASE_P(FullSet, FastBilateralSolverTest,Combine(Values(szODD, szQVGA), SrcTypes::all(), GuideTypes::all()));
...
@@ -3,9 +3,6 @@
 #include "opencv2/imgproc.hpp"
 #include "opencv2/highgui.hpp"
-#include "opencv2/core/utility.hpp"
-#include "opencv2/imgproc/types_c.h"
 #include <ctime>
 #include <iostream>
@@ -41,22 +38,22 @@ int main( int argc, const char** argv )
     std::string maskFilename = parser.get<std::string>("m");
     std::string outFilename = parser.get<std::string>("o");
-    cv::Mat src = cv::imread(inFilename, -1);
+    cv::Mat src = cv::imread(inFilename, cv::IMREAD_UNCHANGED);
     if ( src.empty() )
     {
         printf( "Cannot read image file: %s\n", inFilename.c_str() );
         return -1;
     }
-    cv::cvtColor(src, src, CV_RGB2Lab);
-    cv::Mat mask = cv::imread(maskFilename, 0);
+    cv::cvtColor(src, src, cv::COLOR_BGR2Lab);
+    cv::Mat mask = cv::imread(maskFilename, cv::IMREAD_GRAYSCALE);
     if ( mask.empty() )
     {
         printf( "Cannot read image file: %s\n", maskFilename.c_str() );
         return -1;
     }
-    cv::threshold(mask, mask, 128, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
+    cv::threshold(mask, mask, 128, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
     cv::Mat res(src.size(), src.type());
@@ -65,7 +62,7 @@ int main( int argc, const char** argv )
     std::cout << "time = " << (clock() - time)
         / double(CLOCKS_PER_SEC) << std::endl;
-    cv::cvtColor(res, res, CV_Lab2RGB);
+    cv::cvtColor(res, res, cv::COLOR_Lab2BGR);
     if ( outFilename == "" )
     {
@@ -78,4 +75,4 @@
     cv::imwrite(outFilename, res);
     return 0;
-}
\ No newline at end of file
+}