Commit d511587c authored by Alexander Alekhin

Merge pull request #1911 from alalek:legacy_constants

parents b686f430 10b48672
@@ -35,7 +35,7 @@ SobelFilterWidget::SobelFilterWidget(QWidget *parent)
     ksize_->addItem("3");
     ksize_->addItem("5");
     ksize_->addItem("7");
-    ksize_->addItem("CV_SCHARR(-1)");
+    ksize_->addItem("FILTER_SCHARR(-1)");
     ksize_->setCurrentIndex(1);
     borderType_->addItem("BORDER_DEFAULT");
@@ -133,7 +133,7 @@ void SobelFilterWidget::applyFilter(InputArray in, OutputArray out) const
         ksize = 7;
         break;
     case 4:
-        ksize = CV_SCHARR;
+        ksize = FILTER_SCHARR;
         break;
     }
@@ -258,15 +258,15 @@ std::pair<bool, QString> SobelFilterWidget::checkInput(InputArray in) const
         ksize = 7;
         break;
     case 4:
-        ksize = CV_SCHARR;
+        ksize = FILTER_SCHARR;
         break;
     }
-    if (ksize == CV_SCHARR)
+    if (ksize == FILTER_SCHARR)
     {
         if (dx + dy != 1)
         {
-            return { false, "ksize=CV_SCHARR but dx+dy != 1" };
+            return { false, "ksize=FILTER_SCHARR but dx+dy != 1" };
         }
     }
     else
......
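For context: cv::FILTER_SCHARR is the C++ replacement for the legacy CV_SCHARR constant. Passed as ksize to cv::Sobel it selects the 3x3 Scharr kernel, which is only defined for a pure first derivative (dx + dy == 1), the same condition checkInput() enforces above. A minimal sketch under those assumptions (the input file name is illustrative, not part of this patch):

@code
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;

int main()
{
    // Hypothetical input image, loaded with the new-style imread flag
    Mat src = imread("input.png", IMREAD_GRAYSCALE);
    if (src.empty())
        return 1;
    Mat grad_x, grad_y;
    // ksize = FILTER_SCHARR (value -1) selects the 3x3 Scharr kernel.
    // It is only valid for a pure first derivative, i.e. dx + dy == 1.
    Sobel(src, grad_x, CV_16S, 1, 0, FILTER_SCHARR);
    Sobel(src, grad_y, CV_16S, 0, 1, FILTER_SCHARR);
    return 0;
}
@endcode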
@@ -131,7 +131,7 @@ If you've set the threshold to 0.0 as we did above, then:
 @code
 //
-Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
+Mat img = imread("person1/3.jpg", IMREAD_GRAYSCALE);
 // Get a prediction from the model. Note: We've set a threshold of 0.0 above,
 // since the distance is almost always larger than 0.0, you'll get -1 as
 // label, which indicates, this face is unknown
@@ -176,13 +176,13 @@ public:
 vector<Mat> images;
 vector<int> labels;
 // images for first person
-images.push_back(imread("person0/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);
-images.push_back(imread("person0/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);
-images.push_back(imread("person0/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);
+images.push_back(imread("person0/0.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
+images.push_back(imread("person0/1.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
+images.push_back(imread("person0/2.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
 // images for second person
-images.push_back(imread("person1/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
-images.push_back(imread("person1/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
-images.push_back(imread("person1/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
+images.push_back(imread("person1/0.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);
+images.push_back(imread("person1/1.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);
+images.push_back(imread("person1/2.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);
 @endcode
 Now that you have read some images, we can create a new FaceRecognizer. In this example I'll create
@@ -275,7 +275,7 @@ public:
 // Do your initialization here (create the cv::FaceRecognizer model) ...
 // ...
 // Read in a sample image:
-Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
+Mat img = imread("person1/3.jpg", IMREAD_GRAYSCALE);
 // And get a prediction from the cv::FaceRecognizer:
 int predicted = model->predict(img);
 @endcode
@@ -286,7 +286,7 @@ public:
 using namespace cv;
 // Do your initialization here (create the cv::FaceRecognizer model) ...
 // ...
-Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
+Mat img = imread("person1/3.jpg", IMREAD_GRAYSCALE);
 // Some variables for the predicted label and associated confidence (e.g. distance):
 int predicted_label = -1;
 double predicted_confidence = 0.0;
......
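The tutorial snippets above only change the imread flag. For readers updating their own code, here is a self-contained sketch of the same train/predict workflow with the new constant; the LBPH recognizer and the file names are illustrative choices, not something mandated by this patch or the tutorial:

@code
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/face.hpp>
#include <vector>
using namespace cv;

int main()
{
    // Training data: grayscale images, one integer label per person
    std::vector<Mat> images;
    std::vector<int> labels;
    images.push_back(imread("person0/0.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
    images.push_back(imread("person1/0.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);

    // LBPH is used here only as an example; the other recognizers share the same interface
    Ptr<face::LBPHFaceRecognizer> model = face::LBPHFaceRecognizer::create();
    model->train(images, labels);

    // Predict the label of a query image and also retrieve the confidence (distance)
    Mat query = imread("person1/3.jpg", IMREAD_GRAYSCALE);
    int predicted_label = -1;
    double predicted_confidence = 0.0;
    model->predict(query, predicted_label, predicted_confidence);
    return 0;
}
@endcode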
@@ -63,7 +63,7 @@ static vector<Mat> sample_patches(
     vector<Mat> patches;
     size_t patch_count = 0;
     for (size_t i = 0; i < filenames.size(); ++i) {
-        Mat img = imread(filenames[i], CV_LOAD_IMAGE_GRAYSCALE);
+        Mat img = imread(filenames[i], IMREAD_GRAYSCALE);
         for (int row = 0; row + n_rows < img.rows; row += n_rows) {
             for (int col = 0; col + n_cols < img.cols; col += n_cols) {
                 patches.push_back(img(Rect(col, row, n_cols, n_rows)).clone());
@@ -84,7 +84,7 @@ static vector<Mat> read_imgs(const string& path)
     glob(path, filenames);
     vector<Mat> imgs;
     for (size_t i = 0; i < filenames.size(); ++i) {
-        imgs.push_back(imread(filenames[i], CV_LOAD_IMAGE_GRAYSCALE));
+        imgs.push_back(imread(filenames[i], IMREAD_GRAYSCALE));
     }
     return imgs;
 }
@@ -165,7 +165,7 @@ void WBDetectorImpl::train(
     for (; img_i < neg_filenames.size(); ++img_i) {
         cerr << "win " << bootstrap_count << "/" << stage_neg
             << " img " << (img_i + 1) << "/" << neg_filenames.size() << "\r";
-        Mat img = imread(neg_filenames[img_i], CV_LOAD_IMAGE_GRAYSCALE);
+        Mat img = imread(neg_filenames[img_i], IMREAD_GRAYSCALE);
         vector<Rect> bboxes;
         Mat1f confidences;
         boost_.detect(eval, img, scales, bboxes, confidences);
......
@@ -27,7 +27,7 @@ int main(int argc, char **argv)
     assert(argc == 6);
     vector<Rect> bboxes;
     vector<double> confidences;
-    Mat img = imread(argv[3], CV_LOAD_IMAGE_GRAYSCALE);
+    Mat img = imread(argv[3], IMREAD_GRAYSCALE);
     FileStorage fs(argv[2], FileStorage::READ);
     detector->read(fs.getFirstTopLevelNode());
     detector->detect(img, bboxes, confidences);
......
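All of the imread changes in this merge follow the same pattern: the legacy constant CV_LOAD_IMAGE_GRAYSCALE, which comes from the old C API headers, is replaced by the C++ enum value cv::IMREAD_GRAYSCALE declared in opencv2/imgcodecs.hpp. A minimal before/after sketch (the file name is illustrative):

@code
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
using namespace cv;

int main()
{
    // Legacy C API constant (needs the deprecated C headers):
    //   Mat img = imread("input.png", CV_LOAD_IMAGE_GRAYSCALE);
    // Current C++ API, declared in opencv2/imgcodecs.hpp:
    Mat img = imread("input.png", IMREAD_GRAYSCALE);
    return img.empty() ? 1 : 0;
}
@endcode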