Commit 10b48672 authored by Alexander Alekhin's avatar Alexander Alekhin

fix legacy constants

parent ec4d5c85
......@@ -35,7 +35,7 @@ SobelFilterWidget::SobelFilterWidget(QWidget *parent)
ksize_->addItem("3");
ksize_->addItem("5");
ksize_->addItem("7");
ksize_->addItem("CV_SCHARR(-1)");
ksize_->addItem("FILTER_SCHARR(-1)");
ksize_->setCurrentIndex(1);
borderType_->addItem("BORDER_DEFAULT");
......@@ -133,7 +133,7 @@ void SobelFilterWidget::applyFilter(InputArray in, OutputArray out) const
ksize = 7;
break;
case 4:
ksize = CV_SCHARR;
ksize = FILTER_SCHARR;
break;
}
......@@ -258,15 +258,15 @@ std::pair<bool, QString> SobelFilterWidget::checkInput(InputArray in) const
ksize = 7;
break;
case 4:
ksize = CV_SCHARR;
ksize = FILTER_SCHARR;
break;
}
if (ksize == CV_SCHARR)
if (ksize == FILTER_SCHARR)
{
if (dx + dy != 1)
{
return { false, "ksize=CV_SCHARR but dx+dy != 1" };
return { false, "ksize=FILTER_SCHARR but dx+dy != 1" };
}
}
else
......
......@@ -131,7 +131,7 @@ If you've set the threshold to 0.0 as we did above, then:
@code
//
Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat img = imread("person1/3.jpg", IMREAD_GRAYSCALE);
// Get a prediction from the model. Note: We've set a threshold of 0.0 above,
// since the distance is almost always larger than 0.0, you'll get -1 as
// label, which indicates, this face is unknown
......@@ -176,13 +176,13 @@ public:
vector<Mat> images;
vector<int> labels;
// images for first person
images.push_back(imread("person0/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);
images.push_back(imread("person0/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);
images.push_back(imread("person0/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(0);
images.push_back(imread("person0/0.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
images.push_back(imread("person0/1.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
images.push_back(imread("person0/2.jpg", IMREAD_GRAYSCALE)); labels.push_back(0);
// images for second person
images.push_back(imread("person1/0.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
images.push_back(imread("person1/1.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
images.push_back(imread("person1/2.jpg", CV_LOAD_IMAGE_GRAYSCALE)); labels.push_back(1);
images.push_back(imread("person1/0.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);
images.push_back(imread("person1/1.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);
images.push_back(imread("person1/2.jpg", IMREAD_GRAYSCALE)); labels.push_back(1);
@endcode
Now that you have read some images, we can create a new FaceRecognizer. In this example I'll create
......@@ -275,7 +275,7 @@ public:
// Do your initialization here (create the cv::FaceRecognizer model) ...
// ...
// Read in a sample image:
Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat img = imread("person1/3.jpg", IMREAD_GRAYSCALE);
// And get a prediction from the cv::FaceRecognizer:
int predicted = model->predict(img);
@endcode
......@@ -286,7 +286,7 @@ public:
using namespace cv;
// Do your initialization here (create the cv::FaceRecognizer model) ...
// ...
Mat img = imread("person1/3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat img = imread("person1/3.jpg", IMREAD_GRAYSCALE);
// Some variables for the predicted label and associated confidence (e.g. distance):
int predicted_label = -1;
double predicted_confidence = 0.0;
......
......@@ -63,7 +63,7 @@ static vector<Mat> sample_patches(
vector<Mat> patches;
size_t patch_count = 0;
for (size_t i = 0; i < filenames.size(); ++i) {
Mat img = imread(filenames[i], CV_LOAD_IMAGE_GRAYSCALE);
Mat img = imread(filenames[i], IMREAD_GRAYSCALE);
for (int row = 0; row + n_rows < img.rows; row += n_rows) {
for (int col = 0; col + n_cols < img.cols; col += n_cols) {
patches.push_back(img(Rect(col, row, n_cols, n_rows)).clone());
......@@ -84,7 +84,7 @@ static vector<Mat> read_imgs(const string& path)
glob(path, filenames);
vector<Mat> imgs;
for (size_t i = 0; i < filenames.size(); ++i) {
imgs.push_back(imread(filenames[i], CV_LOAD_IMAGE_GRAYSCALE));
imgs.push_back(imread(filenames[i], IMREAD_GRAYSCALE));
}
return imgs;
}
......@@ -165,7 +165,7 @@ void WBDetectorImpl::train(
for (; img_i < neg_filenames.size(); ++img_i) {
cerr << "win " << bootstrap_count << "/" << stage_neg
<< " img " << (img_i + 1) << "/" << neg_filenames.size() << "\r";
Mat img = imread(neg_filenames[img_i], CV_LOAD_IMAGE_GRAYSCALE);
Mat img = imread(neg_filenames[img_i], IMREAD_GRAYSCALE);
vector<Rect> bboxes;
Mat1f confidences;
boost_.detect(eval, img, scales, bboxes, confidences);
......
......@@ -27,7 +27,7 @@ int main(int argc, char **argv)
assert(argc == 6);
vector<Rect> bboxes;
vector<double> confidences;
Mat img = imread(argv[3], CV_LOAD_IMAGE_GRAYSCALE);
Mat img = imread(argv[3], IMREAD_GRAYSCALE);
FileStorage fs(argv[2], FileStorage::READ);
detector->read(fs.getFirstTopLevelNode());
detector->detect(img, bboxes, confidences);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment