Commit 901e61d1 authored by Alexander Alekhin

Merge pull request #1863 from sturkmen72:minor_changes

parents 6e3eea54 2074cc48
@@ -103,13 +103,12 @@ Ptr<DetectorParameters> DetectorParameters::create() {
  */
 static void _convertToGrey(InputArray _in, OutputArray _out) {
-CV_Assert(_in.getMat().channels() == 1 || _in.getMat().channels() == 3);
+CV_Assert(_in.type() == CV_8UC1 || _in.type() == CV_8UC3);
-_out.create(_in.getMat().size(), CV_8UC1);
-if(_in.getMat().type() == CV_8UC3)
-cvtColor(_in.getMat(), _out.getMat(), COLOR_BGR2GRAY);
+if(_in.type() == CV_8UC3)
+cvtColor(_in, _out, COLOR_BGR2GRAY);
 else
-_in.getMat().copyTo(_out);
+_in.copyTo(_out);
 }
......
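The hunk above also shows the broader pattern of this PR: cv::cvtColor and copyTo operate on InputArray/OutputArray directly, so the intermediate getMat() calls (and the explicit _out.create()) are unnecessary. A minimal standalone sketch of that pattern, not part of the patch (the helper name is illustrative):

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Hypothetical helper mirroring the pattern adopted above: the proxies are
// passed straight through, and cvtColor/copyTo allocate the output themselves.
static void toGrey(cv::InputArray in, cv::OutputArray out)
{
    CV_Assert(in.type() == CV_8UC1 || in.type() == CV_8UC3);
    if (in.type() == CV_8UC3)
        cv::cvtColor(in, out, cv::COLOR_BGR2GRAY);  // creates 'out' as CV_8UC1
    else
        in.copyTo(out);                             // pass-through for grey input
}

int main()
{
    cv::Mat bgr(480, 640, CV_8UC3, cv::Scalar(20, 40, 60)), grey;
    toGrey(bgr, grey);
    return grey.type() == CV_8UC1 ? 0 : 1;
}
```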
@@ -345,10 +345,10 @@ static int _selectAndRefineChessboardCorners(InputArray _allCorners, InputArray
 // corner refinement, first convert input image to grey
 Mat grey;
-if(_image.getMat().type() == CV_8UC3)
-cvtColor(_image.getMat(), grey, COLOR_BGR2GRAY);
+if(_image.type() == CV_8UC3)
+cvtColor(_image, grey, COLOR_BGR2GRAY);
 else
-_image.getMat().copyTo(grey);
+_image.copyTo(grey);
 const Ptr<DetectorParameters> params = DetectorParameters::create(); // use default params for corner refinement
@@ -754,10 +754,10 @@ void detectCharucoDiamond(InputArray _image, InputArrayOfArrays _markerCorners,
 // convert input image to grey
 Mat grey;
-if(_image.getMat().type() == CV_8UC3)
-cvtColor(_image.getMat(), grey, COLOR_BGR2GRAY);
+if(_image.type() == CV_8UC3)
+cvtColor(_image, grey, COLOR_BGR2GRAY);
 else
-_image.getMat().copyTo(grey);
+_image.copyTo(grey);
 // for each of the detected markers, try to find a diamond
 for(unsigned int i = 0; i < _markerIds.total(); i++) {
......
@@ -222,26 +222,24 @@ namespace cnn_3dobj
 {
 /* Convert the input image to the input image format of the network. */
 cv::Mat sample;
-if (img.channels() == 3 && num_channels == 1)
-cv::cvtColor(img, sample, CV_BGR2GRAY);
-else if (img.channels() == 4 && num_channels == 1)
-cv::cvtColor(img, sample, CV_BGRA2GRAY);
+if (num_channels == 1)
+cv::cvtColor(img, sample, COLOR_BGR2GRAY);
 else if (img.channels() == 4 && num_channels == 3)
-cv::cvtColor(img, sample, CV_BGRA2BGR);
+cv::cvtColor(img, sample, COLOR_BGRA2BGR);
 else if (img.channels() == 1 && num_channels == 3)
-cv::cvtColor(img, sample, CV_GRAY2BGR);
+cv::cvtColor(img, sample, COLOR_GRAY2BGR);
 else
 sample = img;
 cv::Mat sample_resized;
 if (sample.size() != input_geometry)
 cv::resize(sample, sample_resized, input_geometry);
 else
 sample_resized = sample;
 cv::Mat sample_float;
-if (num_channels == 3)
-sample_resized.convertTo(sample_float, CV_32FC3);
-else
-sample_resized.convertTo(sample_float, CV_32FC1);
+sample_resized.convertTo(sample_float, CV_32F);
 cv::Mat sample_normalized;
 if (net_ready == 2)
 cv::subtract(sample_float, mean_, sample_normalized);
......
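The cnn_3dobj hunk consolidates the channel-conversion branches and the float conversion. A hedged sketch of the same idea, not taken from the module (the helper name and its parameters are illustrative); convertTo with a bare depth such as CV_32F keeps the source channel count, which is why the separate CV_32FC1/CV_32FC3 branches can go:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Illustrative helper: adapt an 8-bit image to a requested channel count and
// convert it to float, preserving whatever channel count it ends up with.
cv::Mat prepareSample(const cv::Mat& img, int num_channels, cv::Size geometry)
{
    cv::Mat sample;
    if (num_channels == 1 && img.channels() > 1)
        cv::cvtColor(img, sample, img.channels() == 4 ? cv::COLOR_BGRA2GRAY
                                                      : cv::COLOR_BGR2GRAY);
    else if (num_channels == 3 && img.channels() == 4)
        cv::cvtColor(img, sample, cv::COLOR_BGRA2BGR);
    else if (num_channels == 3 && img.channels() == 1)
        cv::cvtColor(img, sample, cv::COLOR_GRAY2BGR);
    else
        sample = img;

    cv::Mat resized;
    cv::resize(sample, resized, geometry);

    cv::Mat asFloat;
    resized.convertTo(asFloat, CV_32F);  // CV_32FC1 or CV_32FC3, matching 'resized'
    return asFloat;
}
```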
@@ -84,7 +84,7 @@ main(int argc, char** argv)
 // convert to grayscale
 cv::Mat imgGray;
-cv::cvtColor(imgRead, imgGray, CV_BGR2GRAY);
+cv::cvtColor(imgRead, imgGray, COLOR_BGR2GRAY);
 cvv::debugFilter(imgRead, imgGray, CVVISUAL_LOCATION, "to gray");
 // detect ORB features
......
@@ -69,8 +69,8 @@ void DiffFilterFunction::applyFilter(InputArray in, OutputArray out) const
 }
 cv::Mat originalHSV, filteredHSV;
-cv::cvtColor(in.at(0).get(), originalHSV, CV_BGR2HSV);
-cv::cvtColor(in.at(1).get(), filteredHSV, CV_BGR2HSV);
+cv::cvtColor(in.at(0).get(), originalHSV, COLOR_BGR2HSV);
+cv::cvtColor(in.at(1).get(), filteredHSV, COLOR_BGR2HSV);
 auto diffHSV = cv::abs(originalHSV - filteredHSV);
 std::array<cv::Mat, 3> splitVector;
......
@@ -46,7 +46,7 @@ resize(img,img,Size(460,460),0,0,INTER_LINEAR_EXACT);
 Mat gray;
 std::vector<Rect> faces;
 if(img.channels()>1){
-cvtColor(img.getMat(),gray,CV_BGR2GRAY);
+cvtColor(img.getMat(),gray,COLOR_BGR2GRAY);
 }
 else{
 gray = img.getMat().clone();
......
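The surrounding face-landmark tutorial hunks only touch the cvtColor constant inside a user-supplied detector callback. For context, a hedged sketch of such a callback, assuming a CascadeClassifier loaded from an illustrative file name (not the tutorials' exact code):

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/objdetect.hpp>
#include <vector>

// Illustrative cascade; the model file name is an assumption, not from the diff.
static cv::CascadeClassifier face_cascade("haarcascade_frontalface_alt.xml");

bool myDetector(cv::InputArray image, cv::OutputArray ROIs)
{
    // Same grey-conversion guard the tutorials use, with the C++ constant.
    cv::Mat gray;
    if (image.channels() > 1)
        cv::cvtColor(image, gray, cv::COLOR_BGR2GRAY);
    else
        gray = image.getMat().clone();

    cv::equalizeHist(gray, gray);

    std::vector<cv::Rect> faces;
    face_cascade.detectMultiScale(gray, faces, 1.1, 3);
    cv::Mat(faces).copyTo(ROIs);  // hand the rectangles back through the proxy
    return true;
}
```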
@@ -65,7 +65,7 @@ bool myDetector( InputArray image, OutputArray ROIs ){
 Mat gray;
 std::vector<Rect> faces;
 if(image.channels()>1){
-cvtColor(image.getMat(),gray,CV_BGR2GRAY);
+cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
 }
 else{
 gray = image.getMat().clone();
@@ -174,4 +174,4 @@ filename specified.As the training starts successfully you will see something li
 **The error rate on test images depends on the number of images used for training used as follows :**
-![](images/test.png)
\ No newline at end of file
+![](images/test.png)
@@ -26,7 +26,7 @@ bool myDetector( InputArray image, OutputArray ROIs ){
 Mat gray;
 std::vector<Rect> faces;
 if(image.channels()>1){
-cvtColor(image.getMat(),gray,CV_BGR2GRAY);
+cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
 }
 else{
 gray = image.getMat().clone();
@@ -107,4 +107,4 @@ Sample video:
 @htmlonly
 <iframe width="560" height="315" src="https://www.youtube.com/embed/ZtaV07T90D8" frameborder="0" allowfullscreen></iframe>
-@endhtmlonly
\ No newline at end of file
+@endhtmlonly
@@ -25,7 +25,7 @@ bool myDetector( InputArray image, OutputArray ROIs ){
 Mat gray;
 std::vector<Rect> faces;
 if(image.channels()>1){
-cvtColor(image.getMat(),gray,CV_BGR2GRAY);
+cvtColor(image.getMat(),gray,COLOR_BGR2GRAY);
 }
 else{
 gray = image.getMat().clone();
@@ -144,4 +144,4 @@ Second image
 Results after swapping
 ----------------------
-![](images/face_swapped.jpg)
\ No newline at end of file
+![](images/face_swapped.jpg)
@@ -27,18 +27,10 @@ public:
 input.type() == CV_8U);
 cv::resize(input, resizeImg, cv::Size(8,8), 0, 0, INTER_LINEAR_EXACT);
-if(input.type() == CV_8UC3)
-{
-cv::cvtColor(resizeImg, grayImg, CV_BGR2GRAY);
-}
-else if(input.type() == CV_8UC4)
-{
-cv::cvtColor(resizeImg, grayImg, CV_BGRA2GRAY);
-}
+if(input.channels() > 1)
+cv::cvtColor(resizeImg, grayImg, COLOR_BGR2GRAY);
 else
-{
 grayImg = resizeImg;
-}
 uchar const imgMean = static_cast<uchar>(cvRound(cv::mean(grayImg)[0]));
 cv::compare(grayImg, imgMean, bitsImg, CMP_GT);
......
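The img_hash hunks above and below collapse the CV_8UC3/CV_8UC4 branches into a single channels() > 1 test. That works because cvtColor's *2GRAY conversions accept both 3- and 4-channel sources (the alpha channel is ignored). A small standalone check, not part of the patch:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cassert>

int main()
{
    cv::Mat bgr(8, 8, CV_8UC3,  cv::Scalar(10, 20, 30));
    cv::Mat bgra(8, 8, CV_8UC4, cv::Scalar(10, 20, 30, 255));
    cv::Mat g1, g2;
    cv::cvtColor(bgr,  g1, cv::COLOR_BGR2GRAY);
    cv::cvtColor(bgra, g2, cv::COLOR_BGR2GRAY);  // 4-channel input is also accepted
    assert(g1.type() == CV_8UC1 && g2.type() == CV_8UC1);
    assert(g1.at<uchar>(0, 0) == g2.at<uchar>(0, 0));  // same BGR values, same grey
    return 0;
}
```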
@@ -40,18 +40,10 @@ public:
 input.type() == CV_8U);
 cv::resize(input, resizeImg_, cv::Size(imgWidth,imgHeight), 0, 0, INTER_LINEAR_EXACT);
-if(input.type() == CV_8UC3)
-{
-cv::cvtColor(resizeImg_, grayImg_, CV_BGR2GRAY);
-}
-else if(input.type() == CV_8UC4)
-{
-cv::cvtColor(resizeImg_, grayImg_, CV_BGRA2GRAY);
-}
+if(input.channels() > 1)
+cv::cvtColor(resizeImg_, grayImg_, COLOR_BGR2GRAY);
 else
-{
 grayImg_ = resizeImg_;
-}
 int pixColStep = blockWidth;
 int pixRowStep = blockHeigth;
......
@@ -28,25 +28,24 @@ public:
 }
 else if(input.type() == CV_8UC4)
 {
-cv::cvtColor(input, colorImg_, CV_BGRA2BGR);
+cv::cvtColor(input, colorImg_, COLOR_BGRA2BGR);
 }
 else
 {
-cv::cvtColor(input, colorImg_, CV_GRAY2BGR);
+cv::cvtColor(input, colorImg_, COLOR_GRAY2BGR);
 }
-cv::resize(colorImg_, resizeImg_, cv::Size(512,512), 0, 0,
-INTER_CUBIC);
+cv::resize(colorImg_, resizeImg_, cv::Size(512,512), 0, 0, INTER_CUBIC);
 cv::GaussianBlur(resizeImg_, blurImg_, cv::Size(3,3), 0, 0);
-cv::cvtColor(blurImg_, colorSpace_, CV_BGR2HSV);
+cv::cvtColor(blurImg_, colorSpace_, COLOR_BGR2HSV);
 cv::split(colorSpace_, channels_);
 outputArr.create(1, 42, CV_64F);
 cv::Mat hash = outputArr.getMat();
 hash.setTo(0);
 computeMoments(hash.ptr<double>(0));
-cv::cvtColor(blurImg_, colorSpace_, CV_BGR2YCrCb);
+cv::cvtColor(blurImg_, colorSpace_, COLOR_BGR2YCrCb);
 cv::split(colorSpace_, channels_);
 computeMoments(hash.ptr<double>(0) + 21);
 }
......
@@ -105,18 +105,11 @@ public:
 input.type() == CV_8UC3 ||
 input.type() == CV_8U);
-if(input.type() == CV_8UC3)
-{
-cv::cvtColor(input, grayImg, CV_BGR2GRAY);
-}
-else if(input.type() == CV_8UC4)
-{
-cv::cvtColor(input, grayImg, CV_BGRA2GRAY);
-}
+if(input.channels() > 1)
+cv::cvtColor(input, grayImg, COLOR_BGR2GRAY);
 else
-{
 grayImg = input;
-}
 //pHash use Canny-deritch filter to blur the image
 cv::GaussianBlur(grayImg, blurImg, cv::Size(7, 7), 0);
 cv::resize(blurImg, resizeImg, cv::Size(512, 512), 0, 0, INTER_CUBIC);
......
@@ -21,18 +21,10 @@ public:
 input.type() == CV_8U);
 cv::resize(input, resizeImg, cv::Size(32,32), 0, 0, INTER_LINEAR_EXACT);
-if(input.type() == CV_8UC3)
-{
-cv::cvtColor(resizeImg, grayImg, CV_BGR2GRAY);
-}
-else if(input.type() == CV_8UC4)
-{
-cv::cvtColor(resizeImg, grayImg, CV_BGRA2GRAY);
-}
+if(input.channels() > 1)
+cv::cvtColor(resizeImg, grayImg, COLOR_BGR2GRAY);
 else
-{
 grayImg = resizeImg;
-}
 grayImg.convertTo(grayFImg, CV_32F);
 cv::dct(grayFImg, dctImg);
......
@@ -53,11 +53,11 @@ public:
 if(input.type() == CV_8UC3)
 {
-cv::cvtColor(input, grayImg_, CV_BGR2GRAY);
+cv::cvtColor(input, grayImg_, COLOR_BGR2GRAY);
 }
 else if(input.type() == CV_8UC4)
 {
-cv::cvtColor(input, grayImg_, CV_BGRA2GRAY);
+cv::cvtColor(input, grayImg_, COLOR_BGRA2GRAY);
 }
 else
 {
......
@@ -548,7 +548,7 @@ void BinaryDescriptor::computeImpl( const Mat& imageSrc, std::vector<KeyLine>& k
 if( imageSrc.channels() != 1 )
 cvtColor( imageSrc, image, COLOR_BGR2GRAY );
 else
-image = imageSrc.clone();
+image = imageSrc;
 /*check whether image's depth is different from 0 */
 if( image.depth() != 0 )
@@ -627,7 +627,6 @@ void BinaryDescriptor::computeImpl( const Mat& imageSrc, std::vector<KeyLine>& k
 {
 for ( size_t j = 0; j < sl[i].size(); )
 {
-//if( (int) ( sl[i][j] ).octaveCount > params.numOfOctave_ )
 if( (int) ( sl[i][j] ).octaveCount > octaveIndex )
 ( sl[i] ).erase( ( sl[i] ).begin() + j );
 else j++;
......
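The clone() removal in the first line_descriptor hunk avoids a deep copy: cv::Mat assignment only copies the header and shares the reference-counted pixel data, which is safe as long as the function only reads the image. A short sketch of the difference, not from the patch:

```cpp
#include <opencv2/core.hpp>
#include <cassert>

int main()
{
    cv::Mat a(4, 4, CV_8UC1, cv::Scalar(0));
    cv::Mat shared = a;          // header copy only; both point at the same pixels
    cv::Mat deep   = a.clone();  // separate allocation

    a.at<uchar>(0, 0) = 255;
    assert(shared.at<uchar>(0, 0) == 255);  // sees the write
    assert(deep.at<uchar>(0, 0) == 0);      // unaffected
    return 0;
}
```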
@@ -73,9 +73,9 @@ static void showDifference(const Mat& image1, const Mat& image2, const char* tit
 image1.convertTo(img1, CV_32FC3);
 image2.convertTo(img2, CV_32FC3);
 if(img1.channels() != 1)
-cvtColor(img1, img1, CV_RGB2GRAY);
+cvtColor(img1, img1, COLOR_BGR2GRAY);
 if(img2.channels() != 1)
-cvtColor(img2, img2, CV_RGB2GRAY);
+cvtColor(img2, img2, COLOR_BGR2GRAY);
 Mat imgDiff;
 img1.copyTo(imgDiff);
@@ -273,11 +273,11 @@ static void calcHomographyFeature(const Mat& image1, const Mat& image2)
 Mat gray_image2;
 // Convert to Grayscale
 if(image1.channels() != 1)
-cvtColor(image1, gray_image1, CV_RGB2GRAY);
+cvtColor(image1, gray_image1, COLOR_BGR2GRAY);
 else
 image1.copyTo(gray_image1);
 if(image2.channels() != 1)
-cvtColor(image2, gray_image2, CV_RGB2GRAY);
+cvtColor(image2, gray_image2, COLOR_BGR2GRAY);
 else
 image2.copyTo(gray_image2);
@@ -335,7 +335,7 @@ static void calcHomographyFeature(const Mat& image1, const Mat& image2)
 }
 // Find the Homography Matrix
-Mat H = findHomography( obj, scene, CV_RANSAC );
+Mat H = findHomography( obj, scene, RANSAC );
 // Use the Homography Matrix to warp the images
 Mat result;
 Mat Hinv = H.inv();
@@ -393,7 +393,7 @@ static void comparePixelVsFeature(const Mat& img1_8b, const Mat& img2_8b)
 int main(void)
 {
 Mat img1;
-img1 = imread("home.png", CV_LOAD_IMAGE_UNCHANGED);
+img1 = imread("home.png", IMREAD_UNCHANGED);
 if(!img1.data) {
 cout << "Could not open or find file" << endl;
 return -1;
@@ -408,13 +408,13 @@ int main(void)
 testProjective(img1);
 #ifdef COMPARE_FEATURES
-Mat imgcmp1 = imread("LR_05.png", CV_LOAD_IMAGE_UNCHANGED);
+Mat imgcmp1 = imread("LR_05.png", IMREAD_UNCHANGED);
 if(!imgcmp1.data) {
 cout << "Could not open or find file" << endl;
 return -1;
 }
-Mat imgcmp2 = imread("LR_06.png", CV_LOAD_IMAGE_UNCHANGED);
+Mat imgcmp2 = imread("LR_06.png", IMREAD_UNCHANGED);
 if(!imgcmp2.data) {
 cout << "Could not open or find file" << endl;
 return -1;
......
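In the reg sample, CV_RANSAC is the legacy C name for the cv::RANSAC method flag of findHomography. A hedged usage sketch with synthetic point sets (not the sample's data):

```cpp
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <vector>

int main()
{
    // Five correspondences related by a scale-by-2 plus translation.
    std::vector<cv::Point2f> obj   = {{0,0}, {1,0}, {1,1}, {0,1}, {0.5f,0.5f}};
    std::vector<cv::Point2f> scene = {{10,10}, {12,10}, {12,12}, {10,12}, {11,11}};

    // 3.0 is the RANSAC reprojection threshold in pixels.
    cv::Mat H = cv::findHomography(obj, scene, cv::RANSAC, 3.0);
    return H.empty() ? 1 : 0;
}
```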
@@ -102,7 +102,7 @@ void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
 //! [insideimage]
 patch=img(region).clone();
-cvtColor(patch,patch, CV_BGR2GRAY);
+cvtColor(patch,patch, COLOR_BGR2GRAY);
 //! [padding]
 // add some padding to compensate when the patch is outside image border
......
@@ -31,7 +31,7 @@ namespace cv{
 Mat hsv;
 img.convertTo(hsv,CV_32F,1.0/255.0);
-cvtColor(hsv,hsv,CV_BGR2HSV);
+cvtColor(hsv,hsv,COLOR_BGR2HSV);
 HShist=Mat_<double>(nh,ns,0.0);
 Vhist=Mat_<double>(1,nv,0.0);
......
@@ -126,7 +126,7 @@ bool TrackerBoostingImpl::initImpl( const Mat& image, const Rect2d& boundingBox
 Mat_<int> intImage;
 Mat_<double> intSqImage;
 Mat image_;
-cvtColor( image, image_, CV_RGB2GRAY );
+cvtColor( image, image_, COLOR_BGR2GRAY );
 integral( image_, intImage, intSqImage, CV_32S );
 TrackerSamplerCS::Params CSparameters;
 CSparameters.overlap = params.samplerOverlap;
@@ -208,7 +208,7 @@ bool TrackerBoostingImpl::updateImpl( const Mat& image, Rect2d& boundingBox )
 Mat_<int> intImage;
 Mat_<double> intSqImage;
 Mat image_;
-cvtColor( image, image_, CV_RGB2GRAY );
+cvtColor( image, image_, COLOR_BGR2GRAY );
 integral( image_, intImage, intSqImage, CV_32S );
 //get the last location [AAM] X(k-1)
 Ptr<TrackerTargetState> lastLocation = model->getLastTargetState();
......
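Note that the boosting-tracker hunks are not a pure rename: CV_RGB2GRAY and COLOR_BGR2GRAY apply the 0.299/0.587/0.114 luma weights to opposite channel orders, so on BGR frames the new constant is the correct one. A standalone illustration, not part of the patch:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cstdio>

int main()
{
    cv::Mat blueBGR(1, 1, CV_8UC3, cv::Scalar(255, 0, 0));  // B=255, G=0, R=0
    cv::Mat asBGR, asRGB;
    cv::cvtColor(blueBGR, asBGR, cv::COLOR_BGR2GRAY);  // 0.114 * 255 ~ 29
    cv::cvtColor(blueBGR, asRGB, cv::COLOR_RGB2GRAY);  // 0.299 * 255 ~ 76 (treats B as R)
    std::printf("BGR2GRAY: %d, RGB2GRAY: %d\n",
                asBGR.at<uchar>(0, 0), asRGB.at<uchar>(0, 0));
    return 0;
}
```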
@@ -201,7 +201,7 @@ std::vector<Mat> TrackerCSRTImpl::get_features(const Mat &patch, const Size2i &f
 }
 if(params.use_gray) {
 Mat gray_m;
-cvtColor(patch, gray_m, CV_BGR2GRAY);
+cvtColor(patch, gray_m, COLOR_BGR2GRAY);
 resize(gray_m, gray_m, feature_size, 0, 0, INTER_CUBIC);
 gray_m.convertTo(gray_m, CV_32FC1, 1.0/255.0, -0.5);
 features.push_back(gray_m);
@@ -465,15 +465,11 @@ Point2f TrackerCSRTImpl::estimate_new_position(const Mat &image)
 // *********************************************************************
 bool TrackerCSRTImpl::updateImpl(const Mat& image_, Rect2d& boundingBox)
 {
-//treat gray image as color image
 Mat image;
-if(image_.channels() == 1) {
-std::vector<Mat> channels(3);
-channels[0] = channels[1] = channels[2] = image_;
-merge(channels, image);
-} else {
+if(image_.channels() == 1) //treat gray image as color image
+cvtColor(image_, image, COLOR_GRAY2BGR);
+else
 image = image_;
-}
 object_center = estimate_new_position(image);
 if (object_center.x < 0 && object_center.y < 0)
@@ -512,15 +508,11 @@ bool TrackerCSRTImpl::updateImpl(const Mat& image_, Rect2d& boundingBox)
 // *********************************************************************
 bool TrackerCSRTImpl::initImpl(const Mat& image_, const Rect2d& boundingBox)
 {
-//treat gray image as color image
 Mat image;
-if(image_.channels() == 1) {
-std::vector<Mat> channels(3);
-channels[0] = channels[1] = channels[2] = image_;
-merge(channels, image);
-} else {
+if(image_.channels() == 1) //treat gray image as color image
+cvtColor(image_, image, COLOR_GRAY2BGR);
+else
 image = image_;
-}
 current_scale_factor = 1.0;
 image_size = image.size();
......
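The CSRT hunks replace the manual three-way merge with cvtColor(COLOR_GRAY2BGR), which replicates the single channel into all three and yields the same image. A standalone check of that equivalence, not from the patch:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <vector>
#include <cassert>

int main()
{
    cv::Mat gray(16, 16, CV_8UC1, cv::Scalar(42));

    cv::Mat viaCvtColor;
    cv::cvtColor(gray, viaCvtColor, cv::COLOR_GRAY2BGR);

    std::vector<cv::Mat> channels(3, gray);   // the old-style manual replication
    cv::Mat viaMerge;
    cv::merge(channels, viaMerge);

    assert(cv::norm(viaCvtColor, viaMerge, cv::NORM_INF) == 0);  // identical results
    return 0;
}
```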
@@ -552,7 +552,7 @@ double get_min(const Mat &m)
 Mat bgr2hsv(const Mat &img)
 {
 Mat hsv_img;
-cvtColor(img, hsv_img, CV_BGR2HSV);
+cvtColor(img, hsv_img, COLOR_BGR2HSV);
 std::vector<Mat> hsv_img_channels;
 split(hsv_img, hsv_img_channels);
 hsv_img_channels.at(0).convertTo(hsv_img_channels.at(0), CV_8UC1, 255.0 / 180.0);
......
@@ -700,7 +700,7 @@ namespace cv{
 break;
 default: // GRAY
 if(img.channels()>1)
-cvtColor(patch,feat, CV_BGR2GRAY);
+cvtColor(patch,feat, COLOR_BGR2GRAY);
 else
 feat=patch;
 //feat.convertTo(feat,CV_32F);
......
@@ -3,9 +3,6 @@
 #include "opencv2/imgproc.hpp"
 #include "opencv2/highgui.hpp"
 #include "opencv2/core/utility.hpp"
-#include "opencv2/imgproc/types_c.h"
 #include <ctime>
 #include <iostream>
@@ -41,22 +38,22 @@ int main( int argc, const char** argv )
 std::string maskFilename = parser.get<std::string>("m");
 std::string outFilename = parser.get<std::string>("o");
-cv::Mat src = cv::imread(inFilename, -1);
+cv::Mat src = cv::imread(inFilename, cv::IMREAD_UNCHANGED);
 if ( src.empty() )
 {
 printf( "Cannot read image file: %s\n", inFilename.c_str() );
 return -1;
 }
-cv::cvtColor(src, src, CV_RGB2Lab);
+cv::cvtColor(src, src, cv::COLOR_BGR2Lab);
-cv::Mat mask = cv::imread(maskFilename, 0);
+cv::Mat mask = cv::imread(maskFilename, cv::IMREAD_GRAYSCALE);
 if ( mask.empty() )
 {
 printf( "Cannot read image file: %s\n", maskFilename.c_str() );
 return -1;
 }
-cv::threshold(mask, mask, 128, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
+cv::threshold(mask, mask, 128, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
 cv::Mat res(src.size(), src.type());
@@ -65,7 +62,7 @@ int main( int argc, const char** argv )
 std::cout << "time = " << (clock() - time)
 / double(CLOCKS_PER_SEC) << std::endl;
-cv::cvtColor(res, res, CV_Lab2RGB);
+cv::cvtColor(res, res, cv::COLOR_Lab2BGR);
 if ( outFilename == "" )
 {
@@ -78,4 +75,4 @@ int main( int argc, const char** argv )
 cv::imwrite(outFilename, res);
 return 0;
-}
\ No newline at end of file
+}
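The xphoto sample now uses the BGR-based Lab codes, matching the BGR channel order that imread returns (the old CV_RGB2Lab applied the wrong order). A short round-trip sketch on a synthetic image, not part of the sample:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cstdio>

int main()
{
    cv::Mat bgr(32, 32, CV_8UC3, cv::Scalar(40, 80, 120));
    cv::Mat lab, back;
    cv::cvtColor(bgr, lab, cv::COLOR_BGR2Lab);   // 8-bit Lab, packed into 0..255
    cv::cvtColor(lab, back, cv::COLOR_Lab2BGR);  // approximate round trip
    // Expect only a small quantization error from the 8-bit Lab representation.
    std::printf("max round-trip error: %.0f\n", cv::norm(bgr, back, cv::NORM_INF));
    return 0;
}
```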