Commit ae5c87ea authored by Ilya Lavrenov

fixed warnings

parent 079ff5c0
@@ -137,12 +137,12 @@ public:
      * @param newParameters : a parameters structures updated with the new target configuration
      * @param applyDefaultSetupOnFailure : set to true if an error must be thrown on error
      */
-    virtual void setup(TransientAreasSegmentationModule::SegmentationParameters newParameters)=0;
+    virtual void setup(SegmentationParameters newParameters)=0;
     /**
      * @return the current parameters setup
      */
-    virtual struct TransientAreasSegmentationModule::TransientAreasSegmentationModule::SegmentationParameters getParameters()=0;
+    virtual SegmentationParameters getParameters()=0;
     /**
      * parameters setup display method
...
@@ -86,7 +86,8 @@ namespace cv
 namespace bioinspired
 {
-class TransientAreasSegmentationModuleImpl: protected BasicRetinaFilter
+class TransientAreasSegmentationModuleImpl :
+    protected BasicRetinaFilter
 {
 public:
@@ -136,7 +137,7 @@ public:
     /**
      * @return the current parameters setup
      */
-    struct TransientAreasSegmentationModule::TransientAreasSegmentationModule::SegmentationParameters getParameters();
+    struct TransientAreasSegmentationModule::SegmentationParameters getParameters();
     /**
      * parameters setup display method
@@ -219,19 +220,21 @@ protected:
     // Buffer conversion utilities
     void _convertValarrayBuffer2cvMat(const std::valarray<bool> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, OutputArray outBuffer);
     bool _convertCvMat2ValarrayBuffer(InputArray inputMat, std::valarray<float> &outputValarrayMatrix);
+    const TransientAreasSegmentationModuleImpl & operator = (const TransientAreasSegmentationModuleImpl &);
 };
 class TransientAreasSegmentationModuleImpl_: public TransientAreasSegmentationModule
 {
 public:
     TransientAreasSegmentationModuleImpl_(const Size size):_segmTool(size){};
     inline virtual Size getSize(){return _segmTool.getSize();};
     inline virtual void write( cv::FileStorage& fs ) const{_segmTool.write(fs);};
     inline virtual void setup(String segmentationParameterFile, const bool applyDefaultSetupOnFailure){_segmTool.setup(segmentationParameterFile, applyDefaultSetupOnFailure);};
     inline virtual void setup(cv::FileStorage &fs, const bool applyDefaultSetupOnFailure){_segmTool.setup(fs, applyDefaultSetupOnFailure);};
     inline virtual void setup(TransientAreasSegmentationModule::SegmentationParameters newParameters){_segmTool.setup(newParameters);};
     inline virtual const String printSetup(){return _segmTool.printSetup();};
-    inline virtual struct TransientAreasSegmentationModule::TransientAreasSegmentationModule::SegmentationParameters getParameters(){return _segmTool.getParameters();};
+    inline virtual struct TransientAreasSegmentationModule::SegmentationParameters getParameters(){return _segmTool.getParameters();};
     inline virtual void write( String fs ) const{_segmTool.write(fs);};
     inline virtual void run(InputArray inputToSegment, const int channelIndex){_segmTool.run(inputToSegment, channelIndex);};
     inline virtual void getSegmentationPicture(OutputArray transientAreas){return _segmTool.getSegmentationPicture(transientAreas);};
...
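Note on the added assignment operators in this commit: classes such as TransientAreasSegmentationModuleImpl (and, later in this diff, InlierFinder and TrackingFunctionPF) hold reference or const members, so the compiler cannot generate a copy-assignment operator and MSVC emits warning C4512. Declaring a private operator= without ever defining it is the usual pre-C++11 way to silence that warning and forbid copying. A minimal sketch of the idiom outside this codebase (class and member names are illustrative, not from the commit):

    // Illustrative only: a reference member blocks the generated operator=,
    // which MSVC reports as C4512 ("assignment operator could not be generated").
    class Holder
    {
    public:
        explicit Holder(int &slot) : slot_(slot) {}
    private:
        int &slot_;
        // Declared but intentionally never defined: copying is forbidden and
        // the compiler no longer tries (and fails) to generate operator=.
        const Holder & operator = (const Holder &);
    };

With C++11 the same intent is usually expressed with "= delete" instead of a private declaration.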
@@ -125,7 +125,7 @@ static void testEuclidean(const Mat& img1)
     Mat img2;
     // Warp original image
-    double theta = 3*M_PI/180;
+    double theta = 3*CV_PI/180;
     double cosT = cos(theta);
     double sinT = sin(theta);
     Matx<double, 2, 2> linTr(cosT, -sinT, sinT, cosT);
@@ -163,7 +163,7 @@ static void testSimilarity(const Mat& img1)
     Mat img2;
     // Warp original image
-    double theta = 3*M_PI/180;
+    double theta = 3*CV_PI/180;
     double scale = 0.95;
     double a = scale*cos(theta);
     double b = scale*sin(theta);
...
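Why M_PI is replaced with CV_PI: M_PI is a POSIX extension, not part of standard C++, and on MSVC it only appears when _USE_MATH_DEFINES is defined before including the math headers. CV_PI is defined unconditionally by the OpenCV core headers, so the tests become portable. A minimal hedged sketch of the portable form (function name is illustrative):

    #include <opencv2/core.hpp>   // defines CV_PI without compiler-specific macros
    #include <cmath>

    static void rotateByThreeDegrees()
    {
        double theta = 3 * CV_PI / 180;   // portable; relying on M_PI breaks on MSVC by default
        double cosT = std::cos(theta), sinT = std::sin(theta);
        (void)cosT; (void)sinT;           // silence unused-variable warnings in this sketch
    }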
@@ -40,7 +40,6 @@
 #include <opencv2/calib3d.hpp>
 #include <opencv2/imgproc.hpp>
-#include <dirent.h>
 #include <iostream>
 #include <fstream>
@@ -187,7 +186,7 @@ int main(int argc, char** argv)
     // scale depth
     Mat depth_flt;
     depth.convertTo(depth_flt, CV_32FC1, 1.f/5000.f);
-#if not BILATERAL_FILTER
+#if !BILATERAL_FILTER
     depth_flt.setTo(std::numeric_limits<float>::quiet_NaN(), depth == 0);
     depth = depth_flt;
 #else
...
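Two portability fixes here: <dirent.h> is a POSIX header that does not exist on MSVC, so the unused include is dropped, and the preprocessor condition switches from the alternative token "not" to "!", which MSVC's preprocessor does not reliably accept. A hedged sketch of the portable preprocessor form (the macro value below is illustrative configuration, not taken from this sample):

    #define BILATERAL_FILTER 0      // illustrative switch; the sample defines its own value

    #if !BILATERAL_FILTER           // portable; "#if not BILATERAL_FILTER" fails to compile on MSVC
        // plain path: mark invalid depth as NaN and continue
    #else
        // bilateral-filtering path
    #endif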
@@ -114,7 +114,7 @@ namespace
     {
       const cv::Mat_<unsigned short> &depth(depth_in);
       cv::Mat depth_out_tmp;
-      computeImpl<unsigned short, float>(depth, depth_out_tmp, 0.001);
+      computeImpl<unsigned short, float>(depth, depth_out_tmp, 0.001f);
       depth_out_tmp.convertTo(depth_out, CV_16U);
       break;
     }
@@ -142,16 +142,16 @@ namespace
     void
     computeImpl(const cv::Mat_<DepthDepth> &depth_in, cv::Mat & depth_out, ContainerDepth scale) const
     {
-      const ContainerDepth theta_mean = 30. * CV_PI / 180;
+      const ContainerDepth theta_mean = (float)(30. * CV_PI / 180);
       int rows = depth_in.rows;
       int cols = depth_in.cols;
       // Precompute some data
-      const ContainerDepth sigma_L = 0.8 + 0.035 * theta_mean / (CV_PI / 2 - theta_mean);
+      const ContainerDepth sigma_L = (float)(0.8 + 0.035 * theta_mean / (CV_PI / 2 - theta_mean));
       cv::Mat_<ContainerDepth> sigma_z(rows, cols);
       for (int y = 0; y < rows; ++y)
         for (int x = 0; x < cols; ++x)
-          sigma_z(y, x) = 0.0012 + 0.0019 * (depth_in(y, x) * scale - 0.4) * (depth_in(y, x) * scale - 0.4);
+          sigma_z(y, x) = (float)(0.0012 + 0.0019 * (depth_in(y, x) * scale - 0.4) * (depth_in(y, x) * scale - 0.4));
       ContainerDepth difference_threshold = 10;
       cv::Mat_<ContainerDepth> Dw_sum = cv::Mat_<ContainerDepth>::zeros(rows, cols), w_sum =
@@ -170,9 +170,9 @@ namespace
               ContainerDepth(j) * ContainerDepth(j) + ContainerDepth(i) * ContainerDepth(i));
           ContainerDepth delta_z;
           if (depth_in(y, x) > depth_in(y + j, x + i))
-            delta_z = depth_in(y, x) - depth_in(y + j, x + i);
+            delta_z = (float)(depth_in(y, x) - depth_in(y + j, x + i));
           else
-            delta_z = depth_in(y + j, x + i) - depth_in(y, x);
+            delta_z = (float)(depth_in(y + j, x + i) - depth_in(y, x));
           if (delta_z < difference_threshold)
           {
             delta_z *= scale;
...
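Most of the edits in this hunk, and in the rgbd hunks that follow, add explicit (float) casts: the literals (0.0012, 30. * CV_PI / 180, ...) are double expressions, and assigning them to a float destination triggers MSVC's C4244/C4305 "possible loss of data" warnings (and -Wconversion on GCC/Clang). A minimal sketch of the pattern with made-up values, not taken from the module:

    static void narrowingExample()
    {
        // Implicit double -> float narrowing draws a conversion warning;
        // the explicit cast documents the intent and keeps the build quiet.
        float sigma_warns = 0.0012f;                             // fine: float literal
        float sigma_clean = (float)(0.0012 + 0.0019 * 0.16);    // explicit cast of a double expression
        (void)sigma_warns; (void)sigma_clean;
    }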
@@ -104,9 +104,9 @@ namespace
     size_t n_points;
     if (depth.depth() == CV_16U)
-      n_points = convertDepthToFloat<uint16_t>(depth, mask, 1.0 / 1000.0f, u_mat, v_mat, z_mat);
+      n_points = convertDepthToFloat<ushort>(depth, mask, 1.0f / 1000.0f, u_mat, v_mat, z_mat);
     else if (depth.depth() == CV_16S)
-      n_points = convertDepthToFloat<int16_t>(depth, mask, 1.0 / 1000.0f, u_mat, v_mat, z_mat);
+      n_points = convertDepthToFloat<short>(depth, mask, 1.0f / 1000.0f, u_mat, v_mat, z_mat);
     else
     {
       CV_Assert(depth.type() == CV_32F);
@@ -199,9 +199,9 @@ namespace cv
     cv::Mat_<float> z_mat;
     if (depth.depth() == CV_16U)
-      convertDepthToFloat<uint16_t>(depth, 1.0 / 1000.0f, points_float, z_mat);
+      convertDepthToFloat<ushort>(depth, 1.0f / 1000.0f, points_float, z_mat);
     else if (depth.depth() == CV_16U)
-      convertDepthToFloat<int16_t>(depth, 1.0 / 1000.0f, points_float, z_mat);
+      convertDepthToFloat<short>(depth, 1.0f / 1000.0f, points_float, z_mat);
     else
     {
       CV_Assert(depth.type() == CV_32F);
...
@@ -72,14 +72,14 @@ convertDepthToFloat(const cv::Mat& depth, const cv::Mat& mask, float scale, cv::
     for (int u = 0; u < depth_size.width; u++, ++r)
       if (*r)
       {
-        u_mat(n_points, 0) = u;
-        v_mat(n_points, 0) = v;
+        u_mat((int)n_points, 0) = (float)u;
+        v_mat((int)n_points, 0) = (float)v;
         T depth_i = depth.at<T>(v, u);
         if (cvIsNaN(depth_i) || (depth_i == std::numeric_limits<T>::min()) || (depth_i == std::numeric_limits<T>::max()))
-          z_mat(n_points, 0) = std::numeric_limits<float>::quiet_NaN();
+          z_mat((int)n_points, 0) = std::numeric_limits<float>::quiet_NaN();
         else
-          z_mat(n_points, 0) = depth_i * scale;
+          z_mat((int)n_points, 0) = depth_i * scale;
         ++n_points;
       }
@@ -104,7 +104,7 @@ convertDepthToFloat(const cv::Mat& depth, float scale, const cv::Mat &uv_mat, cv
     for (cv::Mat_<cv::Vec2f>::const_iterator uv_iter = uv_mat.begin<cv::Vec2f>(), uv_end = uv_mat.end<cv::Vec2f>();
         uv_iter != uv_end; ++uv_iter, ++z_mat_iter)
     {
-      T depth_i = depth.at < T > ((*uv_iter)[1], (*uv_iter)[0]);
+      T depth_i = depth.at < T > ((int)(*uv_iter)[1], (int)(*uv_iter)[0]);
       if (cvIsNaN(depth_i) || (depth_i == std::numeric_limits < T > ::min())
           || (depth_i == std::numeric_limits < T > ::max()))
...
@@ -111,10 +111,10 @@ namespace
       // In the paper, z goes away from the camera, y goes down, x goes right
       // OpenCV has the same conventions
       // Theta goes from z to x (and actually goes from -pi/2 to pi/2, phi goes from z to y
-      float theta = std::atan2(row_points->val[0], row_points->val[2]);
+      float theta = (float)std::atan2(row_points->val[0], row_points->val[2]);
       *row_cos_theta = std::cos(theta);
       *row_sin_theta = std::sin(theta);
-      float phi = std::asin(row_points->val[1] / (*row_r));
+      float phi = (float)std::asin(row_points->val[1] / (*row_r));
       *row_cos_phi = std::cos(phi);
       *row_sin_phi = std::sin(phi);
     }
@@ -195,7 +195,7 @@ namespace
       if ((K_ori.cols != K_ori_.cols) || (K_ori.rows != K_ori_.rows) || (K_ori.type() != K_ori_.type()))
         return false;
       bool K_test = !(cv::countNonZero(K_ori != K_ori_));
-      return (rows == rows_) && (cols = cols_) && (window_size == window_size_) && (depth == depth_) && (K_test)
+      return (rows == rows_) && (cols == cols_) && (window_size == window_size_) && (depth == depth_) && (K_test)
           && (method == method_);
     }
   protected:
@@ -330,9 +330,9 @@ template<typename T, typename U>
   void
   multiply_by_K_inv(const cv::Matx<T, 3, 3> & K_inv, U a, U b, U c, cv::Vec<T, 3> &res)
   {
-    res[0] = K_inv(0, 0) * a + K_inv(0, 1) * b + K_inv(0, 2) * c;
-    res[1] = K_inv(1, 1) * b + K_inv(1, 2) * c;
-    res[2] = c;
+    res[0] = (T)(K_inv(0, 0) * a + K_inv(0, 1) * b + K_inv(0, 2) * c);
+    res[1] = (T)(K_inv(1, 1) * b + K_inv(1, 2) * c);
+    res[2] = (T)c;
   }
 namespace
@@ -424,7 +424,7 @@ namespace
       // Define K_inv by hand, just for higher accuracy
       Mat33T K_inv = cv::Matx<T, 3, 3>::eye(), K;
       K_.copyTo(K);
-      K_inv(0, 0) = 1.0 / K(0, 0);
+      K_inv(0, 0) = 1.0f / K(0, 0);
       K_inv(0, 1) = -K(0, 1) / (K(0, 0) * K(1, 1));
       K_inv(0, 2) = (K(0, 1) * K(1, 2) - K(0, 2) * K(1, 1)) / (K(0, 0) * K(1, 1));
       K_inv(1, 1) = 1 / K(1, 1);
@@ -527,8 +527,8 @@ namespace
       getDerivKernels(kx_dy_, ky_dy_, 0, 1, window_size_, true, depth_);
       // Get the mapping function for SRI
-      float min_theta = std::asin(sin_theta(0, 0)), max_theta = std::asin(sin_theta(0, cols_ - 1));
-      float min_phi = std::asin(sin_phi(0, cols_/2-1)), max_phi = std::asin(sin_phi(rows_ - 1, cols_/2-1));
+      float min_theta = (float)std::asin(sin_theta(0, 0)), max_theta = (float)std::asin(sin_theta(0, cols_ - 1));
+      float min_phi = (float)std::asin(sin_phi(0, cols_/2-1)), max_phi = (float) std::asin(sin_phi(rows_ - 1, cols_/2-1));
       std::vector<cv::Point3f> points3d(cols_ * rows_);
       R_hat_.create(rows_, cols_);
@@ -566,11 +566,11 @@ namespace
       //map for converting from Spherical coordinate space to Euclidean space
       euclideanMap_.create(rows_,cols_);
-      float invFx = 1.0f/K_.at<T>(0,0), cx = K_.at<T>(0,2);
+      float invFx = (float)(1.0f/K_.at<T>(0,0)), cx = (float)K_.at<T>(0,2);
       double invFy = 1.0f/K_.at<T>(1,1), cy = K_.at<T>(1,2);
       for (int i = 0; i < rows_; i++)
       {
-        float y = (i - cy)*invFy;
+        float y = (float)((i - cy)*invFy);
         for (int j = 0; j < cols_; j++)
         {
           float x = (j - cx)*invFx;
...
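Buried among the cast fixes above is a real bug that the warning pass exposed: the cache-validity test used "(cols = cols_)", which assigns instead of compares (MSVC C4706, GCC -Wparentheses), so it both overwrote the member and evaluated to the wrong truth value whenever cols_ was non-zero. A hedged sketch of the failure mode with illustrative names, not the module's actual members:

    static bool cacheMatchesExample(int rows, int cols, int rows_, int cols_)
    {
        // bool same = (rows == rows_) && (cols = cols_);   // warns: assigns cols_ to cols, then tests it
        bool same = (rows == rows_) && (cols == cols_);      // intended comparison
        return same;
    }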
@@ -523,6 +523,8 @@ private:
     unsigned char plane_index_;
     /** THe block size as defined in the main algorithm */
     int block_size_;
+    const InlierFinder & operator = (const InlierFinder &);
   }
   ;
@@ -563,7 +565,7 @@ namespace cv
     size_t index_plane = 0;
     std::vector<cv::Vec4f> plane_coefficients;
-    float mse_min = threshold_ * threshold_;
+    float mse_min = (float)(threshold_ * threshold_);
     while (!plane_queue.empty())
     {
@@ -572,16 +574,17 @@ namespace cv
       if (front_tile.mse_ > mse_min)
         break;
-      InlierFinder inlier_finder(threshold_, points3d, normals, index_plane, block_size_);
+      InlierFinder inlier_finder((float)threshold_, points3d, normals, (unsigned char)index_plane, block_size_);
       // Construct the plane for the first tile
       int x = front_tile.x_, y = front_tile.y_;
       const cv::Vec3f & n = plane_grid.n_(y, x);
       cv::Ptr<PlaneBase> plane;
       if ((sensor_error_a_ == 0) && (sensor_error_b_ == 0) && (sensor_error_c_ == 0))
-        plane = cv::Ptr<PlaneBase>(new Plane(plane_grid.m_(y, x), n, index_plane));
+        plane = cv::Ptr<PlaneBase>(new Plane(plane_grid.m_(y, x), n, (int)index_plane));
       else
-        plane = cv::Ptr<PlaneBase>(new PlaneABC(plane_grid.m_(y, x), n, index_plane, sensor_error_a_, sensor_error_b_, sensor_error_c_));
+        plane = cv::Ptr<PlaneBase>(new PlaneABC(plane_grid.m_(y, x), n, (int)index_plane,
+                                                (float)sensor_error_a_, (float)sensor_error_b_, (float)sensor_error_c_));
       cv::Mat_<unsigned char> plane_mask = cv::Mat_<unsigned char>::zeros(points3d.rows / block_size_,
                                                                           points3d.cols / block_size_);
@@ -631,7 +634,7 @@ namespace cv
     // Fill the plane coefficients
     if (plane_coefficients.empty())
       return;
-    plane_coefficients_out.create(plane_coefficients.size(), 1, CV_32FC4);
+    plane_coefficients_out.create((int)plane_coefficients.size(), 1, CV_32FC4);
     cv::Mat plane_coefficients_mat = plane_coefficients_out.getMat();
     float* data = plane_coefficients_mat.ptr<float>(0);
     for(size_t i=0; i<plane_coefficients.size(); ++i)
...
@@ -84,7 +84,7 @@ namespace cv
                   obj.info()->addParam(obj, "transformType", obj.transformType);
                   obj.info()->addParam(obj, "maxTranslation", obj.maxTranslation);
                   obj.info()->addParam(obj, "maxRotation", obj.maxRotation);
-                  obj.info()->addParam(obj, "normalsComputer", obj.normalsComputer, true);)
+                  obj.info()->addParam<cv::RgbdNormals>(obj, "normalsComputer", obj.normalsComputer, true, NULL, NULL);)
   CV_INIT_ALGORITHM(RgbdICPOdometry, "RGBD.RgbdICPOdometry",
                   obj.info()->addParam(obj, "cameraMatrix", obj.cameraMatrix);
@@ -97,7 +97,7 @@ namespace cv
                   obj.info()->addParam(obj, "transformType", obj.transformType);
                   obj.info()->addParam(obj, "maxTranslation", obj.maxTranslation);
                   obj.info()->addParam(obj, "maxRotation", obj.maxRotation);
-                  obj.info()->addParam(obj, "normalsComputer", obj.normalsComputer, true);)
+                  obj.info()->addParam<cv::RgbdNormals>(obj, "normalsComputer", obj.normalsComputer, true, NULL, NULL);)
   bool
   initModule_rgbd(void);
...
@@ -62,13 +62,13 @@ namespace cv
     if (in_depth == CV_16U)
     {
       in.convertTo(out, depth, 1 / 1000.0); //convert to float so that it is in meters
-      cv::Mat valid_mask = in == std::numeric_limits<uint16_t>::min(); // Should we do std::numeric_limits<uint16_t>::max() too ?
+      cv::Mat valid_mask = in == std::numeric_limits<ushort>::min(); // Should we do std::numeric_limits<ushort>::max() too ?
       out.setTo(std::numeric_limits<float>::quiet_NaN(), valid_mask); //set a$
     }
     if (in_depth == CV_16S)
     {
       in.convertTo(out, depth, 1 / 1000.0); //convert to float so tha$
-      cv::Mat valid_mask = (in == std::numeric_limits<int16_t>::min()) | (in == std::numeric_limits<int16_t>::max()); // Should we do std::numeric_limits<uint16_t>::max() too ?
+      cv::Mat valid_mask = (in == std::numeric_limits<short>::min()) | (in == std::numeric_limits<short>::max()); // Should we do std::numeric_limits<ushort>::max() too ?
       out.setTo(std::numeric_limits<float>::quiet_NaN(), valid_mask); //set a$
     }
     if ((in_depth == CV_32F) || (in_depth == CV_64F))
...
@@ -65,7 +65,7 @@ rayPlaneIntersection(const cv::Vec3d& uv1, double centroid_dot_normal, const cv:
     std::cout << "warning, LdotNormal nearly 0! " << LdotNormal << std::endl;
     std::cout << "contents of L, Normal: " << cv::Mat(L) << ", " << cv::Mat(normal) << std::endl;
   }
-  cv::Vec3f xyz(d * L(0), d * L(1), d * L(2));
+  cv::Vec3f xyz((float)(d * L(0)), (float)(d * L(1)), (float)(d * L(2)));
   return xyz;
 }
@@ -99,7 +99,7 @@ struct Plane
     n[1] = rng.uniform(-0.5, 0.5);
     n[2] = -0.3; //rng.uniform(-1.f, 0.5f);
     n = n / cv::norm(n);
-    set_d(rng.uniform(-2.0, 0.6));
+    set_d((float)rng.uniform(-2.0, 0.6));
   }
   void
@@ -146,11 +146,11 @@ gen_points_3d(std::vector<Plane>& planes_out, cv::Mat_<unsigned char> &plane_mas
   {
     for (int u = 0; u < W; u++)
     {
-      unsigned int plane_index = (u / float(W)) * planes.size();
+      unsigned int plane_index = (unsigned int)((u / float(W)) * planes.size());
       Plane plane = planes[plane_index];
-      outp(v, u) = plane.intersection(u, v, Kinv);
+      outp(v, u) = plane.intersection((float)u, (float)v, Kinv);
       outn(v, u) = plane.n;
-      plane_mask(v, u) = plane_index;
+      plane_mask(v, u) = (uchar)plane_index;
     }
   }
   planes_out = planes;
@@ -210,6 +210,9 @@ protected:
         errors[1][0] = 0.02;
         errors[1][1] = 0.04;
         break;
+      default:
+        method = (cv::RgbdNormals::RGBD_NORMALS_METHOD)-1;
+        CV_Error(0, "");
     }
     for (unsigned char j = 0; j < 2; ++j)
...
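The added default branch in the test's switch serves two purposes: it keeps enum-switch warnings (MSVC C4062/C4715, GCC -Wswitch) quiet, and it turns an impossible enum value into a loud CV_Error instead of silently leaving variables uninitialized. A hedged sketch of the same shape; the enumerator names below are an assumption about the rgbd module's RgbdNormals API and the error message is illustrative, neither is taken from this commit:

    #include <opencv2/rgbd.hpp>

    static void handleNormalsMethod(cv::RgbdNormals::RGBD_NORMALS_METHOD method)
    {
        switch (method)
        {
            case cv::RgbdNormals::RGBD_NORMALS_METHOD_FALS:    /* configure FALS */    break;
            case cv::RgbdNormals::RGBD_NORMALS_METHOD_LINEMOD: /* configure LINEMOD */ break;
            case cv::RgbdNormals::RGBD_NORMALS_METHOD_SRI:     /* configure SRI */     break;
            default:
                CV_Error(CV_StsBadArg, "unhandled normals computation method");  // fail loudly, never fall through
        }
    }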
@@ -192,7 +192,7 @@ bool CV_OdometryTest::readData(Mat& image, Mat& depth) const
         return false;
     }
-    CV_DbgAssert(gray.type() == CV_8UC1);
+    CV_DbgAssert(image.type() == CV_8UC1);
     CV_DbgAssert(depth.type() == CV_16UC1);
     {
         Mat depth_flt;
...
@@ -124,12 +124,12 @@ void getSegment( int segmentId, int numSegments, int bbCounter, int& startFrame,
 void getMatOfRects( const vector<Rect>& bbs, Mat& bbs_mat )
 {
-  for ( size_t b = 0; b < bbs.size(); b++ )
+  for ( int b = 0, size = (int)bbs.size(); b < size; b++ )
   {
-    bbs_mat.at<float>( b, 0 ) = bbs[b].x;
-    bbs_mat.at<float>( b, 1 ) = bbs[b].y;
-    bbs_mat.at<float>( b, 2 ) = bbs[b].width;
-    bbs_mat.at<float>( b, 3 ) = bbs[b].height;
+    bbs_mat.at<float>( b, 0 ) = (float)bbs[b].x;
+    bbs_mat.at<float>( b, 1 ) = (float)bbs[b].y;
+    bbs_mat.at<float>( b, 2 ) = (float)bbs[b].width;
+    bbs_mat.at<float>( b, 3 ) = (float)bbs[b].height;
   }
 }
@@ -149,7 +149,7 @@ PERF_TEST_P(tracking, mil, testing::Combine(TESTSET_NAMES, SEGMENTS))
   string gtFile = getDataPath( TRACKING_DIR + "/" + video + "/gt.txt" );
   if( !getGroundTruth( gtFile, gtBBs ) )
     FAIL()<< "Ground truth file " << gtFile << " can not be read" << endl;
-  int bbCounter = gtBBs.size();
+  int bbCounter = (int)gtBBs.size();
   Mat frame;
   bool initialized = false;
@@ -197,7 +197,7 @@ PERF_TEST_P(tracking, mil, testing::Combine(TESTSET_NAMES, SEGMENTS))
     }
   }
   //save the bounding boxes in a Mat
-  Mat bbs_mat( bbs.size(), 4, CV_32F );
+  Mat bbs_mat( (int)bbs.size(), 4, CV_32F );
   getMatOfRects( bbs, bbs_mat );
   SANITY_CHECK( bbs_mat, 15, ERROR_RELATIVE );
@@ -220,7 +220,7 @@ PERF_TEST_P(tracking, boosting, testing::Combine(TESTSET_NAMES, SEGMENTS))
   string gtFile = getDataPath( TRACKING_DIR + "/" + video + "/gt.txt" );
   if( !getGroundTruth( gtFile, gtBBs ) )
    FAIL()<< "Ground truth file " << gtFile << " can not be read" << endl;
-  int bbCounter = gtBBs.size();
+  int bbCounter = (int)gtBBs.size();
   Mat frame;
   bool initialized = false;
@@ -267,7 +267,7 @@ PERF_TEST_P(tracking, boosting, testing::Combine(TESTSET_NAMES, SEGMENTS))
     }
   }
   //save the bounding boxes in a Mat
-  Mat bbs_mat( bbs.size(), 4, CV_32F );
+  Mat bbs_mat( (int)bbs.size(), 4, CV_32F );
   getMatOfRects( bbs, bbs_mat );
   SANITY_CHECK( bbs_mat, 15, ERROR_RELATIVE );
...
@@ -58,7 +58,7 @@ static void onMouse( int event, int x, int y, int, void* )
       //draw the bounding box
       Mat currentFrame;
       image.copyTo( currentFrame );
-      rectangle( currentFrame, Point( boundingBox.x, boundingBox.y ), Point( x, y ), Scalar( 255, 0, 0 ), 2, 1 );
+      rectangle( currentFrame, Point( (int)boundingBox.x, (int)boundingBox.y ), Point( x, y ), Scalar( 255, 0, 0 ), 2, 1 );
       imshow( "Tracking API", currentFrame );
     }
     break;
@@ -82,18 +82,23 @@ int main( int argc, char** argv )
   int coords[4]={0,0,0,0};
   bool initFrameWasGivenInCommandLine=false;
-  do{
+  do
+  {
     String initBoundingBox=parser.get<String>(3);
-    for(size_t pos=0,ctr=0;ctr<4;ctr++){
+    for(size_t pos=0,ctr=0;ctr<4;ctr++)
+    {
       size_t npos=initBoundingBox.find_first_of(',',pos);
-      if(npos==string::npos && ctr<3){
+      if(npos==string::npos && ctr<3)
+      {
         printf("bounding box should be given in format \"x1,y1,x2,y2\",where x's and y's are integer cordinates of opposed corners of bdd box\n");
         printf("got: %s\n",initBoundingBox.substr(pos,string::npos).c_str());
        printf("manual selection of bounding box will be employed\n");
        break;
      }
      int num=atoi(initBoundingBox.substr(pos,(ctr==3)?(string::npos):(npos-pos)).c_str());
-     if(num<=0){
+     if(num<=0)
+     {
        printf("bounding box should be given in format \"x1,y1,x2,y2\",where x's and y's are integer cordinates of opposed corners of bdd box\n");
        printf("got: %s\n",initBoundingBox.substr(pos,npos-pos).c_str());
        printf("manual selection of bounding box will be employed\n");
@@ -102,10 +107,9 @@ int main( int argc, char** argv )
      coords[ctr]=num;
      pos=npos+1;
    }
-   if(coords[0]>0 && coords[1]>0 && coords[2]>0 && coords[3]>0){
+   if(coords[0]>0 && coords[1]>0 && coords[2]>0 && coords[3]>0)
      initFrameWasGivenInCommandLine=true;
-   }
-  }while(0);
+  } while((void)0, 0);
   //open the capture
   VideoCapture cap;
...
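The loop epilogue is rewritten from "}while(0);" to "} while((void)0, 0);" because MSVC flags the literal constant condition in do { ... } while(0) with C4127 ("conditional expression is constant"); the comma expression keeps the semantics (exactly one pass, break still works) without the warning. A hedged sketch of the idiom with placeholder names, not the sample's actual parsing code:

    static void parseOnce(bool parsedOk)
    {
        do
        {
            if (!parsedOk)
                break;            // early exit from the block without nested ifs or goto
            // ... use the parsed values here ...
        } while ((void)0, 0);     // single pass; form chosen to avoid MSVC warning C4127
    }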
@@ -106,7 +106,7 @@ namespace cv{
             //Mat_<double> maxrow=_particles.row(std::max_element(_logweight.begin(),_logweight.end())-_logweight.begin());
             double max_element;
             minMaxLoc(_logweight, 0, &max_element);
-            Mat_<double> maxrow=_particles.row(max_element);
+            Mat_<double> maxrow=_particles.row((int)max_element);
             for(;num_particles<new_particles.rows;num_particles++){
                 maxrow.copyTo(new_particles.row(num_particles));
             }
...
@@ -23,6 +23,8 @@ namespace cv{
             Mat_<double> HShist, Vhist;
         };
         TrackingHistogram _origHist;
+        const TrackingFunctionPF & operator = (const TrackingFunctionPF &);
     };
     TrackingFunctionPF::TrackingHistogram::TrackingHistogram(const Mat& img,int nh,int ns,int nv){
...
@@ -686,7 +686,7 @@ void CvHaarEvaluator::FeatureHaar::generateRandomFeature( Size patchSize )
       valid = true;
     }
     else
-      CV_Assert( false );
+      CV_Error(CV_StsAssert, "");
   }
   m_initSize = patchSize;
@@ -742,14 +742,14 @@ float CvHaarEvaluator::FeatureHaar::getSum( const Mat& image, Rect imageROI ) co
   int depth = image.depth();
   if( depth == CV_8U || depth == CV_32S )
-    value = image.at<int>( OriginY + Height, OriginX + Width ) + image.at<int>( OriginY, OriginX ) - image.at<int>( OriginY, OriginX + Width )
-        - image.at<int>( OriginY + Height, OriginX );
+    value = static_cast<float>(image.at<int>( OriginY + Height, OriginX + Width ) + image.at<int>( OriginY, OriginX ) - image.at<int>( OriginY, OriginX + Width )
+        - image.at<int>( OriginY + Height, OriginX ));
   else if( depth == CV_64F )
-    value = image.at<double>( OriginY + Height, OriginX + Width ) + image.at<double>( OriginY, OriginX )
-        - image.at<double>( OriginY, OriginX + Width ) - image.at<double>( OriginY + Height, OriginX );
+    value = static_cast<float>(image.at<double>( OriginY + Height, OriginX + Width ) + image.at<double>( OriginY, OriginX )
+        - image.at<double>( OriginY, OriginX + Width ) - image.at<double>( OriginY + Height, OriginX ));
   else if( depth == CV_32F )
-    value = image.at<float>( OriginY + Height, OriginX + Width ) + image.at<float>( OriginY, OriginX ) - image.at<float>( OriginY, OriginX + Width )
-        - image.at<float>( OriginY + Height, OriginX );
+    value = static_cast<float>(image.at<float>( OriginY + Height, OriginX + Width ) + image.at<float>( OriginY, OriginX ) - image.at<float>( OriginY, OriginX + Width )
+        - image.at<float>( OriginY + Height, OriginX ));
   return value;
 }
...
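Replacing CV_Assert( false ) with CV_Error(CV_StsAssert, "") avoids another constant-condition warning: CV_Assert expands to a test of its argument, and a literal false is a compile-time constant that MSVC reports (C4127), while CV_Error raises the same cv::Exception unconditionally with no constant test. A hedged sketch of signalling an "unreachable" branch this way; the function name and message are illustrative, not from the evaluator:

    #include <opencv2/core.hpp>

    static void rejectInvalidConfiguration(bool valid)
    {
        if (!valid)
            CV_Error(CV_StsAssert, "no valid feature configuration generated");  // throws cv::Exception
    }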
@@ -287,7 +287,7 @@ void BaseClassifier::trainClassifier( const Mat& image, int target, float import
     double A = 1;
     int K = 0;
     int K_max = 10;
-    while ( 1 )
+    for ( ; ; )
     {
       double U_k = (double) rand() / RAND_MAX;
       A *= U_k;
@@ -572,7 +572,7 @@ void Detector::prepareDetectionsMemory( int numDetections )
 void Detector::classifySmooth( const std::vector<Mat>& images, float minMargin )
 {
-  int numPatches = images.size();
+  int numPatches = static_cast<int>(images.size());
   prepareConfidencesMemory( numPatches );
...
@@ -99,7 +99,7 @@ ClfMilBoost::Params::Params()
 {
   _numSel = 50;
   _numFeat = 250;
-  _lRate = 0.85;
+  _lRate = 0.85f;
 }
 ClfMilBoost::ClfMilBoost()
...
@@ -56,7 +56,7 @@ TrackerBoosting::Params::Params()
 {
   numClassifiers = 100;
   samplerOverlap = 0.99f;
-  samplerSearchFactor = 1.8;
+  samplerSearchFactor = 1.8f;
   iterationInit = 50;
   featureSetNumFeatures = ( numClassifiers * 10 ) + iterationInit;
 }
@@ -141,7 +141,7 @@ bool TrackerBoosting::initImpl( const Mat& image, const Rect2d& boundingBox )
   TrackerFeatureHAAR::Params HAARparameters;
   HAARparameters.numFeatures = params.featureSetNumFeatures;
   HAARparameters.isIntegral = true;
-  HAARparameters.rectSize = Size( boundingBox.width, boundingBox.height );
+  HAARparameters.rectSize = Size( static_cast<int>(boundingBox.width), static_cast<int>(boundingBox.height) );
   Ptr<TrackerFeature> trackerFeature = Ptr<TrackerFeatureHAAR>( new TrackerFeatureHAAR( HAARparameters ) );
   if( !featureSet->addTrackerFeature( trackerFeature ) )
     return false;
@@ -155,7 +155,7 @@ bool TrackerBoosting::initImpl( const Mat& image, const Rect2d& boundingBox )
   model = Ptr<TrackerBoostingModel>( new TrackerBoostingModel( boundingBox ) );
   Ptr<TrackerStateEstimatorAdaBoosting> stateEstimator = Ptr<TrackerStateEstimatorAdaBoosting>(
       new TrackerStateEstimatorAdaBoosting( params.numClassifiers, params.iterationInit, params.featureSetNumFeatures,
-                                            Size( boundingBox.width, boundingBox.height ), ROI ) );
+                                            Size( static_cast<int>(boundingBox.width), static_cast<int>(boundingBox.height) ), ROI ) );
   model->setTrackerStateEstimator( stateEstimator );
   //Run model estimation and update for iterationInit iterations
@@ -163,9 +163,9 @@ bool TrackerBoosting::initImpl( const Mat& image, const Rect2d& boundingBox )
   {
     //compute temp features
     TrackerFeatureHAAR::Params HAARparameters2;
-    HAARparameters2.numFeatures = ( posSamples.size() + negSamples.size() );
+    HAARparameters2.numFeatures = static_cast<int>( posSamples.size() + negSamples.size() );
     HAARparameters2.isIntegral = true;
-    HAARparameters2.rectSize = Size( boundingBox.width, boundingBox.height );
+    HAARparameters2.rectSize = Size( static_cast<int>(boundingBox.width), static_cast<int>(boundingBox.height) );
     Ptr<TrackerFeatureHAAR> trackerFeature2 = Ptr<TrackerFeatureHAAR>( new TrackerFeatureHAAR( HAARparameters2 ) );
     model.staticCast<TrackerBoostingModel>()->setMode( TrackerBoostingModel::MODE_NEGATIVE, negSamples );
@@ -182,7 +182,7 @@ bool TrackerBoosting::initImpl( const Mat& image, const Rect2d& boundingBox )
       if( replacedClassifier[j] != -1 && swappedClassified[j] != -1 )
       {
         trackerFeature.staticCast<TrackerFeatureHAAR>()->swapFeature( replacedClassifier[j], swappedClassified[j] );
-        trackerFeature.staticCast<TrackerFeatureHAAR>()->swapFeature( swappedClassified[j], trackerFeature2->getFeatureAt( j ) );
+        trackerFeature.staticCast<TrackerFeatureHAAR>()->swapFeature( swappedClassified[j], trackerFeature2->getFeatureAt( (int)j ) );
       }
     }
   }
@@ -199,7 +199,7 @@ bool TrackerBoosting::updateImpl( const Mat& image, Rect2d& boundingBox )
   integral( image_, intImage, intSqImage, CV_32S );
   //get the last location [AAM] X(k-1)
   Ptr<TrackerTargetState> lastLocation = model->getLastTargetState();
-  Rect lastBoundingBox( lastLocation->getTargetPosition().x, lastLocation->getTargetPosition().y, lastLocation->getTargetWidth(),
+  Rect lastBoundingBox( (int)lastLocation->getTargetPosition().x, (int)lastLocation->getTargetPosition().y, lastLocation->getTargetWidth(),
                         lastLocation->getTargetHeight() );
   //sampling new frame based on last location
@@ -244,7 +244,7 @@ bool TrackerBoosting::updateImpl( const Mat& image, Rect2d& boundingBox )
   }
   Ptr<TrackerTargetState> currentState = model->getLastTargetState();
-  boundingBox = Rect( currentState->getTargetPosition().x, currentState->getTargetPosition().y, currentState->getTargetWidth(),
+  boundingBox = Rect( (int)currentState->getTargetPosition().x, (int)currentState->getTargetPosition().y, currentState->getTargetWidth(),
                       currentState->getTargetHeight() );
   /*//TODO debug
@@ -276,9 +276,9 @@ bool TrackerBoosting::updateImpl( const Mat& image, Rect2d& boundingBox )
   //compute temp features
   TrackerFeatureHAAR::Params HAARparameters2;
-  HAARparameters2.numFeatures = ( posSamples.size() + negSamples.size() );
+  HAARparameters2.numFeatures = static_cast<int>( posSamples.size() + negSamples.size() );
   HAARparameters2.isIntegral = true;
-  HAARparameters2.rectSize = Size( boundingBox.width, boundingBox.height );
+  HAARparameters2.rectSize = Size( static_cast<int>(boundingBox.width), static_cast<int>(boundingBox.height) );
   Ptr<TrackerFeatureHAAR> trackerFeature2 = Ptr<TrackerFeatureHAAR>( new TrackerFeatureHAAR( HAARparameters2 ) );
   //model estimate
@@ -299,7 +299,7 @@ bool TrackerBoosting::updateImpl( const Mat& image, Rect2d& boundingBox )
     {
       featureSet->getTrackerFeature().at( 0 ).second.staticCast<TrackerFeatureHAAR>()->swapFeature( replacedClassifier[j], swappedClassified[j] );
       featureSet->getTrackerFeature().at( 0 ).second.staticCast<TrackerFeatureHAAR>()->swapFeature( swappedClassified[j],
-                                                                                                    trackerFeature2->getFeatureAt( j ) );
+                                                                                                    trackerFeature2->getFeatureAt( (int)j ) );
     }
   }
...
@@ -55,7 +55,7 @@ TrackerBoostingModel::TrackerBoostingModel( const Rect& boundingBox )
   Ptr<TrackerStateEstimatorAdaBoosting::TrackerAdaBoostingTargetState> initState =
       Ptr<TrackerStateEstimatorAdaBoosting::TrackerAdaBoostingTargetState>(
-          new TrackerStateEstimatorAdaBoosting::TrackerAdaBoostingTargetState( Point2f( boundingBox.x, boundingBox.y ), boundingBox.width,
+          new TrackerStateEstimatorAdaBoosting::TrackerAdaBoostingTargetState( Point2f( (float)boundingBox.x, (float)boundingBox.y ), boundingBox.width,
                                                                                boundingBox.height, true, Mat() ) );
   trajectory.push_back( initState );
   maxCMLength = 10;
@@ -98,7 +98,7 @@ void TrackerBoostingModel::responseToConfidenceMap( const std::vector<Mat>& resp
     Size currentSize;
     Point currentOfs;
     currentSample.at( i ).locateROI( currentSize, currentOfs );
-    bool foreground;
+    bool foreground = false;
     if( mode == MODE_POSITIVE || mode == MODE_CLASSIFY )
     {
       foreground = true;
@@ -107,7 +107,7 @@ void TrackerBoostingModel::responseToConfidenceMap( const std::vector<Mat>& resp
     {
       foreground = false;
     }
-    const Mat resp = responses[0].col( i );
+    const Mat resp = responses[0].col( (int)i );
     //create the state
     Ptr<TrackerStateEstimatorAdaBoosting::TrackerAdaBoostingTargetState> currentState = Ptr<
@@ -115,7 +115,7 @@ void TrackerBoostingModel::responseToConfidenceMap( const std::vector<Mat>& resp
         new TrackerStateEstimatorAdaBoosting::TrackerAdaBoostingTargetState( currentOfs, currentSample.at( i ).cols, currentSample.at( i ).rows,
                                                                              foreground, resp ) );
-    confidenceMap.push_back( std::make_pair( currentState, 0 ) );
+    confidenceMap.push_back( std::make_pair( currentState, 0.0f ) );
   }
 }
...
@@ -203,10 +203,10 @@ bool TrackerFeatureHAAR::extractSelected( const std::vector<int> selFeatures, co
   }
   int numFeatures = featureEvaluator->getNumFeatures();
-  int numSelFeatures = selFeatures.size();
+  int numSelFeatures = (int)selFeatures.size();
   //response = Mat_<float>( Size( images.size(), numFeatures ) );
-  response.create( Size( images.size(), numFeatures ), CV_32F );
+  response.create( Size( (int)images.size(), numFeatures ), CV_32F );
   response.setTo( 0 );
   //double t = getTickCount();
@@ -222,7 +222,7 @@ bool TrackerFeatureHAAR::extractSelected( const std::vector<int> selFeatures, co
       CvHaarEvaluator::FeatureHaar& feature = featureEvaluator->getFeatures( selFeatures[j] );
       feature.eval( images[i], Rect( 0, 0, c, r ), &res );
       //( Mat_<float>( response ) )( j, i ) = res;
-      response.at<float>( selFeatures[j], i ) = res;
+      response.at<float>( selFeatures[j], (int)i ) = res;
     }
   }
   //t = ( (double) getTickCount() - t ) / getTickFrequency();
@@ -273,11 +273,11 @@ bool TrackerFeatureHAAR::computeImpl( const std::vector<Mat>& images, Mat& respo
   int numFeatures = featureEvaluator->getNumFeatures();
-  response = Mat_<float>( Size( images.size(), numFeatures ) );
+  response = Mat_<float>( Size( (int)images.size(), numFeatures ) );
   std::vector<CvHaarEvaluator::FeatureHaar> f = featureEvaluator->getFeatures();
   //for each sample compute #n_feature -> put each feature (n Rect) in response
-  parallel_for_( Range( 0, images.size() ), Parallel_compute( featureEvaluator, images, response ) );
+  parallel_for_( Range( 0, (int)images.size() ), Parallel_compute( featureEvaluator, images, response ) );
   /*for ( size_t i = 0; i < images.size(); i++ )
   {
...
@@ -158,7 +158,7 @@ bool TrackerMIL::initImpl( const Mat& image, const Rect2d& boundingBox )
   //compute HAAR features
   TrackerFeatureHAAR::Params HAARparameters;
   HAARparameters.numFeatures = params.featureSetNumFeatures;
-  HAARparameters.rectSize = Size( boundingBox.width, boundingBox.height );
+  HAARparameters.rectSize = Size( (int)boundingBox.width, (int)boundingBox.height );
   HAARparameters.isIntegral = true;
   Ptr<TrackerFeature> trackerFeature = Ptr<TrackerFeatureHAAR>( new TrackerFeatureHAAR( HAARparameters ) );
   featureSet->addTrackerFeature( trackerFeature );
@@ -191,7 +191,7 @@ bool TrackerMIL::updateImpl( const Mat& image, Rect2d& boundingBox )
   //get the last location [AAM] X(k-1)
   Ptr<TrackerTargetState> lastLocation = model->getLastTargetState();
-  Rect lastBoundingBox( lastLocation->getTargetPosition().x, lastLocation->getTargetPosition().y, lastLocation->getTargetWidth(),
+  Rect lastBoundingBox( (int)lastLocation->getTargetPosition().x, (int)lastLocation->getTargetPosition().y, lastLocation->getTargetWidth(),
                         lastLocation->getTargetHeight() );
   //sampling new frame based on last location
@@ -229,7 +229,7 @@ bool TrackerMIL::updateImpl( const Mat& image, Rect2d& boundingBox )
   }
   Ptr<TrackerTargetState> currentState = model->getLastTargetState();
-  boundingBox = Rect( currentState->getTargetPosition().x, currentState->getTargetPosition().y, currentState->getTargetWidth(),
+  boundingBox = Rect( (int)currentState->getTargetPosition().x, (int)currentState->getTargetPosition().y, currentState->getTargetWidth(),
                       currentState->getTargetHeight() );
   /*//TODO debug
...
@@ -57,7 +57,7 @@ TrackerMILModel::TrackerMILModel( const Rect& boundingBox )
   height = boundingBox.height;
   Ptr<TrackerStateEstimatorMILBoosting::TrackerMILTargetState> initState = Ptr<TrackerStateEstimatorMILBoosting::TrackerMILTargetState>(
-      new TrackerStateEstimatorMILBoosting::TrackerMILTargetState( Point2f( boundingBox.x, boundingBox.y ), boundingBox.width, boundingBox.height,
+      new TrackerStateEstimatorMILBoosting::TrackerMILTargetState( Point2f( (float)boundingBox.x, (float)boundingBox.y ), boundingBox.width, boundingBox.height,
                                                                    true, Mat() ) );
   trajectory.push_back( initState );
 }
@@ -97,7 +97,7 @@ void TrackerMILModel::responseToConfidenceMap( const std::vector<Mat>& responses
     Ptr<TrackerStateEstimatorMILBoosting::TrackerMILTargetState> currentState = Ptr<TrackerStateEstimatorMILBoosting::TrackerMILTargetState>(
         new TrackerStateEstimatorMILBoosting::TrackerMILTargetState( currentOfs, width, height, foreground, singleResponse ) );
-    confidenceMap.push_back( std::make_pair( currentState, 0 ) );
+    confidenceMap.push_back( std::make_pair( currentState, 0.0f ) );
   }
...
@@ -394,7 +394,7 @@ void TrackerStateEstimatorAdaBoosting::updateImpl( std::vector<ConfidenceMap>& c
       swappedClassifier[i] = -1;
     }
-    int mapPosition = i + lastConfidenceMap.size() / 2;
+    int mapPosition = (int)(i + lastConfidenceMap.size() / 2);
     Ptr<TrackerAdaBoostingTargetState> currentTargetState2 = lastConfidenceMap.at( mapPosition ).first.staticCast<TrackerAdaBoostingTargetState>();
     currentFg = 1;
...
@@ -138,15 +138,15 @@ std::vector<std::string> TrackerOPETest::splitString( std::string s, std::string
 float TrackerOPETest::calcDistance( Rect a, Rect b )
 {
-  Point2f p_a( a.x + a.width / 2, a.y + a.height / 2 );
-  Point2f p_b( b.x + b.width / 2, b.y + b.height / 2 );
+  Point2f p_a( (float)(a.x + a.width / 2), (float)(a.y + a.height / 2) );
+  Point2f p_b( (float)(b.x + b.width / 2), (float)(b.y + b.height / 2) );
   return sqrt( pow( p_a.x - p_b.x, 2 ) + pow( p_a.y - p_b.y, 2 ) );
 }
 float TrackerOPETest::calcOverlap( Rect a, Rect b )
 {
-  float aArea = a.width * a.height;
-  float bArea = b.width * b.height;
+  float aArea = (float)(a.width * a.height);
+  float bArea = (float)(b.width * b.height);
   if( aArea < bArea )
   {
@@ -165,8 +165,8 @@ float TrackerOPETest::calcOverlap( Rect a, Rect b )
   Rect rectIntersection = a & b;
   Rect rectUnion = a | b;
-  float iArea = rectIntersection.width * rectIntersection.height;
-  float uArea = rectUnion.width * rectUnion.height;
+  float iArea = (float)(rectIntersection.width * rectIntersection.height);
+  float uArea = (float)(rectUnion.width * rectUnion.height);
   float overlap = iArea / uArea;
   return overlap;
 }
...
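The tracking test and perf-test edits are the size_t-to-int and int-to-float flavour of the same conversion warnings (MSVC C4267/C4244): container sizes are size_t, cv::Mat dimensions and indices are int, and Rect coordinates are int while the geometry math wants float. A minimal sketch of the pattern, modelled on the getMatOfRects helper earlier in this diff (the free-standing function below is illustrative, not the tests' actual code):

    #include <opencv2/core.hpp>
    #include <vector>

    static cv::Mat rectsToMat(const std::vector<cv::Rect>& bbs)
    {
        cv::Mat bbs_mat((int)bbs.size(), 4, CV_32F);        // Mat rows are int; size() is size_t (C4267 otherwise)
        for (int b = 0, n = (int)bbs.size(); b < n; b++)
        {
            bbs_mat.at<float>(b, 0) = (float)bbs[b].x;      // int -> float made explicit (C4244 otherwise)
            bbs_mat.at<float>(b, 1) = (float)bbs[b].y;
            bbs_mat.at<float>(b, 2) = (float)bbs[b].width;
            bbs_mat.at<float>(b, 3) = (float)bbs[b].height;
        }
        return bbs_mat;
    }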
@@ -142,15 +142,15 @@ std::vector<std::string> TrackerSRETest::splitString( std::string s, std::string
 float TrackerSRETest::calcDistance( Rect a, Rect b )
 {
-  Point2f p_a( a.x + a.width / 2, a.y + a.height / 2 );
-  Point2f p_b( b.x + b.width / 2, b.y + b.height / 2 );
+  Point2f p_a( (float)(a.x + a.width / 2), (float)(a.y + a.height / 2) );
+  Point2f p_b( (float)(b.x + b.width / 2), (float)(b.y + b.height / 2) );
   return sqrt( pow( p_a.x - p_b.x, 2 ) + pow( p_a.y - p_b.y, 2 ) );
 }
 float TrackerSRETest::calcOverlap( Rect a, Rect b )
 {
-  float aArea = a.width * a.height;
-  float bArea = b.width * b.height;
+  float aArea = (float)(a.width * a.height);
+  float bArea = (float)(b.width * b.height);
   if( aArea < bArea )
   {
@@ -169,8 +169,8 @@ float TrackerSRETest::calcOverlap( Rect a, Rect b )
   Rect rectIntersection = a & b;
   Rect rectUnion = a | b;
-  float iArea = rectIntersection.width * rectIntersection.height;
-  float uArea = rectUnion.width * rectUnion.height;
+  float iArea = (float)(rectIntersection.width * rectIntersection.height);
+  float uArea = (float)(rectUnion.width * rectUnion.height);
   float overlap = iArea / uArea;
   return overlap;
 }
@@ -327,81 +327,81 @@ void TrackerSRETest::checkDataTest()
   {
     case 1:
       //center shift left
-      bb.x = bb.x - ceil( 0.1 * bb.width );
+      bb.x = bb.x - (int)ceil( 0.1 * bb.width );
       break;
     case 2:
      //center shift right
-      bb.x = bb.x + ceil( 0.1 * bb.width );
+      bb.x = bb.x + (int)ceil( 0.1 * bb.width );
      break;
    case 3:
      //center shift up
-      bb.y = bb.y - ceil( 0.1 * bb.height );
+      bb.y = bb.y - (int)ceil( 0.1 * bb.height );
      break;
    case 4:
      //center shift down
-      bb.y = bb.y + ceil( 0.1 * bb.height );
+      bb.y = bb.y + (int)ceil( 0.1 * bb.height );
      break;
    case 5:
      //corner shift top-left
-      bb.x = round( bb.x - ( 0.1 * bb.width ) );
-      bb.y = round( bb.y - ( 0.1 * bb.height ) );
+      bb.x = (int)round( bb.x - 0.1 * bb.width );
+      bb.y = (int)round( bb.y - 0.1 * bb.height );
      bb.width = xLimit - bb.x + 1;
      bb.height = yLimit - bb.y + 1;
      break;
    case 6:
      //corner shift top-right
-      xLimit = round( xLimit + 0.1 * bb.width );
-      bb.y = round( bb.y - ( 0.1 * bb.height ) );
+      xLimit = (int)round( xLimit + 0.1 * bb.width );
+      bb.y = (int)round( bb.y - 0.1 * bb.height );
      bb.width = xLimit - bb.x + 1;
      bb.height = yLimit - bb.y + 1;
      break;
    case 7:
      //corner shift bottom-left
-      bb.x = round( bb.x - ( 0.1 * bb.width ) );
-      yLimit = round( yLimit + 0.1 * bb.height );
+      bb.x = (int)round( bb.x - 0.1 * bb.width );
+      yLimit = (int)round( yLimit + 0.1 * bb.height );
      bb.width = xLimit - bb.x + 1;
      bb.height = yLimit - bb.y + 1;
      break;
    case 8:
      //corner shift bottom-right
-      xLimit = round( xLimit + 0.1 * bb.width );
-      yLimit = round( yLimit + 0.1 * bb.height );
+      xLimit = (int)round( xLimit + 0.1 * bb.width );
+      yLimit = (int)round( yLimit + 0.1 * bb.height );
      bb.width = xLimit - bb.x + 1;
      bb.height = yLimit - bb.y + 1;
      break;
    case 9:
      //scale 0.8
-      ratio = 0.8;
-      w = ratio * bb.width;
-      h = ratio * bb.height;
+      ratio = 0.8f;
+      w = (int)(ratio * bb.width);
+      h = (int)(ratio * bb.height);
      bb = Rect( center.x - ( w / 2 ), center.y - ( h / 2 ), w, h );
      break;
    case 10:
      //scale 0.9
-      ratio = 0.9;
-      w = ratio * bb.width;
-      h = ratio * bb.height;
+      ratio = 0.9f;
+      w = (int)(ratio * bb.width);
+      h = (int)(ratio * bb.height);
      bb = Rect( center.x - ( w / 2 ), center.y - ( h / 2 ), w, h );
      break;
    case 11:
      //scale 1.1
-      ratio = 1.1;
-      w = ratio * bb.width;
-      h = ratio * bb.height;
+      ratio = 1.1f;
+      w = (int)(ratio * bb.width);
+      h = (int)(ratio * bb.height);
      bb = Rect( center.x - ( w / 2 ), center.y - ( h / 2 ), w, h );
      break;
    case 12:
      //scale 1.2
-      ratio = 1.2;
-      w = ratio * bb.width;
-      h = ratio * bb.height;
+      ratio = 1.2f;
+      w = (int)(ratio * bb.width);
+      h = (int)(ratio * bb.height);
      bb = Rect( center.x - ( w / 2 ), center.y - ( h / 2 ), w, h );
      break;
...
@@ -150,15 +150,15 @@ std::vector<std::string> TrackerTRETest::splitString( std::string s, std::string
 float TrackerTRETest::calcDistance( Rect a, Rect b )
 {
-  Point2f p_a( a.x + a.width / 2, a.y + a.height / 2 );
-  Point2f p_b( b.x + b.width / 2, b.y + b.height / 2 );
+  Point2f p_a( (float)(a.x + a.width / 2), (float)(a.y + a.height / 2) );
+  Point2f p_b( (float)(b.x + b.width / 2), (float)(b.y + b.height / 2) );
   return sqrt( pow( p_a.x - p_b.x, 2 ) + pow( p_a.y - p_b.y, 2 ) );
 }
 float TrackerTRETest::calcOverlap( Rect a, Rect b )
 {
-  float aArea = a.width * a.height;
-  float bArea = b.width * b.height;
+  float aArea = (float)(a.width * a.height);
+  float bArea = (float)(b.width * b.height);
   if( aArea < bArea )
   {
@@ -177,8 +177,8 @@ float TrackerTRETest::calcOverlap( Rect a, Rect b )
   Rect rectIntersection = a & b;
   Rect rectUnion = a | b;
-  float iArea = rectIntersection.width * rectIntersection.height;
-  float uArea = rectUnion.width * rectUnion.height;
+  float iArea = (float)(rectIntersection.width * rectIntersection.height);
+  float uArea = (float)(rectUnion.width * rectUnion.height);
   float overlap = iArea / uArea;
   return overlap;
 }
@@ -361,7 +361,7 @@ void TrackerTRETest::checkDataTest()
   gtStartFrame = startFrame;
   //compute the start and the and for each segment
   int segmentLength = sizeof ( SEGMENTS)/sizeof(int);
-  int numFrame = validSequence.size() / segmentLength;
+  int numFrame = (int)(validSequence.size() / segmentLength);
   startFrame += ( segmentIdx - 1 ) * numFrame;
   endFrame = startFrame + numFrame;
@@ -389,8 +389,7 @@ void TrackerTRETest::checkDataTest()
   gt2.close();
   if( segmentIdx == ( sizeof ( SEGMENTS)/sizeof(int) ) )
-    endFrame = bbs.size();
+    endFrame = (int)bbs.size();
 }
 void TrackerTRETest::run()
...