Commit 0279ba95 authored by Alexander Shishkov

fixed warnings in linemod on Windows

parent e7e37330
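The warnings addressed below fall into two groups: implicit narrowing conversions (size_t or double results assigned to int or float), which MSVC reports as "possible loss of data" (warnings such as C4267/C4244), and public classes that gain the CV_EXPORTS attribute, presumably for proper DLL export on Windows. A minimal sketch of the narrowing-cast pattern used throughout the diff; `countItems` is a hypothetical helper, not OpenCV code:

```cpp
#include <vector>

// Hypothetical helper (not OpenCV code): on 64-bit MSVC builds, size_t is
// 64-bit, so assigning std::vector::size() directly to int draws a
// "possible loss of data" warning.
int countItems(const std::vector<int>& items)
{
  // int n = items.size();                  // implicit narrowing: warns on MSVC
  int n = static_cast<int>(items.size());   // explicit cast documents the intent and silences the warning
  return n;
}
```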
@@ -774,7 +774,7 @@ protected:
  *
  * \todo Max response, to allow optimization of summing (255/MAX) features as uint8
  */
-class Modality
+class CV_EXPORTS Modality
 {
 public:
   // Virtual destructor
@@ -821,7 +821,7 @@ protected:
 /**
  * \brief Modality that computes quantized gradient orientations from a color image.
  */
-class ColorGradient : public Modality
+class CV_EXPORTS ColorGradient : public Modality
 {
 public:
   /**
@@ -856,7 +856,7 @@ protected:
 /**
  * \brief Modality that computes quantized surface normals from a dense depth map.
  */
-class DepthNormal : public Modality
+class CV_EXPORTS DepthNormal : public Modality
 {
 public:
   /**
@@ -900,7 +900,7 @@ void colormap(const Mat& quantized, Mat& dst);
 /**
  * \brief Represents a successful template match.
  */
-struct Match
+struct CV_EXPORTS Match
 {
   Match()
   {
@@ -1020,7 +1020,7 @@ public:
   int numTemplates() const;
   int numTemplates(const std::string& class_id) const;
-  int numClasses() const { return class_templates.size(); }
+  int numClasses() const { return static_cast<int>(class_templates.size()); }
   std::vector<std::string> classIds() const;
...
@@ -292,11 +292,11 @@ void quantizedOrientations(const Mat& src, Mat& magnitude,
   float * ptr0y = (float *)sobel_dy.data;
   float * ptrmg = (float *)magnitude.data;
-  const int length1 = sobel_3dx.step1();
-  const int length2 = sobel_3dy.step1();
-  const int length3 = sobel_dx.step1();
-  const int length4 = sobel_dy.step1();
-  const int length5 = magnitude.step1();
+  const int length1 = static_cast<const int>(sobel_3dx.step1());
+  const int length2 = static_cast<const int>(sobel_3dy.step1());
+  const int length3 = static_cast<const int>(sobel_dx.step1());
+  const int length4 = static_cast<const int>(sobel_dy.step1());
+  const int length5 = static_cast<const int>(magnitude.step1());
   const int length0 = sobel_3dy.cols * 3;
   for (int r = 0; r < sobel_3dy.rows; ++r)
@@ -539,7 +539,7 @@ bool ColorGradientPyramid::extractTemplate(Template& templ) const
   std::stable_sort(candidates.begin(), candidates.end());
   // Use heuristic based on surplus of candidates in narrow outline for initial distance threshold
-  float distance = candidates.size() / num_features + 1;
+  float distance = static_cast<float>(candidates.size() / num_features + 1);
   selectScatteredFeatures(candidates, templ.features, num_features, distance);
   // Size determined externally, needs to match templates for other modalities
@@ -690,9 +690,9 @@ void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
       /// @todo Magic number 1150 is focal length? This is something like
       /// f in SXGA mode, but in VGA is more like 530.
-      float l_nx = 1150 * l_ddx;
-      float l_ny = 1150 * l_ddy;
-      float l_nz = -l_det * l_d;
+      float l_nx = static_cast<float>(1150 * l_ddx);
+      float l_ny = static_cast<float>(1150 * l_ddy);
+      float l_nz = static_cast<float>(-l_det * l_d);
       float l_sqrt = sqrtf(l_nx * l_nx + l_ny * l_ny + l_nz * l_nz);
@@ -706,9 +706,9 @@ void quantizedNormals(const Mat& src, Mat& dst, int distance_threshold,
       //*lp_norm = fabs(l_nz)*255;
-      int l_val1 = l_nx * l_offsetx + l_offsetx;
-      int l_val2 = l_ny * l_offsety + l_offsety;
-      int l_val3 = l_nz * GRANULARITY + GRANULARITY;
+      int l_val1 = static_cast<int>(l_nx * l_offsetx + l_offsetx);
+      int l_val2 = static_cast<int>(l_ny * l_offsety + l_offsety);
+      int l_val3 = static_cast<int>(l_nz * GRANULARITY + GRANULARITY);
       *lp_norm = NORMAL_LUT[l_val3][l_val2][l_val1];
     }
@@ -856,8 +856,8 @@ bool DepthNormalPyramid::extractTemplate(Template& templ) const
   std::stable_sort(candidates.begin(), candidates.end());
   // Use heuristic based on object area for initial distance threshold
-  int area = no_mask ? normal.total() : countNonZero(local_mask);
-  float distance = sqrtf(area) / sqrtf(num_features) + 1.5f;
+  int area = static_cast<int>(no_mask ? normal.total() : countNonZero(local_mask));
+  float distance = sqrtf(static_cast<float>(area)) / sqrtf(static_cast<float>(num_features)) + 1.5f;
   selectScatteredFeatures(candidates, templ.features, num_features, distance);
   // Size determined externally, needs to match templates for other modalities
@@ -1000,8 +1000,8 @@ void spread(const Mat& src, Mat& dst, int T)
     int height = src.rows - r;
     for (int c = 0; c < T; ++c)
     {
-      orUnaligned8u(&src.at<unsigned char>(r, c), src.step1(), dst.ptr(),
-                    dst.step1(), src.cols - c, height);
+      orUnaligned8u(&src.at<unsigned char>(r, c), static_cast<const int>(src.step1()), dst.ptr(),
+                    static_cast<const int>(dst.step1()), src.cols - c, height);
     }
   }
 }
@@ -1366,7 +1366,7 @@ void addSimilarities(const std::vector<Mat>& similarities, Mat& dst)
 {
   // NOTE: add() seems to be rather slow in the 8U + 8U -> 16U case
   dst.create(similarities[0].size(), CV_16U);
-  addUnaligned8u16u(similarities[0].ptr(), similarities[1].ptr(), dst.ptr<ushort>(), dst.total());
+  addUnaligned8u16u(similarities[0].ptr(), similarities[1].ptr(), dst.ptr<ushort>(), static_cast<int>(dst.total()));
   /// @todo Optimize 16u + 8u -> 16u when more than 2 modalities
   for (size_t i = 2; i < similarities.size(); ++i)
@@ -1385,7 +1385,7 @@ Detector::Detector()
 Detector::Detector(const std::vector< Ptr<Modality> >& modalities,
                    const std::vector<int>& T_pyramid)
   : modalities(modalities),
-    pyramid_levels(T_pyramid.size()),
+    pyramid_levels(static_cast<int>(T_pyramid.size())),
     T_at_level(T_pyramid)
 {
 }
@@ -1396,7 +1396,7 @@ void Detector::match(const std::vector<Mat>& sources, float threshold, std::vect
 {
   matches.clear();
   if (quantized_images.needed())
-    quantized_images.create(1, pyramid_levels * modalities.size(), CV_8U);
+    quantized_images.create(1, static_cast<int>(pyramid_levels * modalities.size()), CV_8U);
   assert(sources.size() == modalities.size());
   // Initialize each modality with our sources
@@ -1441,7 +1441,7 @@ void Detector::match(const std::vector<Mat>& sources, float threshold, std::vect
       linearize(response_maps[j], memories[j], T);
       if (quantized_images.needed()) //use copyTo here to side step reference semantics.
-        quantized.copyTo(quantized_images.getMatRef(l*quantizers.size() + i));
+        quantized.copyTo(quantized_images.getMatRef(static_cast<int>(l*quantizers.size() + i)));
     }
     sizes.push_back(quantized.size());
@@ -1496,13 +1496,13 @@ void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
   // Compute similarity maps for each modality at lowest pyramid level
   std::vector<Mat> similarities(modalities.size());
-  int lowest_start = tp.size() - modalities.size();
+  int lowest_start = static_cast<int>(tp.size() - modalities.size());
   int lowest_T = T_at_level.back();
   int num_features = 0;
   for (int i = 0; i < (int)modalities.size(); ++i)
   {
     const Template& templ = tp[lowest_start + i];
-    num_features += templ.features.size();
+    num_features += static_cast<int>(templ.features.size());
     similarity(lowest_lm[i], templ, similarities[i], sizes.back(), lowest_T);
   }
@@ -1515,7 +1515,7 @@ void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
   // threshold scales from half the max response (what you would expect from applying
   // the template to a completely random image) to the max response.
   // NOTE: This assumes max per-feature response is 4, so we scale between [2*nf, 4*nf].
-  int raw_threshold = 2*num_features + (threshold / 100.f) * (2*num_features) + 0.5f;
+  int raw_threshold = static_cast<int>(2*num_features + (threshold / 100.f) * (2*num_features) + 0.5f);
   // Find initial matches
   std::vector<Match> candidates;
@@ -1530,8 +1530,8 @@ void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
         int offset = lowest_T / 2 + (lowest_T % 2 - 1);
         int x = c * lowest_T + offset;
         int y = r * lowest_T + offset;
-        int score = (raw_score * 100.f) / (4 * num_features) + 0.5f;
-        candidates.push_back(Match(x, y, score, class_id, template_id));
+        float score =(raw_score * 100.f) / (4 * num_features) + 0.5f;
+        candidates.push_back(Match(x, y, score, class_id, static_cast<int>(template_id)));
       }
     }
   }
@@ -1541,7 +1541,7 @@ void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
   {
     const std::vector<LinearMemories>& lms = lm_pyramid[l];
     int T = T_at_level[l];
-    int start = l * modalities.size();
+    int start = static_cast<int>(l * modalities.size());
     Size size = sizes[l];
     int border = 8 * T;
     int offset = T / 2 + (T % 2 - 1);
@@ -1569,7 +1569,7 @@ void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
       for (int i = 0; i < (int)modalities.size(); ++i)
       {
         const Template& templ = tp[start + i];
-        num_features += templ.features.size();
+        num_features += static_cast<int>(templ.features.size());
         similarityLocal(lms[i], templ, similarities[i], size, T, Point(x, y));
       }
       addSimilarities(similarities, total_similarity);
@@ -1610,9 +1610,9 @@ void Detector::matchClass(const LinearMemoryPyramid& lm_pyramid,
 int Detector::addTemplate(const std::vector<Mat>& sources, const std::string& class_id,
                           const Mat& object_mask, Rect* bounding_box)
 {
-  int num_modalities = modalities.size();
+  int num_modalities = static_cast<int>(modalities.size());
   std::vector<TemplatePyramid>& template_pyramids = class_templates[class_id];
-  int template_id = template_pyramids.size();
+  int template_id = static_cast<int>(template_pyramids.size());
   TemplatePyramid tp;
   tp.resize(num_modalities * pyramid_levels);
@@ -1646,7 +1646,7 @@ int Detector::addTemplate(const std::vector<Mat>& sources, const std::string& cl
 int Detector::addSyntheticTemplate(const std::vector<Template>& templates, const std::string& class_id)
 {
   std::vector<TemplatePyramid>& template_pyramids = class_templates[class_id];
-  int template_id = template_pyramids.size();
+  int template_id = static_cast<int>(template_pyramids.size());
   template_pyramids.push_back(templates);
   return template_id;
 }
@@ -1664,7 +1664,7 @@ int Detector::numTemplates() const
   int ret = 0;
   TemplatesMap::const_iterator i = class_templates.begin(), iend = class_templates.end();
   for ( ; i != iend; ++i)
-    ret += i->second.size();
+    ret += static_cast<int>(i->second.size());
   return ret;
 }
@@ -1673,7 +1673,7 @@ int Detector::numTemplates(const std::string& class_id) const
   TemplatesMap::const_iterator i = class_templates.find(class_id);
   if (i == class_templates.end())
     return 0;
-  return i->second.size();
+  return static_cast<int>(i->second.size());
 }
 std::vector<std::string> Detector::classIds() const
...
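The CV_EXPORTS additions in the header hunks above are the usual fix for making public classes visible across the OpenCV DLL boundary on Windows (and for the related MSVC interface warnings). A hedged sketch of how such an export macro is commonly defined; MYLIB_EXPORTS and MYLIB_BUILD_DLL are placeholder names, and this is not OpenCV's exact CV_EXPORTS definition:

```cpp
// Illustrative export macro, assuming the common Windows DLL pattern.
// When the library itself is being built as a DLL, public symbols are marked
// __declspec(dllexport); on other platforms the macro expands to nothing.
#if defined(_WIN32) && defined(MYLIB_BUILD_DLL)
#  define MYLIB_EXPORTS __declspec(dllexport)
#else
#  define MYLIB_EXPORTS
#endif

// Marking a public class this way exports it from the DLL, mirroring
// "class CV_EXPORTS Modality" in the diff above.
class MYLIB_EXPORTS Modality
{
public:
  virtual ~Modality() {}
};
```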