Commit d8ace437 authored by Vadim Pisarevsky

fixed some more tests on Windows; changed inheritance Matx -> Vec to Vec -> Matx

parent 5a53d82e
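
The "Vec -> Matx" part of the message refers to the core fixed-size vector and matrix templates. The sketch below is a rough, simplified picture of the reversed relationship, not the actual OpenCV declarations (which carry many more constructors and operators):

template<typename _Tp, int m, int n> class Matx
{
public:
    _Tp val[m*n];                            // compile-time sized storage
};

// After this commit a Vec<_Tp, cn> is just a cn x 1 Matx, so it reuses the
// matrix operations instead of Matx being layered on top of Vec as before.
template<typename _Tp, int cn> class Vec : public Matx<_Tp, cn, 1>
{
public:
    _Tp& operator[](int i) { return this->val[i]; }
};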
......@@ -89,16 +89,17 @@ bool is_smaller(const std::pair<int, float>& p1, const std::pair<int, float>& p2
void orderContours(const vector<vector<Point> >& contours, Point2f point, vector<std::pair<int, float> >& order)
{
order.clear();
int i, j, n = (int)contours.size();
size_t i, j, n = contours.size();
for(i = 0; i < n; i++)
{
size_t ni = contours[i].size();
double min_dist = std::numeric_limits<double>::max();
for(j = 0; j < n; j++)
for(j = 0; j < ni; j++)
{
double dist = norm(Point2f((float)contours[i][j].x, (float)contours[i][j].y) - point);
min_dist = MIN(min_dist, dist);
}
order.push_back(std::pair<int, float>(i, (float)min_dist));
order.push_back(std::pair<int, float>((int)i, (float)min_dist));
}
std::sort(order.begin(), order.end(), is_smaller);
......
This diff is collapsed.
......@@ -1873,15 +1873,15 @@ static inline MatConstIterator operator - (const MatConstIterator& a, ptrdiff_t
template<typename _Tp> static inline MatConstIterator_<_Tp>
operator + (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs)
{ return (MatConstIterator_<_Tp>&)((const MatConstIterator&)a + ofs); }
{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; }
template<typename _Tp> static inline MatConstIterator_<_Tp>
operator + (ptrdiff_t ofs, const MatConstIterator_<_Tp>& a)
{ return (MatConstIterator_<_Tp>&)((const MatConstIterator&)a + ofs); }
{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatConstIterator_<_Tp>&)t; }
template<typename _Tp> static inline MatConstIterator_<_Tp>
operator - (const MatConstIterator_<_Tp>& a, ptrdiff_t ofs)
{ return (MatConstIterator_<_Tp>&)((const MatConstIterator&)a - ofs); }
{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatConstIterator_<_Tp>&)t; }
inline uchar* MatConstIterator::operator [](ptrdiff_t i) const
{ return *(*this + i); }
......@@ -1891,15 +1891,15 @@ template<typename _Tp> inline _Tp MatConstIterator_<_Tp>::operator [](ptrdiff_t
template<typename _Tp> static inline MatIterator_<_Tp>
operator + (const MatIterator_<_Tp>& a, ptrdiff_t ofs)
{ return (MatIterator_<_Tp>&)((const MatConstIterator&)a + ofs); }
{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; }
template<typename _Tp> static inline MatIterator_<_Tp>
operator + (ptrdiff_t ofs, const MatIterator_<_Tp>& a)
{ return (MatIterator_<_Tp>&)((const MatConstIterator&)a + ofs); }
{ MatConstIterator t = (const MatConstIterator&)a + ofs; return (MatIterator_<_Tp>&)t; }
template<typename _Tp> static inline MatIterator_<_Tp>
operator - (const MatIterator_<_Tp>& a, ptrdiff_t ofs)
{ return (MatIterator_<_Tp>&)((const MatConstIterator&)a - ofs); }
{ MatConstIterator t = (const MatConstIterator&)a - ofs; return (MatIterator_<_Tp>&)t; }
template<typename _Tp> inline _Tp& MatIterator_<_Tp>::operator [](ptrdiff_t i) const
{ return *(*this + i); }
......
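
The iterator hunks above all apply the same change: instead of casting the temporary returned by MatConstIterator's operator+/operator- straight to a non-const derived reference, the result is first stored in a named local and the cast is applied to that lvalue. Casting an rvalue temporary to a non-const reference is something stricter compilers (MSVC in particular) reject or warn about, which is presumably part of the Windows breakage this commit addresses. A minimal illustration with made-up types (the derived type adds no data members, just like MatIterator_ over MatConstIterator, so the reference cast stays layout-compatible):

struct Base    { int pos; };
struct Derived : Base {};                    // no extra members

Base advance(const Base& a, int ofs) { return Base{ a.pos + ofs }; }

Derived next(const Derived& a, int ofs)
{
    // return (Derived&)advance(a, ofs);     // old pattern: cast of an rvalue temporary
    Base t = advance(a, ofs);                // new pattern: give the temporary a name...
    return (Derived&)t;                      // ...then cast the lvalue, as the diff does
}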
......@@ -1296,8 +1296,8 @@ inRangeS_( const Mat& srcmat1, const Scalar& _a, const Scalar& _b, Mat& dstmat )
size_t dstep = dstmat.step;
Size size = getContinuousSize( srcmat1, dstmat );
int cn = srcmat1.channels();
_a.convertTo((WT1*)&a, cn);
_b.convertTo((WT1*)&b, cn);
scalarToRawData(_a, &a, CV_MAKETYPE(DataType<WT>::depth, cn));
scalarToRawData(_b, &b, CV_MAKETYPE(DataType<WT>::depth, cn));
for( int y = 0; y < size.height; y++, dst += dstep )
{
......
......@@ -759,6 +759,81 @@ int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) con
(isContinuous() || step.p[1] == step.p[2]*size.p[2])))
? (int)(total()*channels()/_elemChannels) : -1;
}
void scalarToRawData(const Scalar& s, void* _buf, int type, int unroll_to)
{
    int i, depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    CV_Assert(cn <= 4);
    switch(depth)
    {
    case CV_8U:
        {
        uchar* buf = (uchar*)_buf;
        for(i = 0; i < cn; i++)
            buf[i] = saturate_cast<uchar>(s.val[i]);
        for(; i < unroll_to; i++)
            buf[i] = buf[i-cn];
        }
        break;
    case CV_8S:
        {
        schar* buf = (schar*)_buf;
        for(i = 0; i < cn; i++)
            buf[i] = saturate_cast<schar>(s.val[i]);
        for(; i < unroll_to; i++)
            buf[i] = buf[i-cn];
        }
        break;
    case CV_16U:
        {
        ushort* buf = (ushort*)_buf;
        for(i = 0; i < cn; i++)
            buf[i] = saturate_cast<ushort>(s.val[i]);
        for(; i < unroll_to; i++)
            buf[i] = buf[i-cn];
        }
        break;
    case CV_16S:
        {
        short* buf = (short*)_buf;
        for(i = 0; i < cn; i++)
            buf[i] = saturate_cast<short>(s.val[i]);
        for(; i < unroll_to; i++)
            buf[i] = buf[i-cn];
        }
        break;
    case CV_32S:
        {
        int* buf = (int*)_buf;
        for(i = 0; i < cn; i++)
            buf[i] = saturate_cast<int>(s.val[i]);
        for(; i < unroll_to; i++)
            buf[i] = buf[i-cn];
        }
        break;
    case CV_32F:
        {
        float* buf = (float*)_buf;
        for(i = 0; i < cn; i++)
            buf[i] = saturate_cast<float>(s.val[i]);
        for(; i < unroll_to; i++)
            buf[i] = buf[i-cn];
        }
        break;
    case CV_64F:
        {
        double* buf = (double*)_buf;
        for(i = 0; i < cn; i++)
            buf[i] = saturate_cast<double>(s.val[i]);
        for(; i < unroll_to; i++)
            buf[i] = buf[i-cn];
        break;
        }
    default:
        CV_Error(CV_StsUnsupportedFormat,"");
    }
}
/*************************************************************************************************\
Matrix Operations
......
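
The helper added above is what the earlier inRangeS_ and binarySOpCn_ hunks call instead of Scalar::convertTo: it writes the Scalar's channels into a raw buffer of the requested element type and, when unroll_to is larger than the channel count, keeps replicating them. A small usage sketch, assuming the declaration is visible to the caller (in this commit scalarToRawData is a core-internal function, not public API):

#include <opencv2/core/core.hpp>

void fill_scalar_buffer()
{
    cv::Scalar s(10, 20, 30);
    float buf[12];
    // Write the 3 channels as float, then repeat them until 12 values are
    // filled (unroll_to = 12), matching the WT scalar[12] buffer below.
    cv::scalarToRawData(s, buf, CV_MAKETYPE(CV_32F, 3), 12);
}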
......@@ -325,7 +325,7 @@ binarySOpCn_( const Mat& srcmat, Mat& dstmat, const Scalar& _scalar )
int cn = dstmat.channels();
Size size = getContinuousSize( srcmat, dstmat, cn );
WT scalar[12];
_scalar.convertTo(scalar, cn, 12);
scalarToRawData(_scalar, scalar, CV_MAKETYPE(DataType<WT>::depth,cn), 12);
for( ; size.height--; src0 += step1, dst0 += step )
{
......
......@@ -156,7 +156,7 @@ public:
{
int index_type;
load_value(stream,index_type);
IndexParams* params = ParamsFactory::instance().create((flann_algorithm_t)index_type);
IndexParams* params = ParamsFactory_instance().create((flann_algorithm_t)index_type);
bestIndex = create_index_by_type(dataset, *params);
bestIndex->loadIndex(stream);
load_value(stream, bestSearchParams);
......
......@@ -123,7 +123,7 @@ NNIndex<T>* load_saved_index(const Matrix<T>& dataset, const string& filename)
throw FLANNException("The index saved belongs to a different dataset");
}
IndexParams* params = ParamsFactory::instance().create(header.index_type);
IndexParams* params = ParamsFactory_instance().create(header.index_type);
NNIndex<T>* nnIndex = create_index_by_type(dataset, *params);
nnIndex->loadIndex(fin);
fclose(fin);
......
......@@ -134,7 +134,7 @@ public:
typedef ObjectFactory<IndexParams, flann_algorithm_t> ParamsFactory;
CV_EXPORTS ParamsFactory& ParamsFactory_instance();
struct CV_EXPORTS SearchParams {
SearchParams(int checks_ = 32) :
......
......@@ -50,7 +50,7 @@ class ObjectFactory
std::map<UniqueIdType, CreateObjectFunc> object_registry;
// singleton class, private constructor
ObjectFactory() {};
//ObjectFactory() {};
public:
typedef typename std::map<UniqueIdType, CreateObjectFunc>::iterator Iterator;
......@@ -81,11 +81,11 @@ public:
return ((*iter).second)();
}
static ObjectFactory<BaseClass,UniqueIdType>& instance()
/*static ObjectFactory<BaseClass,UniqueIdType>& instance()
{
static ObjectFactory<BaseClass,UniqueIdType> the_factory;
return the_factory;
}
}*/
};
......
......@@ -195,16 +195,24 @@ void set_distance_type(flann_distance_t distance_type, int order)
flann_minkowski_order_ = order;
}
static ParamsFactory the_factory;
ParamsFactory& ParamsFactory_instance()
{
return the_factory;
}
class StaticInit
{
public:
StaticInit()
{
ParamsFactory::instance().register_<LinearIndexParams>(LINEAR);
ParamsFactory::instance().register_<KDTreeIndexParams>(KDTREE);
ParamsFactory::instance().register_<KMeansIndexParams>(KMEANS);
ParamsFactory::instance().register_<CompositeIndexParams>(COMPOSITE);
ParamsFactory::instance().register_<AutotunedIndexParams>(AUTOTUNED);
ParamsFactory_instance().register_<LinearIndexParams>(LINEAR);
ParamsFactory_instance().register_<KDTreeIndexParams>(KDTREE);
ParamsFactory_instance().register_<KMeansIndexParams>(KMEANS);
ParamsFactory_instance().register_<CompositeIndexParams>(COMPOSITE);
ParamsFactory_instance().register_<AutotunedIndexParams>(AUTOTUNED);
// ParamsFactory::instance().register_<SavedIndexParams>(SAVED);
}
};
......
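
The flann hunks replace the ParamsFactory::instance() Meyers-style singleton (now commented out) with a file-static object returned by the exported free function ParamsFactory_instance(). A function-local static defined in a header can be instantiated separately by every Windows DLL that includes it, so index types registered in one module would be invisible in another; keeping the object in one translation unit behind a CV_EXPORTS accessor preserves a single registry, which is most likely the Windows problem being fixed here. A minimal sketch of the pattern with hypothetical names:

// registry.hpp
struct Registry { /* map from algorithm id to creator function, etc. */ };
Registry& registry_instance();               // CV_EXPORTS on the real declaration

// registry.cpp
static Registry the_registry;                // the one and only instance
Registry& registry_instance() { return the_registry; }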
......@@ -188,7 +188,7 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
const CvMat* _responses, const CvMat* _var_idx,
const CvMat* _sample_idx, const CvMat* _var_type,
const CvMat* _missing_mask,
CvGBTreesParams _params, bool _update ) //update is not supported
CvGBTreesParams _params, bool /*_update*/ ) //update is not supported
{
CvMemStorage* storage = 0;
......@@ -1071,7 +1071,7 @@ bool CvGBTrees::train( const cv::Mat& trainData, int tflag,
bool update )
{
CvMat _trainData = trainData, _responses = responses;
CvMat _varIdx = varIdx, _sampleIdx = sampleIdx, _varType = _varType;
CvMat _varIdx = varIdx, _sampleIdx = sampleIdx, _varType = varType;
CvMat _missingDataMask = missingDataMask;
return train(&_trainData, tflag, &_responses, varIdx.empty() ? &_varIdx : 0,
......
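
The final hunk fixes a self-initialization typo: in "CvMat _varIdx = varIdx, _sampleIdx = sampleIdx, _varType = _varType;" the right-hand side of the last initializer already names the brand-new local _varType rather than the varType argument, so the header was built from an uninitialized object. A tiny illustration of the bug class:

void self_init_example(int varType)
{
    int _bug = _bug;           // reads the uninitialized variable being declared
    int _ok  = varType;        // what the fix does: copy from the argument
    (void)_bug; (void)_ok;     // silence unused-variable warnings
}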