Commit 59d6279d authored by Olexa Bilaniuk

Merge remote-tracking branch 'refs/remotes/upstream/master'

parents eb7a7884 bdb088dc
......@@ -168,6 +168,13 @@ bool CvCascadeClassifier::train( const string _cascadeDirName,
featureEvaluator = CvFeatureEvaluator::create(cascadeParams.featureType);
featureEvaluator->init( featureParams, numPos + numNeg, cascadeParams.winSize );
stageClassifiers.reserve( numStages );
}else{
// Make sure that, if model parameters are pre-loaded, people are aware of this,
// even when passing other parameters to the training command.
cout << "---------------------------------------------------------------------------------" << endl;
cout << "Training parameters are pre-loaded from the parameter file in data folder!" << endl;
cout << "Please empty this folder if you want to use a NEW set of training parameters." << endl;
cout << "---------------------------------------------------------------------------------" << endl;
}
cout << "PARAMETERS:" << endl;
cout << "cascadeDirName: " << _cascadeDirName << endl;
......
......@@ -41,6 +41,21 @@
#
# ===================================================================================
# Search packages for the host system instead of packages for the target system.
# In case of cross-compilation these macros should be defined by the toolchain file.
if(NOT COMMAND find_host_package)
macro(find_host_package)
find_package(${ARGN})
endmacro()
endif()
if(NOT COMMAND find_host_program)
macro(find_host_program)
find_program(${ARGN})
endmacro()
endif()
if(NOT DEFINED OpenCV_MODULES_SUFFIX)
if(ANDROID)
string(REPLACE - _ OpenCV_MODULES_SUFFIX "_${ANDROID_NDK_ABI_NAME}")
......@@ -255,7 +270,7 @@ foreach(__opttype OPT DBG)
# CUDA
if(OpenCV_CUDA_VERSION)
if(NOT CUDA_FOUND)
find_package(CUDA ${OpenCV_CUDA_VERSION} EXACT REQUIRED)
find_host_package(CUDA ${OpenCV_CUDA_VERSION} EXACT REQUIRED)
else()
if(NOT CUDA_VERSION_STRING VERSION_EQUAL OpenCV_CUDA_VERSION)
message(FATAL_ERROR "OpenCV static library was compiled with CUDA ${OpenCV_CUDA_VERSION} support. Please, use the same version or rebuild OpenCV with CUDA ${CUDA_VERSION_STRING}")
......
......@@ -51,3 +51,11 @@ Output
------
![](images/output.png)
Changes for Xcode 5+ and iOS 8+
-------------------------------
With the newer Xcode and iOS versions you need to watch out for some specific details:
- The *.m file in your project should be renamed to *.mm.
- You have to manually include AssetsLibrary.framework in your project; this is no longer done by default.
......@@ -309,13 +309,13 @@ cv::String CameraWrapperConnector::getPathLibFolder()
const char* libName=dl_info.dli_fname;
while( ((*libName)=='/') || ((*libName)=='.') )
libName++;
libName++;
char lineBuf[2048];
FILE* file = fopen("/proc/self/smaps", "rt");
if(file)
{
char lineBuf[2048];
while (fgets(lineBuf, sizeof lineBuf, file) != NULL)
{
//verify that line ends with library name
......
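The lookup above resolves the library folder by scanning `/proc/self/smaps` for a mapping line that ends with the library name. A standalone sketch of the same idea, with a placeholder library name and no OpenCV dependency:

```cpp
#include <cstdio>
#include <cstring>

// Returns true if `line` ends with `name`.
static bool endsWith(const char* line, const char* name)
{
    size_t llen = std::strlen(line), nlen = std::strlen(name);
    return llen >= nlen && std::strcmp(line + llen - nlen, name) == 0;
}

int main()
{
    const char* libName = "libnative_camera.so"; // placeholder library name
    FILE* file = std::fopen("/proc/self/smaps", "rt");
    if (!file)
        return 1;

    char lineBuf[2048];
    while (std::fgets(lineBuf, sizeof lineBuf, file) != NULL)
    {
        lineBuf[std::strcspn(lineBuf, "\n")] = '\0'; // strip the trailing newline
        if (endsWith(lineBuf, libName))
            std::printf("mapped at: %s\n", lineBuf);  // full smaps mapping entry
    }
    std::fclose(file);
    return 0;
}
```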
......@@ -134,7 +134,7 @@ void cv::cuda::minMax(InputArray _src, double* minVal, double* maxVal, InputArra
*maxVal = vals[1];
}
namespace cv { namespace cuda { namespace internal {
namespace cv { namespace cuda { namespace device {
void findMaxAbs(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream);
......@@ -155,7 +155,7 @@ namespace
}
}
void cv::cuda::internal::findMaxAbs(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
void cv::cuda::device::findMaxAbs(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
{
typedef void (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _dst, Stream& stream);
static const func_t funcs[] =
......
......@@ -128,7 +128,7 @@ double cv::cuda::norm(InputArray _src1, InputArray _src2, int normType)
return val;
}
namespace cv { namespace cuda { namespace internal {
namespace cv { namespace cuda { namespace device {
void normL2(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _mask, Stream& stream);
......@@ -158,7 +158,7 @@ namespace
}
}
void cv::cuda::internal::normL2(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
void cv::cuda::device::normL2(InputArray _src, OutputArray _dst, InputArray _mask, Stream& stream)
{
typedef void (*func_t)(const GpuMat& _src, const GpuMat& mask, GpuMat& _dst, Stream& stream);
static const func_t funcs[] =
......
......@@ -84,7 +84,7 @@ void cv::cuda::sqrIntegral(InputArray, OutputArray, Stream&) { throw_no_cuda();
////////////////////////////////////////////////////////////////////////
// norm
namespace cv { namespace cuda { namespace internal {
namespace cv { namespace cuda { namespace device {
void normL2(cv::InputArray _src, cv::OutputArray _dst, cv::InputArray _mask, Stream& stream);
......@@ -106,11 +106,11 @@ void cv::cuda::calcNorm(InputArray _src, OutputArray dst, int normType, InputArr
}
else if (normType == NORM_L2)
{
cv::cuda::internal::normL2(src_single_channel, dst, mask, stream);
cv::cuda::device::normL2(src_single_channel, dst, mask, stream);
}
else // NORM_INF
{
cv::cuda::internal::findMaxAbs(src_single_channel, dst, mask, stream);
cv::cuda::device::findMaxAbs(src_single_channel, dst, mask, stream);
}
}
......
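The renamed `cv::cuda::device` helpers are reached from user code through `cv::cuda::norm`, which follows the same dispatch shown above in `calcNorm`: `normL2` for NORM_L2 and `findMaxAbs` for NORM_INF. A minimal sketch, assuming a CUDA-enabled OpenCV 3.x build with the cudaarithm module:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaarithm.hpp>
#include <cstdio>

int main()
{
    // Single-channel input, as required by the calcNorm path shown above.
    cv::Mat host = cv::Mat::ones(256, 256, CV_8UC1);
    cv::cuda::GpuMat src;
    src.upload(host);

    // NORM_L2 goes through the (renamed) cv::cuda::device::normL2 helper,
    // NORM_INF through cv::cuda::device::findMaxAbs.
    double l2  = cv::cuda::norm(src, cv::NORM_L2);
    double inf = cv::cuda::norm(src, cv::NORM_INF);
    std::printf("L2 = %f, INF = %f\n", l2, inf);
    return 0;
}
```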
......@@ -275,8 +275,8 @@ CV_EXPORTS_W int waitKey(int delay = 0);
@param mat Image to be shown.
The function imshow displays an image in the specified window. If the window was created with the
CV_WINDOW_AUTOSIZE flag, the image is shown with its original size. Otherwise, the image is scaled
to fit the window. The function may scale the image, depending on its depth:
CV_WINDOW_AUTOSIZE flag, the image is shown with its original size; however, it is still limited by the screen resolution.
Otherwise, the image is scaled to fit the window. The function may scale the image, depending on its depth:
- If the image is 8-bit unsigned, it is displayed as is.
- If the image is 16-bit unsigned or 32-bit integer, the pixels are divided by 256. That is, the
......@@ -287,6 +287,10 @@ to fit the window. The function may scale the image, depending on its depth:
If window was created with OpenGL support, imshow also support ogl::Buffer , ogl::Texture2D and
cuda::GpuMat as input.
If the window was not created before this function call, it is assumed that a window with the CV_WINDOW_AUTOSIZE flag is created.
If you need to show an image that is bigger than the screen resolution, you will need to call namedWindow("", WINDOW_NORMAL) before calling imshow.
@note This function should be followed by a call to the waitKey function, which displays the image for the specified
milliseconds. Otherwise, it won't display the image. For example, waitKey(0) will display the window
infinitely until any keypress (it is suitable for image display). waitKey(25) will display a frame
......
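The documentation change above notes that an AUTOSIZE window is clipped to the screen resolution, that a WINDOW_NORMAL window must be created first for oversized images, and that imshow needs a subsequent waitKey. A minimal sketch of that usage, with a placeholder image path:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>

int main()
{
    // "big.png" is a placeholder path; any image larger than the screen works.
    cv::Mat img = cv::imread("big.png");
    if (img.empty())
        return 1;

    // WINDOW_NORMAL makes the window resizable, so the image is scaled to fit
    // instead of being clipped to the screen resolution (the AUTOSIZE behaviour).
    cv::namedWindow("view", cv::WINDOW_NORMAL);
    cv::imshow("view", img);

    // Without a waitKey call the window never gets a chance to repaint.
    cv::waitKey(0);
    return 0;
}
```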
......@@ -1051,7 +1051,7 @@ CV_EXPORTS_W Ptr<LineSegmentDetector> createLineSegmentDetector(
The function computes and returns the \f$\texttt{ksize} \times 1\f$ matrix of Gaussian filter
coefficients:
\f[G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma} )^2},\f]
\f[G_i= \alpha *e^{-(i-( \texttt{ksize} -1)/2)^2/(2* \texttt{sigma}^2)},\f]
where \f$i=0..\texttt{ksize}-1\f$ and \f$\alpha\f$ is the scale factor chosen so that \f$\sum_i G_i=1\f$.
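The corrected exponent divides by 2·sigma² rather than (2·sigma)². A short sketch, assuming the standard getGaussianKernel API, that computes the coefficients and checks they sum to 1:

```cpp
#include <opencv2/imgproc.hpp>
#include <cstdio>

int main()
{
    const int ksize = 7;
    const double sigma = 1.5;

    // Coefficients follow G_i = alpha * exp(-(i - (ksize-1)/2)^2 / (2*sigma^2)).
    cv::Mat k = cv::getGaussianKernel(ksize, sigma, CV_64F);

    double sum = 0.0;
    for (int i = 0; i < ksize; ++i)
        sum += k.at<double>(i);

    // alpha is chosen so that the coefficients sum to 1.
    std::printf("sum = %f\n", sum);
    return 0;
}
```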
......@@ -3613,7 +3613,7 @@ CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2,
/** @brief Fits an ellipse around a set of 2D points.
The function calculates the ellipse that fits (in a least-squares sense) a set of 2D points best of
all. It returns the rotated rectangle in which the ellipse is inscribed. The algorithm @cite Fitzgibbon95
all. It returns the rotated rectangle in which the ellipse is inscribed. The first algorithm described by @cite Fitzgibbon95
is used. Developer should keep in mind that it is possible that the returned
ellipse/rotatedRect data contains negative indices, due to the data points being close to the
border of the containing Mat element.
......
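The fitEllipse documentation above warns that the returned RotatedRect may contain negative coordinates when the points lie close to the image border. A minimal usage sketch with made-up sample points:

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>
#include <cstdio>

int main()
{
    // Arbitrary points roughly on an ellipse; fitEllipse needs at least 5.
    std::vector<cv::Point2f> pts = {
        {30.f, 10.f}, {50.f, 20.f}, {55.f, 40.f},
        {40.f, 55.f}, {20.f, 45.f}, {15.f, 25.f}
    };

    // The returned rectangle inscribes the fitted ellipse; its center/size
    // can fall partly outside the image if the points hug the border.
    cv::RotatedRect box = cv::fitEllipse(pts);
    std::printf("center=(%.1f, %.1f) size=(%.1f x %.1f) angle=%.1f\n",
                box.center.x, box.center.y,
                box.size.width, box.size.height, box.angle);
    return 0;
}
```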
......@@ -239,7 +239,7 @@ Line( Mat& img, Point pt1, Point pt2,
{
if( connectivity == 0 )
connectivity = 8;
if( connectivity == 1 )
else if( connectivity == 1 )
connectivity = 4;
LineIterator iterator(img, pt1, pt2, connectivity, true);
......@@ -288,14 +288,14 @@ LineAA( Mat& img, Point pt1, Point pt2, const void* color )
int x_step, y_step;
int i, j;
int ep_table[9];
int cb = ((uchar*)color)[0], cg = ((uchar*)color)[1], cr = ((uchar*)color)[2];
int _cb, _cg, _cr;
int cb = ((uchar*)color)[0], cg = ((uchar*)color)[1], cr = ((uchar*)color)[2], ca = ((uchar*)color)[3];
int _cb, _cg, _cr, _ca;
int nch = img.channels();
uchar* ptr = img.ptr();
size_t step = img.step;
Size size = img.size();
if( !((nch == 1 || nch == 3) && img.depth() == CV_8U) )
if( !((nch == 1 || nch == 3 || nch == 4) && img.depth() == CV_8U) )
{
Line(img, pt1, pt2, color);
return;
......@@ -468,7 +468,7 @@ LineAA( Mat& img, Point pt1, Point pt2, const void* color )
}
#undef ICV_PUT_POINT
}
else
else if(nch == 1)
{
#define ICV_PUT_POINT() \
{ \
......@@ -543,6 +543,89 @@ LineAA( Mat& img, Point pt1, Point pt2, const void* color )
}
#undef ICV_PUT_POINT
}
else
{
#define ICV_PUT_POINT() \
{ \
_cb = tptr[0]; \
_cb += ((cb - _cb)*a + 127)>> 8;\
_cg = tptr[1]; \
_cg += ((cg - _cg)*a + 127)>> 8;\
_cr = tptr[2]; \
_cr += ((cr - _cr)*a + 127)>> 8;\
_ca = tptr[3]; \
_ca += ((ca - _ca)*a + 127)>> 8;\
tptr[0] = (uchar)_cb; \
tptr[1] = (uchar)_cg; \
tptr[2] = (uchar)_cr; \
tptr[3] = (uchar)_ca; \
}
if( ax > ay )
{
ptr += (pt1.x >> XY_SHIFT) * 4;
while( ecount >= 0 )
{
uchar *tptr = ptr + ((pt1.y >> XY_SHIFT) - 1) * step;
int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 +
(((ecount >= 2) + 1) & (ecount | 2))];
int a, dist = (pt1.y >> (XY_SHIFT - 5)) & 31;
a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff;
ICV_PUT_POINT();
ICV_PUT_POINT();
tptr += step;
a = (ep_corr * FilterTable[dist] >> 8) & 0xff;
ICV_PUT_POINT();
ICV_PUT_POINT();
tptr += step;
a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff;
ICV_PUT_POINT();
ICV_PUT_POINT();
pt1.y += y_step;
ptr += 4;
scount++;
ecount--;
}
}
else
{
ptr += (pt1.y >> XY_SHIFT) * step;
while( ecount >= 0 )
{
uchar *tptr = ptr + ((pt1.x >> XY_SHIFT) - 1) * 4;
int ep_corr = ep_table[(((scount >= 2) + 1) & (scount | 2)) * 3 +
(((ecount >= 2) + 1) & (ecount | 2))];
int a, dist = (pt1.x >> (XY_SHIFT - 5)) & 31;
a = (ep_corr * FilterTable[dist + 32] >> 8) & 0xff;
ICV_PUT_POINT();
ICV_PUT_POINT();
tptr += step;
a = (ep_corr * FilterTable[dist] >> 8) & 0xff;
ICV_PUT_POINT();
ICV_PUT_POINT();
tptr += step;
a = (ep_corr * FilterTable[63 - dist] >> 8) & 0xff;
ICV_PUT_POINT();
ICV_PUT_POINT();
pt1.x += x_step;
ptr += step;
scount++;
ecount--;
}
}
#undef ICV_PUT_POINT
}
}
......
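The new branch extends the anti-aliased rasterizer to 4-channel CV_8UC4 images, blending the alpha plane with the same ((c - _c)*a + 127) >> 8 coverage weighting used for the colour planes, where previously LineAA fell back to the non-antialiased Line(). A minimal sketch, assuming the public cv::line drawing API, that exercises this path:

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

int main()
{
    // 4-channel BGRA image: before this change, LINE_AA drawing on CV_8UC4
    // fell back to the aliased Line() implementation.
    cv::Mat canvas(200, 200, CV_8UC4, cv::Scalar(0, 0, 0, 0));

    // BGRA colour; the fourth component is now blended per pixel as well.
    cv::line(canvas, cv::Point(10, 10), cv::Point(190, 120),
             cv::Scalar(255, 128, 0, 255), 1, cv::LINE_AA);
    return 0;
}
```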