Commit 225566da authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents 0dd1ce1a 0cb3bf95
@@ -402,8 +402,8 @@ endmacro()
 set(OCV_COMPILER_FAIL_REGEX
     "argument .* is not valid"   # GCC 9+ (including support of unicode quotes)
-    "command line option .* is valid for .* but not for C\\+\\+"   # GNU
-    "command line option .* is valid for .* but not for C"   # GNU
+    "command[- ]line option .* is valid for .* but not for C\\+\\+"   # GNU
+    "command[- ]line option .* is valid for .* but not for C"   # GNU
     "unrecognized .*option"   # GNU
     "unknown .*option"   # Clang
     "ignoring unknown option"   # MSVC
...
@@ -188,7 +188,7 @@ blur = cv.GaussianBlur(img,(5,5),0)
 # find normalized_histogram, and its cumulative distribution function
 hist = cv.calcHist([blur],[0],None,[256],[0,256])
-hist_norm = hist.ravel()/hist.max()
+hist_norm = hist.ravel()/hist.sum()
 Q = hist_norm.cumsum()
 bins = np.arange(256)
@@ -199,6 +199,8 @@ thresh = -1
 for i in xrange(1,256):
     p1,p2 = np.hsplit(hist_norm,[i]) # probabilities
     q1,q2 = Q[i],Q[255]-Q[i] # cum sum of classes
+    if q1 < 1.e-6 or q2 < 1.e-6:
+        continue
     b1,b2 = np.hsplit(bins,[i]) # weights
     # finding means and variances
...
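For reference, the corrected tutorial code assembled into a minimal runnable sketch (not part of the diff; the input file name is a placeholder, and it assumes Python 3, so `range` replaces `xrange`):

```python
import cv2 as cv
import numpy as np

img = cv.imread('noisy2.png', cv.IMREAD_GRAYSCALE)   # placeholder grayscale image
blur = cv.GaussianBlur(img, (5, 5), 0)

# Normalized histogram and its cumulative distribution function.
hist = cv.calcHist([blur], [0], None, [256], [0, 256])
hist_norm = hist.ravel() / hist.sum()     # sum(), not max(): probabilities must add up to 1
Q = hist_norm.cumsum()
bins = np.arange(256)

fn_min = np.inf
thresh = -1
for i in range(1, 256):
    p1, p2 = np.hsplit(hist_norm, [i])    # class probabilities
    q1, q2 = Q[i], Q[255] - Q[i]          # cumulative sums of the two classes
    if q1 < 1.e-6 or q2 < 1.e-6:
        continue                          # skip empty classes to avoid division by zero
    b1, b2 = np.hsplit(bins, [i])         # bin values (weights)
    # class means and variances
    m1, m2 = np.sum(p1 * b1) / q1, np.sum(p2 * b2) / q2
    v1, v2 = np.sum(((b1 - m1) ** 2) * p1) / q1, np.sum(((b2 - m2) ** 2) * p2) / q2
    # minimize the weighted within-class variance
    fn = v1 * q1 + v2 * q2
    if fn < fn_min:
        fn_min, thresh = fn, i

# Compare with OpenCV's built-in Otsu threshold.
ret, otsu = cv.threshold(blur, 0, 255, cv.THRESH_BINARY + cv.THRESH_OTSU)
print(thresh, ret)
```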
@@ -2388,7 +2388,7 @@ CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal,
 //! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by cv::stereoRectify())
 CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2,
                                         int minDisparity, int numberOfDisparities,
-                                        int SADWindowSize );
+                                        int blockSize );
 //! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm
 CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost,
@@ -2813,8 +2813,8 @@ public:
 the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1
 between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor
 pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good
-P1 and P2 values are shown (like 8\*number_of_image_channels\*SADWindowSize\*SADWindowSize and
-32\*number_of_image_channels\*SADWindowSize\*SADWindowSize , respectively).
+P1 and P2 values are shown (like 8\*number_of_image_channels\*blockSize\*blockSize and
+32\*number_of_image_channels\*blockSize\*blockSize , respectively).
 @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right
 disparity check. Set it to a non-positive value to disable the check.
 @param preFilterCap Truncation value for the prefiltered image pixels. The algorithm first
...
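For reference, a hedged Python sketch of the P1/P2 recommendation quoted above, using the renamed blockSize parameter (not part of the diff; the input file names are placeholders):

```python
import cv2 as cv

left = cv.imread('left.png', cv.IMREAD_GRAYSCALE)    # placeholder rectified stereo pair
right = cv.imread('right.png', cv.IMREAD_GRAYSCALE)

block_size = 5
channels = 1  # grayscale input
# Recommended smoothness penalties: P1 for +/-1 disparity steps, P2 (> P1) for larger jumps.
P1 = 8 * channels * block_size * block_size
P2 = 32 * channels * block_size * block_size

sgbm = cv.StereoSGBM_create(minDisparity=0,
                            numDisparities=64,     # must be divisible by 16
                            blockSize=block_size,
                            P1=P1,
                            P2=P2,
                            disp12MaxDiff=1)
disparity = sgbm.compute(left, right)   # fixed-point disparity map, scaled by 16
```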
@@ -46,7 +46,7 @@ void solveQuartic(const double *factors, double *realRoots) {
     complex<double> sqrt_2m = sqrt(static_cast<complex<double> >(-2 * p4 / 3 + t));
     double B_4A = -a3 / (4 * a4);
     double complex1 = 4 * p4 / 3 + t;
-#if defined(__clang__) && defined(__arm__) && (__clang_major__ == 3 || __clang_minor__ == 4) && !defined(__ANDROID__)
+#if defined(__clang__) && defined(__arm__) && (__clang_major__ == 3 || __clang_major__ == 4) && !defined(__ANDROID__)
     // details: https://github.com/opencv/opencv/issues/11135
     // details: https://github.com/opencv/opencv/issues/11056
     complex<double> complex2 = 2 * q4;
...
@@ -25,7 +25,9 @@ namespace cv { namespace dnn {
 class ResizeLayerImpl : public ResizeLayer
 {
 public:
-    ResizeLayerImpl(const LayerParams& params) : zoomFactorWidth(0), zoomFactorHeight(0), scaleWidth(0), scaleHeight(0)
+    ResizeLayerImpl(const LayerParams& params) : zoomFactorWidth(params.get<int>("zoom_factor_x", params.get<int>("zoom_factor", 0))),
+                                                 zoomFactorHeight(params.get<int>("zoom_factor_y", params.get<int>("zoom_factor", 0))),
+                                                 scaleWidth(0), scaleHeight(0)
     {
         setParamsFrom(params);
         outWidth = params.get<float>("width", 0);
@@ -33,13 +35,10 @@ public:
         if (params.has("zoom_factor"))
         {
             CV_Assert(!params.has("zoom_factor_x") && !params.has("zoom_factor_y"));
-            zoomFactorWidth = zoomFactorHeight = params.get<int>("zoom_factor");
         }
         else if (params.has("zoom_factor_x") || params.has("zoom_factor_y"))
         {
             CV_Assert(params.has("zoom_factor_x") && params.has("zoom_factor_y"));
-            zoomFactorWidth = params.get<int>("zoom_factor_x");
-            zoomFactorHeight = params.get<int>("zoom_factor_y");
         }
         interpolation = params.get<String>("interpolation");
         CV_Assert(interpolation == "nearest" || interpolation == "bilinear");
@@ -54,8 +53,8 @@ public:
     {
         CV_Assert_N(inputs.size() == 1, inputs[0].size() == 4);
         outputs.resize(1, inputs[0]);
-        outputs[0][2] = outHeight > 0 ? outHeight : (outputs[0][2] * zoomFactorHeight);
-        outputs[0][3] = outWidth > 0 ? outWidth : (outputs[0][3] * zoomFactorWidth);
+        outputs[0][2] = zoomFactorHeight > 0 ? (outputs[0][2] * zoomFactorHeight) : outHeight;
+        outputs[0][3] = zoomFactorWidth > 0 ? (outputs[0][3] * zoomFactorWidth) : outWidth;
         // We can work in-place (do nothing) if input shape == output shape.
         return (outputs[0][2] == inputs[0][2]) && (outputs[0][3] == inputs[0][3]);
     }
@@ -82,11 +81,8 @@ public:
         inputs_arr.getMatVector(inputs);
         outputs_arr.getMatVector(outputs);
-        if (!outWidth && !outHeight)
-        {
-            outHeight = outputs[0].size[2];
-            outWidth = outputs[0].size[3];
-        }
+        outHeight = outputs[0].size[2];
+        outWidth = outputs[0].size[3];
         if (alignCorners && outHeight > 1)
             scaleHeight = static_cast<float>(inputs[0].size[2] - 1) / (outHeight - 1);
         else
@@ -214,7 +210,7 @@ public:
             ieLayer.setType("Interp");
             ieLayer.getParameters()["pad_beg"] = 0;
             ieLayer.getParameters()["pad_end"] = 0;
-            ieLayer.getParameters()["align_corners"] = false;
+            ieLayer.getParameters()["align_corners"] = alignCorners;
         }
         else
             CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation);
@@ -238,7 +234,7 @@ public:
         attrs.pads_begin.push_back(0);
         attrs.pads_end.push_back(0);
         attrs.axes = ngraph::AxisSet{2, 3};
-        attrs.align_corners = false;
+        attrs.align_corners = alignCorners;
         if (interpolation == "nearest") {
             attrs.mode = "nearest";
@@ -257,7 +253,8 @@ public:
 #endif  // HAVE_DNN_NGRAPH
 protected:
-    int outWidth, outHeight, zoomFactorWidth, zoomFactorHeight;
+    int outWidth, outHeight;
+    const int zoomFactorWidth, zoomFactorHeight;
     String interpolation;
     float scaleWidth, scaleHeight;
     bool alignCorners;
@@ -281,79 +278,18 @@ public:
     {
         CV_Assert_N(inputs.size() == 1, inputs[0].size() == 4);
         outputs.resize(1, inputs[0]);
-        outputs[0][2] = outHeight > 0 ? outHeight : (1 + zoomFactorHeight * (outputs[0][2] - 1));
-        outputs[0][3] = outWidth > 0 ? outWidth : (1 + zoomFactorWidth * (outputs[0][3] - 1));
+        outputs[0][2] = zoomFactorHeight > 0 ? (1 + zoomFactorHeight * (outputs[0][2] - 1)) : outHeight;
+        outputs[0][3] = zoomFactorWidth > 0 ? (1 + zoomFactorWidth * (outputs[0][3] - 1)) : outWidth;
         // We can work in-place (do nothing) if input shape == output shape.
         return (outputs[0][2] == inputs[0][2]) && (outputs[0][3] == inputs[0][3]);
     }
-    virtual bool supportBackend(int backendId) CV_OVERRIDE
-    {
-#ifdef HAVE_INF_ENGINE
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
-            || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-            return true;
-#endif
-        return backendId == DNN_BACKEND_OPENCV ||
-               backendId == DNN_BACKEND_CUDA;
-    }
-    virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
-    {
-        std::vector<Mat> inputs, outputs;
-        inputs_arr.getMatVector(inputs);
-        outputs_arr.getMatVector(outputs);
-        if (!outWidth && !outHeight)
-        {
-            outHeight = outputs[0].size[2];
-            outWidth = outputs[0].size[3];
-        }
-        int inpHeight = inputs[0].size[2];
-        int inpWidth = inputs[0].size[3];
-        scaleHeight = (outHeight > 1) ? (static_cast<float>(inpHeight - 1) / (outHeight - 1)) : 0.f;
-        scaleWidth = (outWidth > 1) ? (static_cast<float>(inpWidth - 1) / (outWidth - 1)) : 0.f;
-    }
-#ifdef HAVE_INF_ENGINE
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::Layer ieLayer(name);
-        ieLayer.setName(name);
-        ieLayer.setType("Interp");
-        ieLayer.getParameters()["pad_beg"] = 0;
-        ieLayer.getParameters()["pad_end"] = 0;
-        ieLayer.getParameters()["width"] = outWidth;
-        ieLayer.getParameters()["height"] = outHeight;
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
-        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif  // HAVE_INF_ENGINE
-#ifdef HAVE_DNN_NGRAPH
-    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
-                                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
-    {
-        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
-        ngraph::op::InterpolateAttrs attrs;
-        attrs.pads_begin.push_back(0);
-        attrs.pads_end.push_back(0);
-        attrs.axes = ngraph::AxisSet{2, 3};
-        attrs.mode = "linear";
-        std::vector<int64_t> shape = {outHeight, outWidth};
-        auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());
-        auto interp = std::make_shared<ngraph::op::Interpolate>(ieInpNode, out_shape, attrs);
-        return Ptr<BackendNode>(new InfEngineNgraphNode(interp));
-    }
-#endif  // HAVE_DNN_NGRAPH
 };
 Ptr<Layer> InterpLayer::create(const LayerParams& params)
 {
     LayerParams lp(params);
     lp.set("interpolation", "bilinear");
+    lp.set("align_corners", true);
     return Ptr<Layer>(new InterpLayerImpl(lp));
 }
...
@@ -1760,4 +1760,35 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Eltwise_unequal, Combine(
     dnnBackendsAndTargets()
 ));
+typedef testing::TestWithParam<tuple<Backend, Target> > Layer_Test_Resize;
+TEST_P(Layer_Test_Resize, change_input)
+{
+    int backendId = get<0>(GetParam());
+    int targetId = get<1>(GetParam());
+
+    Net net;
+    LayerParams lp;
+    lp.type = "Resize";
+    lp.name = "testLayer";
+    lp.set("zoom_factor", 2);
+    lp.set("interpolation", "nearest");
+    net.addLayerToPrev(lp.name, lp.type, lp);
+
+    for (int i = 0; i < 2; ++i)
+    {
+        Mat inp(4 + i, 5 + i, CV_8UC3), ref;
+        randu(inp, 0, 255);
+        resize(inp, ref, Size(0, 0), 2, 2, INTER_NEAREST);
+        ref = blobFromImage(ref);
+
+        net.setInput(blobFromImage(inp));
+        net.setPreferableBackend(backendId);
+        net.setPreferableTarget(targetId);
+        Mat out = net.forward();
+        normAssert(out, ref);
+    }
+}
+INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Resize, dnnBackendsAndTargets());
 }}  // namespace
@@ -54,9 +54,12 @@
 #include <unistd.h>
 #endif
+// Get GL_PERSPECTIVE_CORRECTION_HINT definition, not available in GLES 2 or
+// OpenGL 3 core profile or later
 #ifdef HAVE_QT_OPENGL
-    #if defined Q_WS_X11 /* Qt4 */ || defined Q_OS_LINUX /* Qt5 */
-    #include <GL/glx.h>
+    #if defined Q_WS_X11 /* Qt4 */ || \
+        (!defined(QT_OPENGL_ES_2) && defined Q_OS_LINUX) /* Qt5 with desktop OpenGL */
+    #include <GL/gl.h>
     #endif
 #endif
@@ -3225,7 +3228,9 @@ void OpenGlViewPort::updateGl()
 void OpenGlViewPort::initializeGL()
 {
+#ifdef GL_PERSPECTIVE_CORRECTION_HINT
     glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
+#endif
 }
 void OpenGlViewPort::resizeGL(int w, int h)
...
@@ -1446,7 +1446,7 @@ equal to sigmaX, if both sigmas are zeros, they are computed from ksize.width an
 respectively (see #getGaussianKernel for details); to fully control the result regardless of
 possible future modifications of all this semantics, it is recommended to specify all of ksize,
 sigmaX, and sigmaY.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa sepFilter2D, filter2D, blur, boxFilter, bilateralFilter, medianBlur
 */
@@ -1507,7 +1507,7 @@ algorithms, and so on). If you need to compute pixel sums over variable-size win
 @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 center.
 @param normalize flag, specifying whether the kernel is normalized by its area or not.
-@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
+@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa blur, bilateralFilter, GaussianBlur, medianBlur, integral
 */
 CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth,
@@ -1530,7 +1530,7 @@ variance and standard deviation around the neighborhood of a pixel.
 @param anchor kernel anchor point. The default value of Point(-1, -1) denotes that the anchor is at the kernel
 center.
 @param normalize flag, specifying whether the kernel is to be normalized by it's area or not.
-@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
+@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa boxFilter
 */
 CV_EXPORTS_W void sqrBoxFilter( InputArray src, OutputArray dst, int ddepth,
@@ -1553,7 +1553,7 @@ the depth should be CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 @param ksize blurring kernel size.
 @param anchor anchor point; default value Point(-1,-1) means that the anchor is at the kernel
 center.
-@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes
+@param borderType border mode used to extrapolate pixels outside of the image, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa boxFilter, bilateralFilter, GaussianBlur, medianBlur
 */
 CV_EXPORTS_W void blur( InputArray src, OutputArray dst,
@@ -1587,7 +1587,7 @@ separate color planes using split and process them individually.
 the kernel; the anchor should lie within the kernel; default value (-1,-1) means that the anchor
 is at the kernel center.
 @param delta optional value added to the filtered pixels before storing them in dst.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa sepFilter2D, dft, matchTemplate
 */
 CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth,
@@ -1608,7 +1608,7 @@ kernel kernelY. The final result shifted by delta is stored in dst .
 @param anchor Anchor position within the kernel. The default value \f$(-1,-1)\f$ means that the anchor
 is at the kernel center.
 @param delta Value added to the filtered results before storing them.
-@param borderType Pixel extrapolation method, see #BorderTypes
+@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa filter2D, Sobel, GaussianBlur, boxFilter, blur
 */
 CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth,
@@ -1661,7 +1661,7 @@ The second case corresponds to a kernel of:
 @param scale optional scale factor for the computed derivative values; by default, no scaling is
 applied (see #getDerivKernels for details).
 @param delta optional delta value that is added to the results prior to storing them in dst.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa Scharr, Laplacian, sepFilter2D, filter2D, GaussianBlur, cartToPolar
 */
 CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth,
@@ -1682,7 +1682,8 @@ Sobel( src, dy, CV_16SC1, 0, 1, 3 );
 @param dx output image with first-order derivative in x.
 @param dy output image with first-order derivative in y.
 @param ksize size of Sobel kernel. It must be 3.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes.
+                  Only #BORDER_DEFAULT=#BORDER_REFLECT_101 and #BORDER_REPLICATE are supported.
 @sa Sobel
 */
@@ -1710,7 +1711,7 @@ is equivalent to
 @param scale optional scale factor for the computed derivative values; by default, no scaling is
 applied (see #getDerivKernels for details).
 @param delta optional delta value that is added to the results prior to storing them in dst.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa cartToPolar
 */
 CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth,
@@ -1741,7 +1742,7 @@ details. The size must be positive and odd.
 @param scale Optional scale factor for the computed Laplacian values. By default, no scaling is
 applied. See #getDerivKernels for details.
 @param delta Optional delta value that is added to the results prior to storing them in dst .
-@param borderType Pixel extrapolation method, see #BorderTypes
+@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @sa Sobel, Scharr
 */
 CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth,
@@ -1810,7 +1811,7 @@ of the formulae in the cornerEigenValsAndVecs description.
 src .
 @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
 @param ksize Aperture parameter for the Sobel operator.
-@param borderType Pixel extrapolation method. See #BorderTypes.
+@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
 */
 CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst,
                                      int blockSize, int ksize = 3,
@@ -1833,7 +1834,7 @@ size as src .
 @param blockSize Neighborhood size (see the details on #cornerEigenValsAndVecs ).
 @param ksize Aperture parameter for the Sobel operator.
 @param k Harris detector free parameter. See the formula above.
-@param borderType Pixel extrapolation method. See #BorderTypes.
+@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
 */
 CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize,
                                 int ksize, double k,
@@ -1861,7 +1862,7 @@ The output of the function can be used for robust edge or corner detection.
 @param dst Image to store the results. It has the same size as src and the type CV_32FC(6) .
 @param blockSize Neighborhood size (see details below).
 @param ksize Aperture parameter for the Sobel operator.
-@param borderType Pixel extrapolation method. See #BorderTypes.
+@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
 @sa cornerMinEigenVal, cornerHarris, preCornerDetect
 */
@@ -1890,7 +1891,7 @@ The corners can be found as local maximums of the functions, as shown below:
 @param src Source single-channel 8-bit of floating-point image.
 @param dst Output image that has the type CV_32F and the same size as src .
 @param ksize %Aperture size of the Sobel .
-@param borderType Pixel extrapolation method. See #BorderTypes.
+@param borderType Pixel extrapolation method. See #BorderTypes. #BORDER_WRAP is not supported.
 */
 CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize,
                                    int borderType = BORDER_DEFAULT );
@@ -2154,7 +2155,7 @@ structuring element is used. Kernel can be created using #getStructuringElement.
 @param anchor position of the anchor within the element; default value (-1, -1) means that the
 anchor is at the element center.
 @param iterations number of times erosion is applied.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @param borderValue border value in case of a constant border
 @sa dilate, morphologyEx, getStructuringElement
 */
@@ -2186,7 +2187,7 @@ structuring element is used. Kernel can be created using #getStructuringElement
 @param anchor position of the anchor within the element; default value (-1, -1) means that the
 anchor is at the element center.
 @param iterations number of times dilation is applied.
-@param borderType pixel extrapolation method, see #BorderTypes
+@param borderType pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @param borderValue border value in case of a constant border
 @sa erode, morphologyEx, getStructuringElement
 */
@@ -2211,7 +2212,7 @@ CV_8U, CV_16U, CV_16S, CV_32F or CV_64F.
 @param anchor Anchor position with the kernel. Negative values mean that the anchor is at the
 kernel center.
 @param iterations Number of times erosion and dilation are applied.
-@param borderType Pixel extrapolation method, see #BorderTypes
+@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported.
 @param borderValue Border value in case of a constant border. The default value has a special
 meaning.
 @sa dilate, erode, getStructuringElement
...
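For reference, a hedged Python sketch of what the added #BORDER_WRAP notes mean for callers (not part of the diff): supported extrapolation modes can be requested explicitly, while BORDER_WRAP is not a supported mode for these filtering functions.

```python
import cv2 as cv
import numpy as np

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)

# Supported extrapolation modes can be passed explicitly.
blurred = cv.GaussianBlur(img, (5, 5), 0, borderType=cv.BORDER_REFLECT_101)
dx = cv.Sobel(img, cv.CV_16S, 1, 0, ksize=3, borderType=cv.BORDER_REPLICATE)

# Per the notes above, BORDER_WRAP is not supported for these filters;
# OpenCV is expected to reject it (exact behavior may vary by version).
try:
    cv.GaussianBlur(img, (5, 5), 0, borderType=cv.BORDER_WRAP)
    print('BORDER_WRAP was accepted here; results are not covered by the documentation')
except cv.error as err:
    print('BORDER_WRAP rejected:', err)
```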
This diff is collapsed.
@@ -1674,93 +1674,9 @@ struct HResizeLinearVecU8_X4
             }
         }
     }
-    else if(cn < 9)
-    {
-        const int step = 8;
-        const int len0 = xmax & -step;
-        for( ; k <= (count - 2); k+=2 )
-        {
-            const uchar *S0 = src[k];
-            int *D0 = dst[k];
-            const uchar *S1 = src[k+1];
-            int *D1 = dst[k+1];
-            for( dx = 0; dx < len0; dx += cn )
-            {
-                v_int16x8 a0 = v_load(alpha+dx*2);
-                v_int16x8 a1 = v_load(alpha+dx*2 + 8);
-                v_uint16x8 s0, s1;
-                v_zip(v_load_expand(S0+xofs[dx]), v_load_expand(S0+xofs[dx]+cn), s0, s1);
-                v_store(&D0[dx], v_dotprod(v_reinterpret_as_s16(s0), a0));
-                v_store(&D0[dx+4], v_dotprod(v_reinterpret_as_s16(s1), a1));
-                v_zip(v_load_expand(S1+xofs[dx]), v_load_expand(S1+xofs[dx]+cn), s0, s1);
-                v_store(&D1[dx], v_dotprod(v_reinterpret_as_s16(s0), a0));
-                v_store(&D1[dx+4], v_dotprod(v_reinterpret_as_s16(s1), a1));
-            }
-        }
-        for( ; k < count; k++ )
-        {
-            const uchar *S = src[k];
-            int *D = dst[k];
-            for( dx = 0; dx < len0; dx += cn )
-            {
-                v_int16x8 a0 = v_load(alpha+dx*2);
-                v_int16x8 a1 = v_load(alpha+dx*2 + 8);
-                v_uint16x8 s0, s1;
-                v_zip(v_load_expand(S+xofs[dx]), v_load_expand(S+xofs[dx]+cn), s0, s1);
-                v_store(&D[dx], v_dotprod(v_reinterpret_as_s16(s0), a0));
-                v_store(&D[dx+4], v_dotprod(v_reinterpret_as_s16(s1), a1));
-            }
-        }
-    }
     else
     {
-        const int step = 16;
-        const int len0 = (xmax - cn) & -step;
-        for( ; k <= (count - 2); k+=2 )
-        {
-            const uchar *S0 = src[k];
-            int *D0 = dst[k];
-            const uchar *S1 = src[k+1];
-            int *D1 = dst[k+1];
-            for( dx = 0; dx < len0; dx += step )
-            {
-                v_int16x8 a0 = v_load(alpha+dx*2);
-                v_int16x8 a1 = v_load(alpha+dx*2 + 8);
-                v_int16x8 a2 = v_load(alpha+dx*2 + 16);
-                v_int16x8 a3 = v_load(alpha+dx*2 + 24);
-                v_uint8x16 s01, s23;
-                v_zip(v_lut(S0, xofs+dx), v_lut(S0+cn, xofs+dx), s01, s23);
-                v_store(&D0[dx], v_dotprod(v_reinterpret_as_s16(v_expand_low(s01)), a0));
-                v_store(&D0[dx+4], v_dotprod(v_reinterpret_as_s16(v_expand_high(s01)), a1));
-                v_store(&D0[dx+8], v_dotprod(v_reinterpret_as_s16(v_expand_low(s23)), a2));
-                v_store(&D0[dx+12], v_dotprod(v_reinterpret_as_s16(v_expand_high(s23)), a3));
-                v_zip(v_lut(S1, xofs+dx), v_lut(S1+cn, xofs+dx), s01, s23);
-                v_store(&D1[dx], v_dotprod(v_reinterpret_as_s16(v_expand_low(s01)), a0));
-                v_store(&D1[dx+4], v_dotprod(v_reinterpret_as_s16(v_expand_high(s01)), a1));
-                v_store(&D1[dx+8], v_dotprod(v_reinterpret_as_s16(v_expand_low(s23)), a2));
-                v_store(&D1[dx+12], v_dotprod(v_reinterpret_as_s16(v_expand_high(s23)), a3));
-            }
-        }
-        for( ; k < count; k++ )
-        {
-            const uchar *S = src[k];
-            int *D = dst[k];
-            for( dx = 0; dx < len0; dx += step )
-            {
-                v_int16x8 a0 = v_load(alpha+dx*2);
-                v_int16x8 a1 = v_load(alpha+dx*2 + 8);
-                v_int16x8 a2 = v_load(alpha+dx*2 + 16);
-                v_int16x8 a3 = v_load(alpha+dx*2 + 24);
-                v_uint8x16 s01, s23;
-                v_zip(v_lut(S, xofs+dx), v_lut(S+cn, xofs+dx), s01, s23);
-                v_store(&D[dx], v_dotprod(v_reinterpret_as_s16(v_expand_low(s01)), a0));
-                v_store(&D[dx+4], v_dotprod(v_reinterpret_as_s16(v_expand_high(s01)), a1));
-                v_store(&D[dx+8], v_dotprod(v_reinterpret_as_s16(v_expand_low(s23)), a2));
-                v_store(&D[dx+12], v_dotprod(v_reinterpret_as_s16(v_expand_high(s23)), a3));
-            }
-        }
+        return 0;  // images with channels >4 are out of optimization scope
     }
     return dx;
 }
...
@@ -583,4 +583,14 @@ TEST(Drawing, line)
     ASSERT_THROW(line(mat, Point(1,1),Point(99,99),Scalar(255),0), cv::Exception);
 }
+TEST(Drawing, regression_16308)
+{
+    Mat_<uchar> img(Size(100, 100), (uchar)0);
+    circle(img, Point(50, 50), 50, 255, 1, LINE_AA);
+    EXPECT_NE(0, (int)img.at<uchar>(0, 50));
+    EXPECT_NE(0, (int)img.at<uchar>(50, 0));
+    EXPECT_NE(0, (int)img.at<uchar>(50, 99));
+    EXPECT_NE(0, (int)img.at<uchar>(99, 50));
+}
 }}  // namespace
@@ -2289,7 +2289,8 @@ bool QRCodeDetector::decodeMulti(
     CV_Assert((points.size().width % 4) == 0);
     vector< vector< Point2f > > src_points ;
     Mat qr_points = points.getMat();
-    for (int i = 0; i < points.size().width ; i += 4)
+    qr_points = qr_points.reshape(2, 1);
+    for (int i = 0; i < qr_points.size().width ; i += 4)
     {
         vector<Point2f> tempMat = qr_points.colRange(i, i + 4);
         if (contourArea(tempMat) > 0.0)
...
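For reference, a hedged Python sketch of the multi-QR calling pattern that this reshape fix targets (not part of the diff; the image path is a placeholder, and it assumes the detectMulti/decodeMulti bindings are available):

```python
import cv2 as cv

img = cv.imread('several_qrcodes.png', cv.IMREAD_GRAYSCALE)   # placeholder image
detector = cv.QRCodeDetector()

# Detect first: 'points' comes back as one packed array with 4 corners per QR code.
found, points = detector.detectMulti(img)
if found:
    # decodeMulti consumes the same packed corner layout that the C++ fix above reshapes.
    ok, decoded_info, straight_qrcodes = detector.decodeMulti(img, points)
    if ok:
        for text in decoded_info:
            print(text or '<decode failed>')
```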
@@ -481,6 +481,26 @@ INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Close, testing::ValuesIn(qrcode_i
 INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Monitor, testing::ValuesIn(qrcode_images_monitor));
 INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Multi, testing::ValuesIn(qrcode_images_multiple));
+TEST(Objdetect_QRCode_decodeMulti, decode_regression_16491)
+{
+#ifdef HAVE_QUIRC
+    Mat zero_image = Mat::zeros(256, 256, CV_8UC1);
+    Point corners_[] = {Point(16, 16), Point(128, 16), Point(128, 128), Point(16, 128),
+                        Point(16, 16), Point(128, 16), Point(128, 128), Point(16, 128)};
+    std::vector<Point> vec_corners;
+    int array_size = 8;
+    vec_corners.assign(corners_, corners_ + array_size);
+    std::vector<cv::String> decoded_info;
+    std::vector<Mat> straight_barcode;
+    QRCodeDetector vec_qrcode;
+    EXPECT_NO_THROW(vec_qrcode.decodeMulti(zero_image, vec_corners, decoded_info, straight_barcode));
+
+    Mat mat_corners(2, 4, CV_32SC2, (void*)&vec_corners[0]);
+    QRCodeDetector mat_qrcode;
+    EXPECT_NO_THROW(mat_qrcode.decodeMulti(zero_image, mat_corners, decoded_info, straight_barcode));
+#endif
+}
 TEST(Objdetect_QRCode_basic, not_found_qrcode)
 {
     std::vector<Point> corners;
...
This diff is collapsed.