Commit 43eba3d7 authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents ad49138f 3200fe0e
@@ -196,6 +196,10 @@
 # define ITT_ARCH_PPC64 5
 #endif /* ITT_ARCH_PPC64 */
+#ifndef ITT_ARCH_AARCH64 /* 64-bit ARM */
+#  define ITT_ARCH_AARCH64 6
+#endif /* ITT_ARCH_AARCH64 */
 #ifndef ITT_ARCH
 # if defined _M_IX86 || defined __i386__
 #  define ITT_ARCH ITT_ARCH_IA32
@@ -205,6 +209,8 @@
 #  define ITT_ARCH ITT_ARCH_IA64
 # elif defined _M_ARM || defined __arm__
 #  define ITT_ARCH ITT_ARCH_ARM
+# elif defined __aarch64__
+#  define ITT_ARCH ITT_ARCH_AARCH64
 # elif defined __powerpc64__
 #  define ITT_ARCH ITT_ARCH_PPC64
 # endif
@@ -359,7 +365,7 @@ ITT_INLINE long __TBB_machine_fetchadd4(volatile void* ptr, long addend)
                       : "memory");
     return result;
 }
-#elif ITT_ARCH==ITT_ARCH_ARM || ITT_ARCH==ITT_ARCH_PPC64
+#elif ITT_ARCH==ITT_ARCH_ARM || ITT_ARCH==ITT_ARCH_AARCH64 || ITT_ARCH==ITT_ARCH_PPC64
 #define __TBB_machine_fetchadd4(addr, val) __sync_fetch_and_add(addr, val)
 #endif /* ITT_ARCH==ITT_ARCH_IA64 */
 #ifndef ITT_SIMPLE_INIT
...
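Note (not part of the patch): on ARM, AArch64 and PPC64 the fetch-and-add helper above is mapped onto the GCC/Clang `__sync_fetch_and_add` builtin instead of hand-written inline assembly. A minimal sketch of what that builtin does, with a hypothetical function name:

```cpp
// Minimal sketch, assuming a GCC/Clang toolchain: __sync_fetch_and_add
// atomically adds 'addend' to *counter (full-barrier semantics) and
// returns the value the counter held before the addition.
static long fetch_add_sketch(volatile long* counter, long addend)
{
    return __sync_fetch_and_add(counter, addend);
}
```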
@@ -221,7 +221,10 @@ OCV_OPTION(BUILD_OPENEXR "Build openexr from source" (((WIN3
 OCV_OPTION(BUILD_WEBP "Build WebP from source" (((WIN32 OR ANDROID OR APPLE) AND NOT WINRT) OR OPENCV_FORCE_3RDPARTY_BUILD) )
 OCV_OPTION(BUILD_TBB "Download and build TBB from source" (ANDROID OR OPENCV_FORCE_3RDPARTY_BUILD) )
 OCV_OPTION(BUILD_IPP_IW "Build IPP IW from source" (NOT MINGW OR OPENCV_FORCE_3RDPARTY_BUILD) IF (X86_64 OR X86) AND NOT WINRT )
-OCV_OPTION(BUILD_ITT "Build Intel ITT from source" (NOT MINGW OR OPENCV_FORCE_3RDPARTY_BUILD) IF (X86_64 OR X86) AND NOT WINRT AND NOT APPLE_FRAMEWORK )
+OCV_OPTION(BUILD_ITT "Build Intel ITT from source"
+    (NOT MINGW OR OPENCV_FORCE_3RDPARTY_BUILD)
+    IF (X86_64 OR X86 OR ARM OR AARCH64 OR PPC64 OR PPC64LE) AND NOT WINRT AND NOT APPLE_FRAMEWORK
+)
 
 # Optional 3rd party components
 # ===================================================
...
@@ -124,7 +124,7 @@ int initUndistortRectifyMapLine_AVX(float* m1f, float* m2f, short* m1, ushort* m
                 _mm256_mul_pd(__matTilt_20, __xd), _mm256_mul_pd(__matTilt_21, __yd)), __matTilt_22);
 #endif
             __m256d __invProj = _mm256_blendv_pd(
-                __one, _mm256_div_pd(__one, __vecTilt2),
+                _mm256_div_pd(__one, __vecTilt2), __one,
                 _mm256_cmp_pd(__vecTilt2, _mm256_setzero_pd(), _CMP_EQ_OQ));
 #if CV_FMA3
...
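Note (not part of the patch): `_mm256_blendv_pd(a, b, mask)` returns `b` in lanes where the mask is set and `a` elsewhere. The mask above is true where `__vecTilt2 == 0`, so swapping the first two operands makes those lanes take the safe value `__one` instead of the result of a division by zero, while all other lanes keep `1 / __vecTilt2`. A scalar sketch of the corrected behaviour:

```cpp
// Scalar equivalent of the corrected blend (illustrative sketch only):
// use the reciprocal of vecTilt2 unless it is zero, then fall back to 1.
static double invProjSketch(double vecTilt2)
{
    return (vecTilt2 == 0.0) ? 1.0 : 1.0 / vecTilt2;
}
```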
@@ -1469,4 +1469,34 @@ TEST(Calib3d_UndistortPoints, outputShape)
     }
 }
+
+TEST(Calib3d_initUndistortRectifyMap, regression_14467)
+{
+    Size size_w_h(512 + 3, 512);
+    Matx33f k(
+        6200, 0, size_w_h.width / 2.0f,
+        0, 6200, size_w_h.height / 2.0f,
+        0, 0, 1
+    );
+    Mat mesh_uv(size_w_h, CV_32FC2);
+    for (int i = 0; i < size_w_h.height; i++)
+    {
+        for (int j = 0; j < size_w_h.width; j++)
+        {
+            mesh_uv.at<Vec2f>(i, j) = Vec2f((float)j, (float)i);
+        }
+    }
+
+    Matx<double, 1, 14> d(
+        0, 0, 0, 0, 0,
+        0, 0, 0, 0, 0, 0, 0,
+        0.09, 0.0
+    );
+
+    Mat mapxy, dst;
+    initUndistortRectifyMap(k, d, noArray(), k, size_w_h, CV_32FC2, mapxy, noArray());
+    undistortPoints(mapxy.reshape(2, (int)mapxy.total()), dst, k, d, noArray(), k);
+    dst = dst.reshape(2, mapxy.rows);
+    EXPECT_LE(cvtest::norm(dst, mesh_uv, NORM_INF), 1e-3);
+}
 }} // namespace
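Note (not part of the patch): OpenCV's 14-element distortion vector is ordered (k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4, tauX, tauY), so the test above sets only tauX = 0.09. A non-zero tilt coefficient forces initUndistortRectifyMap through the tilted-sensor branch, which is the AVX code path fixed earlier in this commit. The same coefficients written out as a plain array, for reference:

```cpp
// Illustrative only: the distortion coefficients used by the regression test,
// with the tilted-sensor term tauX (index 12) set to 0.09.
static const double distCoeffs14[14] =
{
    0, 0, 0, 0, 0,   // k1, k2, p1, p2, k3
    0, 0, 0,         // k4, k5, k6
    0, 0, 0, 0,      // s1, s2, s3, s4
    0.09, 0.0        // tauX, tauY
};
```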
@@ -43,12 +43,18 @@ public:
                          std::vector<MatShape> &outputs,
                          std::vector<MatShape> &internals) const CV_OVERRIDE
     {
-        CV_Assert(inputs.size() == 2);
+        CV_Assert(inputs.size() == 2 || inputs.size() == 3);
         CV_Assert(total(inputs[0]) == total(inputs[1]));
-        MatShape outShape = inputs[0];
-        outShape[2] = (outShape[2] - 1) * poolStride.height + poolKernel.height - 2 * poolPad.height;
-        outShape[3] = (outShape[3] - 1) * poolStride.width + poolKernel.width - 2 * poolPad.width;
+        MatShape outShape;
+        if (inputs.size() == 2)
+        {
+            outShape = inputs[0];
+            outShape[2] = (outShape[2] - 1) * poolStride.height + poolKernel.height - 2 * poolPad.height;
+            outShape[3] = (outShape[3] - 1) * poolStride.width + poolKernel.width - 2 * poolPad.width;
+        }
+        else
+            outShape = inputs[2];
 
         outputs.clear();
         outputs.push_back(outShape);
@@ -71,7 +77,7 @@ public:
         inputs_arr.getMatVector(inputs);
         outputs_arr.getMatVector(outputs);
-        CV_Assert(inputs.size() == 2);
+        CV_Assert(inputs.size() == 2 || inputs.size() == 3);
         Mat& input = inputs[0];
         Mat& indices = inputs[1];
...
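Note (not part of the patch): with only two inputs the unpooled spatial size has to be reconstructed from the pooling geometry, while a third input lets the layer reuse that input's shape directly. The reconstruction inverts the usual pooling output-size formula; a sketch with a hypothetical helper name:

```cpp
// Sketch only (hypothetical helper, not part of the layer): the unpooled
// extent is the inverse of the pooled-size formula with explicit padding.
static int unpooledExtent(int pooledExtent, int kernel, int stride, int pad)
{
    return (pooledExtent - 1) * stride + kernel - 2 * pad;
}
```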
@@ -530,6 +530,13 @@ void ONNXImporter::populateNet(Net dstNet)
                 layerParams.type = "Power";
             }
         }
+        else if (layer_type == "Clip")
+        {
+            layerParams.type = "ReLU6";
+            replaceLayerParam(layerParams, "min", "min_value");
+            replaceLayerParam(layerParams, "max", "max_value");
+        }
         else if (layer_type == "LeakyRelu")
         {
             layerParams.type = "ReLU";
...
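Note (not part of the patch): ONNX `Clip` is routed to OpenCV's `ReLU6` activation, with the ONNX `min`/`max` attributes renamed to the layer's `min_value`/`max_value` parameters. Elementwise the operation is a plain clamp; a minimal sketch:

```cpp
#include <algorithm>

// Illustrative only: what the ReLU6 layer computes per element once
// Clip's attributes have been remapped as above.
static float clipSketch(float x, float min_value, float max_value)
{
    return std::min(std::max(x, min_value), max_value);
}
```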
@@ -1370,6 +1370,24 @@ void TFImporter::populateNet(Net dstNet)
             connectToAllBlobs(layer_id, dstNet, parsePin(layer.input(0)), id, layer.input_size());
         }
+        else if (type == "MaxPoolGrad")
+        {
+            CV_Assert(layer.input_size() == 3);
+
+            layerParams.set("pool_k_h", 0);
+            layerParams.set("pool_k_w", 0);
+            layerParams.set("pool_stride_h", 0);
+            layerParams.set("pool_stride_w", 0);
+            layerParams.set("pool_pad_h", 0);
+            layerParams.set("pool_pad_w", 0);
+
+            int id = dstNet.addLayer(name, "MaxUnpool", layerParams);
+            layer_id[name] = id;
+
+            connect(layer_id, dstNet, parsePin(layer.input(2)), id, 0);
+            connect(layer_id, dstNet, parsePin(layer.input(1) + ":1"), id, 1);
+            connect(layer_id, dstNet, parsePin(layer.input(0)), id, 2);
+        }
         else if (type == "Placeholder")
         {
             if (!hasLayerAttr(layer, "dtype") ||
...
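Note (not part of the patch): TensorFlow's MaxPoolGrad carries three inputs — the original (pre-pooling) tensor, the pooled output, and the incoming gradient. The importer rewires them for MaxUnpool: the gradient (input 2) becomes the data to scatter, the `":1"` suffix selects the second output blob of the corresponding pooling layer (fed to the unpool's indices input), and the original tensor supplies the target shape. A simplified sketch of the `name:index` pin convention assumed above (not the importer's actual code):

```cpp
#include <cstdlib>
#include <string>

// Simplified sketch: "layer:N" names the N-th output blob of "layer";
// a bare name defaults to output 0.
struct PinSketch { std::string layerName; int blobIndex; };

static PinSketch parsePinSketch(const std::string& s)
{
    const size_t colon = s.rfind(':');
    if (colon == std::string::npos)
        return PinSketch{ s, 0 };
    return PinSketch{ s.substr(0, colon), std::atoi(s.c_str() + colon + 1) };
}
```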
@@ -205,7 +205,7 @@ TEST(Reproducibility_FCN, Accuracy)
     Net net;
     {
         const string proto = findDataFile("dnn/fcn8s-heavy-pascal.prototxt");
-        const string model = findDataFile("dnn/fcn8s-heavy-pascal.caffemodel");
+        const string model = findDataFile("dnn/fcn8s-heavy-pascal.caffemodel", false);
         net = readNetFromCaffe(proto, model);
         ASSERT_FALSE(net.empty());
     }
...
@@ -136,6 +136,11 @@ TEST_P(Test_ONNX_layers, ReLU)
     testONNXModels("ReLU");
 }
+
+TEST_P(Test_ONNX_layers, Clip)
+{
+    testONNXModels("clip", npy);
+}
 
 TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
 {
     testONNXModels("maxpooling_sigmoid");
...
@@ -218,6 +218,13 @@ TEST_P(Test_TensorFlow_layers, pooling)
     runTensorFlowNet("reduce_mean"); // an average pooling over all spatial dimensions.
 }
+
+TEST_P(Test_TensorFlow_layers, max_pool_grad)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    runTensorFlowNet("max_pool_grad");
+}
 
 // TODO: fix tests and replace to pooling
 TEST_P(Test_TensorFlow_layers, ave_pool_same)
 {
...
@@ -968,13 +968,15 @@ static std::string findData(const std::string& relative_path, bool required, boo
         std::string prefix = path_join(datapath, subdir);
         std::string result_;
         CHECK_FILE_WITH_PREFIX(prefix, result_);
-#if 1 // check for misused 'optional' mode
         if (!required && !result_.empty())
         {
             std::cout << "TEST ERROR: Don't use 'optional' findData() for " << relative_path << std::endl;
-            CV_Assert(required || result_.empty());
+            static bool checkOptionalFlag = cv::utils::getConfigurationParameterBool("OPENCV_TEST_CHECK_OPTIONAL_DATA", false);
+            if (checkOptionalFlag)
+            {
+                CV_Assert(required || result_.empty());
+            }
         }
-#endif
         if (!result_.empty())
             return result_;
     }
...
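Note (not part of the patch): the hard assertion on misused 'optional' test data becomes opt-in here. OpenCV reads the flag through `cv::utils::getConfigurationParameterBool`, which consults the process environment; a rough standalone sketch of the resulting behaviour:

```cpp
#include <cstdlib>
#include <cstring>

// Rough sketch only: the assertion fires when OPENCV_TEST_CHECK_OPTIONAL_DATA
// is set to a truthy value in the environment, and stays a warning otherwise.
static bool strictOptionalCheckEnabled()
{
    const char* v = std::getenv("OPENCV_TEST_CHECK_OPTIONAL_DATA");
    return v != NULL && std::strcmp(v, "0") != 0;
}
```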
@@ -85,6 +85,11 @@ static void handleMessage(GstElement * pipeline);
 namespace {
+#if defined __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wunused-function"
+#endif
+
 template<typename T> static inline void GSafePtr_addref(T* ptr)
 {
     if (ptr)
@@ -109,6 +114,10 @@ template<> inline void GSafePtr_release<GstEncodingContainerProfile>(GstEncoding
 template<> inline void GSafePtr_addref<char>(char* pPtr); // declaration only. not defined. should not be used
 template<> inline void GSafePtr_release<char>(char** pPtr) { if (pPtr) { g_free(*pPtr); *pPtr = NULL; } }
 
+#if defined __clang__
+# pragma clang diagnostic pop
+#endif
+
 template <typename T>
 class GSafePtr
 {
...
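Note (not part of the patch): the push/ignored/pop pragmas silence `-Wunused-function` only for the GSafePtr helper templates in between (some of which may not be referenced in every build configuration), and the previous diagnostic state is restored at the pop. The general shape of that pattern:

```cpp
// Sketch: the warning is disabled only between push and pop, so the rest of
// the translation unit keeps its normal diagnostics.
#if defined __clang__
#  pragma clang diagnostic push
#  pragma clang diagnostic ignored "-Wunused-function"
#endif

static void helperThatMayBeUnused() { }  // would otherwise warn if never referenced

#if defined __clang__
#  pragma clang diagnostic pop
#endif
```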