Commit 202ba124 authored by Alexander Alekhin

Merge pull request #16087 from YashasSamaga:cuda4dnn-eltwise-div

parents b2b6f52d a91eca6e
@@ -102,6 +102,26 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
                v_store(output_vPtr[i], vec_x);
            }
        }
+
+        template <class T, std::size_t N>
+        __global__ void eltwise_div_2_vec(Span<T> output, View<T> x, View<T> y) {
+            using vector_type = get_vector_type_t<T, N>;
+
+            auto output_vPtr = vector_type::get_pointer(output.data());
+            auto x_vPtr = vector_type::get_pointer(x.data());
+            auto y_vPtr = vector_type::get_pointer(y.data());
+
+            for (auto i : grid_stride_range(output.size() / vector_type::size())) {
+                vector_type vec_x, vec_y;
+                v_load(vec_x, x_vPtr[i]);
+                v_load(vec_y, y_vPtr[i]);
+
+                for (int j = 0; j < vector_type::size(); j++)
+                    vec_x.data[j] = vec_x.data[j] / vec_y.data[j];
+
+                v_store(output_vPtr[i], vec_x);
+            }
+        }
    }

    template <class T, std::size_t N>
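The new kernel mirrors the existing eltwise_sum_2_vec/eltwise_prod_2_vec pattern: each thread grid-strides over the output in N-element vectors, loads both operands, divides lane by lane, and stores the quotient. Stripped of the cuda4dnn Span/View and grid_stride_range helpers, the operation reduces to a plain scalar grid-stride loop; a minimal standalone sketch (illustrative only, not code from this PR):

    #include <cstddef>

    // Scalar equivalent of eltwise_div_2_vec<T, 1>: every thread starts at its
    // global index and strides by the total number of threads in the grid, so
    // any launch configuration covers all n elements.
    template <class T>
    __global__ void eltwise_div_naive(T* output, const T* x, const T* y, std::size_t n)
    {
        const std::size_t stride = static_cast<std::size_t>(gridDim.x) * blockDim.x;
        for (std::size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
            output[i] = x[i] / y[i];  // elementwise quotient (DIV semantics)
    }

The vectorized version trades this simplicity for wider 2- or 4-element loads and stores, which make better use of memory bandwidth when the buffers permit it.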
@@ -221,4 +241,32 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
    template void eltwise_prod_2(const Stream& stream, Span<__half> output, View<__half> x, View<__half> y);
    template void eltwise_prod_2(const Stream& stream, Span<float> output, View<float> x, View<float> y);
+
+    template <class T, std::size_t N>
+    void launch_vectorized_eltwise_div_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
+        CV_Assert(is_fully_aligned<T>(output, N));
+        CV_Assert(is_fully_aligned<T>(x, N));
+        CV_Assert(is_fully_aligned<T>(y, N));
+
+        auto kernel = raw::eltwise_div_2_vec<T, N>;
+        auto policy = make_policy(kernel, output.size() / N, 0, stream);
+        launch_kernel(kernel, policy, output, x, y);
+    }
+
+    template <class T>
+    void eltwise_div_2(const Stream& stream, Span<T> output, View<T> x, View<T> y) {
+        CV_Assert(x.size() == y.size());
+        CV_Assert(x.size() == output.size());
+
+        if (is_fully_aligned<T>(output, 4) && is_fully_aligned<T>(x, 4) && is_fully_aligned<T>(y, 4)) {
+            launch_vectorized_eltwise_div_2<T, 4>(stream, output, x, y);
+        } else if (is_fully_aligned<T>(output, 2) && is_fully_aligned<T>(x, 2) && is_fully_aligned<T>(y, 2)) {
+            launch_vectorized_eltwise_div_2<T, 2>(stream, output, x, y);
+        } else {
+            launch_vectorized_eltwise_div_2<T, 1>(stream, output, x, y);
+        }
+    }
+
+    template void eltwise_div_2(const Stream& stream, Span<__half> output, View<__half> x, View<__half> y);
+    template void eltwise_div_2(const Stream& stream, Span<float> output, View<float> x, View<float> y);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */
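The dispatcher above selects the widest vectorization the buffers allow: 4-wide when output, x and y are all fully aligned for N = 4, otherwise 2-wide, otherwise the scalar N = 1 path. A rough sketch of what such an alignment test typically verifies (the real is_fully_aligned<T> lives in OpenCV's csl utilities and may differ in detail):

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for is_fully_aligned<T>(span, N): the element
    // count must be a multiple of N (no scalar tail left over) and the base
    // address must be aligned to a full N-element vector.
    template <class T>
    bool fully_aligned_sketch(const T* ptr, std::size_t size, std::size_t N)
    {
        const bool size_ok = (size % N) == 0;
        const bool addr_ok = (reinterpret_cast<std::uintptr_t>(ptr) % (N * sizeof(T))) == 0;
        return size_ok && addr_ok;
    }

Buffers handed out by the backend are typically well aligned, so the 4-wide path is the common case; the CV_Assert calls in the launcher then re-check the precondition the dispatcher already established.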
@@ -24,6 +24,9 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
    template <class T>
    void eltwise_prod_2(const csl::Stream& stream, csl::Span<T> output, csl::View<T> x, csl::View<T> y);
+
+    template <class T>
+    void eltwise_div_2(const csl::Stream& stream, csl::Span<T> output, csl::View<T> x, csl::View<T> y);
}}}} /* namespace cv::dnn::cuda4dnn::kernels */

#endif /* OPENCV_DNN_SRC_CUDA4DNN_KERNELS_ELTWISE_OPS_HPP */
@@ -24,7 +24,8 @@ namespace cv { namespace dnn { namespace cuda4dnn {
    enum class EltwiseOpType {
        MAX,
        SUM,
-        PRODUCT
+        PRODUCT,
+        DIV
    };
    template <class T>
@@ -64,6 +65,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
            {
            case EltwiseOpType::MAX: kernels::eltwise_max_2<T>(stream, output, input_x, input_y); break;
            case EltwiseOpType::PRODUCT: kernels::eltwise_prod_2<T>(stream, output, input_x, input_y); break;
+            case EltwiseOpType::DIV: kernels::eltwise_div_2<T>(stream, output, input_x, input_y); break;
            case EltwiseOpType::SUM:
                if (coeffs.empty() || (coeffs[0] == 1 && coeffs[1] == 1))
                    kernels::eltwise_sum_2<T>(stream, output, input_x, input_y);
@@ -89,6 +91,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
            {
            case EltwiseOpType::MAX: kernels::eltwise_max_2<T>(stream, output, output, input); break;
            case EltwiseOpType::PRODUCT: kernels::eltwise_prod_2<T>(stream, output, output, input); break;
+            case EltwiseOpType::DIV: kernels::eltwise_div_2<T>(stream, output, output, input); break;
            case EltwiseOpType::SUM:
                if (coeffs.empty() || coeffs[i] == 1)
                    kernels::eltwise_sum_2<T>(stream, output, output, input);
...
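The two switch hunks above show how an n-ary eltwise layer is evaluated with only binary kernels: the first call combines inputs x and y into output, and each remaining input is then folded into output in place. A hedged sketch of that folding pattern (Buffer, eltwise_op and fold_eltwise are illustrative names, not types or functions from this PR):

    #include <cstddef>
    #include <vector>

    // Pairwise fold: computes op(...op(op(x0, x1), x2)..., xk) into 'output'.
    // For DIV this yields x0 / x1 / x2 / ..., matching the in-place
    // eltwise_div_2(stream, output, output, input) calls above.
    template <class Buffer, class Op>
    void fold_eltwise(Op eltwise_op, Buffer& output, const std::vector<Buffer>& inputs)
    {
        eltwise_op(output, inputs[0], inputs[1]);       // two-input case
        for (std::size_t i = 2; i < inputs.size(); i++)
            eltwise_op(output, output, inputs[i]);      // fold the rest in place
    }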
@@ -108,7 +108,7 @@ public:
    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == DNN_BACKEND_OPENCV ||
-               (backendId == DNN_BACKEND_CUDA && op != DIV) ||  // TODO: not implemented, see PR #15811
+               backendId == DNN_BACKEND_CUDA ||
               (backendId == DNN_BACKEND_HALIDE && op != DIV) ||  // TODO: not implemented, see PR #15811
               ((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()))
                   || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !variableChannels));
@@ -471,6 +471,7 @@ public:
            case MAX: return cuda4dnn::EltwiseOpType::MAX;
            case SUM: return cuda4dnn::EltwiseOpType::SUM;
            case PROD: return cuda4dnn::EltwiseOpType::PRODUCT;
+            case DIV: return cuda4dnn::EltwiseOpType::DIV;
            }
            return cuda4dnn::EltwiseOpType::SUM;
        }();
...
@@ -380,6 +380,7 @@ TEST_P(Test_ONNX_layers, Div)
    normAssert(ref, out, "", default_l1, default_lInf);
    expectNoFallbacksFromIE(net);
+    expectNoFallbacksFromCUDA(net);
}

TEST_P(Test_ONNX_layers, DynamicReshape)
...