Commit c38023f4 authored by Richard Yoo

Modifications to support dynamic vector dispatch.

parent 11a09ef5
@@ -140,15 +140,15 @@ if(CMAKE_COMPILER_IS_GNUCXX)
# SSE3 and further should be disabled under MingW because it generates compiler errors
if(NOT MINGW)
if(ENABLE_AVX)
add_extra_compiler_option(-mavx)
ocv_check_flag_support(CXX "-mavx" _varname)
endif()
if(ENABLE_AVX2)
add_extra_compiler_option(-mavx2)
ocv_check_flag_support(CXX "-mavx2" _varname)
endif()
# GCC depresses SSEx instructions when -mavx is used. Instead, it generates new AVX instructions or AVX equivalence for all SSEx instructions when needed.
if(NOT OPENCV_EXTRA_CXX_FLAGS MATCHES "-m(avx|avx2)")
if(NOT OPENCV_EXTRA_CXX_FLAGS MATCHES "-mavx")
if(ENABLE_SSE3)
add_extra_compiler_option(-msse3)
endif()
@@ -169,7 +169,7 @@ if(CMAKE_COMPILER_IS_GNUCXX)
if(X86 OR X86_64)
if(NOT APPLE AND CMAKE_SIZEOF_VOID_P EQUAL 4)
if(OPENCV_EXTRA_CXX_FLAGS MATCHES "-m(sse2|avx|avx2)")
if(OPENCV_EXTRA_CXX_FLAGS MATCHES "-m(sse2|avx)")
add_extra_compiler_option(-mfpmath=sse)# !! important - be on the same wave with x64 compilers
else()
add_extra_compiler_option(-mfpmath=387)
@@ -526,6 +526,20 @@ macro(ocv_glob_module_sources)
list(APPEND lib_srcs ${cl_kernels} "${CMAKE_CURRENT_BINARY_DIR}/opencl_kernels.cpp" "${CMAKE_CURRENT_BINARY_DIR}/opencl_kernels.hpp")
endif()
if(ENABLE_AVX)
file(GLOB avx_srcs "src/avx/*.cpp")
foreach(src ${avx_srcs})
set_source_files_properties(${src} PROPERTIES COMPILE_FLAGS -mavx)
endforeach()
endif()
if(ENABLE_AVX2)
file(GLOB avx2_srcs "src/avx2/*.cpp")
foreach(src ${avx2_srcs})
set_source_files_properties(${src} PROPERTIES COMPILE_FLAGS -mavx2)
endforeach()
endif()
source_group("Include" FILES ${lib_hdrs})
source_group("Include\\detail" FILES ${lib_hdrs_detail})
@@ -317,6 +317,7 @@ Returns true if the specified feature is supported by the host hardware.
* ``CV_CPU_SSE4_2`` - SSE 4.2
* ``CV_CPU_POPCNT`` - POPCOUNT
* ``CV_CPU_AVX`` - AVX
* ``CV_CPU_AVX2`` - AVX2
The function returns true if the host hardware supports the specified feature. When the user calls ``setUseOptimized(false)``, subsequent calls to ``checkHardwareSupport()`` will return false until ``setUseOptimized(true)`` is called. This way the user can dynamically switch the optimized code in OpenCV on and off.
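For illustration only (not part of this commit), the runtime switch described above can be exercised like this:

#include <cstdio>
#include "opencv2/core/core.hpp"

int main()
{
    // Reflects what the CPU and OS support, as detected at startup.
    std::printf("AVX2 supported: %d\n", (int)cv::checkHardwareSupport(CV_CPU_AVX2));

    cv::setUseOptimized(false);   // globally disable optimized code paths
    // Now reports false even on AVX2-capable hardware...
    std::printf("AVX2 after setUseOptimized(false): %d\n",
                (int)cv::checkHardwareSupport(CV_CPU_AVX2));

    cv::setUseOptimized(true);    // ...until optimizations are re-enabled.
    return 0;
}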
@@ -284,6 +284,7 @@ CV_EXPORTS_W int64 getCPUTickCount();
- CV_CPU_SSE4_2 - SSE 4.2
- CV_CPU_POPCNT - POPCOUNT
- CV_CPU_AVX - AVX
- CV_CPU_AVX2 - AVX2
\note {Note that the function output is not static. Once you call cv::useOptimized(false),
most of the hardware acceleration is disabled and thus the function will return false,
@@ -253,7 +253,6 @@ struct HWFeatures
f.have[CV_CPU_AVX] = (((cpuid_data[2] & (1<<28)) != 0) && ((cpuid_data[2] & (1<<27)) != 0)); // OS uses XSAVE/XRSTOR and CPU supports AVX
}
#if CV_AVX2
#if defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
__cpuidex(cpuid_data, 7, 0);
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
@@ -286,7 +285,6 @@ struct HWFeatures
{
f.have[CV_CPU_AVX2] = (cpuid_data[1] & (1<<5)) != 0;
}
#endif
return f;
}
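The removed #if CV_AVX2 / #endif guards meant that AVX2 detection used to depend on how the core module itself was compiled; with them gone, the CPUID leaf-7 query above always runs and the decision becomes purely a runtime one. As a standalone, simplified sketch of that query (error handling and the XSAVE/OS-support check are omitted; this is not the code OpenCV uses):

#if defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
#  include <intrin.h>   // __cpuidex
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
#  include <cpuid.h>    // __cpuid_count
#endif

// AVX2 support is reported in bit 5 of EBX for CPUID leaf 7, subleaf 0.
static bool cpuReportsAVX2()
{
    unsigned int ebx = 0;
#if defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
    int regs[4] = { 0, 0, 0, 0 };
    __cpuidex(regs, 7, 0);
    ebx = (unsigned int)regs[1];
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
    unsigned int eax = 0, ecx = 0, edx = 0;
    __cpuid_count(7, 0, eax, ebx, ecx, edx);
    (void)eax; (void)ecx; (void)edx;
#endif
    return (ebx & (1u << 5)) != 0;
}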
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "imgwarp_avx.hpp"
#if CV_AVX
int VResizeLinearVec_32f_avx(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
{
const float** src = (const float**)_src;
const float* beta = (const float*)_beta;
const float *S0 = src[0], *S1 = src[1];
float* dst = (float*)_dst;
int x = 0;
__m256 b0 = _mm256_set1_ps(beta[0]), b1 = _mm256_set1_ps(beta[1]);
if( (((size_t)S0|(size_t)S1)&31) == 0 )
for( ; x <= width - 16; x += 16 )
{
__m256 x0, x1, y0, y1;
x0 = _mm256_load_ps(S0 + x);
x1 = _mm256_load_ps(S0 + x + 8);
y0 = _mm256_load_ps(S1 + x);
y1 = _mm256_load_ps(S1 + x + 8);
x0 = _mm256_add_ps(_mm256_mul_ps(x0, b0), _mm256_mul_ps(y0, b1));
x1 = _mm256_add_ps(_mm256_mul_ps(x1, b0), _mm256_mul_ps(y1, b1));
_mm256_storeu_ps( dst + x, x0);
_mm256_storeu_ps( dst + x + 8, x1);
}
else
for( ; x <= width - 16; x += 16 )
{
__m256 x0, x1, y0, y1;
x0 = _mm256_loadu_ps(S0 + x);
x1 = _mm256_loadu_ps(S0 + x + 8);
y0 = _mm256_loadu_ps(S1 + x);
y1 = _mm256_loadu_ps(S1 + x + 8);
x0 = _mm256_add_ps(_mm256_mul_ps(x0, b0), _mm256_mul_ps(y0, b1));
x1 = _mm256_add_ps(_mm256_mul_ps(x1, b0), _mm256_mul_ps(y1, b1));
_mm256_storeu_ps( dst + x, x0);
_mm256_storeu_ps( dst + x + 8, x1);
}
return x;
}
int VResizeCubicVec_32f_avx(const uchar** _src, uchar* _dst, const uchar* _beta, int width )
{
const float** src = (const float**)_src;
const float* beta = (const float*)_beta;
const float *S0 = src[0], *S1 = src[1], *S2 = src[2], *S3 = src[3];
float* dst = (float*)_dst;
int x = 0;
__m256 b0 = _mm256_set1_ps(beta[0]), b1 = _mm256_set1_ps(beta[1]),
b2 = _mm256_set1_ps(beta[2]), b3 = _mm256_set1_ps(beta[3]);
if( (((size_t)S0|(size_t)S1|(size_t)S2|(size_t)S3)&31) == 0 )
for( ; x <= width - 16; x += 16 )
{
__m256 x0, x1, y0, y1, s0, s1;
x0 = _mm256_load_ps(S0 + x);
x1 = _mm256_load_ps(S0 + x + 8);
y0 = _mm256_load_ps(S1 + x);
y1 = _mm256_load_ps(S1 + x + 8);
s0 = _mm256_mul_ps(x0, b0);
s1 = _mm256_mul_ps(x1, b0);
y0 = _mm256_mul_ps(y0, b1);
y1 = _mm256_mul_ps(y1, b1);
s0 = _mm256_add_ps(s0, y0);
s1 = _mm256_add_ps(s1, y1);
x0 = _mm256_load_ps(S2 + x);
x1 = _mm256_load_ps(S2 + x + 8);
y0 = _mm256_load_ps(S3 + x);
y1 = _mm256_load_ps(S3 + x + 8);
x0 = _mm256_mul_ps(x0, b2);
x1 = _mm256_mul_ps(x1, b2);
y0 = _mm256_mul_ps(y0, b3);
y1 = _mm256_mul_ps(y1, b3);
s0 = _mm256_add_ps(s0, x0);
s1 = _mm256_add_ps(s1, x1);
s0 = _mm256_add_ps(s0, y0);
s1 = _mm256_add_ps(s1, y1);
_mm256_storeu_ps( dst + x, s0);
_mm256_storeu_ps( dst + x + 8, s1);
}
else
for( ; x <= width - 16; x += 16 )
{
__m256 x0, x1, y0, y1, s0, s1;
x0 = _mm256_loadu_ps(S0 + x);
x1 = _mm256_loadu_ps(S0 + x + 8);
y0 = _mm256_loadu_ps(S1 + x);
y1 = _mm256_loadu_ps(S1 + x + 8);
s0 = _mm256_mul_ps(x0, b0);
s1 = _mm256_mul_ps(x1, b0);
y0 = _mm256_mul_ps(y0, b1);
y1 = _mm256_mul_ps(y1, b1);
s0 = _mm256_add_ps(s0, y0);
s1 = _mm256_add_ps(s1, y1);
x0 = _mm256_loadu_ps(S2 + x);
x1 = _mm256_loadu_ps(S2 + x + 8);
y0 = _mm256_loadu_ps(S3 + x);
y1 = _mm256_loadu_ps(S3 + x + 8);
x0 = _mm256_mul_ps(x0, b2);
x1 = _mm256_mul_ps(x1, b2);
y0 = _mm256_mul_ps(y0, b3);
y1 = _mm256_mul_ps(y1, b3);
s0 = _mm256_add_ps(s0, x0);
s1 = _mm256_add_ps(s1, x1);
s0 = _mm256_add_ps(s0, y0);
s1 = _mm256_add_ps(s1, y1);
_mm256_storeu_ps( dst + x, s0);
_mm256_storeu_ps( dst + x + 8, s1);
}
return x;
}
#else
int VResizeLinearVec_32f_avx(const uchar**, uchar*, const uchar*, int ) { return 0; }
int VResizeCubicVec_32f_avx(const uchar**, uchar*, const uchar*, int ) { return 0; }
#endif
/* End of file. */
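Both kernels above implement the same vertical blend: each output element is a weighted sum of the corresponding elements of two (linear) or four (cubic) source rows, with weights taken from beta, and the returned x tells the caller how many elements were processed so the remaining tail can be finished with scalar code. A scalar reference for the linear case, shown only for comparison and not part of the commit:

// Scalar equivalent of VResizeLinearVec_32f_avx, one element at a time:
// dst[x] = S0[x]*beta[0] + S1[x]*beta[1]
static void vresizeLinearRow_32f_scalar(const float* S0, const float* S1,
                                        const float* beta, float* dst, int width)
{
    for( int x = 0; x < width; x++ )
        dst[x] = S0[x]*beta[0] + S1[x]*beta[1];
}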
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef _CV_IMGWARP_AVX_H_
#define _CV_IMGWARP_AVX_H_
int VResizeLinearVec_32f_avx(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
int VResizeCubicVec_32f_avx(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
#endif
/* End of file. */
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef _CV_IMGWARP_AVX2_H_
#define _CV_IMGWARP_AVX2_H_
int VResizeLinearVec_32s8u_avx2(const uchar** _src, uchar* dst, const uchar* _beta, int width );
template<int shiftval>
int VResizeLinearVec_32f16_avx2(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
int VResizeCubicVec_32s8u_avx2(const uchar** _src, uchar* dst, const uchar* _beta, int width );
template<int shiftval>
int VResizeCubicVec_32f16_avx2(const uchar** _src, uchar* _dst, const uchar* _beta, int width );
#endif
/* End of file. */
@@ -3002,12 +3002,8 @@ void printVersionInfo(bool useStdOut)
#if CV_SSE4_2
if (checkHardwareSupport(CV_CPU_SSE4_2)) cpu_features += " sse4.2";
#endif
#if CV_AVX
if (checkHardwareSupport(CV_CPU_AVX)) cpu_features += " avx";
#endif
#if CV_AVX2
if (checkHardwareSupport(CV_CPU_AVX2)) cpu_features += " avx2";
#endif
#if CV_NEON
cpu_features += " neon"; // NEON is currently not checked at runtime
#endif