Commit 652fb121 authored by Anatoly Baksheev

module reorganization: added folder with pure device functions, cuda_shared.hpp renamed to internal_shared.hpp
parent fadd19b9
 set(name "gpu")

+#"opencv_features2d" "opencv_flann" "opencv_objdetect" - only headers needed
 set(DEPS "opencv_core" "opencv_imgproc" "opencv_objdetect" "opencv_features2d" "opencv_flann")

 set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} opencv_gpu)
@@ -32,6 +34,10 @@ source_group("Src\\Cuda" FILES ${lib_cuda} ${lib_cuda_hdrs})

 file(GLOB lib_hdrs "include/opencv2/${name}/*.h*")
 source_group("Include" FILES ${lib_hdrs})

+#file(GLOB lib_device_hdrs "include/opencv2/${name}/device/*.h*")
+file(GLOB lib_device_hdrs "src/opencv2/gpu/device/*.h*")
+source_group("Device" FILES ${lib_device_hdrs})

 if (HAVE_CUDA)
     get_filename_component(_path_to_findnpp "${CMAKE_CURRENT_LIST_FILE}" PATH)
     set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${_path_to_findnpp})
@@ -71,7 +77,7 @@ if (HAVE_CUDA)
 endif()

-add_library(${the_target} ${lib_srcs} ${lib_hdrs} ${lib_int_hdrs} ${lib_cuda} ${lib_cuda_hdrs} ${cuda_objs})
+add_library(${the_target} ${lib_srcs} ${lib_hdrs} ${lib_int_hdrs} ${lib_cuda} ${lib_cuda_hdrs} ${lib_device_hdrs} ${cuda_objs})

 if(PCHSupport_FOUND)
@@ -131,4 +137,8 @@ install(FILES ${lib_hdrs}
         DESTINATION include/opencv2/${name}
         COMPONENT main)

+#install(FILES ${lib_device_hdrs}
+#        DESTINATION include/opencv2/${name}/device
+#        COMPONENT main)
@@ -55,6 +55,8 @@ namespace cv
 #else
     #define __CV_GPU_HOST_DEVICE__
 #endif

+        template <bool expr> struct StaticAssert;
+        template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};

         template <typename T> struct DevMem2D_
         {
@@ -96,19 +98,18 @@ namespace cv
             __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)data + y * step); }
         };

-        template <bool> struct StaticCheck;
-        template <> struct StaticCheck<true>{};

         template<typename T> struct PtrElemStep_ : public PtrStep_<T>
         {
             PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep_<T>(mem)
             {
+                StaticAssert<256 % sizeof(T) == 0>::check();
                 PtrStep_<T>::step /= PtrStep_<T>::elem_size;
             }
             __CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep_<T>::data + y * PtrStep_<T>::step; }
             __CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep_<T>::data + y * PtrStep_<T>::step; }
-        private:
-            StaticCheck<256 % sizeof(T) == 0> ElemStepTypeCheck;
         };

         typedef DevMem2D_<unsigned char> DevMem2D;
...
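The StaticAssert pair added above is the classic pre-C++11 compile-time assertion: only the true specialization defines check(), so instantiating it with a false condition fails to compile. PtrElemStep_ now uses it to guarantee 256 % sizeof(T) == 0, replacing the old StaticCheck data member. A minimal standalone sketch (the __CV_GPU_HOST_DEVICE__ qualifier is dropped so it builds as plain C++):

    // Only StaticAssert<true> is complete and has check(); StaticAssert<false>
    // stays an incomplete type, so a false condition breaks the build.
    template <bool expr> struct StaticAssert;
    template <> struct StaticAssert<true> { static void check() {} };

    struct Odd3 { char c[3]; };  // hypothetical type: sizeof(Odd3) == 3

    int main()
    {
        StaticAssert<256 % sizeof(float) == 0>::check();   // OK: 256 % 4 == 0
        // StaticAssert<256 % sizeof(Odd3) == 0>::check(); // would not compile: 256 % 3 != 0
        return 0;
    }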
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "internal_shared.hpp"
#include "border_interpolate.hpp"
#include "opencv2/gpu/gpu.hpp"
bool cv::gpu::tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType)
{
if (cpuBorderType == cv::BORDER_REFLECT101)
{
gpuBorderType = cv::gpu::BORDER_REFLECT101;
return true;
}
if (cpuBorderType == cv::BORDER_REPLICATE)
{
gpuBorderType = cv::gpu::BORDER_REPLICATE;
return true;
}
return false;
}
\ No newline at end of file
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
#define __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
#include "border_interpolate.hpp"
namespace cv { namespace gpu {
// Converts CPU border extrapolation mode into GPU internal analogue.
// Returns true if the GPU analogue exists, false otherwise.
bool tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType);
}}
#endif
\ No newline at end of file
@@ -63,40 +63,29 @@ namespace cv { namespace gpu { namespace csbp

    void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump, int min_disp_th,
                        const DevMem2D& left, const DevMem2D& right, const DevMem2D& temp);

-    void init_data_cost(int rows, int cols, short* disp_selected_pyr, short* data_cost_selected,
-                        size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream);
-    void init_data_cost(int rows, int cols, float* disp_selected_pyr, float* data_cost_selected,
-                        size_t msg_step, int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream);
-    void compute_data_cost(const short* disp_selected_pyr, short* data_cost, size_t msg_step1, size_t msg_step2,
-                           int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream);
-    void compute_data_cost(const float* disp_selected_pyr, float* data_cost, size_t msg_step1, size_t msg_step2,
-                           int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream);
-    void init_message(short* u_new, short* d_new, short* l_new, short* r_new,
-                      const short* u_cur, const short* d_cur, const short* l_cur, const short* r_cur,
-                      short* selected_disp_pyr_new, const short* selected_disp_pyr_cur,
-                      short* data_cost_selected, const short* data_cost, size_t msg_step1, size_t msg_step2,
-                      int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream);
-    void init_message(float* u_new, float* d_new, float* l_new, float* r_new,
-                      const float* u_cur, const float* d_cur, const float* l_cur, const float* r_cur,
-                      float* selected_disp_pyr_new, const float* selected_disp_pyr_cur,
-                      float* data_cost_selected, const float* data_cost, size_t msg_step1, size_t msg_step2,
-                      int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream);
-    void calc_all_iterations(short* u, short* d, short* l, short* r, short* data_cost_selected,
-                             const short* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, cudaStream_t stream);
-    void calc_all_iterations(float*u, float* d, float* l, float* r, float* data_cost_selected,
-                             const float* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, cudaStream_t stream);
-    void compute_disp(const short* u, const short* d, const short* l, const short* r, const short* data_cost_selected, const short* disp_selected, size_t msg_step,
-                      DevMem2D_<short> disp, int nr_plane, cudaStream_t stream);
-    void compute_disp(const float* u, const float* d, const float* l, const float* r, const float* data_cost_selected, const float* disp_selected, size_t msg_step,
-                      DevMem2D_<short> disp, int nr_plane, cudaStream_t stream);
+    template<class T>
+    void init_data_cost(int rows, int cols, T* disp_selected_pyr, T* data_cost_selected, size_t msg_step,
+                        int h, int w, int level, int nr_plane, int ndisp, int channels, bool use_local_init_data_cost, cudaStream_t stream);
+    template<class T>
+    void compute_data_cost(const T* disp_selected_pyr, T* data_cost, size_t msg_step1, size_t msg_step2,
+                           int rows, int cols, int h, int w, int h2, int level, int nr_plane, int channels, cudaStream_t stream);
+    template<class T>
+    void init_message(T* u_new, T* d_new, T* l_new, T* r_new,
+                      const T* u_cur, const T* d_cur, const T* l_cur, const T* r_cur,
+                      T* selected_disp_pyr_new, const T* selected_disp_pyr_cur,
+                      T* data_cost_selected, const T* data_cost, size_t msg_step1, size_t msg_step2,
+                      int h, int w, int nr_plane, int h2, int w2, int nr_plane2, cudaStream_t stream);
+    template<class T>
+    void calc_all_iterations(T* u, T* d, T* l, T* r, const T* data_cost_selected,
+                             const T* selected_disp_pyr_cur, size_t msg_step, int h, int w, int nr_plane, int iters, cudaStream_t stream);
+    template<class T>
+    void compute_disp(const T* u, const T* d, const T* l, const T* r, const T* data_cost_selected, const T* disp_selected, size_t msg_step,
+                      const DevMem2D_<short>& disp, int nr_plane, cudaStream_t stream);

 }}}

 namespace
...
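Collapsing the short/float overload pairs into templates leaves one declaration per entry point; callers then pick the message type at the call site, and the .cu file provides the instantiations (presumably for short and float, matching the removed overloads). A toy standalone sketch of the pattern; the function name here is hypothetical:

    #include <cstdio>

    // Stand-in for a templated csbp entry point; the real definitions live in
    // a .cu file and are compiled for T = short and T = float.
    template <class T> void init_cost_sketch(T* buf, int n)
    {
        for (int i = 0; i < n; ++i)
            buf[i] = T(0);  // one body serves both message types
    }

    int main()
    {
        short s[4];
        float f[4];
        init_cost_sketch(s, 4);  // instantiates T = short
        init_cost_sketch(f, 4);  // instantiates T = float
        std::printf("%d %g\n", (int)s[0], (double)f[0]);
        return 0;
    }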
@@ -41,14 +41,17 @@
 //M*/

 #include "opencv2/gpu/devmem2d.hpp"
-#include "saturate_cast.hpp"
+#include "opencv2/gpu/device/saturate_cast.hpp"
+#include "opencv2/gpu/device/limits_gpu.hpp"
 #include "safe_call.hpp"

 using namespace cv::gpu;
+using namespace cv::gpu::device;

-#ifndef FLT_MAX
-#define FLT_MAX 3.402823466e+38F
-#endif
+#undef FLT_MAX
+//#ifndef FLT_MAX
+//#define FLT_MAX 3.402823466e+38F
+//#endif

 namespace cv { namespace gpu { namespace bp {
@@ -349,7 +352,7 @@ namespace cv { namespace gpu { namespace bp {
     template <typename T>
     __device__ void message(const T* msg1, const T* msg2, const T* msg3, const T* data, T* dst, size_t msg_disp_step, size_t data_disp_step)
     {
-        float minimum = FLT_MAX;
+        float minimum = numeric_limits_gpu<float>::max();

         for(int i = 0; i < cndisp; ++i)
         {
@@ -470,7 +473,7 @@ namespace cv { namespace gpu { namespace bp {
         size_t disp_step = rows * step;

         int best = 0;
-        float best_val = FLT_MAX;
+        float best_val = numeric_limits_gpu<float>::max();

         for (int d = 0; d < cndisp; ++d)
         {
             float val = us[d * disp_step];
...
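numeric_limits_gpu<float>::max() replaces the FLT_MAX macro, so the constant comes from a device-side traits class instead of a host header. A minimal sketch of the specialization pattern, assumed from its use here (the real header is opencv2/gpu/device/limits_gpu.hpp and its members carry __device__, dropped so this compiles as plain C++):

    template <typename T> struct numeric_limits_sketch;  // primary left undefined

    template <> struct numeric_limits_sketch<float>
    {
        typedef float type;
        static type max() { return 3.402823466e+38f; /* FLT_MAX */ }
    };

    int main()
    {
        float minimum = numeric_limits_sketch<float>::max();  // device-safe FLT_MAX
        return (int)(minimum > 0.0f) - 1;                     // returns 0
    }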
@@ -40,8 +40,8 @@
 //
 //M*/

-#include "cuda_shared.hpp"
-#include "limits_gpu.hpp"
+#include "internal_shared.hpp"
+#include "opencv2/gpu/device/limits_gpu.hpp"

 using namespace cv::gpu;
 using namespace cv::gpu::device;
@@ -52,9 +52,6 @@ namespace cv { namespace gpu { namespace bfmatcher
 ////////////////////////////////// General funcs //////////////////////////////////
 ///////////////////////////////////////////////////////////////////////////////////

-    template <bool expr> struct StaticAssert;
-    template <> struct StaticAssert<true> {static __host__ __device__ void check(){}};

 ///////////////////////////////////////////////////////////////////////////////
 // Mask strategy
...
@@ -40,18 +40,19 @@
 //
 //M*/

-#include "cuda_shared.hpp"
-#include "saturate_cast.hpp"
-#include "vecmath.hpp"
+#include "internal_shared.hpp"
+#include "opencv2/gpu/device/saturate_cast.hpp"
+#include "opencv2/gpu/device/vecmath.hpp"

 using namespace cv::gpu;
+using namespace cv::gpu::device;

 #ifndef CV_DESCALE
 #define CV_DESCALE(x, n) (((x) + (1 << ((n)-1))) >> (n))
 #endif

 #ifndef FLT_EPSILON
 #define FLT_EPSILON 1.192092896e-07F
 #endif

 namespace cv { namespace gpu { namespace color
...
@@ -41,12 +41,14 @@
 //M*/

 #include "opencv2/gpu/devmem2d.hpp"
-#include "saturate_cast.hpp"
+#include "opencv2/gpu/device/saturate_cast.hpp"
+#include "opencv2/gpu/device/vecmath.hpp"
 #include "safe_call.hpp"
-#include "cuda_shared.hpp"
-#include "vecmath.hpp"
+#include "internal_shared.hpp"

 using namespace cv::gpu;
+using namespace cv::gpu::device;

 #ifndef FLT_MAX
 #define FLT_MAX 3.402823466e+30F
...
@@ -40,7 +40,7 @@
 //
 //M*/

-#include "cuda_shared.hpp"
+#include "internal_shared.hpp"

 #ifndef CV_PI_F
 #ifndef CV_PI
...
@@ -40,10 +40,12 @@
 //
 //M*/

-#include "cuda_shared.hpp"
-#include "border_interpolate.hpp"
-#include "internal_shared.hpp"
+#include "internal_shared.hpp"
+#include "opencv2/gpu/device/border_interpolate.hpp"

 using namespace cv::gpu;
+using namespace cv::gpu::device;

 /////////////////////////////////// Remap ///////////////////////////////////////////////

 namespace cv { namespace gpu { namespace imgproc
@@ -584,11 +586,11 @@ namespace cv { namespace gpu { namespace imgproc
         switch (border_type)
         {
-        case BORDER_REFLECT101:
+        case BORDER_REFLECT101_GPU:
             cornerHarris_kernel<<<grid, threads>>>(
                 cols, rows, block_size, k, dst, BrdReflect101(cols), BrdReflect101(rows));
             break;
-        case BORDER_REPLICATE:
+        case BORDER_REPLICATE_GPU:
             harrisDxTex.addressMode[0] = cudaAddressModeClamp;
             harrisDxTex.addressMode[1] = cudaAddressModeClamp;
             harrisDyTex.addressMode[0] = cudaAddressModeClamp;
@@ -698,11 +700,11 @@ namespace cv { namespace gpu { namespace imgproc
         switch (border_type)
         {
-        case BORDER_REFLECT101:
+        case BORDER_REFLECT101_GPU:
             cornerMinEigenVal_kernel<<<grid, threads>>>(
                 cols, rows, block_size, dst, BrdReflect101(cols), BrdReflect101(rows));
             break;
-        case BORDER_REPLICATE:
+        case BORDER_REPLICATE_GPU:
             minEigenValDxTex.addressMode[0] = cudaAddressModeClamp;
             minEigenValDxTex.addressMode[1] = cudaAddressModeClamp;
             minEigenValDyTex.addressMode[0] = cudaAddressModeClamp;
...
@@ -40,8 +40,8 @@
 //
 //M*/

-#ifndef __OPENCV_CUDA_SHARED_HPP__
-#define __OPENCV_CUDA_SHARED_HPP__
+#ifndef __OPENCV_internal_shared_HPP__
+#define __OPENCV_internal_shared_HPP__

 #include "opencv2/gpu/devmem2d.hpp"
 #include "safe_call.hpp"
@@ -56,6 +56,17 @@ namespace cv
         typedef unsigned short ushort;
         typedef unsigned int uint;

+        enum
+        {
+            BORDER_REFLECT101_GPU = 0,
+            BORDER_REPLICATE_GPU
+        };
+
+        // Converts CPU border extrapolation mode into GPU internal analogue.
+        // Returns true if the GPU analogue exists, false otherwise.
+        bool tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType);

         static inline int divUp(int total, int grain) { return (total + grain - 1) / grain; }

         template<class T> static inline void uploadConstant(const char* name, const T& value)
@@ -99,4 +110,4 @@ namespace cv
 }

-#endif /* __OPENCV_CUDA_SHARED_HPP__ */
+#endif /* __OPENCV_internal_shared_HPP__ */
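The divUp helper kept in this header is the standard ceiling division used to size CUDA grids: a 481-pixel dimension covered by 16-thread blocks needs 31 blocks, not 30. A host-only illustration (the image size is hypothetical):

    #include <cstdio>

    static inline int divUp(int total, int grain) { return (total + grain - 1) / grain; }

    int main()
    {
        // A 640x481 image tiled by 16x16 thread blocks: the last row of
        // blocks is ragged, so the grid dimension must round up.
        std::printf("grid = %d x %d\n", divUp(640, 16), divUp(481, 16));  // grid = 40 x 31
        return 0;
    }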
@@ -41,16 +41,16 @@
 //M*/

 #include "opencv2/gpu/devmem2d.hpp"
+#include "opencv2/gpu/device/border_interpolate.hpp"
 #include "safe_call.hpp"
-#include "cuda_shared.hpp"
-#include "border_interpolate.hpp"
+#include "internal_shared.hpp"

 #define BLOCK_DIM_X 16
 #define BLOCK_DIM_Y 16
 #define MAX_KERNEL_SIZE 16

 using namespace cv::gpu;
+using namespace cv::gpu::device;

 namespace cv { namespace gpu { namespace linear_filters {
...
@@ -40,7 +40,7 @@
 //
 //M*/

-#include "cuda_shared.hpp"
+#include "internal_shared.hpp"

 using namespace cv::gpu;
...
@@ -40,10 +40,10 @@
 //
 //M*/

-#include "cuda_shared.hpp"
+#include "opencv2/gpu/device/limits_gpu.hpp"
+#include "opencv2/gpu/device/saturate_cast.hpp"
 #include "transform.hpp"
-#include "limits_gpu.hpp"
-#include "saturate_cast.hpp"
+#include "internal_shared.hpp"

 using namespace cv::gpu;
 using namespace cv::gpu::device;
...
@@ -40,8 +40,10 @@
 //
 //M*/

-#include "cuda_shared.hpp"
-#include "saturate_cast.hpp"
+#include "internal_shared.hpp"
+#include "opencv2/gpu/device/saturate_cast.hpp"

+using namespace cv::gpu::device;

 namespace cv { namespace gpu { namespace matrix_operations {
...
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_GPU_SATURATE_CAST_HPP__
#define __OPENCV_GPU_SATURATE_CAST_HPP__
#include "cuda_shared.hpp"
namespace cv
{
namespace gpu
{
template<typename _Tp> static __device__ _Tp saturate_cast(uchar v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(schar v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(ushort v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(short v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(uint v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(int v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(float v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(double v) { return _Tp(v); }
template<> static __device__ uchar saturate_cast<uchar>(schar v)
{ return (uchar)max((int)v, 0); }
template<> static __device__ uchar saturate_cast<uchar>(ushort v)
{ return (uchar)min((uint)v, (uint)UCHAR_MAX); }
template<> static __device__ uchar saturate_cast<uchar>(int v)
{ return (uchar)((uint)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }
template<> static __device__ uchar saturate_cast<uchar>(uint v)
{ return (uchar)min(v, (uint)UCHAR_MAX); }
template<> static __device__ uchar saturate_cast<uchar>(short v)
{ return saturate_cast<uchar>((uint)v); }
template<> static __device__ uchar saturate_cast<uchar>(float v)
{ int iv = __float2int_rn(v); return saturate_cast<uchar>(iv); }
template<> static __device__ uchar saturate_cast<uchar>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
int iv = __double2int_rn(v); return saturate_cast<uchar>(iv);
#else
return saturate_cast<uchar>((float)v);
#endif
}
template<> static __device__ schar saturate_cast<schar>(uchar v)
{ return (schar)min((int)v, SCHAR_MAX); }
template<> static __device__ schar saturate_cast<schar>(ushort v)
{ return (schar)min((uint)v, (uint)SCHAR_MAX); }
template<> static __device__ schar saturate_cast<schar>(int v)
{
return (schar)((uint)(v-SCHAR_MIN) <= (uint)UCHAR_MAX ?
v : v > 0 ? SCHAR_MAX : SCHAR_MIN);
}
template<> static __device__ schar saturate_cast<schar>(short v)
{ return saturate_cast<schar>((int)v); }
template<> static __device__ schar saturate_cast<schar>(uint v)
{ return (schar)min(v, (uint)SCHAR_MAX); }
template<> static __device__ schar saturate_cast<schar>(float v)
{ int iv = __float2int_rn(v); return saturate_cast<schar>(iv); }
template<> static __device__ schar saturate_cast<schar>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
int iv = __double2int_rn(v); return saturate_cast<schar>(iv);
#else
return saturate_cast<schar>((float)v);
#endif
}
template<> static __device__ ushort saturate_cast<ushort>(schar v)
{ return (ushort)max((int)v, 0); }
template<> static __device__ ushort saturate_cast<ushort>(short v)
{ return (ushort)max((int)v, 0); }
template<> static __device__ ushort saturate_cast<ushort>(int v)
{ return (ushort)((uint)v <= (uint)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }
template<> static __device__ ushort saturate_cast<ushort>(uint v)
{ return (ushort)min(v, (uint)USHRT_MAX); }
template<> static __device__ ushort saturate_cast<ushort>(float v)
{ int iv = __float2int_rn(v); return saturate_cast<ushort>(iv); }
template<> static __device__ ushort saturate_cast<ushort>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
int iv = __double2int_rn(v); return saturate_cast<ushort>(iv);
#else
return saturate_cast<ushort>((float)v);
#endif
}
template<> static __device__ short saturate_cast<short>(ushort v)
{ return (short)min((int)v, SHRT_MAX); }
template<> static __device__ short saturate_cast<short>(int v)
{
return (short)((uint)(v - SHRT_MIN) <= (uint)USHRT_MAX ?
v : v > 0 ? SHRT_MAX : SHRT_MIN);
}
template<> static __device__ short saturate_cast<short>(uint v)
{ return (short)min(v, (uint)SHRT_MAX); }
template<> static __device__ short saturate_cast<short>(float v)
{ int iv = __float2int_rn(v); return saturate_cast<short>(iv); }
template<> static __device__ short saturate_cast<short>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
int iv = __double2int_rn(v); return saturate_cast<short>(iv);
#else
return saturate_cast<short>((float)v);
#endif
}
template<> static __device__ int saturate_cast<int>(float v) { return __float2int_rn(v); }
template<> static __device__ int saturate_cast<int>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
return __double2int_rn(v);
#else
return saturate_cast<int>((float)v);
#endif
}
template<> static __device__ uint saturate_cast<uint>(float v){ return __float2uint_rn(v); }
template<> static __device__ uint saturate_cast<uint>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
return __double2uint_rn(v);
#else
return saturate_cast<uint>((float)v);
#endif
}
}
}
#endif /* __OPENCV_GPU_SATURATE_CAST_HPP__ */
\ No newline at end of file
@@ -41,7 +41,7 @@
 //M*/

 #include "opencv2/gpu/devmem2d.hpp"
-#include "cuda_shared.hpp"
+#include "internal_shared.hpp"

 namespace cv { namespace gpu { namespace split_merge {
...
@@ -40,7 +40,7 @@
 //
 //M*/

-//#include "cuda_shared.hpp"
+//#include "internal_shared.hpp"
 #include "opencv2/gpu/devmem2d.hpp"
 #include "safe_call.hpp"

 static inline int divUp(int total, int grain) { return (total + grain - 1) / grain; }
...
@@ -43,7 +43,7 @@
 #ifndef __OPENCV_GPU_TRANSFORM_HPP__
 #define __OPENCV_GPU_TRANSFORM_HPP__

-#include "cuda_shared.hpp"
+#include "internal_shared.hpp"

 namespace cv { namespace gpu { namespace device
 {
...
@@ -41,7 +41,6 @@
 //M*/

 #include "precomp.hpp"
-#include "border_interpolate.hpp"

 using namespace cv;
 using namespace cv::gpu;
@@ -860,6 +859,9 @@ void cv::gpu::histRange(const GpuMat& src, GpuMat hist[4], const GpuMat levels[4
     hist_callers[src.depth()](src, hist, levels);
 }

+////////////////////////////////////////////////////////////////////////
+// cornerHarris & minEgenVal

 namespace cv { namespace gpu { namespace imgproc {

     void extractCovData_caller(const DevMem2Df Dx, const DevMem2Df Dy, PtrStepf dst);
@@ -939,6 +941,24 @@ namespace
 } // Anonymous namespace

+bool cv::gpu::tryConvertToGpuBorderType(int cpuBorderType, int& gpuBorderType)
+{
+    if (cpuBorderType == cv::BORDER_REFLECT101)
+    {
+        gpuBorderType = cv::gpu::BORDER_REFLECT101_GPU;
+        return true;
+    }
+
+    if (cpuBorderType == cv::BORDER_REPLICATE)
+    {
+        gpuBorderType = cv::gpu::BORDER_REPLICATE_GPU;
+        return true;
+    }
+
+    return false;
+}

 void cv::gpu::cornerHarris(const GpuMat& src, GpuMat& dst, int blockSize, int ksize, double k, int borderType)
 {
     CV_Assert(borderType == cv::BORDER_REFLECT101 ||
...
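A hedged sketch of how a caller inside the module might bridge a CPU border flag to the GPU-internal enum before launching a kernel; the helper name is hypothetical, only tryConvertToGpuBorderType and the _GPU enum values come from the diff, and the includes assume the OpenCV 2.x layout of this era:

    #include "internal_shared.hpp"      // declares cv::gpu::tryConvertToGpuBorderType
    #include "opencv2/core/core.hpp"    // CV_Error / CV_StsBadArg

    static int toGpuBorder(int cpuBorderType)
    {
        int gpuBorderType = 0;
        if (!cv::gpu::tryConvertToGpuBorderType(cpuBorderType, gpuBorderType))
            CV_Error(CV_StsBadArg, "unsupported border extrapolation mode");
        return gpuBorderType;  // BORDER_REFLECT101_GPU or BORDER_REPLICATE_GPU
    }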
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_GPU_INTERNAL_SHARED_HPP__
#define __OPENCV_GPU_INTERNAL_SHARED_HPP__
namespace cv { namespace gpu {
// Internal GPU anlagues of CPU border extrapolation types
enum
{
BORDER_REFLECT101 = 0,
BORDER_REPLICATE
};
}}
#endif
\ No newline at end of file
@@ -28,7 +28,7 @@
 // derived from this software without specific prior written permission.
 //
 // This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
+// any express or bpied warranties, including, but not limited to, the bpied
 // warranties of merchantability and fitness for a particular purpose are disclaimed.
 // In no event shall the Intel Corporation or contributors be liable for any direct,
 // indirect, incidental, special, exemplary, or consequential damages
@@ -40,13 +40,12 @@
 //
 //M*/

-#ifndef __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
-#define __OPENCV_GPU_BORDER_INTERPOLATE_HPP__
-
-#include "../internal_shared.hpp"
-
-namespace cv { namespace gpu {
+namespace cv
+{
+    namespace gpu
+    {
+        namespace device
+        {

     struct BrdReflect101
     {
         BrdReflect101(int len): last(len - 1) {}
@@ -170,10 +169,8 @@ namespace cv { namespace gpu {
         {
             return data[idx_high(i) * step];
         }

         int step;
     };

-}}
-
-#endif
\ No newline at end of file
+        }
+    }
+}
\ No newline at end of file
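For reference, the reflect-101 rule these structs implement mirrors out-of-range indices about the edge pixels without repeating them. A standalone sketch of the idx_low/idx_high arithmetic, assumed from the struct above (the real methods are __device__):

    #include <cstdio>
    #include <cstdlib>

    struct Reflect101Sketch
    {
        explicit Reflect101Sketch(int len) : last(len - 1) {}
        int idx_low(int i) const  { return std::abs(i); }               // mirror about index 0
        int idx_high(int i) const { return last - std::abs(last - i); } // mirror about index last
        int last;
    };

    int main()
    {
        Reflect101Sketch brd(5);  // valid indices 0..4, e.g. pixels a b c d e
        std::printf("%d %d\n", brd.idx_low(-2), brd.idx_high(6));  // 2 2 -> both map to 'c'
        return 0;
    }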
@@ -193,7 +193,7 @@ namespace cv
             typedef float type;
             __device__ static type min() { return 1.175494351e-38f/*FLT_MIN*/; };
             __device__ static type max() { return 3.402823466e+38f/*FLT_MAX*/; };
-            __device__ static type epsilon();
+            __device__ static type epsilon() { return 1.192092896e-07f/*FLT_EPSILON*/; };
             __device__ static type round_error();
             __device__ static type denorm_min();
             __device__ static type infinity();
...
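With epsilon() now defined, kernels can do relative float comparisons without pulling in a host float.h. A host-side rendering of the usual pattern; the helper is hypothetical, only the epsilon value comes from the header:

    #include <cmath>
    #include <cstdio>

    static bool nearlyEqual(float a, float b)
    {
        const float eps = 1.192092896e-07f;  // FLT_EPSILON, the value epsilon() returns
        float scale = std::fabs(a) > std::fabs(b) ? std::fabs(a) : std::fabs(b);
        if (scale < 1.0f) scale = 1.0f;
        return std::fabs(a - b) <= eps * scale;  // relative tolerance, floored at absolute eps
    }

    int main()
    {
        float sum = 0.0f;
        for (int i = 0; i < 10; ++i)
            sum += 0.1f;  // accumulates rounding error; sum != 1.0f exactly
        std::printf("%d %d\n", (int)(sum == 1.0f), (int)nearlyEqual(sum, 1.0f));  // 0 1
        return 0;
    }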
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_GPU_SATURATE_CAST_HPP__
#define __OPENCV_GPU_SATURATE_CAST_HPP__
#include "internal_shared.hpp"
namespace cv
{
namespace gpu
{
namespace device
{
template<typename _Tp> static __device__ _Tp saturate_cast(uchar v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(schar v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(ushort v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(short v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(uint v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(int v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(float v) { return _Tp(v); }
template<typename _Tp> static __device__ _Tp saturate_cast(double v) { return _Tp(v); }
template<> static __device__ uchar saturate_cast<uchar>(schar v)
{ return (uchar)max((int)v, 0); }
template<> static __device__ uchar saturate_cast<uchar>(ushort v)
{ return (uchar)min((uint)v, (uint)UCHAR_MAX); }
template<> static __device__ uchar saturate_cast<uchar>(int v)
{ return (uchar)((uint)v <= UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0); }
template<> static __device__ uchar saturate_cast<uchar>(uint v)
{ return (uchar)min(v, (uint)UCHAR_MAX); }
template<> static __device__ uchar saturate_cast<uchar>(short v)
{ return saturate_cast<uchar>((uint)v); }
template<> static __device__ uchar saturate_cast<uchar>(float v)
{ int iv = __float2int_rn(v); return saturate_cast<uchar>(iv); }
template<> static __device__ uchar saturate_cast<uchar>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
int iv = __double2int_rn(v); return saturate_cast<uchar>(iv);
#else
return saturate_cast<uchar>((float)v);
#endif
}
template<> static __device__ schar saturate_cast<schar>(uchar v)
{ return (schar)min((int)v, SCHAR_MAX); }
template<> static __device__ schar saturate_cast<schar>(ushort v)
{ return (schar)min((uint)v, (uint)SCHAR_MAX); }
template<> static __device__ schar saturate_cast<schar>(int v)
{
return (schar)((uint)(v-SCHAR_MIN) <= (uint)UCHAR_MAX ?
v : v > 0 ? SCHAR_MAX : SCHAR_MIN);
}
template<> static __device__ schar saturate_cast<schar>(short v)
{ return saturate_cast<schar>((int)v); }
template<> static __device__ schar saturate_cast<schar>(uint v)
{ return (schar)min(v, (uint)SCHAR_MAX); }
template<> static __device__ schar saturate_cast<schar>(float v)
{ int iv = __float2int_rn(v); return saturate_cast<schar>(iv); }
template<> static __device__ schar saturate_cast<schar>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
int iv = __double2int_rn(v); return saturate_cast<schar>(iv);
#else
return saturate_cast<schar>((float)v);
#endif
}
template<> static __device__ ushort saturate_cast<ushort>(schar v)
{ return (ushort)max((int)v, 0); }
template<> static __device__ ushort saturate_cast<ushort>(short v)
{ return (ushort)max((int)v, 0); }
template<> static __device__ ushort saturate_cast<ushort>(int v)
{ return (ushort)((uint)v <= (uint)USHRT_MAX ? v : v > 0 ? USHRT_MAX : 0); }
template<> static __device__ ushort saturate_cast<ushort>(uint v)
{ return (ushort)min(v, (uint)USHRT_MAX); }
template<> static __device__ ushort saturate_cast<ushort>(float v)
{ int iv = __float2int_rn(v); return saturate_cast<ushort>(iv); }
template<> static __device__ ushort saturate_cast<ushort>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
int iv = __double2int_rn(v); return saturate_cast<ushort>(iv);
#else
return saturate_cast<ushort>((float)v);
#endif
}
template<> static __device__ short saturate_cast<short>(ushort v)
{ return (short)min((int)v, SHRT_MAX); }
template<> static __device__ short saturate_cast<short>(int v)
{
return (short)((uint)(v - SHRT_MIN) <= (uint)USHRT_MAX ?
v : v > 0 ? SHRT_MAX : SHRT_MIN);
}
template<> static __device__ short saturate_cast<short>(uint v)
{ return (short)min(v, (uint)SHRT_MAX); }
template<> static __device__ short saturate_cast<short>(float v)
{ int iv = __float2int_rn(v); return saturate_cast<short>(iv); }
template<> static __device__ short saturate_cast<short>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
int iv = __double2int_rn(v); return saturate_cast<short>(iv);
#else
return saturate_cast<short>((float)v);
#endif
}
template<> static __device__ int saturate_cast<int>(float v) { return __float2int_rn(v); }
template<> static __device__ int saturate_cast<int>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
return __double2int_rn(v);
#else
return saturate_cast<int>((float)v);
#endif
}
template<> static __device__ uint saturate_cast<uint>(float v){ return __float2uint_rn(v); }
template<> static __device__ uint saturate_cast<uint>(double v)
{
#if defined (__CUDA_ARCH__) && __CUDA_ARCH__ >= 130
return __double2uint_rn(v);
#else
return saturate_cast<uint>((float)v);
#endif
}
}
}
}
#endif /* __OPENCV_GPU_SATURATE_CAST_HPP__ */
\ No newline at end of file
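The int-to-uchar specialization above is the core trick of this header: one unsigned comparison tests both bounds at once. A host-side rendering (the device original leans on CUDA's built-in min/max; the function name here is hypothetical):

    #include <climits>
    #include <cstdio>

    typedef unsigned char uchar;

    static uchar saturate_cast_uchar(int v)
    {
        // (unsigned)v <= UCHAR_MAX catches 0..255 in a single compare; a negative
        // v wraps to a huge unsigned value, so it falls through to the clamp.
        return (uchar)((unsigned)v <= (unsigned)UCHAR_MAX ? v : v > 0 ? UCHAR_MAX : 0);
    }

    int main()
    {
        std::printf("%d %d %d\n",
                    saturate_cast_uchar(300),   // 255: clamped from above
                    saturate_cast_uchar(-7),    // 0:   clamped from below
                    saturate_cast_uchar(128));  // 128: in range, passed through
        return 0;
    }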
@@ -62,7 +62,7 @@
 #if defined(HAVE_CUDA)

-    #include "cuda_shared.hpp"
+    #include "internal_shared.hpp"
     #include "cuda_runtime_api.h"
     #include "opencv2/gpu/stream_accessor.hpp"
     #include "npp.h"
...