submodule / opencv · Commits

Commit 3f5dd5f1 authored Jul 22, 2010 by Vladislav Vinogradov
added implementation of GpuMat::convertTo and merged it into matrix_operations.cpp
parent 7bf29e14
Showing 5 changed files with 426 additions and 3 deletions:

    modules/gpu/src/cuda/cuda_shared.hpp          +3    -0
    modules/gpu/src/cuda/matrix_operations.cu     +309  -1
    modules/gpu/src/matrix_operations.cpp         +24   -2
    modules/gpu/src/precomp.hpp                   +1    -0
    tests/gpu/src/convert_to.cpp  (new file)      +89   -0
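For reference, GpuMat::convertTo follows the same semantics as cv::Mat::convertTo, dst(y,x) = saturate_cast<rtype>(alpha * src(y,x) + beta), but performs the conversion on the device. Below is a minimal usage sketch modelled on the new test added in this commit; the GpuMat(Mat) upload constructor and passing a GpuMat directly to cv::norm are assumed to behave exactly as the test itself uses them.

#include "opencv2/gpu/gpu.hpp"

// Minimal sketch (not part of the commit): convert an 8-bit image to float
// on the GPU and compare against the CPU result.
void convert_to_example()
{
    cv::Mat cpu_src(35, 67, CV_8UC1), cpu_dst;
    cv::randu(cpu_src, cv::Scalar::all(0), cv::Scalar::all(255));

    // Reference conversion on the CPU.
    cpu_src.convertTo(cpu_dst, CV_32F, 1.0 / 255.0, 0.0);

    // Same conversion on the GPU: upload, convert, compare.
    cv::gpu::GpuMat gpu_src(cpu_src), gpu_dst;
    gpu_src.convertTo(gpu_dst, CV_32F, 1.0 / 255.0, 0.0);

    double err = cv::norm(cpu_dst, gpu_dst, cv::NORM_L1);
    CV_Assert(err < 1.0);
}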
modules/gpu/src/cuda/cuda_shared.hpp

@@ -51,6 +51,7 @@ namespace cv
    namespace gpu
    {
        typedef unsigned char uchar;
        typedef signed char schar;
        typedef unsigned short ushort;
        typedef unsigned int uint;
@@ -62,6 +63,8 @@ namespace cv
            extern "C" void set_to_without_mask(const DevMem2D& mat, const double * scalar, int depth, int channels);
            extern "C" void set_to_with_mask(const DevMem2D& mat, const double * scalar, const DevMem2D& mask, int depth, int channels);

            extern "C" void convert_to(const DevMem2D& src, int sdepth, DevMem2D dst, int ddepth, size_t width, size_t height, double alpha, double beta);
        }
    }
}
modules/gpu/src/cuda/matrix_operations.cu

@@ -46,10 +46,17 @@
#include "cuda_shared.hpp"
#include "cuda_shared.hpp"
#include "cuda_runtime.h"
#include "cuda_runtime.h"
using namespace cv::gpu;
using namespace cv::gpu::impl;
__constant__ __align__(16) float scalar_d[4];
__constant__ __align__(16) float scalar_d[4];
namespace mat_operators
namespace mat_operators
{
{
//////////////////////////////////////////////////////////
// SetTo
//////////////////////////////////////////////////////////
template<typename T, int channels>
template<typename T, int channels>
__global__ void kernel_set_to_without_mask(T * mat, int cols, int rows, int step)
__global__ void kernel_set_to_without_mask(T * mat, int cols, int rows, int step)
{
{
@@ -76,7 +83,245 @@ namespace mat_operators
            mat[idx] = scalar_d[ x % channels ];
        }
    }

    //////////////////////////////////////////////////////////
    // ConvertTo
    //////////////////////////////////////////////////////////
    template <typename T, typename DT, size_t src_elem_size, size_t dst_elem_size>
    struct Converter
    {
        __device__ static void convert(uchar* srcmat, size_t src_step, uchar* dstmat, size_t dst_step, size_t width, size_t height, double alpha, double beta)
        {
            size_t x = threadIdx.x + blockIdx.x * blockDim.x;
            size_t y = threadIdx.y + blockIdx.y * blockDim.y;
            if (x < width && y < height)
            {
                const T* src = (const T*)(srcmat + src_step * y);
                DT* dst = (DT*)(dstmat + dst_step * y);
                dst[x] = (DT)__double2int_rn(alpha * src[x] + beta);
            }
        }
        __host__ static inline dim3 calcGrid(size_t width, size_t height, dim3 block)
        {
            return dim3(divUp(width, block.x), divUp(height, block.y));
        }
    };

    template <typename T, typename DT>
    struct Converter<T, DT, 1, 1>
    {
        __device__ static void convert(uchar* srcmat, size_t src_step, uchar* dstmat, size_t dst_step, size_t width, size_t height, double alpha, double beta)
        {
            size_t x = threadIdx.x + blockIdx.x * blockDim.x;
            size_t y = threadIdx.y + blockIdx.y * blockDim.y;
            if (y < height)
            {
                const T* src = (const T*)(srcmat + src_step * y);
                DT* dst = (DT*)(dstmat + dst_step * y);
                if ((x << 2) + 3 < width)
                {
                    uchar4 src4b = ((const uchar4*)src)[x];
                    uchar4 dst4b;
                    const T* src1b = (const T*) &src4b.x;
                    DT* dst1b = (DT*) &dst4b.x;
                    dst1b[0] = (DT)__double2int_rn(alpha * src1b[0] + beta);
                    dst1b[1] = (DT)__double2int_rn(alpha * src1b[1] + beta);
                    dst1b[2] = (DT)__double2int_rn(alpha * src1b[2] + beta);
                    dst1b[3] = (DT)__double2int_rn(alpha * src1b[3] + beta);
                    ((uchar4*)dst)[x] = dst4b;
                }
                else
                {
                    if ((x << 2) + 0 < width)
                        dst[(x << 2) + 0] = (DT)__double2int_rn(alpha * src[(x << 2) + 0] + beta);
                    if ((x << 2) + 1 < width)
                        dst[(x << 2) + 1] = (DT)__double2int_rn(alpha * src[(x << 2) + 1] + beta);
                    if ((x << 2) + 2 < width)
                        dst[(x << 2) + 2] = (DT)__double2int_rn(alpha * src[(x << 2) + 2] + beta);
                }
            }
        }
        __host__ static inline dim3 calcGrid(size_t width, size_t height, dim3 block)
        {
            return dim3(divUp(width, block.x << 2), divUp(height, block.y));
        }
    };

    template <typename T, typename DT>
    struct Converter<T, DT, 1, 2>
    {
        __device__ static void convert(uchar* srcmat, size_t src_step, uchar* dstmat, size_t dst_step, size_t width, size_t height, double alpha, double beta)
        {
            size_t x = threadIdx.x + blockIdx.x * blockDim.x;
            size_t y = threadIdx.y + blockIdx.y * blockDim.y;
            if (y < height)
            {
                const T* src = (const T*)(srcmat + src_step * y);
                DT* dst = (DT*)(dstmat + dst_step * y);
                if ((x << 1) + 1 < width)
                {
                    uchar2 src2b = ((const uchar2*)src)[x];
                    ushort2 dst2s;
                    const T* src1b = (const T*) &src2b;
                    DT* dst1s = (DT*) &dst2s;
                    dst1s[0] = (DT)__double2int_rn(alpha * src1b[0] + beta);
                    dst1s[1] = (DT)__double2int_rn(alpha * src1b[1] + beta);
                    ((ushort2*)(dst))[x] = dst2s;
                }
                else
                {
                    if ((x << 1) < width)
                        dst[(x << 1)] = (DT)__double2int_rn(alpha * src[(x << 1)] + beta);
                }
            }
        }
        __host__ static inline dim3 calcGrid(size_t width, size_t height, dim3 block)
        {
            return dim3(divUp(width, block.x << 1), divUp(height, block.y));
        }
    };

    template <typename T, typename DT>
    struct Converter<T, DT, 2, 1>
    {
        __device__ static void convert(uchar* srcmat, size_t src_step, uchar* dstmat, size_t dst_step, size_t width, size_t height, double alpha, double beta)
        {
            size_t x = threadIdx.x + blockIdx.x * blockDim.x;
            size_t y = threadIdx.y + blockIdx.y * blockDim.y;
            if (y < height)
            {
                const T* src = (const T*)(srcmat + src_step * y);
                DT* dst = (DT*)(dstmat + dst_step * y);
                if ((x << 2) + 3 < width)
                {
                    ushort4 src4s = ((const ushort4*)src)[x];
                    uchar4 dst4b;
                    const T* src1s = (const T*) &src4s.x;
                    DT* dst1b = (DT*) &dst4b.x;
                    dst1b[0] = (DT)__double2int_rn(alpha * src1s[0] + beta);
                    dst1b[1] = (DT)__double2int_rn(alpha * src1s[1] + beta);
                    dst1b[2] = (DT)__double2int_rn(alpha * src1s[2] + beta);
                    dst1b[3] = (DT)__double2int_rn(alpha * src1s[3] + beta);
                    ((uchar4*)(dst))[x] = dst4b;
                }
                else
                {
                    if ((x << 2) + 0 < width)
                        dst[(x << 2) + 0] = (DT)__double2int_rn(alpha * src[(x << 2) + 0] + beta);
                    if ((x << 2) + 1 < width)
                        dst[(x << 2) + 1] = (DT)__double2int_rn(alpha * src[(x << 2) + 1] + beta);
                    if ((x << 2) + 2 < width)
                        dst[(x << 2) + 2] = (DT)__double2int_rn(alpha * src[(x << 2) + 2] + beta);
                }
            }
        }
        __host__ static inline dim3 calcGrid(size_t width, size_t height, dim3 block)
        {
            return dim3(divUp(width, block.x << 2), divUp(height, block.y));
        }
    };

    template <typename T, typename DT>
    struct Converter<T, DT, 2, 2>
    {
        __device__ static void convert(uchar* srcmat, size_t src_step, uchar* dstmat, size_t dst_step, size_t width, size_t height, double alpha, double beta)
        {
            size_t x = threadIdx.x + blockIdx.x * blockDim.x;
            size_t y = threadIdx.y + blockIdx.y * blockDim.y;
            if (y < height)
            {
                const T* src = (const T*)(srcmat + src_step * y);
                DT* dst = (DT*)(dstmat + dst_step * y);
                if ((x << 1) + 1 < width)
                {
                    ushort2 src2s = ((const ushort2*)src)[x];
                    ushort2 dst2s;
                    const T* src1s = (const T*) &src2s.x;
                    DT* dst1s = (DT*) &dst2s.x;
                    dst1s[0] = (DT)__double2int_rn(alpha * src1s[0] + beta);
                    dst1s[1] = (DT)__double2int_rn(alpha * src1s[1] + beta);
                    ((ushort2*)dst)[x] = dst2s;
                }
                else
                {
                    if ((x << 1) < width)
                        dst[(x << 1)] = (DT)__double2int_rn(alpha * src[(x << 1)] + beta);
                }
            }
        }
        __host__ static inline dim3 calcGrid(size_t width, size_t height, dim3 block)
        {
            return dim3(divUp(width, block.x << 1), divUp(height, block.y));
        }
    };

    template <typename T, size_t src_elem_size, size_t dst_elem_size>
    struct Converter<T, float, src_elem_size, dst_elem_size>
    {
        __device__ static void convert(uchar* srcmat, size_t src_step, uchar* dstmat, size_t dst_step, size_t width, size_t height, double alpha, double beta)
        {
            size_t x = threadIdx.x + blockIdx.x * blockDim.x;
            size_t y = threadIdx.y + blockIdx.y * blockDim.y;
            if (x < width && y < height)
            {
                const T* src = (const T*)(srcmat + src_step * y);
                float* dst = (float*)(dstmat + dst_step * y);
                dst[x] = (float)(alpha * src[x] + beta);
            }
        }
        __host__ static inline dim3 calcGrid(size_t width, size_t height, dim3 block)
        {
            return dim3(divUp(width, block.x), divUp(height, block.y));
        }
    };

    template <typename T, size_t src_elem_size, size_t dst_elem_size>
    struct Converter<T, double, src_elem_size, dst_elem_size>
    {
        __device__ static void convert(uchar* srcmat, size_t src_step, uchar* dstmat, size_t dst_step, size_t width, size_t height, double alpha, double beta)
        {
            size_t x = threadIdx.x + blockIdx.x * blockDim.x;
            size_t y = threadIdx.y + blockIdx.y * blockDim.y;
            if (x < width && y < height)
            {
                const T* src = (const T*)(srcmat + src_step * y);
                double* dst = (double*)(dstmat + dst_step * y);
                dst[x] = (double)(alpha * src[x] + beta);
            }
        }
        __host__ static inline dim3 calcGrid(size_t width, size_t height, dim3 block)
        {
            return dim3(divUp(width, block.x), divUp(height, block.y));
        }
    };

    template <typename T, typename DT>
    __global__ static void kernel_convert_to(uchar* srcmat, size_t src_step, uchar* dstmat, size_t dst_step, size_t width, size_t height, double alpha, double beta)
    {
        Converter<T, DT, sizeof(T), sizeof(DT)>::convert(srcmat, src_step, dstmat, dst_step, width, height, alpha, beta);
    }

} // namespace mat_operators
//////////////////////////////////////////////////////////////
// SetTo
//////////////////////////////////////////////////////////////

extern "C" void cv::gpu::impl::set_to_without_mask(const DevMem2D& mat, const double * scalar, int elemSize1, int channels)
{
@@ -158,3 +403,66 @@ extern "C" void cv::gpu::impl::set_to_with_mask(const DevMem2D& mat, const doubl
    cudaSafeCall ( cudaThreadSynchronize() );
}
//////////////////////////////////////////////////////////////
// ConvertTo
//////////////////////////////////////////////////////////////

namespace cv
{
    namespace gpu
    {
        namespace impl
        {
            typedef void (*CvtFunc)(const DevMem2D& src, DevMem2D& dst, size_t width, size_t height, double alpha, double beta);

            //#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 130)
            template<typename T, typename DT>
            void cvt_(const DevMem2D& src, DevMem2D& dst, size_t width, size_t height, double alpha, double beta)
            {
                dim3 block(32, 8);
                dim3 grid = ::mat_operators::Converter<T, DT, sizeof(T), sizeof(DT)>::calcGrid(width, height, block);
                ::mat_operators::kernel_convert_to<T, DT><<<grid, block>>>(src.ptr, src.step, dst.ptr, dst.step, width, height, alpha, beta);
                cudaSafeCall( cudaThreadSynchronize() );
            }
            //#endif

            extern "C" void convert_to(const DevMem2D& src, int sdepth, DevMem2D dst, int ddepth, size_t width, size_t height, double alpha, double beta)
            {
                static CvtFunc tab[8][8] =
                {
                    {cvt_<uchar, uchar>,  cvt_<uchar, schar>,  cvt_<uchar, ushort>,  cvt_<uchar, short>,
                     cvt_<uchar, int>,    cvt_<uchar, float>,  cvt_<uchar, double>,  0},

                    {cvt_<schar, uchar>,  cvt_<schar, schar>,  cvt_<schar, ushort>,  cvt_<schar, short>,
                     cvt_<schar, int>,    cvt_<schar, float>,  cvt_<schar, double>,  0},

                    {cvt_<ushort, uchar>, cvt_<ushort, schar>, cvt_<ushort, ushort>, cvt_<ushort, short>,
                     cvt_<ushort, int>,   cvt_<ushort, float>, cvt_<ushort, double>, 0},

                    {cvt_<short, uchar>,  cvt_<short, schar>,  cvt_<short, ushort>,  cvt_<short, short>,
                     cvt_<short, int>,    cvt_<short, float>,  cvt_<short, double>,  0},

                    {cvt_<int, uchar>,    cvt_<int, schar>,    cvt_<int, ushort>,    cvt_<int, short>,
                     cvt_<int, int>,      cvt_<int, float>,    cvt_<int, double>,    0},

                    {cvt_<float, uchar>,  cvt_<float, schar>,  cvt_<float, ushort>,  cvt_<float, short>,
                     cvt_<float, int>,    cvt_<float, float>,  cvt_<float, double>,  0},

                    {cvt_<double, uchar>, cvt_<double, schar>, cvt_<double, ushort>, cvt_<double, short>,
                     cvt_<double, int>,   cvt_<double, float>, cvt_<double, double>, 0},

                    {0, 0, 0, 0, 0, 0, 0, 0}
                };

                CvtFunc func = tab[sdepth][ddepth];
                if (func == 0)
                    error("Operation \'ConvertTo\' doesn't supported on your GPU model", __FILE__, __LINE__);
                func(src, dst, width, height, alpha, beta);
            }
        }
    }
}
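The calcGrid helpers in the Converter specializations size the launch grid with divUp, which lives in cuda_shared.hpp and is not part of this diff. Below is a minimal sketch of the assumed helper (round-up integer division) and of the resulting launch geometry for the vectorized 1-byte path, which converts four elements per thread.

// Assumed shape of the divUp helper used by calcGrid (declared in
// cuda_shared.hpp, not shown in this diff): integer division rounded up,
// so the grid always covers the full width and height.
static inline size_t divUp(size_t total, size_t grain)
{
    return (total + grain - 1) / grain;
}

// Example: Converter<T, DT, 1, 1> handles 4 elements per thread, so for a
// 67x35 single-channel image with block = dim3(32, 8):
//   grid.x = divUp(67, 32 << 2) = divUp(67, 128) = 1
//   grid.y = divUp(35, 8)                        = 5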
modules/gpu/src/matrix_operations.cpp
@@ -104,9 +104,31 @@ void cv::gpu::GpuMat::copyTo( GpuMat& /*m*/, const GpuMat&/* mask */) const
    CV_Assert(!"Not implemented");
}

void cv::gpu::GpuMat::convertTo( GpuMat& dst, int rtype, double alpha, double beta ) const
{
    //CV_Assert(!"Not implemented");

    bool noScale = fabs(alpha - 1) < std::numeric_limits<double>::epsilon() && fabs(beta) < std::numeric_limits<double>::epsilon();

    if( rtype < 0 )
        rtype = type();
    else
        rtype = CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());

    int sdepth = depth(), ddepth = CV_MAT_DEPTH(rtype);

    /*if( sdepth == ddepth && noScale )
    {
        copyTo(dst);
        return;
    }*/

    GpuMat temp;
    const GpuMat* psrc = this;
    if( sdepth != ddepth && psrc == &dst )
        psrc = &(temp = *this);

    dst.create( size(), rtype );
    impl::convert_to(*psrc, sdepth, dst, ddepth, psrc->cols * psrc->channels(), psrc->rows, alpha, beta);
}

GpuMat& GpuMat::operator = (const Scalar& s)
modules/gpu/src/precomp.hpp

@@ -51,6 +51,7 @@

#endif

#include <iostream>
#include <limits>
#include "opencv2/gpu/gpu.hpp"
tests/gpu/src/convert_to.cpp  (new file, mode 100644)
#include "gputest.hpp"
#include <string>
#include <iostream>
#include <fstream>
#include <iterator>
#include <limits>
#include <numeric>
using namespace cv;
using namespace std;
using namespace gpu;

class CV_GpuMatOpConvertTo : public CvTest
{
    public:
        CV_GpuMatOpConvertTo();
        ~CV_GpuMatOpConvertTo();
    protected:
        void run(int);
};

CV_GpuMatOpConvertTo::CV_GpuMatOpConvertTo() : CvTest("GpuMatOperatorConvertTo", "convertTo") {}
CV_GpuMatOpConvertTo::~CV_GpuMatOpConvertTo() {}

void CV_GpuMatOpConvertTo::run(int /* start_from */)
{
    const Size img_size(67, 35);

    const int types[] = {CV_8U, CV_8S, CV_16U, CV_16S, CV_32S, CV_32F, CV_64F};
    const int types_num = sizeof(types) / sizeof(int);

    const char* types_str[] = {"CV_8U", "CV_8S", "CV_16U", "CV_16S", "CV_32S", "CV_32F", "CV_64F"};

    bool passed = true;

    for (int i = 0; i < types_num && passed; ++i)
    {
        for (int j = 0; j < types_num && passed; ++j)
        {
            for (int c = 1; c < 2 && passed; ++c)
            {
                //if (i == j)
                //    continue;

                const int src_type = CV_MAKETYPE(types[i], c);
                const int dst_type = types[j];
                const double alpha = (double)rand() / RAND_MAX * 10.0;
                const double beta = (double)rand() / RAND_MAX * 10.0;

                Mat cpumatsrc(img_size, src_type);
                randu(cpumatsrc, Scalar::all(0), Scalar::all(10));

                GpuMat gpumatsrc(cpumatsrc);
                Mat cpumatdst;
                GpuMat gpumatdst;

                //double cput = (double)getTickCount();
                cpumatsrc.convertTo(cpumatdst, dst_type, alpha, beta);
                //cput = ((double)getTickCount() - cput) / getTickFrequency();

                //double gput = (double)getTickCount();
                gpumatsrc.convertTo(gpumatdst, dst_type, alpha, beta);
                //gput = ((double)getTickCount() - gput) / getTickFrequency();

                /*cout << "convertTo time: " << endl;
                cout << "CPU time: " << cput << endl;
                cout << "GPU time: " << gput << endl;*/

                double r = norm(cpumatdst, gpumatdst, NORM_L1);
                if (r > 1)
                {
                    /*namedWindow("CPU");
                    imshow("CPU", cpumatdst);
                    namedWindow("GPU");
                    imshow("GPU", gpumatdst);
                    waitKey();*/

                    cout << "Failed:" << endl;
                    cout << "\tr = " << r << endl;
                    cout << "\tSRC_TYPE=" << types_str[i] << "C" << c << " DST_TYPE=" << types_str[j] << endl;

                    passed = false;
                }
            }
        }
    }

    ts->set_failed_test_info(passed ? CvTS::OK : CvTS::FAIL_GENERIC);
}

CV_GpuMatOpConvertTo CV_GpuMatOpConvertTo_test;