Commit 34565c28 authored Aug 02, 2010 by Vladislav Vinogradov
added support for 3-channel images to StereoBeliefPropagation_GPU
parent 6da2573b
Showing 2 changed files with 65 additions and 28 deletions:
modules/gpu/src/beliefpropagation_gpu.cpp: +6, -6
modules/gpu/src/cuda/beliefpropagation.cu: +59, -22
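As a rough orientation, the host-side effect of the commit is that the left/right images fed to the matcher may now be 3-channel (BGR); previously only single-channel inputs reached a working comp_data path. Below is a minimal usage sketch, assuming the usual operator()(left, right, disparity) entry point of the GPU stereo matchers and the 2.x-era header layout (neither appears in this diff); the parameter values are illustrative only.

// Sketch only -- the call operator and concrete parameters are assumptions, not part of this commit.
#include <opencv2/core/core.hpp>
#include <opencv2/gpu/gpu.hpp>   // header path as in the OpenCV 2.x GPU module (assumption)

void match_color_pair(const cv::Mat& left_bgr, const cv::Mat& right_bgr)
{
    cv::gpu::GpuMat d_left(left_bgr);    // CV_8UC3 inputs are accepted after this commit
    cv::gpu::GpuMat d_right(right_bgr);
    cv::gpu::GpuMat d_disp;

    // (ndisp, iters, levels, msg_type, msg_scale) -- ctor signature as shown in this diff;
    // the values below are illustrative.
    cv::gpu::StereoBeliefPropagation_GPU bp(64, 5, 5, CV_32F, 1.0f);
    bp(d_left, d_right, d_disp);         // assumed call operator

    cv::Mat disp;
    d_disp.download(disp);               // bring the disparity map back to host memory
}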
modules/gpu/src/beliefpropagation_gpu.cpp
...
@@ -65,11 +65,11 @@ const float DEFAULT_DISC_SINGLE_JUMP = 1.0f;
 namespace cv { namespace gpu { namespace impl {
     void load_constants(int ndisp, float max_data_term, float data_weight, float max_disc_term, float disc_single_jump);
-    void comp_data(int msgType, const DevMem2D& l, const DevMem2D& r, DevMem2D mdata, const cudaStream_t& stream);
-    void data_step_down(int dst_cols, int dst_rows, int src_rows, int msgType, const DevMem2D& src, DevMem2D dst, const cudaStream_t& stream);
-    void level_up_messages(int dst_idx, int dst_cols, int dst_rows, int src_rows, int msgType, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, const cudaStream_t& stream);
-    void calc_all_iterations(int cols, int rows, int iters, int msgType, DevMem2D& u, DevMem2D& d, DevMem2D& l, DevMem2D& r, const DevMem2D& data, const cudaStream_t& stream);
-    void output(int msgType, const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, DevMem2D disp, const cudaStream_t& stream);
+    void comp_data(int msg_type, const DevMem2D& l, const DevMem2D& r, int channels, DevMem2D mdata, const cudaStream_t& stream);
+    void data_step_down(int dst_cols, int dst_rows, int src_rows, int msg_type, const DevMem2D& src, DevMem2D dst, const cudaStream_t& stream);
+    void level_up_messages(int dst_idx, int dst_cols, int dst_rows, int src_rows, int msg_type, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, const cudaStream_t& stream);
+    void calc_all_iterations(int cols, int rows, int iters, int msg_type, DevMem2D& u, DevMem2D& d, DevMem2D& l, DevMem2D& r, const DevMem2D& data, const cudaStream_t& stream);
+    void output(int msg_type, const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, DevMem2D disp, const cudaStream_t& stream);
 }}}

 cv::gpu::StereoBeliefPropagation_GPU::StereoBeliefPropagation_GPU(int ndisp_, int iters_, int levels_, int msg_type_, float msg_scale_)
...
@@ -228,7 +228,7 @@ static void stereo_bp_gpu_operator(int ndisp, int iters, int levels,
     datas[0].create(rows * ndisp, cols, msg_type);

-    impl::comp_data(msg_type, left, right, datas.front(), stream);
+    impl::comp_data(msg_type, left, right, left.channels(), datas.front(), stream);

     for (int i = 1; i < levels; i++)
     {
...
modules/gpu/src/cuda/beliefpropagation.cu
...
@@ -81,26 +81,60 @@ namespace cv { namespace gpu { namespace impl {
 namespace beliefpropagation_gpu
 {
     template <typename T>
-    __global__ void comp_data(uchar* l, uchar* r, size_t step, T* data, size_t data_step, int cols, int rows)
+    __global__ void comp_data_gray(const uchar* l, const uchar* r, size_t step, T* data, size_t data_step, int cols, int rows)
     {
         int x = blockIdx.x * blockDim.x + threadIdx.x;
         int y = blockIdx.y * blockDim.y + threadIdx.y;

-        if (y < rows && x < cols)
+        if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1)
         {
-            uchar* ls = l + y * step + x;
-            uchar* rs = r + y * step + x;
+            const uchar* ls = l + y * step + x;
+            const uchar* rs = r + y * step + x;
             T* ds = data + y * data_step + x;
             size_t disp_step = data_step * rows;

             for (int disp = 0; disp < cndisp; disp++)
             {
-                if (x - disp >= 0)
+                if (x - disp >= 1)
                 {
-                    int le = ls[0];
-                    int re = rs[-disp];
-                    float val = abs(le - re);
+                    float val = abs((int)ls[0] - rs[-disp]);

                     ds[disp * disp_step] = saturate_cast<T>(fmin(cdata_weight * val, cdata_weight * cmax_data_term));
                 }
                 else
                 {
                     ds[disp * disp_step] = saturate_cast<T>(cdata_weight * cmax_data_term);
                 }
             }
         }
     }
+
+    template <typename T>
+    __global__ void comp_data_bgr(const uchar* l, const uchar* r, size_t step, T* data, size_t data_step, int cols, int rows)
+    {
+        int x = blockIdx.x * blockDim.x + threadIdx.x;
+        int y = blockIdx.y * blockDim.y + threadIdx.y;
+
+        if (y > 0 && y < rows - 1 && x > 0 && x < cols - 1)
+        {
+            const uchar* ls = l + y * step + x * 3;
+            const uchar* rs = r + y * step + x * 3;
+            T* ds = data + y * data_step + x;
+            size_t disp_step = data_step * rows;
+
+            for (int disp = 0; disp < cndisp; disp++)
+            {
+                if (x - disp >= 1)
+                {
+                    const float tr = 0.299f;
+                    const float tg = 0.587f;
+                    const float tb = 0.114f;
+
+                    float val  = tb * abs((int)ls[0] - rs[0-disp*3]);
+                          val += tg * abs((int)ls[1] - rs[1-disp*3]);
+                          val += tr * abs((int)ls[2] - rs[2-disp*3]);
+
+                    ds[disp * disp_step] = saturate_cast<T>(fmin(cdata_weight * val, cdata_weight * cmax_data_term));
+                }
...
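In the new comp_data_bgr kernel, the three per-channel absolute differences are blended with the Rec. 601 luma weights (0.299 for red, 0.587 for green, 0.114 for blue) before the usual truncated-linear data term is applied. A host-side sketch of the same per-pixel cost follows, written only to spell out the arithmetic; the function name is hypothetical and the explicit data_weight/max_data_term parameters stand in for the device constants cdata_weight/cmax_data_term.

// Host-side reference for one pixel of the BGR data term (sketch, not part of the commit).
#include <algorithm>
#include <cmath>

float data_cost_bgr(const unsigned char* l, const unsigned char* r,  // pointers to B,G,R triplets
                    float data_weight, float max_data_term)
{
    const float tb = 0.114f, tg = 0.587f, tr = 0.299f;               // Rec. 601 luma weights
    float val = tb * std::abs(int(l[0]) - int(r[0]))                 // blue
              + tg * std::abs(int(l[1]) - int(r[1]))                 // green
              + tr * std::abs(int(l[2]) - int(r[2]));                // red
    return data_weight * std::min(val, max_data_term);               // truncated linear cost
}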
@@ -114,10 +148,10 @@ namespace beliefpropagation_gpu
 }

 namespace cv { namespace gpu { namespace impl {
-    typedef void (*CompDataFunc)(const DevMem2D& l, const DevMem2D& r, DevMem2D mdata, const cudaStream_t& stream);
+    typedef void (*CompDataFunc)(const DevMem2D& l, const DevMem2D& r, int channels, DevMem2D mdata, const cudaStream_t& stream);

     template<typename T>
-    void comp_data_(const DevMem2D& l, const DevMem2D& r, DevMem2D mdata, const cudaStream_t& stream)
+    void comp_data_(const DevMem2D& l, const DevMem2D& r, int channels, DevMem2D mdata, const cudaStream_t& stream)
     {
         dim3 threads(32, 8, 1);
         dim3 grid(1, 1, 1);
...
@@ -125,13 +159,16 @@ namespace cv { namespace gpu { namespace impl {
         grid.x = divUp(l.cols, threads.x);
         grid.y = divUp(l.rows, threads.y);

-        beliefpropagation_gpu::comp_data<T><<<grid, threads, 0, stream>>>(l.ptr, r.ptr, l.step, (T*)mdata.ptr, mdata.step/sizeof(T), l.cols, l.rows);
+        if (channels == 1)
+            beliefpropagation_gpu::comp_data_gray<T><<<grid, threads, 0, stream>>>(l.ptr, r.ptr, l.step, (T*)mdata.ptr, mdata.step/sizeof(T), l.cols, l.rows);
+        else
+            beliefpropagation_gpu::comp_data_bgr<T><<<grid, threads, 0, stream>>>(l.ptr, r.ptr, l.step, (T*)mdata.ptr, mdata.step/sizeof(T), l.cols, l.rows);

         if (stream == 0)
             cudaSafeCall( cudaThreadSynchronize() );
     }

-    void comp_data(int msgType, const DevMem2D& l, const DevMem2D& r, DevMem2D mdata, const cudaStream_t& stream)
+    void comp_data(int msg_type, const DevMem2D& l, const DevMem2D& r, int channels, DevMem2D mdata, const cudaStream_t& stream)
     {
         static CompDataFunc tab[8] =
         {
...
@@ -145,10 +182,10 @@ namespace cv { namespace gpu { namespace impl {
             0 // user type
         };

-        CompDataFunc func = tab[msgType];
+        CompDataFunc func = tab[msg_type];
         if (func == 0)
             cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
-        func(l, r, mdata, stream);
+        func(l, r, channels, mdata, stream);
     }
 }}}
...
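The comp_data wrapper keeps the module's existing dispatch pattern: an 8-entry function-pointer table indexed by the OpenCV depth constant carried in msg_type, with 0 for unsupported depths, and only the new channels argument threaded through to the selected instantiation. A stripped-down sketch of that pattern follows; the real entries of tab[8] are hidden behind the collapsed context above, so the non-zero slots below are placeholders, not the commit's actual table.

// Sketch of the depth-indexed dispatch used above; entries are placeholders.
typedef void (*Func)(int channels);

template <typename T> void impl_(int channels) { /* launch comp_data_gray/_bgr<T> here */ }

void dispatch(int msg_type, int channels)
{
    static Func tab[8] =
    {
        0 /*CV_8U*/, 0 /*CV_8S*/, 0 /*CV_16U*/, impl_<short> /*CV_16S*/,
        0 /*CV_32S*/, impl_<float> /*CV_32F*/, 0 /*CV_64F*/, 0 /* user type */
    };

    Func func = tab[msg_type];
    if (func == 0)
        return;                // the real code reports "Unsupported message type" here
    func(channels);
}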
@@ -200,7 +237,7 @@ namespace cv { namespace gpu { namespace impl {
             cudaSafeCall( cudaThreadSynchronize() );
     }

-    void data_step_down(int dst_cols, int dst_rows, int src_rows, int msgType, const DevMem2D& src, DevMem2D dst, const cudaStream_t& stream)
+    void data_step_down(int dst_cols, int dst_rows, int src_rows, int msg_type, const DevMem2D& src, DevMem2D dst, const cudaStream_t& stream)
     {
         static DataStepDownFunc tab[8] =
         {
...
@@ -214,7 +251,7 @@ namespace cv { namespace gpu { namespace impl {
             0 // user type
         };

-        DataStepDownFunc func = tab[msgType];
+        DataStepDownFunc func = tab[msg_type];
         if (func == 0)
             cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
         func(dst_cols, dst_rows, src_rows, src, dst, stream);
...
@@ -270,7 +307,7 @@ namespace cv { namespace gpu { namespace impl {
             cudaSafeCall( cudaThreadSynchronize() );
     }

-    void level_up_messages(int dst_idx, int dst_cols, int dst_rows, int src_rows, int msgType, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, const cudaStream_t& stream)
+    void level_up_messages(int dst_idx, int dst_cols, int dst_rows, int src_rows, int msg_type, DevMem2D* mus, DevMem2D* mds, DevMem2D* mls, DevMem2D* mrs, const cudaStream_t& stream)
     {
         static LevelUpMessagesFunc tab[8] =
         {
...
@@ -284,7 +321,7 @@ namespace cv { namespace gpu { namespace impl {
             0 // user type
         };

-        LevelUpMessagesFunc func = tab[msgType];
+        LevelUpMessagesFunc func = tab[msg_type];
         if (func == 0)
             cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
         func(dst_idx, dst_cols, dst_rows, src_rows, mus, mds, mls, mrs, stream);
...
@@ -413,7 +450,7 @@ namespace cv { namespace gpu { namespace impl {
         }
     }

-    void calc_all_iterations(int cols, int rows, int iters, int msgType, DevMem2D& u, DevMem2D& d, DevMem2D& l, DevMem2D& r, const DevMem2D& data, const cudaStream_t& stream)
+    void calc_all_iterations(int cols, int rows, int iters, int msg_type, DevMem2D& u, DevMem2D& d, DevMem2D& l, DevMem2D& r, const DevMem2D& data, const cudaStream_t& stream)
     {
         static CalcAllIterationFunc tab[8] =
         {
...
@@ -427,7 +464,7 @@ namespace cv { namespace gpu { namespace impl {
             0 // user type
         };

-        CalcAllIterationFunc func = tab[msgType];
+        CalcAllIterationFunc func = tab[msg_type];
         if (func == 0)
             cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
         func(cols, rows, iters, u, d, l, r, data, stream);
...
@@ -496,7 +533,7 @@ namespace cv { namespace gpu { namespace impl {
             cudaSafeCall( cudaThreadSynchronize() );
     }

-    void output(int msgType, const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, DevMem2D disp, const cudaStream_t& stream)
+    void output(int msg_type, const DevMem2D& u, const DevMem2D& d, const DevMem2D& l, const DevMem2D& r, const DevMem2D& data, DevMem2D disp, const cudaStream_t& stream)
     {
         static OutputFunc tab[8] =
         {
...
@@ -510,7 +547,7 @@ namespace cv { namespace gpu { namespace impl {
             0 // user type
         };

-        OutputFunc func = tab[msgType];
+        OutputFunc func = tab[msg_type];
         if (func == 0)
             cv::gpu::error("Unsupported message type", __FILE__, __LINE__);
         func(u, d, l, r, data, disp, stream);
...