Commit 720eaf1e
authored Feb 23, 2013 by yao
fix the haar kernel problems on Nvidia and Intel OCL
parent e6dd4e84
Showing 2 changed files with 78 additions and 73 deletions

modules/ocl/src/haar.cpp (+1, -1)
modules/ocl/src/kernels/haarobjectdetect.cl (+77, -72)
modules/ocl/src/haar.cpp

@@ -926,7 +926,7 @@ CvSeq *cv::ocl::OclCascadeClassifier::oclHaarDetectObjects( oclMat &gimg, CvMemS
     if( gimg.cols < minSize.width || gimg.rows < minSize.height )
         CV_Error( CV_StsError, "Image too small" );
 
-    if( (flags & CV_HAAR_SCALE_IMAGE) && gimg.clCxt->impl->devName.find("Intel(R) HD Graphics") == string::npos )
+    if( (flags & CV_HAAR_SCALE_IMAGE) )
     {
         CvSize winSize0 = cascade->orig_window_size;
         //float scalefactor = 1.1f;
modules/ocl/src/kernels/haarobjectdetect.cl

@@ -9,6 +9,7 @@
 //    Niko Li, newlife20080214@gmail.com
 //    Wang Weiyan, wangweiyanster@gmail.com
 //    Jia Haipeng, jiahaipeng95@gmail.com
+//    Nathan, liujun@multicorewareinc.com
 // Redistribution and use in source and binary forms, with or without modification,
 // are permitted provided that the following conditions are met:
 //

@@ -47,14 +48,14 @@ typedef float sqsumtype;
 typedef struct __attribute__((aligned (128))) GpuHidHaarFeature
 {
     struct __attribute__((aligned (32)))
     {
         int p0 __attribute__((aligned (4)));
         int p1 __attribute__((aligned (4)));
         int p2 __attribute__((aligned (4)));
         int p3 __attribute__((aligned (4)));
         float weight __attribute__((aligned (4)));
     }
     rect[CV_HAAR_FEATURE_MAX] __attribute__((aligned (32)));
 }
 GpuHidHaarFeature;

@@ -108,7 +109,7 @@ typedef struct __attribute__((aligned (64))) GpuHidHaarClassifierCascade
     int p2 __attribute__((aligned (4)));
     int p3 __attribute__((aligned (4)));
     float inv_window_area __attribute__((aligned (4)));
 }GpuHidHaarClassifierCascade;
 __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCascade(
     //constant GpuHidHaarClassifierCascade * cascade,

@@ -132,7 +133,7 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
     //const int height,
     //const int grpnumperline,
     //const int totalgrp
     )
 {
     int grpszx = get_local_size(0);
     int grpszy = get_local_size(1);

@@ -184,7 +185,7 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
     __global const int* sum = sum1 + imgoff;
     __global const float* sqsum = sqsum1 + imgoff;
     for(int grploop=grpidx; grploop<totalgrp; grploop+=grpnumx)
     {
         int grpidy = grploop / grpnumperline;
         int grpidx = grploop - mul24(grpidy, grpnumperline);

@@ -195,7 +196,7 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
     int grpoffx = x-lclidx;
     int grpoffy = y-lclidy;
     for(int i=0; i<read_loop; i++)
     {
         int pos_id = mad24(i,lcl_sz,lcl_id);
         pos_id = pos_id < total_read ? pos_id : 0;

@@ -299,8 +300,9 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
     }
     barrier(CLK_LOCAL_MEM_FENCE);
     int queuecount = lclcount[0];
+    barrier(CLK_LOCAL_MEM_FENCE);
     nodecounter = splitnode;
     for(int stageloop = split_stage; stageloop< end_stage && queuecount>0; stageloop++)
     {
         //barrier(CLK_LOCAL_MEM_FENCE);
         //if(lcl_id == 0)

@@ -316,7 +318,7 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
     int lcl_compute_win_id = (lcl_id >>(6-perfscale));
     int lcl_loops = (stageinfo.x + lcl_compute_win -1) >> (6-perfscale);
     int lcl_compute_id = lcl_id - (lcl_compute_win_id << (6-perfscale));
     for(int queueloop=0; queueloop<queuecount_loop/* && lcl_compute_win_id < queuecount*/; queueloop++)
     {
         float stage_sum = 0.f;
         int temp_coord = lcloutindex[lcl_compute_win_id<<1];

@@ -324,11 +326,12 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
     int queue_pixel = mad24(((temp_coord & (int)0xffff0000)>>16), readwidth, temp_coord & 0xffff);
     //barrier(CLK_LOCAL_MEM_FENCE);
     if(lcl_compute_win_id < queuecount)
     {
         int tempnodecounter = lcl_compute_id;
         float part_sum = 0.f;
         for(int lcl_loop=0; lcl_loop<lcl_loops && tempnodecounter<stageinfo.x; lcl_loop++)
         {
             __global GpuHidHaarTreeNode* currentnodeptr = (nodeptr + nodecounter + tempnodecounter);

@@ -363,8 +366,9 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
             partialsum[lcl_id]=part_sum;
         }
         barrier(CLK_LOCAL_MEM_FENCE);
         if(lcl_compute_win_id < queuecount)
         {
             for(int i=0; i<lcl_compute_win && (lcl_compute_id==0); i++)
             {
                 stage_sum += partialsum[lcl_id+i];
             }

@@ -378,8 +382,9 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
         }
         barrier(CLK_LOCAL_MEM_FENCE);
     }//end for(int queueloop=0;queueloop<queuecount_loop;queueloop++)
-    barrier(CLK_LOCAL_MEM_FENCE);
+    // barrier(CLK_LOCAL_MEM_FENCE);
     queuecount = lclcount[0];
+    barrier(CLK_LOCAL_MEM_FENCE);
     nodecounter += stageinfo.x;
 }//end for(int stageloop = splitstage; stageloop< endstage && queuecount>0;stageloop++)
 //barrier(CLK_LOCAL_MEM_FENCE);

@@ -420,9 +425,9 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
 /*
 if(stagecascade->two_rects)
 {
     #pragma unroll
     for( n = 0; n < stagecascade->count; n++ )
     {

@@ -435,9 +440,9 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
         counter++;
     }
 }
 else
 {
     #pragma unroll
     for( n = 0; n < stagecascade->count; n++ )
     {

@@ -452,9 +457,9 @@ __kernel void __attribute__((reqd_work_group_size(8,8,1)))gpuRunHaarClassifierCa
         counter++;
     }
 }
 */
 /*
 __kernel void gpuRunHaarClassifierCascade_ScaleWindow(
     constant GpuHidHaarClassifierCascade * _cascade,
     global GpuHidHaarStageClassifier * stagecascadeptr,

@@ -477,51 +482,51 @@ __kernel void gpuRunHaarClassifierCascade_ScaleWindow(
     int outputstep)
     //float scalefactor)
 {
     unsigned int x1 = get_global_id(0);
     unsigned int y1 = get_global_id(1);
     int p_offset;
     int m, n;
     int result;
     int counter;
     float mean, variance_norm_factor;
     for(int i=0;i<loopcount;i++)
     {
         constant GpuHidHaarClassifierCascade * cascade = _cascade + i;
         global int * candidate = _candidate + i*outputstep;
         int window_width = cascade->p1 - cascade->p0;
         int window_height = window_width;
         result = 1;
         counter = 0;
         unsigned int x = mul24(x1,ystep);
         unsigned int y = mul24(y1,ystep);
         if((x < cols - window_width - 1) && (y < rows - window_height -1))
         {
             global GpuHidHaarStageClassifier *stagecascade = stagecascadeptr +cascade->count*i+ start_stage;
             //global GpuHidHaarClassifier *classifier = classifierptr;
             global GpuHidHaarTreeNode *node = nodeptr + nodenum*i;
             p_offset = mad24(y, pixel_step, x);// modify
             mean = (*(sum + p_offset + (int)cascade->p0) - *(sum + p_offset + (int)cascade->p1) -
                     *(sum + p_offset + (int)cascade->p2) + *(sum + p_offset + (int)cascade->p3))
                     *cascade->inv_window_area;
             variance_norm_factor = *(sqsum + p_offset + cascade->p0) - *(sqsum + cascade->p1 + p_offset) -
                     *(sqsum + p_offset + cascade->p2) + *(sqsum + cascade->p3 + p_offset);
             variance_norm_factor = variance_norm_factor * cascade->inv_window_area - mean * mean;
             variance_norm_factor = variance_norm_factor >=0.f ? sqrt(variance_norm_factor) : 1;//modify
             // if( cascade->is_stump_based )
             //{
             for( m = start_stage; m < end_stage; m++ )
             {
                 float stage_sum = 0.f;
                 float t, classsum;
                 GpuHidHaarTreeNode t1;
                 //#pragma unroll
                 for( n = 0; n < stagecascade->count; n++ )
                 {
                     t1 = *(node + counter);
                     t = t1.threshold * variance_norm_factor;
                     classsum = calc_sum1(t1, p_offset, 0) * t1.weight[0] + calc_sum1(t1, p_offset, 1) * t1.weight[1];

@@ -531,27 +536,27 @@ __kernel void gpuRunHaarClassifierCascade_ScaleWindow(
                     stage_sum += classsum >= t ? t1.alpha[1] : t1.alpha[0];// modify
                     counter++;
                 }
                 if (stage_sum < stagecascade->threshold)
                 {
                     result = 0;
                     break;
                 }
                 stagecascade++;
             }
             if(result)
             {
                 candidate[4 * (y1 * detect_width + x1)] = x;
                 candidate[4 * (y1 * detect_width + x1) + 1] = y;
                 candidate[4 * (y1 * detect_width + x1) +2] = window_width;
                 candidate[4 * (y1 * detect_width + x1) + 3] = window_height;
             }
             //}
         }
     }
 }
 */
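Most of the kernel-side edits above appear to be formatting; the substantive ones add or move barrier(CLK_LOCAL_MEM_FENCE) around the read of the shared lclcount[0] queue counter, so that every work-item takes its snapshot of the counter before a further barrier allows it to be rewritten for the next stage. Below is a minimal, hypothetical OpenCL sketch of that read-then-barrier pattern; the kernel name, output buffer, and the constant 16 are invented for illustration and are not code from this commit.

    // Hypothetical sketch of the read-then-barrier pattern; only the barrier
    // placement mirrors the commit, every identifier here is made up.
    __kernel void queue_loop_sketch(__global int *out)
    {
        __local int lclcount[1];
        int lid = get_local_id(0);

        if (lid == 0)
            lclcount[0] = 16;             // one work-item fills the shared counter
        barrier(CLK_LOCAL_MEM_FENCE);     // make the counter visible to the whole group

        int queuecount = lclcount[0];     // every work-item reads its snapshot
        barrier(CLK_LOCAL_MEM_FENCE);     // no one may overwrite it until all have read

        if (lid == 0)
            lclcount[0] = 0;              // now it is safe to reset for the next pass

        out[get_global_id(0)] = queuecount;
    }

Without the second barrier, a fast work-item could reset or update lclcount[0] before a slower one has read it; that kind of scheduling-dependent race would show up differently across vendors, which is consistent with the device-specific problems the commit message describes.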