Commit 6728b40a authored Jan 17, 2018 by Alexander Alekhin
Merge pull request #10602 from pengli:dnn
parents 4dc788ff e77af4ae
Showing 7 changed files with 390 additions and 49 deletions
modules/dnn/src/layers/batch_norm_layer.cpp   +22  -22
modules/dnn/src/layers/elementwise_layers.cpp +75  -8
modules/dnn/src/layers/mvn_layer.cpp          +84  -0
modules/dnn/src/opencl/activations.cl         +9   -0
modules/dnn/src/opencl/batchnorm.cl           +77  -19
modules/dnn/src/opencl/mvn.cl                 +112 -0
modules/dnn/test/test_layers.cpp              +11  -0
modules/dnn/src/layers/batch_norm_layer.cpp
@@ -12,6 +12,7 @@ Implementation of Batch Normalization layer.
 #include "../precomp.hpp"
 #include "op_halide.hpp"
 #include <opencv2/dnn/shape_utils.hpp>
+#include "opencl_kernels_dnn.hpp"
 
 namespace cv
 {
@@ -22,7 +23,7 @@ class BatchNormLayerImpl : public BatchNormLayer
 {
 public:
     Mat weights_, bias_;
-    Mat weightMat, biasMat;
+    UMat umat_weight, umat_bias;
 
     BatchNormLayerImpl(const LayerParams& params)
     {
@@ -80,6 +81,9 @@ public:
             dstWeightsData[i] = w;
             dstBiasData[i] = (hasBias ? biasData[i] : 0.0f) - w * meanData[i] * varMeanScale;
         }
+
+        umat_weight = weights_.getUMat(ACCESS_READ);
+        umat_bias = bias_.getUMat(ACCESS_READ);
     }
 
     void getScaleShift(Mat& scale, Mat& shift) const
@@ -97,25 +101,6 @@ public:
         return true;
     }
 
-    void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
-    {
-        if (inputs[0]->dims == 4)
-        {
-            int groups = inputs[0]->size[0];
-            int channels = inputs[0]->size[1];
-            int rows = inputs[0]->size[2];
-            int cols = inputs[0]->size[3];
-            MatShape s = shape(groups * channels, rows * cols);
-            weightMat = Mat(s[0], s[1], CV_32FC1);
-            biasMat = Mat(s[0], s[1], CV_32FC1);
-            for (int n = 0; n < s[0]; n++)
-            {
-                weightMat.row(n).setTo(weights_.at<float>(n % channels));
-                biasMat.row(n).setTo(bias_.at<float>(n % channels));
-            }
-        }
-    }
-
     virtual bool supportBackend(int backendId)
     {
         return backendId == DNN_BACKEND_DEFAULT ||
@@ -155,8 +140,23 @@ public:
             MatShape s = shape(groups * channels, rows * cols);
             UMat src = inputs[ii].reshape(1, s.size(), &s[0]);
             UMat dst = outputs[ii].reshape(1, s.size(), &s[0]);
-            multiply(src, weightMat, dst);
-            add(dst, biasMat, dst);
+            int number = (s[1] % 8 == 0) ? 8 : ((s[1] % 4 == 0) ? 4 : 1);
+            String buildopt = format("-DNUM=%d ", number);
+            String kname = format("batch_norm%d", number);
+            ocl::Kernel kernel(kname.c_str(), ocl::dnn::batchnorm_oclsrc, buildopt);
+            if (kernel.empty())
+                return false;
+            size_t global[] = { (size_t)s[0], (size_t)(s[1] / number) };
+            kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
+            kernel.set(1, (int)s[0]);
+            kernel.set(2, (int)s[1]);
+            kernel.set(3, (int)channels);
+            kernel.set(4, ocl::KernelArg::PtrReadOnly(umat_weight));
+            kernel.set(5, ocl::KernelArg::PtrReadOnly(umat_bias));
+            kernel.set(6, ocl::KernelArg::PtrWriteOnly(dst));
+            bool ret = kernel.run(2, global, NULL, false);
+            if (!ret)
+                return false;
         }
     }
 
     return true;
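Note on the new OpenCL path: the folded per-channel scale (weights_) and shift (bias_) are now applied by a single 2D kernel over the flattened (groups*channels) x (rows*cols) view instead of the full-size weightMat/biasMat multiply/add. A plain C++ reference of what batch_norm1/4/8 computes, as a sketch only (function and variable names are illustrative, not part of the patch):

    // Every element of flattened row r is scaled/shifted by the coefficients of
    // channel r % channels, matching dst_vec = src_vec * w + b in batchnorm.cl.
    static void batchNormReference(const float* src, float* dst,
                                   int flatRows, int flatCols, int channels,
                                   const float* weight, const float* bias)
    {
        for (int r = 0; r < flatRows; r++)
        {
            float w = weight[r % channels];
            float b = bias[r % channels];
            for (int c = 0; c < flatCols; c++)
                dst[r * flatCols + c] = src[r * flatCols + c] * w + b;
        }
    }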
modules/dnn/src/layers/elementwise_layers.cpp
@@ -267,7 +267,6 @@ struct ReLUFunctor
     bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
     {
-        size_t wgSize = ocl::Device::getDefault().maxWorkGroupSize();
         std::vector<UMat> inputs;
         std::vector<UMat> outputs;
@@ -287,7 +286,7 @@ struct ReLUFunctor
             kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
 
             size_t gSize = src.total();
-            CV_Assert(kernel.run(1, &gSize, &wgSize, false));
+            CV_Assert(kernel.run(1, &gSize, NULL, false));
         }
 
         return true;
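The ReLU change above only drops the forced work-group size: passing NULL as the local size to cv::ocl::Kernel::run lets the OpenCL runtime choose the work-group geometry instead of always using maxWorkGroupSize(). Minimal sketch (assuming kernel is a valid cv::ocl::Kernel and src a CV_32F cv::UMat):

    size_t gSize = src.total();                    // one work-item per element
    bool ok = kernel.run(1, &gSize, NULL, false);  // NULL local size: runtime decides
    CV_Assert(ok);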
@@ -395,8 +394,28 @@ struct TanHFunctor
 #ifdef HAVE_OPENCL
     bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
     {
-        // TODO: implement OCL version
-        return false;
+        std::vector<UMat> inputs;
+        std::vector<UMat> outputs;
+
+        inps.getUMatVector(inputs);
+        outs.getUMatVector(outputs);
+        String buildopt = oclGetTMacro(inputs[0]);
+
+        for (size_t i = 0; i < inputs.size(); i++)
+        {
+            UMat& src = inputs[i];
+            UMat& dst = outputs[i];
+
+            ocl::Kernel kernel("TanHForward", ocl::dnn::activations_oclsrc, buildopt);
+            kernel.set(0, (int)src.total());
+            kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
+            kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
+
+            size_t gSize = src.total();
+            CV_Assert(kernel.run(1, &gSize, NULL, false));
+        }
+
+        return true;
     }
 #endif
@@ -594,8 +613,31 @@ struct PowerFunctor
 #ifdef HAVE_OPENCL
     bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
     {
-        // TODO: implement OCL version
-        return false;
+        std::vector<UMat> inputs;
+        std::vector<UMat> outputs;
+
+        inps.getUMatVector(inputs);
+        outs.getUMatVector(outputs);
+        String buildopt = oclGetTMacro(inputs[0]);
+
+        for (size_t i = 0; i < inputs.size(); i++)
+        {
+            UMat& src = inputs[i];
+            UMat& dst = outputs[i];
+
+            ocl::Kernel kernel("PowForward", ocl::dnn::activations_oclsrc, buildopt);
+            kernel.set(0, (int)src.total());
+            kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
+            kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
+            kernel.set(3, (float)power);
+            kernel.set(4, (float)scale);
+            kernel.set(5, (float)shift);
+
+            size_t gSize = src.total();
+            CV_Assert(kernel.run(1, &gSize, NULL, false));
+        }
+
+        return true;
     }
 #endif
@@ -624,9 +666,11 @@ struct ChannelsPReLUFunctor
 {
     typedef ChannelsPReLULayer Layer;
     Mat scale;
+    UMat scale_umat;
 
     explicit ChannelsPReLUFunctor(const Mat& scale_=Mat()) : scale(scale_)
     {
+        scale_umat = scale.getUMat(ACCESS_READ);
     }
 
     void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@@ -669,8 +713,31 @@ struct ChannelsPReLUFunctor
 #ifdef HAVE_OPENCL
     bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
     {
-        // TODO: implement OCL version
-        return false;
+        std::vector<UMat> inputs;
+        std::vector<UMat> outputs;
+
+        inps.getUMatVector(inputs);
+        outs.getUMatVector(outputs);
+        String buildopt = oclGetTMacro(inputs[0]);
+
+        for (size_t i = 0; i < inputs.size(); i++)
+        {
+            UMat& src = inputs[i];
+            UMat& dst = outputs[i];
+
+            ocl::Kernel kernel("PReLUForward", ocl::dnn::activations_oclsrc, buildopt);
+            kernel.set(0, (int)src.total());
+            kernel.set(1, (int)src.size[1]);
+            kernel.set(2, (int)total(shape(src), 2));
+            kernel.set(3, ocl::KernelArg::PtrReadOnly(src));
+            kernel.set(4, ocl::KernelArg::PtrWriteOnly(dst));
+            kernel.set(5, ocl::KernelArg::PtrReadOnly(scale_umat));
+
+            size_t gSize = src.total();
+            CV_Assert(kernel.run(1, &gSize, NULL, false));
+        }
+
+        return true;
     }
 #endif
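The three new applyOCL bodies (TanH, Power, PReLU) follow one pattern: fetch UMat views of the blobs, build a type define for the kernel source, create an element-wise cv::ocl::Kernel, and run it over src.total() work-items. A self-contained sketch of that pattern with an inline stand-in kernel (illustrative only; the kernel source and names below are assumptions replacing activations.cl and the internal oclGetTMacro helper):

    #include <opencv2/core.hpp>
    #include <opencv2/core/ocl.hpp>
    #include <cstdio>

    int main()
    {
        if (!cv::ocl::haveOpenCL())
            return 0;  // no OpenCL device available, nothing to demonstrate

        // Stand-in for activations.cl: a trivial element-wise kernel.
        static const char* src =
            "__kernel void ScaleForward(const int count, __global const T* in,\n"
            "                           __global T* out, const T factor) {\n"
            "    int i = get_global_id(0);\n"
            "    if (i < count) out[i] = in[i] * factor;\n"
            "}\n";
        cv::ocl::ProgramSource prog(src);

        cv::UMat input = cv::UMat::ones(4, 256, CV_32F);
        cv::UMat output(4, 256, CV_32F);

        // Same role as oclGetTMacro(inputs[0]): define T for the kernel source.
        cv::String buildopt = "-DT=float ";
        cv::ocl::Kernel kernel("ScaleForward", prog, buildopt);
        if (kernel.empty())
            return 1;

        kernel.set(0, (int)input.total());
        kernel.set(1, cv::ocl::KernelArg::PtrReadOnly(input));
        kernel.set(2, cv::ocl::KernelArg::PtrWriteOnly(output));
        kernel.set(3, 2.0f);

        size_t gSize = input.total();
        bool ok = kernel.run(1, &gSize, NULL, false);  // NULL: runtime picks local size
        std::printf("kernel %s\n", ok ? "succeeded" : "failed");
        return ok ? 0 : 1;
    }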
modules/dnn/src/layers/mvn_layer.cpp
@@ -43,6 +43,8 @@
 #include "../precomp.hpp"
 #include "layers_common.hpp"
 #include <opencv2/dnn/shape_utils.hpp>
+#include "math_functions.hpp"
+#include "opencl_kernels_dnn.hpp"
 
 namespace cv
 {
@@ -60,11 +62,93 @@ public:
         eps = params.get<double>("eps", 1e-9);
     }
 
+#ifdef HAVE_OPENCL
+    bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
+    {
+        std::vector<UMat> inputs;
+        std::vector<UMat> outputs;
+
+        inputs_.getUMatVector(inputs);
+        outputs_.getUMatVector(outputs);
+
+        for (size_t inpIdx = 0; inpIdx < inputs.size(); inpIdx++)
+        {
+            UMat &inpBlob = inputs[inpIdx];
+            UMat &outBlob = outputs[inpIdx];
+
+            int splitDim = (acrossChannels) ? 1 : 2;
+            int i, newRows = 1;
+            for (i = 0; i < splitDim; i++)
+                newRows *= inpBlob.size[i];
+            MatShape s = shape(newRows, inpBlob.total() / newRows);
+
+            UMat& inpMat = inpBlob;
+            UMat& outMat = outBlob;
+            UMat oneMat = UMat::ones(s[1], 1, CV_32F);
+            UMat meanMat = UMat(s[0], 1, CV_32F);
+            UMat devMat = UMat(s[0], 1, CV_32F);
+            UMat tmpMat = UMat(s[0], s[1], CV_32F);
+            float alpha = 1.0f / s[1];
+
+            bool ret = ocl4dnn::ocl4dnnGEMV<float>(ocl4dnn::CblasNoTrans, s[0], s[1], alpha,
+                                                   inpMat, 0, oneMat, 0, 0.0f, meanMat, 0);
+            if (!ret)
+                return false;
+
+            int number = (s[1] % 8 == 0) ? 8 : ((s[1] % 4 == 0) ? 4 : 1);
+            String buildopt = format("-DNUM=%d ", number);
+            String kname = format("calc_mean%d", number);
+            ocl::Kernel kernel(kname.c_str(), ocl::dnn::mvn_oclsrc, buildopt);
+            if (kernel.empty())
+                return false;
+
+            size_t global[] = { (size_t)s[0], (size_t)(s[1] / number) };
+            kernel.set(0, ocl::KernelArg::PtrReadOnly(inpMat));
+            kernel.set(1, (int)s[0]);
+            kernel.set(2, (int)s[1]);
+            kernel.set(3, ocl::KernelArg::PtrReadOnly(meanMat));
+            kernel.set(4, ocl::KernelArg::PtrWriteOnly(tmpMat));
+            ret = kernel.run(2, global, NULL, false);
+            if (!ret)
+                return false;
+
+            if (normVariance)
+            {
+                ret = ocl4dnn::ocl4dnnGEMV<float>(ocl4dnn::CblasNoTrans, s[0], s[1], alpha,
+                                                  tmpMat, 0, oneMat, 0, 0.0f, devMat, 0);
+                if (!ret)
+                    return false;
+            }
+
+            kname = format("mvn%d", number);
+            if (normVariance)
+                buildopt += "-DNORM_VARIANCE";
+            ocl::Kernel kernel1(kname.c_str(), ocl::dnn::mvn_oclsrc, buildopt);
+            if (kernel1.empty())
+                return false;
+            kernel1.set(0, ocl::KernelArg::PtrReadOnly(inpMat));
+            kernel1.set(1, (int)s[0]);
+            kernel1.set(2, (int)s[1]);
+            kernel1.set(3, (float)eps);
+            kernel1.set(4, ocl::KernelArg::PtrReadOnly(meanMat));
+            kernel1.set(5, ocl::KernelArg::PtrReadOnly(devMat));
+            kernel1.set(6, ocl::KernelArg::PtrWriteOnly(outMat));
+            ret = kernel1.run(2, global, NULL, false);
+            if (!ret)
+                return false;
+        }
+        return true;
+    }
+#endif
+
     void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr)
     {
         CV_TRACE_FUNCTION();
         CV_TRACE_ARG_VALUE(name, "name", name.c_str());
 
+        CV_OCL_RUN((preferableTarget == DNN_TARGET_OPENCL) &&
+                   OCL_PERFORMANCE_CHECK(ocl::Device::getDefault().isIntel()),
+                   forward_ocl(inputs_arr, outputs_arr, internals_arr))
+
         Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
     }
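forward_ocl above computes, per row of the flattened view: the mean as a GEMV against a ones vector scaled by 1/cols, the mean squared deviation through calc_mean%d plus a second GEMV, and finally (x - mean) / (eps + sqrt(var)) when normVariance is set (plain mean subtraction otherwise). A plain cv::Mat reference of the same per-row math, as a sketch only (assumes a 2D CV_32F input; names are illustrative):

    static void mvnReference(const cv::Mat& src, cv::Mat& dst, bool normVariance, float eps)
    {
        dst.create(src.size(), CV_32F);
        for (int r = 0; r < src.rows; r++)
        {
            cv::Scalar mean, stddev;
            cv::meanStdDev(src.row(r), mean, stddev);  // population stddev, i.e. sqrt(mean of squared deviations)
            float alpha = normVariance ? 1.0f / (eps + (float)stddev[0]) : 1.0f;
            dst.row(r) = (src.row(r) - mean[0]) * alpha;
        }
    }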
modules/dnn/src/opencl/activations.cl
@@ -54,6 +54,15 @@ __kernel void ReLUForward(const int count, __global const T* in, __global T* out
 #endif
 }
 
+__kernel void PReLUForward(const int count, const int channels, const int plane_size,
+                           __global const T* in, __global T* out, __global const T* slope_data)
+{
+    int index = get_global_id(0);
+    int c = (index / plane_size) % channels;
+    if (index < count)
+        out[index] = in[index] > 0 ? in[index] : in[index] * slope_data[c];
+}
+
 __kernel void TanHForward(const int count, __global T* in, __global T* out) {
     int index = get_global_id(0);
     if (index < count)
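PReLUForward picks the slope per channel: for an NCHW blob flattened to count elements with plane_size = H*W, element index belongs to channel (index / plane_size) % channels, which is what the host side's kernel.set(1, src.size[1]) and kernel.set(2, total(shape(src), 2)) supply. A plain C++ reference of the same semantics (illustrative, not part of the patch):

    static void preluReference(const float* in, float* out, int count,
                               int channels, int planeSize, const float* slope)
    {
        for (int i = 0; i < count; i++)
        {
            int c = (i / planeSize) % channels;             // channel of element i
            out[i] = in[i] > 0 ? in[i] : in[i] * slope[c];  // negative part uses per-channel slope
        }
    }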
modules/dnn/src/opencl/batchnorm.cl
-__kernel void batchnorm(__global const T *src, int src_offset,
-                        __global const float *meanMat,
-                        float varMeanScale,
-                        __global const float *invStdMat,
-                        __global const float *weight,
-                        __global const float *bias,
-                        int hasWeight, int hasBias,
-                        int width, int height, int channel,
-                        __global T *dst, int dst_offset)
-{
-    int x = get_global_id(0);
-    int y = get_global_id(1);
-    int c = get_global_id(2);
-    if (x >= width || y >= height || c >= channel)
-        return;
-    float mean = meanMat[c] * varMeanScale;
-    float invstd = invStdMat[c];
-    float w = hasWeight ? weight[c] : 1;
-    float b = hasBias ? bias[c] : 0;
-    int index = y * width + x + c * width * height;
-    T val = (src[index + src_offset] - mean) * w * invstd + b;
-    dst[index + dst_offset] = val;
-}
+/*M///////////////////////////////////////////////////////////////////////////////////////
+//
+//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
+//
+//  By downloading, copying, installing or using the software you agree to this license.
+//  If you do not agree to this license, do not download, install,
+//  copy or use the software.
+//
+//                          License Agreement
+//               For Open Source Computer Vision Library
+//
+// Copyright (C) 2017, Intel Corporation, all rights reserved.
+// Copyright (c) 2016-2017 Fabian David Tschopp, all rights reserved.
+// Third party copyrights are property of their respective owners.
+//
+// Redistribution and use in source and binary forms, with or without modification,
+// are permitted provided that the following conditions are met:
+//
+//   * Redistribution's of source code must retain the above copyright notice,
+//     this list of conditions and the following disclaimer.
+//
+//   * Redistribution's in binary form must reproduce the above copyright notice,
+//     this list of conditions and the following disclaimer in the documentation
+//     and/or other materials provided with the distribution.
+//
+//   * The name of the copyright holders may not be used to endorse or promote products
+//     derived from this software without specific prior written permission.
+//
+// This software is provided by the copyright holders and contributors "as is" and
+// any express or implied warranties, including, but not limited to, the implied
+// warranties of merchantability and fitness for a particular purpose are disclaimed.
+// In no event shall the Intel Corporation or contributors be liable for any direct,
+// indirect, incidental, special, exemplary, or consequential damages
+// (including, but not limited to, procurement of substitute goods or services;
+// loss of use, data, or profits; or business interruption) however caused
+// and on any theory of liability, whether in contract, strict liability,
+// or tort (including negligence or otherwise) arising in any way out of
+// the use of this software, even if advised of the possibility of such damage.
+//
+//M*/
+
+#define Dtype float
+#define Dtype4 float4
+#define Dtype8 float8
+
+#if NUM == 8
+    #define load(src, index) vload8(0, src + index)
+    #define store(vec, dst, index) vstore8(vec, 0, dst + index)
+    #define vec_type Dtype8
+    #define BATCH_NORM batch_norm8
+#elif NUM == 4
+    #define load(src, index) vload4(0, src + index)
+    #define store(vec, dst, index) vstore4(vec, 0, dst + index)
+    #define vec_type Dtype4
+    #define BATCH_NORM batch_norm4
+#elif NUM == 1
+    #define load(src, index) src[index]
+    #define store(vec, dst, index) dst[index] = vec
+    #define vec_type Dtype
+    #define BATCH_NORM batch_norm1
+#endif
+
+__kernel void BATCH_NORM(__global const Dtype* src,
+                         const int rows,
+                         const int cols,
+                         const int channels,
+                         __global const Dtype* weight,
+                         __global const Dtype* bias,
+                         __global Dtype* dst)
+{
+    int x = get_global_id(0);
+    int y = get_global_id(1) * NUM;
+    int index = x * cols + y;
+
+    if (x >= rows || y >= cols)
+        return;
+
+    Dtype w = weight[x % channels];
+    Dtype b = bias[x % channels];
+    vec_type src_vec = load(src, index);
+    vec_type dst_vec = src_vec * w + (vec_type)b;
+    store(dst_vec, dst, index);
+}
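Both this file and the new mvn.cl are compiled with -DNUM=8, 4 or 1; the host picks the widest vector width that divides the row length, launches a {rows, cols/NUM} NDRange, and each work-item loads/stores NUM consecutive floats of its row. A small C++ illustration of that host-side choice and indexing (hypothetical helpers, for clarity only):

    // Mirrors the layer code above: prefer float8, then float4, else scalar.
    static int chooseVectorWidth(int cols)
    {
        return (cols % 8 == 0) ? 8 : ((cols % 4 == 0) ? 4 : 1);
    }

    // Work-item (x, y) of a rows x (cols/num) range starts at src[x*cols + y*num]
    // and covers the next num elements.
    static int firstIndexCovered(int x, int y, int cols, int num)
    {
        return x * cols + y * num;
    }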
modules/dnn/src/opencl/mvn.cl
new file mode 100644
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//                          License Agreement
//               For Open Source Computer Vision Library
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Copyright (c) 2016-2017 Fabian David Tschopp, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#define Dtype float
#define Dtype4 float4
#define Dtype8 float8

#if NUM == 8
    #define load(src, index) vload8(0, src + index)
    #define store(vec, dst, index) vstore8(vec, 0, dst + index)
    #define vec_type Dtype8
    #define CALC_MEAN calc_mean8
    #define MVN mvn8
#elif NUM == 4
    #define load(src, index) vload4(0, src + index)
    #define store(vec, dst, index) vstore4(vec, 0, dst + index)
    #define vec_type Dtype4
    #define CALC_MEAN calc_mean4
    #define MVN mvn4
#elif NUM == 1
    #define load(src, index) src[index]
    #define store(vec, dst, index) dst[index] = vec
    #define vec_type Dtype
    #define CALC_MEAN calc_mean1
    #define MVN mvn1
#endif

__kernel void CALC_MEAN(__global const Dtype* src,
                        const int rows,
                        const int cols,
                        __global Dtype* mean,
                        __global Dtype* dst)
{
    int x = get_global_id(0);
    int y = get_global_id(1) * NUM;
    int index = x * cols + y;

    if (x >= rows || y >= cols)
        return;

    Dtype mean_val = mean[x];
    vec_type src_vec = load(src, index);
    vec_type dst_vec = pow(src_vec - (vec_type)mean_val, 2);
    store(dst_vec, dst, index);
}

__kernel void MVN(__global const Dtype* src,
                  const int rows,
                  const int cols,
                  const Dtype eps,
                  __global const Dtype* mean,
                  __global const Dtype* dev,
                  __global Dtype* dst)
{
    int x = get_global_id(0);
    int y = get_global_id(1) * NUM;
    int index = x * cols + y;

    if (x >= rows || y >= cols)
        return;

    Dtype mean_val = mean[x];
    Dtype dev_val = sqrt(dev[x]);
    Dtype alpha;
#ifdef NORM_VARIANCE
    alpha = 1 / (eps + dev_val);
#else
    alpha = 1;
#endif
    vec_type src_vec = load(src, index) - (vec_type)mean_val;
    vec_type dst_vec = src_vec * alpha;
    store(dst_vec, dst, index);
}
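Despite its name, calc_mean%d writes the squared deviation (x - mean)^2 of every element; the per-row variance is produced afterwards by the second ocl4dnnGEMV reduction in mvn_layer.cpp, and mvn%d then reads it as dev[x]. A tiny numeric check of that contract (plain C++, illustrative):

    #include <cstdio>
    #include <cmath>

    int main()
    {
        // One flattened row of four values.
        float row[4] = { 1.f, 2.f, 3.f, 6.f };
        float mean = (1.f + 2.f + 3.f + 6.f) / 4.f;          // GEMV with ones, alpha = 1/cols
        float var = 0.f;
        for (int i = 0; i < 4; i++)
            var += (row[i] - mean) * (row[i] - mean) / 4.f;  // reduce the calc_mean output
        float eps = 1e-9f;
        float alpha = 1.f / (eps + std::sqrt(var));          // NORM_VARIANCE branch of mvn.cl
        for (int i = 0; i < 4; i++)
            std::printf("%f ", (row[i] - mean) * alpha);     // normalized row
        std::printf("\n");
        return 0;
    }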
modules/dnn/test/test_layers.cpp
@@ -202,6 +202,11 @@ TEST(Layer_Test_MVN, Accuracy)
     testLayerUsingCaffeModels("layer_mvn");
 }
 
+OCL_TEST(Layer_Test_MVN, Accuracy)
+{
+    testLayerUsingCaffeModels("layer_mvn", DNN_TARGET_OPENCL);
+}
+
 void testReshape(const MatShape& inputShape, const MatShape& targetShape,
                  int axis = 0, int num_axes = -1,
                  MatShape mask = MatShape())
@@ -331,6 +336,12 @@ TEST(Layer_Test_PReLU, Accuracy)
     testLayerUsingCaffeModels("layer_prelu_fc", DNN_TARGET_CPU, true, false);
 }
 
+OCL_TEST(Layer_Test_PReLU, Accuracy)
+{
+    testLayerUsingCaffeModels("layer_prelu", DNN_TARGET_OPENCL, true);
+    testLayerUsingCaffeModels("layer_prelu_fc", DNN_TARGET_OPENCL, true, false);
+}
+
 //template<typename XMat>
 //static void test_Layer_Concat()
 //{
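The new OCL_TEST cases rerun the same Caffe layer models with DNN_TARGET_OPENCL. Outside the test harness, the equivalent switch is setPreferableTarget on a loaded network; a minimal sketch (model paths are placeholders):

    #include <opencv2/dnn.hpp>

    cv::dnn::Net loadWithOpenCLTarget(const std::string& proto, const std::string& model)
    {
        cv::dnn::Net net = cv::dnn::readNetFromCaffe(proto, model);
        // Prefer the OpenCL target; layers without an OpenCL path (or devices that
        // fail the CV_OCL_RUN checks above) fall back to the CPU implementation.
        net.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL);
        return net;
    }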