Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in / Register
Toggle navigation
O
opencv_contrib
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Packages
Packages
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
submodule
opencv_contrib
Commits
12c9b0ea
Commit
12c9b0ea
authored
Jul 07, 2016
by
vbystricky
Committed by
Anna Petrovicheva
Jul 22, 2016
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
Add Eltwise layer. Add case of FCN8s into sample for semantic segmentation
parent
048c3fab
Hide whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
855 additions
and
25 deletions
+855
-25
fcn8s-heavy-pascal.prototxt
modules/dnn/samples/fcn8s-heavy-pascal.prototxt
+612
-0
fcn_semsegm.cpp
modules/dnn/samples/fcn_semsegm.cpp
+22
-20
init.cpp
modules/dnn/src/init.cpp
+2
-0
crop_layer.cpp
modules/dnn/src/layers/crop_layer.cpp
+5
-5
eltwise_layer.cpp
modules/dnn/src/layers/eltwise_layer.cpp
+146
-0
eltwise_layer.hpp
modules/dnn/src/layers/eltwise_layer.hpp
+68
-0
No files found.
modules/dnn/samples/fcn8s-heavy-pascal.prototxt
0 → 100755
View file @
12c9b0ea
#
# This prototxt is based on voc-fcn8s/val.prototxt file from
# https://github.com/shelhamer/fcn.berkeleyvision.org, which is distributed under
# Caffe (BSD) license:
# http://caffe.berkeleyvision.org/model_zoo.html#bvlc-model-license
#
name: "voc-fcn8s"
input: "data"
input_dim: 1
input_dim: 3
input_dim: 500
input_dim: 500
layer {
name: "conv1_1"
type: "Convolution"
bottom: "data"
top: "conv1_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 100
kernel_size: 3
stride: 1
}
}
layer {
name: "relu1_1"
type: "ReLU"
bottom: "conv1_1"
top: "conv1_1"
}
layer {
name: "conv1_2"
type: "Convolution"
bottom: "conv1_1"
top: "conv1_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu1_2"
type: "ReLU"
bottom: "conv1_2"
top: "conv1_2"
}
layer {
name: "pool1"
type: "Pooling"
bottom: "conv1_2"
top: "pool1"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv2_1"
type: "Convolution"
bottom: "pool1"
top: "conv2_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu2_1"
type: "ReLU"
bottom: "conv2_1"
top: "conv2_1"
}
layer {
name: "conv2_2"
type: "Convolution"
bottom: "conv2_1"
top: "conv2_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu2_2"
type: "ReLU"
bottom: "conv2_2"
top: "conv2_2"
}
layer {
name: "pool2"
type: "Pooling"
bottom: "conv2_2"
top: "pool2"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv3_1"
type: "Convolution"
bottom: "pool2"
top: "conv3_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_1"
type: "ReLU"
bottom: "conv3_1"
top: "conv3_1"
}
layer {
name: "conv3_2"
type: "Convolution"
bottom: "conv3_1"
top: "conv3_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_2"
type: "ReLU"
bottom: "conv3_2"
top: "conv3_2"
}
layer {
name: "conv3_3"
type: "Convolution"
bottom: "conv3_2"
top: "conv3_3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 256
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu3_3"
type: "ReLU"
bottom: "conv3_3"
top: "conv3_3"
}
layer {
name: "pool3"
type: "Pooling"
bottom: "conv3_3"
top: "pool3"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv4_1"
type: "Convolution"
bottom: "pool3"
top: "conv4_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu4_1"
type: "ReLU"
bottom: "conv4_1"
top: "conv4_1"
}
layer {
name: "conv4_2"
type: "Convolution"
bottom: "conv4_1"
top: "conv4_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu4_2"
type: "ReLU"
bottom: "conv4_2"
top: "conv4_2"
}
layer {
name: "conv4_3"
type: "Convolution"
bottom: "conv4_2"
top: "conv4_3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu4_3"
type: "ReLU"
bottom: "conv4_3"
top: "conv4_3"
}
layer {
name: "pool4"
type: "Pooling"
bottom: "conv4_3"
top: "pool4"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "conv5_1"
type: "Convolution"
bottom: "pool4"
top: "conv5_1"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu5_1"
type: "ReLU"
bottom: "conv5_1"
top: "conv5_1"
}
layer {
name: "conv5_2"
type: "Convolution"
bottom: "conv5_1"
top: "conv5_2"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu5_2"
type: "ReLU"
bottom: "conv5_2"
top: "conv5_2"
}
layer {
name: "conv5_3"
type: "Convolution"
bottom: "conv5_2"
top: "conv5_3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 512
pad: 1
kernel_size: 3
stride: 1
}
}
layer {
name: "relu5_3"
type: "ReLU"
bottom: "conv5_3"
top: "conv5_3"
}
layer {
name: "pool5"
type: "Pooling"
bottom: "conv5_3"
top: "pool5"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "fc6"
type: "Convolution"
bottom: "pool5"
top: "fc6"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 4096
pad: 0
kernel_size: 7
stride: 1
}
}
layer {
name: "relu6"
type: "ReLU"
bottom: "fc6"
top: "fc6"
}
layer {
name: "fc7"
type: "Convolution"
bottom: "fc6"
top: "fc7"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 4096
pad: 0
kernel_size: 1
stride: 1
}
}
layer {
name: "relu7"
type: "ReLU"
bottom: "fc7"
top: "fc7"
}
layer {
name: "score_fr"
type: "Convolution"
bottom: "fc7"
top: "score_fr"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 21
pad: 0
kernel_size: 1
}
}
layer {
name: "upscore2"
type: "Deconvolution"
bottom: "score_fr"
top: "upscore2"
param {
lr_mult: 0
}
convolution_param {
num_output: 21
bias_term: false
kernel_size: 4
stride: 2
}
}
layer {
name: "score_pool4"
type: "Convolution"
bottom: "pool4"
top: "score_pool4"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 21
pad: 0
kernel_size: 1
}
}
layer {
name: "score_pool4c"
type: "Crop"
bottom: "score_pool4"
bottom: "upscore2"
top: "score_pool4c"
crop_param {
axis: 2
offset: 5
}
}
layer {
name: "fuse_pool4"
type: "Eltwise"
bottom: "upscore2"
bottom: "score_pool4c"
top: "fuse_pool4"
eltwise_param {
operation: SUM
}
}
layer {
name: "upscore_pool4"
type: "Deconvolution"
bottom: "fuse_pool4"
top: "upscore_pool4"
param {
lr_mult: 0
}
convolution_param {
num_output: 21
bias_term: false
kernel_size: 4
stride: 2
}
}
layer {
name: "score_pool3"
type: "Convolution"
bottom: "pool3"
top: "score_pool3"
param {
lr_mult: 1
decay_mult: 1
}
param {
lr_mult: 2
decay_mult: 0
}
convolution_param {
num_output: 21
pad: 0
kernel_size: 1
}
}
layer {
name: "score_pool3c"
type: "Crop"
bottom: "score_pool3"
bottom: "upscore_pool4"
top: "score_pool3c"
crop_param {
axis: 2
offset: 9
}
}
layer {
name: "fuse_pool3"
type: "Eltwise"
bottom: "upscore_pool4"
bottom: "score_pool3c"
top: "fuse_pool3"
eltwise_param {
operation: SUM
}
}
layer {
name: "upscore8"
type: "Deconvolution"
bottom: "fuse_pool3"
top: "upscore8"
param {
lr_mult: 0
}
convolution_param {
num_output: 21
bias_term: false
kernel_size: 16
stride: 8
}
}
layer {
name: "score"
type: "Crop"
bottom: "upscore8"
bottom: "data"
top: "score"
crop_param {
axis: 2
offset: 31
}
}
modules/dnn/samples/fcn_semsegm.cpp
View file @
12c9b0ea
...
...
@@ -9,26 +9,28 @@ using namespace cv::dnn;
#include <cstdlib>
using
namespace
std
;
static
std
::
vector
<
cv
::
Vec3b
>
readColors
(
const
string
&
filename
=
"d:/dnn_opencv/pascal-classes.txt"
)
static
const
string
fcnType
=
"fcn8s"
;
static
vector
<
cv
::
Vec3b
>
readColors
(
const
string
&
filename
=
"pascal-classes.txt"
)
{
std
::
vector
<
cv
::
Vec3b
>
colors
;
vector
<
cv
::
Vec3b
>
colors
;
std
::
ifstream
fp
(
filename
);
ifstream
fp
(
filename
.
c_str
()
);
if
(
!
fp
.
is_open
())
{
std
::
cerr
<<
"File with colors not found: "
<<
filename
<<
std
::
endl
;
cerr
<<
"File with colors not found: "
<<
filename
<<
endl
;
exit
(
-
1
);
}
st
d
::
st
ring
line
;
string
line
;
while
(
!
fp
.
eof
())
{
std
::
getline
(
fp
,
line
);
getline
(
fp
,
line
);
if
(
line
.
length
())
{
st
d
::
st
ringstream
ss
(
line
);
stringstream
ss
(
line
);
st
d
::
st
ring
name
;
ss
>>
name
;
string
name
;
ss
>>
name
;
int
temp
;
cv
::
Vec3b
color
;
ss
>>
temp
;
color
[
0
]
=
temp
;
...
...
@@ -42,7 +44,7 @@ static std::vector<cv::Vec3b> readColors(const string &filename = "d:/dnn_opencv
return
colors
;
}
static
void
colorizeSegmentation
(
dnn
::
Blob
&
score
,
const
std
::
vector
<
cv
::
Vec3b
>
&
colors
,
cv
::
Mat
&
segm
)
static
void
colorizeSegmentation
(
dnn
::
Blob
&
score
,
const
vector
<
cv
::
Vec3b
>
&
colors
,
cv
::
Mat
&
segm
)
{
const
int
rows
=
score
.
rows
();
const
int
cols
=
score
.
cols
();
...
...
@@ -83,11 +85,11 @@ static void colorizeSegmentation(dnn::Blob &score, const std::vector<cv::Vec3b>
int
main
(
int
argc
,
char
**
argv
)
{
String
modelTxt
=
"d:/dnn_opencv/fcn32s
-heavy-pascal.prototxt"
;
String
modelBin
=
"d:/dnn_opencv/fcn32s
-heavy-pascal.caffemodel"
;
String
imageFile
=
(
argc
>
1
)
?
argv
[
1
]
:
"
d:/dnn_opencv/
rgb.jpg"
;
String
modelTxt
=
fcnType
+
"
-heavy-pascal.prototxt"
;
String
modelBin
=
fcnType
+
"
-heavy-pascal.caffemodel"
;
String
imageFile
=
(
argc
>
1
)
?
argv
[
1
]
:
"rgb.jpg"
;
std
::
vector
<
cv
::
Vec3b
>
colors
=
readColors
();
vector
<
cv
::
Vec3b
>
colors
=
readColors
();
//! [Create the importer of Caffe model]
Ptr
<
dnn
::
Importer
>
importer
;
...
...
@@ -97,17 +99,17 @@ int main(int argc, char **argv)
}
catch
(
const
cv
::
Exception
&
err
)
//Importer can throw errors, we will catch them
{
std
::
cerr
<<
err
.
msg
<<
std
::
endl
;
cerr
<<
err
.
msg
<<
endl
;
}
//! [Create the importer of Caffe model]
if
(
!
importer
)
{
std
::
cerr
<<
"Can't load network by using the following files: "
<<
std
::
endl
;
std
::
cerr
<<
"prototxt: "
<<
modelTxt
<<
std
::
endl
;
std
::
cerr
<<
"caffemodel: "
<<
modelBin
<<
std
::
endl
;
std
::
cerr
<<
"fcn32s-heavy-pascal.caffemodel can be downloaded here:"
<<
std
::
endl
;
std
::
cerr
<<
"http://dl.caffe.berkeleyvision.org/fcn32s-heavy-pascal.caffemodel"
<<
std
::
endl
;
cerr
<<
"Can't load network by using the following files: "
<<
endl
;
cerr
<<
"prototxt: "
<<
modelTxt
<<
endl
;
cerr
<<
"caffemodel: "
<<
modelBin
<<
endl
;
cerr
<<
fcnType
<<
"-heavy-pascal.caffemodel can be downloaded here:"
<<
endl
;
cerr
<<
"http://dl.caffe.berkeleyvision.org/"
<<
fcnType
<<
"-heavy-pascal.caffemodel"
<<
endl
;
exit
(
-
1
);
}
...
...
@@ -121,7 +123,7 @@ int main(int argc, char **argv)
Mat
img
=
imread
(
imageFile
);
if
(
img
.
empty
())
{
std
::
cerr
<<
"Can't read image from the file: "
<<
imageFile
<<
std
::
endl
;
cerr
<<
"Can't read image from the file: "
<<
imageFile
<<
endl
;
exit
(
-
1
);
}
...
...
modules/dnn/src/init.cpp
View file @
12c9b0ea
...
...
@@ -54,6 +54,7 @@
#include "layers/softmax_layer.hpp"
#include "layers/split_layer.hpp"
#include "layers/crop_layer.hpp"
#include "layers/eltwise_layer.hpp"
namespace
cv
{
...
...
@@ -100,6 +101,7 @@ void initModule()
REG_RUNTIME_LAYER_CLASS
(
Concat
,
ConcatLayer
)
REG_RUNTIME_LAYER_CLASS
(
Crop
,
CropLayer
)
REG_RUNTIME_LAYER_CLASS
(
Eltwise
,
EltwiseLayer
)
init
.
status
=
true
;
}
...
...
modules/dnn/src/layers/crop_layer.cpp
View file @
12c9b0ea
...
...
@@ -52,15 +52,15 @@ namespace dnn
start_axis
=
params
.
get
<
int
>
(
"axis"
);
if
(
4
<=
start_axis
)
CV_Error
(
Error
::
StsBadArg
,
"crop axis bigger than input dim"
);
DictValue
paramOffset
=
params
.
get
(
"offset"
);
offset
.
resize
(
4
,
0
);
if
(
1
<
paramOffset
.
size
())
{
if
(
4
-
start_axis
!=
paramOffset
.
size
())
CV_Error
(
Error
::
StsBadArg
,
"number of offset values specified must be equal to the number of dimensions following axis."
);
for
(
in
t
i
=
start_axis
;
i
<
offset
.
size
();
i
++
)
for
(
size_
t
i
=
start_axis
;
i
<
offset
.
size
();
i
++
)
{
offset
[
i
]
=
paramOffset
.
get
<
int
>
(
i
);
}
...
...
@@ -68,7 +68,7 @@ namespace dnn
else
{
const
int
offset_val
=
paramOffset
.
get
<
int
>
(
0
);
for
(
in
t
i
=
start_axis
;
i
<
offset
.
size
();
i
++
)
for
(
size_
t
i
=
start_axis
;
i
<
offset
.
size
();
i
++
)
{
offset
[
i
]
=
offset_val
;
}
...
...
@@ -81,7 +81,7 @@ namespace dnn
const
Blob
&
inpBlob
=
*
inputs
[
0
];
CV_Assert
(
inpBlob
.
dims
()
==
4
&&
inpBlob
.
type
()
==
CV_32F
);
const
Blob
&
inpSzBlob
=
*
inputs
[
1
];
outSizes
.
resize
(
4
,
0
);
...
...
modules/dnn/src/layers/eltwise_layer.cpp
0 → 100755
View file @
12c9b0ea
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "eltwise_layer.hpp"
namespace
cv
{
namespace
dnn
{
EltwiseLayer::EltwiseLayer(LayerParams &params) : Layer(params)
{
    // Select the element-wise operation from the "operation" parameter;
    // defaults to SUM when absent (matching Caffe's EltwiseParameter default).
    if (params.has("operation"))
    {
        String operation = params.get<String>("operation").toLowerCase();
        if (operation == "prod")
            op = PROD;
        else if (operation == "sum")
            op = SUM;
        else if (operation == "max")
            op = MAX;
        else
            CV_Error(cv::Error::StsBadArg, "Unknown operation type \"" + operation + "\"");
    }
    else
    {
        op = SUM;
    }

    // Optional per-input coefficients (only meaningful for SUM; enforced in allocate()).
    if (params.has("coeff"))
    {
        DictValue paramCoeff = params.get("coeff");
        coeffs.resize(paramCoeff.size(), 1);
        for (int i = 0; i < paramCoeff.size(); i++)
        {
            // NOTE(review): coeffs are stored as int (see header declaration);
            // Caffe's eltwise coefficients are floating-point, so fractional
            // values would be truncated here — confirm intended precision.
            coeffs[i] = paramCoeff.get<int>(i);
        }
    }
}
void
EltwiseLayer
::
allocate
(
const
std
::
vector
<
Blob
*>
&
inputs
,
std
::
vector
<
Blob
>
&
outputs
)
{
CV_Assert
(
2
<=
inputs
.
size
());
CV_Assert
(
coeffs
.
size
()
==
0
||
coeffs
.
size
()
==
inputs
.
size
());
CV_Assert
(
op
==
SUM
||
coeffs
.
size
()
==
0
);
const
BlobShape
&
shape0
=
inputs
[
0
]
->
shape
();
for
(
size_t
i
=
1
;
i
<
inputs
.
size
();
++
i
)
{
CV_Assert
(
shape0
==
inputs
[
i
]
->
shape
());
}
outputs
.
resize
(
1
);
outputs
[
0
].
create
(
shape0
);
}
void EltwiseLayer::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    // Combine all bottom blobs into the single top blob according to `op`.
    Mat &dst = outputs[0].matRef();
    const size_t count = inputs.size();

    if (op == SUM)
    {
        CV_Assert(coeffs.size() == 0 || coeffs.size() == count);
        const bool weighted = (0 < coeffs.size());
        // Accumulate from zero; apply per-input coefficients when provided.
        dst.setTo(0.);
        for (size_t k = 0; k < count; k++)
        {
            if (weighted)
                dst += inputs[k]->matRefConst() * coeffs[k];
            else
                dst += inputs[k]->matRefConst();
        }
    }
    else if (op == PROD)
    {
        // Accumulate an element-wise product starting from all-ones.
        dst.setTo(1.);
        for (size_t k = 0; k < count; k++)
        {
            dst = dst.mul(inputs[k]->matRefConst());
        }
    }
    else if (op == MAX)
    {
        // Seed with the pairwise max of the first two bottoms, then fold
        // the remaining ones in.
        cv::max(inputs[0]->matRefConst(), inputs[1]->matRefConst(), dst);
        for (size_t k = 2; k < count; k++)
        {
            cv::max(dst, inputs[k]->matRefConst(), dst);
        }
    }
    else
    {
        CV_Assert(0);  // unreachable: constructor only assigns SUM/PROD/MAX
    }
}
}
}
modules/dnn/src/layers/eltwise_layer.hpp
0 → 100755
View file @
12c9b0ea
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_LAYERS_ELTWISE_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_ELTWISE_LAYER_HPP__
#include "../precomp.hpp"
namespace
cv
{
namespace
dnn
{
// Element-wise combination layer (Caffe "Eltwise"): merges two or more
// identically shaped input blobs into one output blob using a product,
// sum (optionally weighted), or maximum, element by element.
class EltwiseLayer : public Layer
{
    // Supported element-wise operations; values mirror Caffe's
    // EltwiseParameter_EltwiseOp enumeration.
    enum EltwiseOp
    {
        PROD = 0,
        SUM = 1,
        MAX = 2,
    };
    // Operation selected from the layer parameters (defaults to SUM).
    EltwiseOp op;
    // Optional per-input coefficients for SUM; empty means all ones.
    // NOTE(review): stored as int — fractional Caffe coefficients would be
    // truncated; confirm intended precision.
    std::vector<int> coeffs;
public:
    EltwiseLayer(LayerParams &params);
    // Validates input count/shapes/coefficients and sizes the single output.
    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
    // Computes the selected element-wise operation over all inputs.
    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
#endif
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment