opencv_contrib commit 3cdc0e48
Authored Feb 13, 2017 by Alexander Alekhin

Merge pull request #991 from arrybn:issue_912

Parents: 478baf93, 0cef0503
Showing 15 changed files with 216 additions and 35 deletions (+216, -35):
modules/dnn/include/opencv2/dnn/all_layers.hpp            +7   -1
modules/dnn/misc/caffe/caffe.pb.cc                        +0   -0
modules/dnn/misc/caffe/caffe.pb.h                         +0   -0
modules/dnn/src/caffe/caffe.proto                         +0   -0
modules/dnn/src/caffe/caffe_io.cpp                        +40  -4
modules/dnn/src/caffe/layer_loaders.cpp                   +13  -5
modules/dnn/src/init.cpp                                  +2   -0
modules/dnn/src/layers/batch_norm_layer.cpp               +32  -14
modules/dnn/src/layers/batch_norm_layer.hpp               +3   -3
modules/dnn/src/layers/scale_layer.cpp                    +60  -0
modules/dnn/src/layers/scale_layer.hpp                    +36  -0
modules/dnn/src/torch/torch_importer.cpp                  +4   -6
modules/dnn/test/test_layers.cpp                          +5   -0
modules/dnn/test/test_torch_importer.cpp                  +5   -0
modules/dnn/testdata/dnn/torch/torch_gen_test_data.lua    +9   -2
modules/dnn/include/opencv2/dnn/all_layers.hpp
@@ -406,7 +406,7 @@ namespace dnn
 class CV_EXPORTS_W BatchNormLayer : public Layer
 {
 public:
-    static CV_WRAP Ptr<BatchNormLayer> create(float eps, bool has_weights, bool has_bias);
+    static CV_WRAP Ptr<BatchNormLayer> create(bool hasWeights, bool hasBias, float epsilon);
 };

 class CV_EXPORTS_W MaxUnpoolLayer : public Layer
@@ -415,6 +415,12 @@ namespace dnn
     static CV_WRAP Ptr<MaxUnpoolLayer> create(Size unpoolSize);
 };

+class CV_EXPORTS_W ScaleLayer : public Layer
+{
+public:
+    static CV_WRAP Ptr<ScaleLayer> create(bool hasBias);
+};
+
 //! @}
 //! @}
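Note the signature reordering: eps moves to the last position (and, in the Caffe loader below, gains a 1E-5 default), while the new ScaleLayer factory takes only a bias flag. A minimal calling sketch, not part of the diff, with placeholder argument values:

    #include <opencv2/dnn/all_layers.hpp>

    void makeLayers()
    {
        using namespace cv::dnn;
        // Parameter order changed by this commit: epsilon now comes last.
        cv::Ptr<BatchNormLayer> bn =
            BatchNormLayer::create(/*hasWeights=*/true, /*hasBias=*/true, /*epsilon=*/1e-5f);
        // New layer introduced by this commit: per-channel scale with optional bias.
        cv::Ptr<ScaleLayer> sc = ScaleLayer::create(/*hasBias=*/false);
    }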
modules/dnn/misc/caffe/caffe.pb.cc (diff collapsed)
modules/dnn/misc/caffe/caffe.pb.h (diff collapsed)
modules/dnn/src/caffe/caffe.proto (diff collapsed)
modules/dnn/src/caffe/caffe_io.cpp
@@ -155,12 +155,17 @@ bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
 const char* UpgradeV1LayerType(const V1LayerParameter_LayerType type);

+bool NetNeedsBatchNormUpgrade(const NetParameter& net_param);
+void UpgradeNetBatchNorm(NetParameter* net_param);
+
 // Check for deprecations and upgrade the NetParameter as needed.
 bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param);

 bool NetNeedsUpgrade(const NetParameter& net_param) {
-  return NetNeedsV0ToV1Upgrade(net_param) || NetNeedsV1ToV2Upgrade(net_param);
+  return NetNeedsV0ToV1Upgrade(net_param) || NetNeedsV1ToV2Upgrade(net_param)
+      || NetNeedsBatchNormUpgrade(net_param);
 }

 bool NetNeedsV0ToV1Upgrade(const NetParameter& net_param) {
@@ -340,7 +345,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
   }
   if (v0_layer_param.has_pad()) {
     if (type == "conv") {
-      layer_param->mutable_convolution_param()->set_pad(v0_layer_param.pad());
+      layer_param->mutable_convolution_param()->add_pad(v0_layer_param.pad());
     } else if (type == "pool") {
       layer_param->mutable_pooling_param()->set_pad(v0_layer_param.pad());
     } else {
@@ -350,7 +355,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
   }
   if (v0_layer_param.has_kernelsize()) {
     if (type == "conv") {
-      layer_param->mutable_convolution_param()->set_kernel_size(
+      layer_param->mutable_convolution_param()->add_kernel_size(
           v0_layer_param.kernelsize());
     } else if (type == "pool") {
       layer_param->mutable_pooling_param()->set_kernel_size(
@@ -371,7 +376,7 @@ bool UpgradeV0LayerParameter(const V1LayerParameter& v0_layer_connection,
   }
   if (v0_layer_param.has_stride()) {
     if (type == "conv") {
-      layer_param->mutable_convolution_param()->set_stride(
+      layer_param->mutable_convolution_param()->add_stride(
           v0_layer_param.stride());
     } else if (type == "pool") {
       layer_param->mutable_pooling_param()->set_stride(
@@ -774,6 +779,14 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
         << "V1LayerParameter";
     }
   }
+  // NetParameter uses old style batch norm layers; try to upgrade it.
+  if (NetNeedsBatchNormUpgrade(*param)) {
+    LOG(INFO) << "Attempting to upgrade batch norm layers using deprecated "
+              << "params: " << param_file;
+    UpgradeNetBatchNorm(param);
+    LOG(INFO) << "Successfully upgraded batch norm layers using deprecated "
+              << "params.";
+  }
   return success;
 }
@@ -797,6 +810,29 @@ bool UpgradeV1Net(const NetParameter& v1_net_param, NetParameter* net_param) {
   return is_fully_compatible;
 }

+bool NetNeedsBatchNormUpgrade(const NetParameter& net_param) {
+  for (int i = 0; i < net_param.layer_size(); ++i) {
+    // Check if BatchNorm layers declare three parameters, as required by
+    // the previous BatchNorm layer definition.
+    if (net_param.layer(i).type() == "BatchNorm"
+        && net_param.layer(i).param_size() == 3) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void UpgradeNetBatchNorm(NetParameter* net_param) {
+  for (int i = 0; i < net_param->layer_size(); ++i) {
+    // Check if BatchNorm layers declare three parameters, as required by
+    // the previous BatchNorm layer definition.
+    if (net_param->layer(i).type() == "BatchNorm"
+        && net_param->layer(i).param_size() == 3) {
+      net_param->mutable_layer(i)->clear_param();
+    }
+  }
+}
+
 bool UpgradeV1LayerParameter(const V1LayerParameter& v1_layer_param,
                              LayerParameter* layer_param) {
   layer_param->Clear();
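Together these additions splice a third stage into the upgrade chain: NetNeedsUpgrade now also fires when any BatchNorm layer still declares the three historical param entries, and UpgradeNetAsNeeded strips them via UpgradeNetBatchNorm before the net reaches the layer loaders. A hedged sketch of the call sequence, assuming the prototxt has already been parsed into a NetParameter elsewhere:

    // Sketch only: exercising the upgrade entry points shown in this diff.
    void upgradeIfNeeded(const std::string& param_file, caffe::NetParameter* net)
    {
        if (NetNeedsUpgrade(*net))                // now also true for 3-param BatchNorm
            UpgradeNetAsNeeded(param_file, net);  // logs and clears the stale params
    }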
modules/dnn/src/caffe/layer_loaders.cpp
@@ -286,13 +286,12 @@ template<> //BatchNormLayer specialization
 Ptr<Layer> createLayerFromCaffe<BatchNormLayer>(LayerParams& params)
 {
     const std::vector<Blob> &blobs = params.blobs;
-    CV_Assert(blobs.size() == 4);
+    CV_Assert(blobs.size() >= 3);

-    float eps = params.get<float>("eps");
     bool hasWeights = params.get<bool>("has_weight", false);
     bool hasBias = params.get<bool>("has_bias", false);
-    Ptr<BatchNormLayer> l = BatchNormLayer::create(eps, hasWeights, hasBias);
+    float epsilon = params.get<float>("eps", 1E-5);
+    Ptr<BatchNormLayer> l = BatchNormLayer::create(hasWeights, hasBias, epsilon);
     l->setParamsFrom(params);

     return Ptr<Layer>(l);
@@ -318,6 +317,15 @@ Ptr<Layer> createLayerFromCaffe<MaxUnpoolLayer>(LayerParams& params)
     return Ptr<Layer>(l);
 }

+template<> //ScaleLayer specialization
+Ptr<Layer> createLayerFromCaffe<ScaleLayer>(LayerParams& params)
+{
+    Ptr<ScaleLayer> l = ScaleLayer::create(params.get<bool>("bias_term", false));
+    l->setParamsFrom(params);
+
+    return Ptr<Layer>(l);
+}
+
 //Explicit instantiation
 template Ptr<Layer> createLayerFromCaffe<ConvolutionLayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<DeconvolutionLayer>(LayerParams&);
@@ -342,6 +350,6 @@ template Ptr<Layer> createLayerFromCaffe<EltwiseLayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<BatchNormLayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<ChannelsPReLULayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<MaxUnpoolLayer>(LayerParams&);
+template Ptr<Layer> createLayerFromCaffe<ScaleLayer>(LayerParams&);
 }
 }
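With eps now read via params.get<float>("eps", 1E-5), Caffe models that omit the field still construct a valid layer, and the relaxed assert accepts the three-blob layout (mean, variance, scale factor) that old-style BatchNorm produces. A hedged sketch of driving the specialization directly; the three Blob arguments are placeholders:

    // Sketch only: the blobs stand in for per-channel mean, per-channel
    // variance, and Caffe's moving-average scale factor.
    Ptr<Layer> loadBatchNorm(const Blob& mean, const Blob& variance, const Blob& scaleFactor)
    {
        LayerParams params;
        params.blobs.push_back(mean);        // blobs[0]
        params.blobs.push_back(variance);    // blobs[1]
        params.blobs.push_back(scaleFactor); // blobs[2]
        // "eps" left unset on purpose: the loader now defaults it to 1E-5.
        return createLayerFromCaffe<BatchNormLayer>(params);
    }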
modules/dnn/src/init.cpp
@@ -52,6 +52,7 @@
#include "layers/normalize_bbox_layer.hpp"
#include "layers/shift_layer.hpp"
#include "layers/padding_layer.hpp"
#include "layers/scale_layer.hpp"
namespace
cv
{
...
...
@@ -109,6 +110,7 @@ void initModule()
     REG_RUNTIME_LAYER_CLASS(NormalizeBBox, NormalizeBBoxLayer);
     REG_RUNTIME_LAYER_CLASS(Shift, ShiftLayer);
     REG_RUNTIME_LAYER_CLASS(Padding, PaddingLayer);
+    REG_RUNTIME_LAYER_FUNC(Scale, createLayerFromCaffe<ScaleLayer>);

     init.status = true;
 }
modules/dnn/src/layers/batch_norm_layer.cpp
@@ -16,19 +16,21 @@ namespace cv
 namespace dnn
 {

-BatchNormLayerImpl::BatchNormLayerImpl(float eps_, bool hasWeights_, bool hasBias_):
-    eps(eps_), hasWeights(hasWeights_), hasBias(hasBias_)
+BatchNormLayerImpl::BatchNormLayerImpl(bool hasWeights_, bool hasBias_, float epsilon_):
+    hasWeights(hasWeights_), hasBias(hasBias_), epsilon(epsilon_)
 {}

 void BatchNormLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
-    CV_Assert(blobs.size() == 4);
+    CV_Assert(blobs.size() >= 2);

     outputs.resize(inputs.size());
     for (size_t i = 0; i < inputs.size(); i++)
     {
         CV_Assert(blobs[0].total() == inputs[i]->channels());
         CV_Assert(blobs[1].total() == inputs[i]->channels());
         outputs[i].create(inputs[i]->shape());
     }
 }
@@ -39,30 +41,46 @@ void BatchNormLayerImpl::forward(std::vector<Blob*> &inputs, std::vector<Blob> &
     Blob &inpBlob = *inputs[0];

+    int weightsBlobIndex = 2;
+    int biasBlobIndex = weightsBlobIndex + hasWeights;
+
+    float varMeanScale = 1;
+    if (!hasWeights && !hasBias)
+    {
+        varMeanScale = *blobs[2].ptrf();
+        if (varMeanScale != 0)
+            varMeanScale = 1/varMeanScale;
+    }
+
+    Mat invStdMat;
+    cv::pow(blobs[1].matRefConst()*varMeanScale + epsilon, -0.5, invStdMat);
+
     for (size_t ii = 0; ii < outputs.size(); ii++)
     {
         Blob &outBlob = outputs[ii];

         if (hasWeights)
-            CV_Assert(inpBlob.channels() == blobs[2].total());
+            CV_Assert(inpBlob.channels() == blobs[weightsBlobIndex].total());

         if (hasBias)
-            CV_Assert(inpBlob.channels() == blobs[3].total());
+            CV_Assert(inpBlob.channels() == blobs[biasBlobIndex].total());

-        for (int n = 0; n < inpBlob.channels(); n++)
-        {
-            float mean = blobs[0].matRefConst().at<float>(n);
-            float invstd = 1 / sqrt(blobs[1].matRefConst().at<float>(n) + eps);
-            float w = hasWeights ? blobs[2].matRefConst().at<float>(n) : 1;
-            float b = hasBias ? blobs[3].matRefConst().at<float>(n) : 0;
-            outBlob.getPlane(0, n) = (inpBlob.getPlane(0, n) - mean) * (w * invstd) + b;
-        }
+        for (int num = 0; num < outBlob.num(); num++)
+        {
+            for (int n = 0; n < outBlob.channels(); n++)
+            {
+                float mean = blobs[0].matRefConst().at<float>(n)*varMeanScale;
+                double invstd = invStdMat.at<float>(n);
+                float w = hasWeights ? blobs[weightsBlobIndex].matRefConst().at<float>(n) : 1;
+                float b = hasBias ? blobs[biasBlobIndex].matRefConst().at<float>(n) : 0;
+                outBlob.getPlane(num, n) = (inpBlob.getPlane(num, n) - mean)*w*invstd + b;
+            }
+        }
     }
 }

-Ptr<BatchNormLayer> BatchNormLayer::create(float eps, bool has_weights, bool has_bias)
+Ptr<BatchNormLayer> BatchNormLayer::create(bool hasWeights, bool hasBias, float epsilon)
 {
-    return Ptr<BatchNormLayer>(new BatchNormLayerImpl(eps, has_weights, has_bias));
+    return Ptr<BatchNormLayer>(new BatchNormLayerImpl(hasWeights, hasBias, epsilon));
 }

 }  // namespace dnn
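In effect the rewritten forward pass computes, per channel n and per sample num: out = (x - mean[n]*s) * w[n] / sqrt(var[n]*s + epsilon) + b[n], where s is the reciprocal of the stored scale factor in blobs[2] when the layer carries no learned weight or bias (the Caffe convention), and 1 otherwise; iterating over outBlob.num() is what extends the computation to batches larger than one. A scalar restatement with illustrative names, not part of the diff:

    #include <cmath>

    float batchNormScalar(float x, float mean, float var, float scaleFactor,
                          float w, float b, float epsilon, bool useScaleFactor)
    {
        float s = 1.f;
        if (useScaleFactor && scaleFactor != 0.f)
            s = 1.f / scaleFactor;                      // varMeanScale in the diff
        float invstd = 1.f / std::sqrt(var * s + epsilon);
        return (x - mean * s) * w * invstd + b;         // matches the new inner loop
    }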
modules/dnn/src/layers/batch_norm_layer.hpp
@@ -21,17 +21,17 @@ namespace dnn
 class BatchNormLayerImpl : public BatchNormLayer
 {
 public:
-    BatchNormLayerImpl(float eps_, bool hasWeights_, bool hasBias_);
+    BatchNormLayerImpl(bool hasWeights_, bool hasBias_, float epsilon_);

     void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
     void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);

 private:
-    float eps;
     bool hasWeights, hasBias;
+    float epsilon;
 };

 }
 }

-#endif // BATCH_NORM_LAYER_HPP
+#endif // __OPENCV_DNN_LAYERS_BATCH_NORM_LAYER_HPP__
modules/dnn/src/layers/scale_layer.cpp (new file, mode 100644)
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

/*
Implementation of Scale layer.
*/

#include "scale_layer.hpp"

namespace cv
{
namespace dnn
{

void ScaleLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    CV_Assert(blobs.size() == 1 + hasBias);

    outputs.resize(inputs.size());
    for (size_t i = 0; i < inputs.size(); i++)
    {
        outputs[i].create(inputs[i]->shape());
    }
}

void ScaleLayerImpl::forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
{
    CV_Assert(inputs.size() == 1);

    Blob &inpBlob = *inputs[0];

    for (size_t ii = 0; ii < outputs.size(); ii++)
    {
        Blob &outBlob = outputs[ii];

        CV_Assert(inpBlob.channels() == blobs[0].total());
        if (hasBias)
            CV_Assert(inpBlob.channels() == blobs[1].total());

        for (int n = 0; n < inpBlob.channels(); n++)
        {
            float w = blobs[0].matRefConst().at<float>(n);
            float b = hasBias ? blobs[1].matRefConst().at<float>(n) : 0;
            outBlob.getPlane(0, n) = w*inpBlob.getPlane(0, n) + b;
        }
    }
}

Ptr<ScaleLayer> ScaleLayer::create(bool hasBias)
{
    return Ptr<ScaleLayer>(new ScaleLayerImpl(hasBias));
}

}  // namespace dnn
}  // namespace cv
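ScaleLayer applies out(c) = w(c) * in(c) + b(c) across the channels of a single input blob. A hedged construction sketch; building the weight and bias blobs is assumed to happen elsewhere:

    // Sketch only: a Scale layer with per-channel bias. Layer::blobs is the
    // public blob storage read by allocate()/forward() above.
    Ptr<ScaleLayer> makeScale(const Blob& weights, const Blob& bias)
    {
        Ptr<ScaleLayer> sc = ScaleLayer::create(/*hasBias=*/true);
        sc->blobs.push_back(weights); // blobs[0]: one multiplier per channel
        sc->blobs.push_back(bias);    // blobs[1]: one offset per channel
        return sc;
    }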
modules/dnn/src/layers/scale_layer.hpp (new file, mode 100644)
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

/*
Declaration of scale layer, which multiplies and shifts channels in input blob.
*/

#ifndef __OPENCV_DNN_LAYERS_SCALE_LAYER_HPP__
#define __OPENCV_DNN_LAYERS_SCALE_LAYER_HPP__

#include <opencv2/dnn/all_layers.hpp>

namespace cv
{
namespace dnn
{

class ScaleLayerImpl : public ScaleLayer
{
public:
    ScaleLayerImpl(bool hasBias_): hasBias(hasBias_) {}
    void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);

private:
    bool hasBias;
};

}
}

#endif // __OPENCV_DNN_LAYERS_SCALE_LAYER_HPP__
modules/dnn/src/torch/torch_importer.cpp
@@ -575,21 +575,19 @@ struct TorchImporter : public ::cv::dnn::Importer
             layerParams.blobs.push_back(tensorParams["running_var"].second);

             CV_Assert(scalarParams.has("eps"));
-            layerParams.set("eps", float(scalarParams.get<double>("eps")));
-
-            layerParams.blobs.push_back(Blob());
-            layerParams.blobs.push_back(Blob());
+            float eps = float(scalarParams.get<double>("eps"));
+            layerParams.set("eps", eps);

             if (tensorParams.count("weight"))
             {
                 layerParams.set("has_weight", true);
-                layerParams.blobs[2] = tensorParams["weight"].second;
+                layerParams.blobs.push_back(tensorParams["weight"].second);
             }

             if (tensorParams.count("bias"))
             {
                 layerParams.set("has_bias", true);
-                layerParams.blobs[3] = tensorParams["bias"].second;
+                layerParams.blobs.push_back(tensorParams["bias"].second);
             }

             curModule->modules.push_back(newModule);
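Net effect of the importer change: rather than pre-allocating empty blob slots and writing into fixed indices 2 and 3, the optional weight and bias tensors are appended only when the Torch module actually carries them. The resulting blob count therefore lines up with the relaxed CV_Assert(blobs.size() >= 2) in BatchNormLayerImpl::allocate and with the weightsBlobIndex/biasBlobIndex arithmetic in the new forward pass.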
modules/dnn/test/test_layers.cpp
@@ -215,6 +215,11 @@ TEST(Layer_Test_Reshape, squeeze)
     EXPECT_EQ(outVec[0].shape(), BlobShape(4, 3, 2));
 }

+TEST(Layer_Test_BatchNorm, Accuracy)
+{
+     OCL_OFF(testLayerUsingCaffeModels("layer_batch_norm", true));
+}
+
 //template<typename XMat>
 //static void test_Layer_Concat()
 //{
modules/dnn/test/test_torch_importer.cpp
@@ -135,6 +135,11 @@ TEST(Torch_Importer, run_deconv)
     runTorchNet("net_deconv", "", false);
 }

+TEST(Torch_Importer, run_batch_norm)
+{
+    runTorchNet("net_batch_norm", "", false);
+}
+
 #if defined(ENABLE_TORCH_ENET_TESTS)

 TEST(Torch_Importer, ENet_accuracy)
modules/dnn/testdata/dnn/torch/torch_gen_test_data.lua
@@ -12,6 +12,9 @@ function fill_net(net)
     if net.bias then
         net.bias = torch.rand(net.bias:size())
     end
+    if net.train then
+        net.train = 0
+    end
 end

 function save(net, input, label)
@@ -68,4 +71,8 @@ save(net_concat, torch.rand(2, 6, 4, 3) - 0.5, 'net_concat')
 local net_deconv = nn.Sequential()
 net_deconv:add(nn.SpatialFullConvolution(3, 9, 4, 5, 1, 2, 0, 1, 0, 1))
-save(net_deconv, torch.rand(2, 3, 4, 3) - 0.5, 'net_deconv')
\ No newline at end of file
+save(net_deconv, torch.rand(2, 3, 4, 3) - 0.5, 'net_deconv')
+
+local net_batch_norm = nn.Sequential()
+net_batch_norm:add(nn.SpatialBatchNormalization(3))
+save(net_batch_norm, torch.rand(1, 3, 4, 3) - 0.5, 'net_batch_norm')
\ No newline at end of file