opencv / Commit 222149b9
Authored Sep 22, 2017 by Dmitry Kurtaev
Parent: a0d3d114

Refactored Padding layer

Showing 7 changed files with 166 additions and 113 deletions
modules/dnn/include/opencv2/dnn/all_layers.hpp   +19  -0
modules/dnn/src/layers/padding_layer.cpp         +59  -58
modules/dnn/src/tensorflow/tf_importer.cpp       +20  -43
modules/dnn/src/torch/torch_importer.cpp         +39  -12
modules/dnn/test/test_halide_layers.cpp          +22  -0
modules/dnn/test/test_tf_importer.cpp            +1   -0
modules/dnn/test/test_torch_importer.cpp         +6   -0
modules/dnn/include/opencv2/dnn/all_layers.hpp
```diff
@@ -337,6 +337,25 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         static Ptr<PermuteLayer> create(const LayerParams& params);
     };
 
+    /**
+     * @brief Adds extra values for specific axes.
+     * @param paddings Vector of paddings in format
+     *                 @code
+     *                 [ pad_before, pad_after,  // [0]th dimension
+     *                   pad_before, pad_after,  // [1]st dimension
+     *                   ...
+     *                   pad_before, pad_after ] // [n]th dimension
+     *                 @endcode
+     *                 that represents the number of padded values at every
+     *                 dimension, starting from the first one. The rest of the
+     *                 dimensions won't be padded.
+     * @param value Value to pad with. Defaults to zero.
+     * @param input_dims Torch's parameter. If @p input_dims is not equal to the
+     *                   actual input dimensionality, the [0]th dimension is
+     *                   considered a batch dimension and @p paddings are shifted
+     *                   by one dimension. Defaults to -1, meaning @p paddings
+     *                   are applied to the dimensions exactly as given.
+     */
     class CV_EXPORTS PaddingLayer : public Layer
     {
     public:
```
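For orientation, the refactored interface can be driven end to end through the public `Net` API. A minimal sketch, assuming the dnn module at this revision; the layer name, shapes, and pad values are illustrative, not taken from the commit:

```cpp
#include <opencv2/dnn.hpp>

using namespace cv;
using namespace cv::dnn;

int main()
{
    // Pad the two trailing axes of a 1x2x3x4 blob: one value before/after
    // axis 2 and two values before/after axis 3, filling new cells with -1.
    int pads[] = {0, 0,  0, 0,  1, 1,  2, 2};  // (pad_before, pad_after) per axis

    LayerParams lp;
    lp.set("paddings", DictValue::arrayInt<int*>(pads, 8));
    lp.set("value", -1.0);
    lp.type = "Padding";
    lp.name = "pad";

    Net net;
    net.addLayerToPrev(lp.name, lp.type, lp);

    Mat input({1, 2, 3, 4}, CV_32F);
    randu(input, 0, 1);
    net.setInput(input);
    Mat output = net.forward();  // 1 x 2 x (3+1+1) x (4+2+2) = 1 x 2 x 5 x 8
    return 0;
}
```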
modules/dnn/src/layers/padding_layer.cpp
```diff
@@ -2,7 +2,7 @@
 // It is subject to the license terms in the LICENSE file found in the top-level directory
 // of this distribution and at http://opencv.org/license.html.
 
-// Copyright (C) 2016, Intel Corporation, all rights reserved.
+// Copyright (C) 2017, Intel Corporation, all rights reserved.
 // Third party copyrights are property of their respective owners.
 
 /*
@@ -24,14 +24,20 @@ public:
     PaddingLayerImpl(const LayerParams &params)
     {
         setParamsFrom(params);
-        paddingDim = params.get<int>("padding_dim");
-        padding = params.get<int>("padding");
-        inputDims = params.get<int>("input_dims", 0);
-        index = params.get<int>("index", 0);
-        paddingValue = params.get<double>("value", 0);
-
-        if (paddingDim < 0 || padding < 0)
-            CV_Error(cv::Error::StsNotImplemented, "Negative padding and dim aren't supported");
+        paddingValue = params.get<float>("value", 0);
+        inputDims = params.get<int>("input_dims", -1);
+
+        CV_Assert(params.has("paddings"));
+        const DictValue& paddingsParam = params.get("paddings");
+        CV_Assert((paddingsParam.size() & 1) == 0);
+
+        paddings.resize(paddingsParam.size() / 2);
+        for (int i = 0; i < paddings.size(); ++i)
+        {
+            paddings[i].first = paddingsParam.get<int>(i * 2);       // Pad before.
+            paddings[i].second = paddingsParam.get<int>(i * 2 + 1);  // Pad after.
+            CV_Assert(paddings[i].first >= 0, paddings[i].second >= 0);
+        }
     }
 
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -39,24 +45,48 @@ public:
                          std::vector<MatShape> &outputs,
                          std::vector<MatShape> &internals) const
     {
-        outputs.clear();
-        for (int i = 0; i < inputs.size(); i++)
-        {
-            MatShape shape = inputs[i];
-            int dim = getPadDim(shape);
-            CV_Assert(dim < shape.size());
-
-            shape[dim] += padding;
-            outputs.push_back(shape);
-        }
-
-        return false;
-    }
+        CV_Assert(inputs.size() == 1);
+        const MatShape& inpShape = inputs[0];
+        CV_Assert(inpShape.size() >= paddings.size());
+        CV_Assert(inputDims == -1 || inpShape.size() == inputDims || inpShape.size() > paddings.size());
+
+        outputs.resize(1, inpShape);
+        int offset = (inputDims == -1 ? 0 : (inpShape.size() > inputDims ? 1 : 0));
+        for (int i = 0; i < paddings.size(); ++i)
+        {
+            outputs[0][offset + i] = inpShape[offset + i] + paddings[i].first + paddings[i].second;
+        }
+        return false;
+    }
+
+    void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)
+    {
+        // Compute dstRanges.
+        const MatSize& inpShape = inputs[0]->size;
+        dstRanges.resize(paddings.size());
+
+        int offset = 0;
+        if (inputDims != -1 && inputs[0]->dims != inputDims)
+        {
+            dstRanges.insert(dstRanges.begin(), Range::all());
+            offset = 1;
+        }
+
+        for (int i = 0; i < paddings.size(); ++i)
+        {
+            dstRanges[offset + i].start = paddings[i].first;
+            dstRanges[offset + i].end = paddings[i].first + inpShape[offset + i];
+        }
+
+        // Add the rest of dimensions.
+        for (int i = dstRanges.size(); i < inputs[0]->dims; ++i)
+            dstRanges.push_back(Range::all());
+    }
 
     virtual bool supportBackend(int backendId)
     {
         return backendId == DNN_BACKEND_DEFAULT ||
-               backendId == DNN_BACKEND_HALIDE && haveHalide();
+               backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4;
     }
 
     void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
@@ -64,50 +94,18 @@ public:
         CV_TRACE_FUNCTION();
         CV_TRACE_ARG_VALUE(name, "name", name.c_str());
 
-        for (int i = 0; i < inputs.size(); i++)
-        {
-            outputs[i] = paddingValue;
-            const Mat& inp = *inputs[i];
-            Mat& out = outputs[i];
-            int dims = inp.dims;
-            MatShape inShape(inp.size.p, inp.size.p + dims);
-            MatShape outShape(out.size.p, out.size.p + dims);
-            int dim = getPadDim(inShape);
-
-            int actualIndex = index;
-            if (index == 0)
-                actualIndex = inShape[dim];
-
-            std::vector<std::pair<Range, Range> > srcDstRanges;
-            srcDstRanges.push_back(std::make_pair(Range(0, actualIndex),
-                                                  Range(0, actualIndex)));
-            srcDstRanges.push_back(std::make_pair(Range(actualIndex, inShape[dim]),
-                                                  Range(actualIndex + padding, outShape[dim])));
-
-            std::vector<Range> srcRanges(dims, Range::all()), dstRanges = srcRanges;
-
-            for (int j = 0; j < srcDstRanges.size(); j++)
-            {
-                if (!srcDstRanges[j].first.empty())
-                {
-                    srcRanges[dim] = srcDstRanges[j].first;
-                    dstRanges[dim] = srcDstRanges[j].second;
-                    Mat dst = out(&dstRanges[0]);
-                    Mat src = inp(&srcRanges[0]).clone();
-                    src.copyTo(dst);
-                }
-            }
-        }
-    }
-
-    int getPadDim(const MatShape& shape) const
-    {
-        return inputDims > 0 && (int)shape.size() > inputDims ? paddingDim + 1 : paddingDim;
-    }
+        outputs[0].setTo(paddingValue);
+        inputs[0]->copyTo(outputs[0](dstRanges));
+    }
 
     virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs)
     {
 #ifdef HAVE_HALIDE
         int inW, inH, inC, inN;
+        int minN = std::max(dstRanges[0].start, 0);
+        int minC = std::max(dstRanges[1].start, 0);
+        int minY = std::max(dstRanges[2].start, 0);
+        int minX = std::max(dstRanges[3].start, 0);
         Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
         getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
@@ -115,13 +113,16 @@ public:
         Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
         Halide::Func padded =
             Halide::BoundaryConditions::constant_exterior(inputBuffer, paddingValue);
-        top(x, y, c, n) = padded(x, y, c, n);
+        top(x, y, c, n) = padded(x - minX, y - minY, c - minC, n - minN);
         return Ptr<BackendNode>(new HalideBackendNode(top));
 #endif  // HAVE_HALIDE
         return Ptr<BackendNode>();
     }
 
-    int paddingDim, padding, inputDims, index;
+private:
+    std::vector<std::pair<int, int> > paddings;  // Pairs pad before, pad after.
+    std::vector<Range> dstRanges;
+    int inputDims;
     float paddingValue;
 };
```
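The heart of the refactoring is that output geometry is now derived once from the (pad_before, pad_after) pairs instead of from a single padded axis. The shape rule in the new `getMemoryShapes` reduces to the standalone sketch below; the function name is mine, only the arithmetic follows the layer:

```cpp
#include <cassert>
#include <utility>
#include <vector>

// Mirrors the new getMemoryShapes(): each padded axis grows by
// pad_before + pad_after, trailing axes keep their extent. If input_dims
// is set and the blob has one extra dimension, axis 0 is treated as a
// batch dimension and every padding pair shifts right by one axis.
std::vector<int> paddedShape(const std::vector<int>& inShape,
                             const std::vector<std::pair<int, int> >& paddings,
                             int inputDims = -1)
{
    assert(inShape.size() >= paddings.size());
    int offset = (inputDims != -1 && (int)inShape.size() > inputDims) ? 1 : 0;
    std::vector<int> outShape(inShape);
    for (size_t i = 0; i < paddings.size(); ++i)
        outShape[offset + i] += paddings[i].first + paddings[i].second;
    return outShape;
}
```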
modules/dnn/src/tensorflow/tf_importer.cpp
```diff
@@ -931,51 +931,28 @@ void TFImporter::populateNet(Net dstNet)
         }
         else if (type == "Pad")
         {
-            tensorflow::TensorProto paddings = getConstBlob(layer, value_id, 1);
-            MatShape shape;
-            blobShapeFromTensor(paddings, shape);
-            if (shape[0] != 4)
-                CV_Error(Error::StsError, "Expected NHWC data format");
-
-            // Copy tensor with paddings.
-            std::vector<int32_t> values(shape[0] * 2);
-            CV_Assert(sizeof(int32_t) * values.size() ==
-                      paddings.tensor_content().size());
-            memcpy(&values[0], &paddings.tensor_content()[0],
-                   paddings.tensor_content().size());
-
-            // Allow only one padding operation per layer.
-            bool padded = false;
-            for (int i = 0; i < values.size(); ++i)
-            {
-                if (values[i])
-                {
-                    if (padded)
-                        CV_Error(Error::StsError,
-                                 "Only single padding operation per layer is supported");
-                    padded = true;
-
-                    int axis = i / 2;
-                    // Remap NHWC to NCHW.
-                    // 0 -> 0
-                    // 1 -> 2
-                    // 2 -> 3
-                    // 3 -> 1
-                    if (axis != 0)
-                        axis = axis % 3 + 1;
-                    layerParams.set("padding_dim", axis);
-                    if (i % 2)  // Pad after
-                        layerParams.set("padding", values[i]);
-                    else  // Pad before
-                        layerParams.set("padding", -1 * values[i]);
-
-                    int id = dstNet.addLayer(name, "Padding", layerParams);
-                    layer_id[name] = id;
-                    connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
-                }
-            }
+            Mat paddings = getTensorContent(getConstBlob(layer, value_id, 1));
+            CV_Assert(paddings.type() == CV_32SC1);
+            if (paddings.total() == 8)
+            {
+                // Perhaps we have NHWC padding dimensions order.
+                //  N    H    W    C
+                // 0 1  2 3  4 5  6 7
+                std::swap(*paddings.ptr<int32_t>(0, 2), *paddings.ptr<int32_t>(0, 6));
+                std::swap(*paddings.ptr<int32_t>(0, 3), *paddings.ptr<int32_t>(0, 7));
+                //  N    C    W    H
+                // 0 1  2 3  4 5  6 7
+                std::swap(*paddings.ptr<int32_t>(0, 4), *paddings.ptr<int32_t>(0, 6));
+                std::swap(*paddings.ptr<int32_t>(0, 5), *paddings.ptr<int32_t>(0, 7));
+                //  N    C    H    W
+                // 0 1  2 3  4 5  6 7
+            }
+            layerParams.set("paddings", DictValue::arrayInt<int*>((int*)paddings.data, paddings.total()));
+
+            int id = dstNet.addLayer(name, "Padding", layerParams);
+            layer_id[name] = id;
+
+            connect(layer_id, dstNet, parsePin(layer.input(0)), id, 0);
         }
         else if (type == "FusedBatchNorm")
         {
```
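The pair of swaps above is an in-place permutation of the four (before, after) pairs from NHWC to NCHW order on the flattened array of eight values. Spelled out as a standalone helper (hypothetical name, for illustration only):

```cpp
#include <algorithm>
#include <cstdint>

// Reorders four (before, after) padding pairs from NHWC to NCHW,
// following the importer: first exchange the H and C pairs
// (NHWC -> NCWH), then the W and H pairs (NCWH -> NCHW).
void nhwcPadsToNchw(int32_t p[8])
{
    std::swap(p[2], p[6]);
    std::swap(p[3], p[7]);  // N H W C -> N C W H
    std::swap(p[4], p[6]);
    std::swap(p[5], p[7]);  // N C W H -> N C H W
}
```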
modules/dnn/src/torch/torch_importer.cpp
```diff
@@ -714,23 +714,25 @@ struct TorchImporter : public ::cv::dnn::Importer
             readTorchTable(scalarParams, tensorParams);
             newModule->apiType = "Padding";
             CV_Assert(scalarParams.has("pad") && scalarParams.has("dim"));
+            if (scalarParams.has("index") && scalarParams.get<int>("index") != 1)
+                CV_Error(Error::StsNotImplemented, "Padding with offset is not implemented");
 
-            layerParams.set("padding_dim",
-                            static_cast<int>(scalarParams.get<double>("dim") - 1));
-            layerParams.set("padding", static_cast<int>(scalarParams.get<double>("pad")));
+            if (scalarParams.has("value"))
+                layerParams.set("value", scalarParams.get<float>("value"));
 
             if (scalarParams.has("nInputDim"))
-                layerParams.set("input_dims",
-                                static_cast<int>(scalarParams.get<double>("nInputDim")));
+                layerParams.set("input_dims", scalarParams.get<int>("nInputDim"));
 
-            if (scalarParams.has("value"))
-                layerParams.set("value", scalarParams.get<double>("value"));
+            int dim = scalarParams.get<int>("dim") - 1;  // In Lua we start from 1.
+            int pad = scalarParams.get<int>("pad");
 
-            if (scalarParams.has("index"))
-                layerParams.set("index",
-                                static_cast<int>(scalarParams.get<double>("index") - 1));
+            std::vector<int> paddings((dim + 1) * 2, 0);
+            if (pad > 0)
+                paddings[dim * 2 + 1] = pad;  // Pad after (right).
+            else
+                paddings[dim * 2] = -pad;  // Pad before (left).
+            layerParams.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
 
             curModule->modules.push_back(newModule);
         }
@@ -867,6 +869,31 @@ struct TorchImporter : public ::cv::dnn::Importer
             layerParams.set("scale", scalarParams.get<float>("constant_scalar"));
             curModule->modules.push_back(newModule);
         }
+        else if (nnName == "SpatialZeroPadding")
+        {
+            readTorchTable(scalarParams, tensorParams);
+            CV_Assert(scalarParams.has("pad_l"), scalarParams.has("pad_r"),
+                      scalarParams.has("pad_t"), scalarParams.has("pad_b"));
+            int padTop = scalarParams.get<int>("pad_t");
+            int padLeft = scalarParams.get<int>("pad_l");
+            int padRight = scalarParams.get<int>("pad_r");
+            int padBottom = scalarParams.get<int>("pad_b");
+            if (padTop < 0 || padLeft < 0 || padRight < 0 || padBottom < 0)
+                CV_Error(Error::StsNotImplemented, "SpatialZeroPadding in cropping mode is not implemented");
+
+            newModule->apiType = "Padding";
+
+            // Torch's SpatialZeroPadding works with 3- or 4-dimensional input.
+            // Add parameter input_dims=3 so a batch dimension, if present, is skipped.
+            std::vector<int> paddings(6, 0);  // CHW
+            paddings[2] = padTop;
+            paddings[3] = padBottom;
+            paddings[4] = padLeft;
+            paddings[5] = padRight;
+            layerParams.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
+            layerParams.set("input_dims", 3);
+
+            curModule->modules.push_back(newModule);
+        }
         else
         {
             CV_Error(Error::StsNotImplemented, "Unknown nn class \"" + className + "\"");
```
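Both Torch converters end up building the same generic paddings array: nn.Padding encodes direction in the sign of `pad`, while SpatialZeroPadding always targets the H and W axes of a CHW blob. The two translations, summarized as small helpers (names are mine, the index arithmetic follows the importer):

```cpp
#include <vector>

// nn.Padding(dim, pad): a positive pad appends values after the axis,
// a negative pad prepends |pad| values before it (dim is 0-based here;
// Lua counts from 1).
std::vector<int> torchPadToPaddings(int dim, int pad)
{
    std::vector<int> paddings((dim + 1) * 2, 0);
    if (pad > 0)
        paddings[dim * 2 + 1] = pad;  // pad after (right)
    else
        paddings[dim * 2] = -pad;     // pad before (left)
    return paddings;
}

// SpatialZeroPadding(l, r, t, b): pads H (axis 1) and W (axis 2) of a
// CHW blob; combined with input_dims=3, a leading batch axis is skipped.
std::vector<int> spatialZeroPadToPaddings(int l, int r, int t, int b)
{
    std::vector<int> paddings(6, 0);  // (before, after) for C, H, W
    paddings[2] = t;  // H before
    paddings[3] = b;  // H after
    paddings[4] = l;  // W before
    paddings[5] = r;  // W after
    return paddings;
}
```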
modules/dnn/test/test_halide_layers.cpp
```diff
@@ -34,6 +34,28 @@ static void test(LayerParams& params, Mat& input)
     normAssert(outputDefault, outputHalide);
 }
 
+////////////////////////////////////////////////////////////////////////////////
+// Padding
+////////////////////////////////////////////////////////////////////////////////
+TEST(Padding_Halide, Accuracy)
+{
+    static const int kNumRuns = 10;
+    std::vector<int> paddings(8);
+    for (int t = 0; t < kNumRuns; ++t)
+    {
+        for (int i = 0; i < paddings.size(); ++i)
+            paddings[i] = rand() % 5;
+
+        LayerParams lp;
+        lp.set("paddings", DictValue::arrayInt<int*>(&paddings[0], paddings.size()));
+        lp.type = "Padding";
+        lp.name = "testLayer";
+
+        Mat input({1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10, 1 + rand() % 10}, CV_32F);
+        test(lp, input);
+    }
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // Convolution
 ////////////////////////////////////////////////////////////////////////////////
```
modules/dnn/test/test_tf_importer.cpp
```diff
@@ -103,6 +103,7 @@ TEST(Test_TensorFlow, padding)
 {
     runTensorFlowNet("padding_same");
     runTensorFlowNet("padding_valid");
+    runTensorFlowNet("spatial_padding");
 }
 
 TEST(Test_TensorFlow, eltwise_add_mul)
```
modules/dnn/test/test_torch_importer.cpp
```diff
@@ -190,6 +190,12 @@ TEST(Torch_Importer, net_normalize)
     runTorchNet("net_normalize", "", false, true);
 }
 
+TEST(Torch_Importer, net_padding)
+{
+    runTorchNet("net_padding", "", false, true);
+    runTorchNet("net_spatial_zero_padding", "", false, true);
+}
+
 TEST(Torch_Importer, ENet_accuracy)
 {
     Net net;
```