opencv / Commits / 41b23fde

Commit 41b23fde, authored Sep 15, 2017 by Vadim Pisarevsky

Merge pull request #9524 from dkurt:dnn_torch_openface

Parents: 48cc1b35 7dc6b1d7

Showing 10 changed files with 188 additions and 25 deletions (+188 −25)
modules/dnn/include/opencv2/dnn/all_layers.hpp   +16  −0
modules/dnn/src/dnn.cpp                          +1   −1
modules/dnn/src/init.cpp                         +1   −0
modules/dnn/src/layers/concat_layer.cpp          +28  −11
modules/dnn/src/layers/convolution_layer.cpp     +1   −1
modules/dnn/src/layers/lp_normalize_layer.cpp    +78  −0
modules/dnn/src/layers/pooling_layer.cpp         +5   −6
modules/dnn/src/layers/reshape_layer.cpp         +13  −4
modules/dnn/src/torch/torch_importer.cpp         +0   −0
modules/dnn/test/test_torch_importer.cpp         +45  −2
modules/dnn/include/opencv2/dnn/all_layers.hpp

@@ -245,6 +245,7 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         bool globalPooling;
         bool computeMaxIdx;
         String padMode;
+        bool ceilMode;

         static Ptr<PoolingLayer> create(const LayerParams& params);
     };

@@ -257,6 +258,14 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
         static Ptr<SoftmaxLayer> create(const LayerParams& params);
     };

+    class CV_EXPORTS LPNormalizeLayer : public Layer
+    {
+    public:
+        float pnorm, epsilon;
+
+        static Ptr<LPNormalizeLayer> create(const LayerParams& params);
+    };
+
     class CV_EXPORTS InnerProductLayer : public Layer
     {
     public:

@@ -294,6 +303,13 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
     {
     public:
         int axis;

+        /**
+         * @brief Add zero padding in case of concatenation of blobs with different
+         * spatial sizes.
+         *
+         * Details: https://github.com/torch/nn/blob/master/doc/containers.md#depthconcat
+         */
+        bool padding;
+
         static Ptr<ConcatLayer> create(const LayerParams& params);
     };
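The new padding flag brings Torch's DepthConcat semantics to ConcatLayer: inputs may differ in spatial size, and the output shape takes the elementwise maximum over the non-concatenation axes plus the sum along the concatenation axis. Below is a minimal standalone sketch of that shape rule; the depthConcatShape helper and the MatShape alias are illustrative only (the real logic lives in ConcatLayer::getMemoryShapes further down):

#include <algorithm>
#include <vector>

typedef std::vector<int> MatShape;  // stands in for cv::dnn::MatShape

// Output shape of a DepthConcat-style concatenation along cAxis:
// sum of sizes along cAxis, elementwise max elsewhere (smaller inputs
// are later zero-padded to fit).
MatShape depthConcatShape(const std::vector<MatShape>& inputs, int cAxis)
{
    MatShape out = inputs[0];
    out[cAxis] = 0;
    for (size_t i = 0; i < inputs.size(); i++)
    {
        for (size_t j = 0; j < out.size(); j++)
            if ((int)j != cAxis)
                out[j] = std::max(out[j], inputs[i][j]);
        out[cAxis] += inputs[i][cAxis];
    }
    return out;
}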
modules/dnn/src/dnn.cpp

@@ -1137,7 +1137,7 @@ struct Net::Impl
             // (and so we eliminate the concatenation layer, because the channels
             // are concatenated implicitly).
             Ptr<ConcatLayer> concatLayer = ld.layerInstance.dynamicCast<ConcatLayer>();
-            if( !concatLayer.empty() && concatLayer->axis == 1 &&
+            if( !concatLayer.empty() && concatLayer->axis == 1 && !concatLayer->padding &&
                 ld.outputBlobs.size() == 1 )
             {
                 Mat& output = ld.outputBlobs[0];
modules/dnn/src/init.cpp

@@ -91,6 +91,7 @@ void initializeLayerFactory()
     CV_DNN_REGISTER_LAYER_CLASS(InnerProduct,   InnerProductLayer);
     CV_DNN_REGISTER_LAYER_CLASS(Softmax,        SoftmaxLayer);
     CV_DNN_REGISTER_LAYER_CLASS(MVN,            MVNLayer);
+    CV_DNN_REGISTER_LAYER_CLASS(LPNormalize,    LPNormalizeLayer);

     CV_DNN_REGISTER_LAYER_CLASS(ReLU,           ReLULayer);
     CV_DNN_REGISTER_LAYER_CLASS(ChannelsPReLU,  ChannelsPReLULayer);
modules/dnn/src/layers/concat_layer.cpp

@@ -56,6 +56,7 @@ public:
     {
         setParamsFrom(params);
        axis = params.get<int>("axis", 1);
+        padding = params.get<bool>("padding", false);
    }

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,

@@ -64,8 +65,7 @@ public:
                                 std::vector<MatShape> &internals) const
    {
        CV_Assert(inputs.size() > 0);
-        outputs.clear();
-        outputs.push_back(inputs[0]);
+        outputs.resize(1, inputs[0]);
        int cAxis = clamp(axis, inputs[0]);

        int axisSum = 0;

@@ -73,25 +73,33 @@ public:
        {
            MatShape curShape = inputs[i];

-            CV_Assert(curShape.size() == outputs.back().size());
-            for (int curAxis = 0; curAxis < outputs.back().size(); curAxis++)
+            if (padding)
            {
-                if (curAxis != cAxis && outputs.back()[curAxis] != curShape[curAxis])
-                    CV_Error(Error::StsBadSize, "Inconsitent shape for ConcatLayer");
+                for (int curAxis = 0; curAxis < outputs[0].size(); curAxis++)
+                {
+                    outputs[0][curAxis] = std::max(outputs[0][curAxis], curShape[curAxis]);
+                }
+            }
+            else
+            {
+                CV_Assert(curShape.size() == outputs[0].size());
+                for (int curAxis = 0; curAxis < outputs[0].size(); curAxis++)
+                {
+                    if (curAxis != cAxis && outputs[0][curAxis] != curShape[curAxis])
+                        CV_Error(Error::StsBadSize, "Inconsitent shape for ConcatLayer");
+                }
            }

            axisSum += curShape[cAxis];
        }
-        outputs.back()[cAxis] = axisSum;
+        outputs[0][cAxis] = axisSum;

        return false;
    }

    virtual bool supportBackend(int backendId)
    {
        return backendId == DNN_BACKEND_DEFAULT ||
-               backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1;  // By channels
+               backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding;  // By channels
    }

    class ChannelConcatInvoker : public ParallelLoopBody

@@ -174,7 +182,10 @@ public:
        int cAxis = clamp(axis, inputs[0]->dims);
        Mat& outMat = outputs[0];
+
+        if (padding)
+            outMat.setTo(0);
+
-        if( cAxis == 1 && outMat.dims == 4 )
+        if( cAxis == 1 && outMat.dims == 4 && !padding)
        {
            int nstripes = getNumThreads();
            ChannelConcatInvoker::run(inputs, outMat, nstripes);

@@ -187,6 +198,12 @@ public:
            for (size_t i = 0; i < inputs.size(); i++)
            {
                ranges[cAxis].end = ranges[cAxis].start + inputs[i]->size[cAxis];
+                for (int j = 0; j < outMat.dims; ++j)
+                {
+                    if (j == cAxis) continue;
+                    ranges[j].start = (outMat.size[j] - inputs[i]->size[j]) / 2;
+                    ranges[j].end = ranges[j].start + inputs[i]->size[j];
+                }
                inputs[i]->copyTo(outMat(&ranges[0]));
                ranges[cAxis].start = ranges[cAxis].end;
            }
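In the padded branch of forward, each input is copied into a centered window of the zero-filled output: along every non-concatenation axis the copy window starts at (outSize - inSize) / 2. A hand-worked check of that offset arithmetic, using hypothetical blob sizes rather than anything from the commit:

#include <cstdio>

int main()
{
    // Hypothetical DepthConcat of two blobs along the channel axis:
    // input A is 1x3x8x8, input B is 1x5x6x6, so the output is 1x8x8x8.
    int outSize[] = {1, 8, 8, 8};
    int bSize[]   = {1, 5, 6, 6};
    // For each spatial axis of B, the copy window is centered:
    for (int j = 2; j < 4; ++j)
    {
        int start = (outSize[j] - bSize[j]) / 2;       // (8 - 6) / 2 = 1
        int end   = start + bSize[j];                  // 1 + 6 = 7
        printf("axis %d: [%d, %d)\n", j, start, end);  // B fills rows/cols 1..6
    }
    return 0;
}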
modules/dnn/src/layers/convolution_layer.cpp

@@ -187,7 +187,7 @@ public:
        }

        int ngroups = inpCn / blobs[0].size[1];
-        CV_Assert(inpCn % ngroups == 0 && outCn % ngroups == 0);
+        CV_Assert(ngroups > 0 && inpCn % ngroups == 0 && outCn % ngroups == 0);

        int dims[] = {inputs[0][0], outCn, out.height, out.width};
        outputs.resize(inputs.size(), shape(dims));
modules/dnn/src/layers/lp_normalize_layer.cpp (new file, mode 100644, +78 −0)

// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "../precomp.hpp"
#include "layers_common.hpp"
#include <iostream>

namespace cv { namespace dnn {

class LPNormalizeLayerImpl : public LPNormalizeLayer
{
public:

    LPNormalizeLayerImpl(const LayerParams& params)
    {
        setParamsFrom(params);
        pnorm = params.get<float>("p", 2);
        epsilon = params.get<float>("eps", 1e-10f);
        CV_Assert(pnorm > 0);
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const
    {
        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
        if (pnorm != 1 && pnorm != 2)
        {
            internals.resize(1, inputs[0]);
        }
        return true;
    }

    virtual bool supportBackend(int backendId)
    {
        return backendId == DNN_BACKEND_DEFAULT;
    }

    void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals)
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_Assert(inputs[0]->total() == outputs[0].total());
        float norm;
        if (pnorm == 1)
            norm = cv::norm(*inputs[0], NORM_L1);
        else if (pnorm == 2)
            norm = cv::norm(*inputs[0], NORM_L2);
        else
        {
            pow(abs(*inputs[0]), pnorm, internals[0]);
            norm = pow(sum(internals[0])[0], 1.0f / pnorm);
        }
        multiply(*inputs[0], 1.0f / (norm + epsilon), outputs[0]);
    }

    int64 getFLOPS(const std::vector<MatShape> &inputs,
                   const std::vector<MatShape> &) const
    {
        int64 flops = 0;
        for (int i = 0; i < inputs.size(); i++)
            flops += 3 * total(inputs[i]);
        return flops;
    }
};

Ptr<LPNormalizeLayer> LPNormalizeLayer::create(const LayerParams& params)
{
    return Ptr<LPNormalizeLayer>(new LPNormalizeLayerImpl(params));
}

}  // namespace dnn
}  // namespace cv
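The layer computes y = x / (||x||_p + eps) over the whole input blob, with closed-form norms for p = 1 and p = 2 and the generic pow/sum/pow path otherwise. A minimal usage sketch, assuming a build that includes this commit so LPNormalizeLayer::create exists; the forward(vector<Mat*>&, vector<Mat>&, vector<Mat>&) call convention follows this file and may differ in other OpenCV releases:

#include <opencv2/core.hpp>
#include <opencv2/dnn.hpp>
#include <cstdio>
#include <vector>

using namespace cv;
using namespace cv::dnn;

int main()
{
    LayerParams lp;
    lp.set("p", 2.0f);      // pnorm = 2 -> y = x / (||x||_2 + eps)
    lp.set("eps", 1e-10f);
    Ptr<LPNormalizeLayer> layer = LPNormalizeLayer::create(lp);

    // 1x4 blob whose L2 norm is 5.
    Mat input = (Mat_<float>(1, 4) << 3, 4, 0, 0);
    Mat output(input.size(), input.type());

    std::vector<Mat*> inputs(1, &input);
    std::vector<Mat> outputs(1, output), internals;
    layer->forward(inputs, outputs, internals);

    printf("%f\n", outputs[0].at<float>(0, 0));  // 3 / 5 = 0.600000
    return 0;
}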
modules/dnn/src/layers/pooling_layer.cpp

@@ -54,7 +54,6 @@ namespace cv
 namespace dnn
 {

-//TODO: add ceil_mode param
 class PoolingLayerImpl : public PoolingLayer
 {
 public:

@@ -79,6 +78,7 @@ public:
        getPoolingKernelParams(params, kernel.height, kernel.width, globalPooling,
                               pad.height, pad.width, stride.height, stride.width, padMode);
        setParamsFrom(params);
+        ceilMode = params.get<bool>("ceil_mode", true);
    }

    void finalize(const std::vector<Mat*> &inputs, std::vector<Mat> &outputs)

@@ -572,11 +572,10 @@ public:
        }
        else if (padMode.empty())
        {
-            //Yeah, something strange Caffe scheme-)
-            out.height = static_cast<int>(ceil(static_cast<float>(in.height + 2 * pad.height -
-                kernel.height) / stride.height)) + 1;
-            out.width = static_cast<int>(ceil(static_cast<float>(in.width + 2 * pad.width -
-                kernel.width) / stride.width)) + 1;
+            float height = (float)(in.height + 2 * pad.height - kernel.height) / stride.height;
+            float width = (float)(in.width + 2 * pad.width - kernel.width) / stride.width;
+            out.height = 1 + (ceilMode ? ceil(height) : floor(height));
+            out.width = 1 + (ceilMode ? ceil(width) : floor(width));

            if (pad.height || pad.width)
            {
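The previous code always rounded up, following Caffe's convention; Torch rounds down, so the importer needs ceil_mode=false. Either way the output size is out = 1 + ceil_or_floor((in + 2*pad - kernel) / stride). A self-contained re-derivation of the formula showing where the two modes diverge:

#include <cmath>
#include <cstdio>

// Pooling output size as computed in the patched branch above.
int poolOutSize(int in, int kernel, int pad, int stride, bool ceilMode)
{
    float v = (float)(in + 2 * pad - kernel) / stride;
    return 1 + (int)(ceilMode ? std::ceil(v) : std::floor(v));
}

int main()
{
    // in = 6, kernel = 3, stride = 2, pad = 0: (6 - 3) / 2 = 1.5
    printf("ceil mode:  %d\n", poolOutSize(6, 3, 0, 2, true));   // 1 + 2 = 3
    printf("floor mode: %d\n", poolOutSize(6, 3, 0, 2, false));  // 1 + 1 = 2
    return 0;
}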
modules/dnn/src/layers/reshape_layer.cpp

@@ -75,12 +75,21 @@ static void computeShapeByReshapeMask(const MatShape &srcShape,
     if (explicitMask)
     {
         int maskTotal = total(maskShape);
-        for (int i = srcRange.start + 1; i < srcRange.end; ++i)
+        // Go from the end of mask until we collect required total.
+        bool matched = false;
+        for (int i = srcRange.end - 1; i >= srcRange.start; --i)
        {
-            if (total(srcShape, i, srcRange.end) != maskTotal)
+            if (matched)
            {
-                srcRange.start = i - 1;
-                break;
+                if (i == 0 || total(srcShape, i, srcRange.end) != maskTotal)
+                {
+                    srcRange.start = i + 1;
+                    break;
+                }
+            }
+            else
+            {
+                matched = total(srcShape, i, srcRange.end) == maskTotal;
            }
        }
        CV_Assert(total(srcShape, srcRange.start, srcRange.end) == maskTotal);
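The rewritten loop scans backward from the end of srcShape and, once a suffix whose product equals the mask's total is found, keeps extending it over leading axes as long as the total still matches, so it settles on the longest such suffix. A standalone trace of that backward scan, with hypothetical shapes and a re-implemented total that mirrors cv::dnn's helper:

#include <cassert>
#include <vector>

// Product of shape[start..end) -- stands in for cv::dnn::total().
static int total(const std::vector<int>& shape, int start, int end)
{
    int p = 1;
    for (int i = start; i < end; ++i)
        p *= shape[i];
    return p;
}

int main()
{
    // srcShape = 4x2x3; the reshape mask covers 6 elements (e.g. mask 2x3).
    std::vector<int> srcShape;
    srcShape.push_back(4); srcShape.push_back(2); srcShape.push_back(3);
    int maskTotal = 6;
    int start = 0, end = 3;

    // Backward scan as in the patched loop.
    bool matched = false;
    for (int i = end - 1; i >= start; --i)
    {
        if (matched)
        {
            if (i == 0 || total(srcShape, i, end) != maskTotal)
            {
                start = i + 1;  // i = 0: total(0,3) = 24 != 6, so start = 1
                break;
            }
        }
        else
            matched = total(srcShape, i, end) == maskTotal;  // true at i = 1
    }
    assert(start == 1 && total(srcShape, start, end) == maskTotal);
    return 0;
}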
modules/dnn/src/torch/torch_importer.cpp

(This diff is collapsed.)
modules/dnn/test/test_torch_importer.cpp

@@ -56,11 +56,11 @@ using namespace cv::dnn;
 template<typename TStr>
 static std::string _tf(TStr filename, bool inTorchDir = true)
 {
-    String path = getOpenCVExtraDir() + "/dnn/";
+    String path = "dnn/";
    if (inTorchDir)
        path += "torch/";
    path += filename;
-    return path;
+    return findDataFile(path, false);
}

TEST(Torch_Importer, simple_read)

@@ -123,6 +123,7 @@ TEST(Torch_Importer, run_reshape)
     runTorchNet("net_reshape");
     runTorchNet("net_reshape_batch");
     runTorchNet("net_reshape_single_sample");
+    runTorchNet("net_reshape_channels", "", false, true);
 }

 TEST(Torch_Importer, run_linear)

@@ -138,6 +139,7 @@ TEST(Torch_Importer, run_paralel)
 TEST(Torch_Importer, run_concat)
 {
     runTorchNet("net_concat", "l5_torchMerge");
+    runTorchNet("net_depth_concat", "", false, true);
 }

 TEST(Torch_Importer, run_deconv)

@@ -172,6 +174,27 @@ TEST(Torch_Importer, net_logsoftmax)
     runTorchNet("net_logsoftmax_spatial");
 }

+TEST(Torch_Importer, net_lp_pooling)
+{
+    runTorchNet("net_lp_pooling_square", "", false, true);
+    runTorchNet("net_lp_pooling_power", "", false, true);
+}
+
+TEST(Torch_Importer, net_conv_gemm_lrn)
+{
+    runTorchNet("net_conv_gemm_lrn", "", false, true);
+}
+
+TEST(Torch_Importer, net_inception_block)
+{
+    runTorchNet("net_inception_block", "", false, true);
+}
+
+TEST(Torch_Importer, net_normalize)
+{
+    runTorchNet("net_normalize", "", false, true);
+}
+
 TEST(Torch_Importer, ENet_accuracy)
 {
     Net net;

@@ -202,6 +225,26 @@ TEST(Torch_Importer, ENet_accuracy)
     }
 }

+TEST(Torch_Importer, OpenFace_accuracy)
+{
+    const string model = findDataFile("dnn/openface_nn4.small2.v1.t7", false);
+    Net net = readNetFromTorch(model);
+
+    Mat sample = imread(findDataFile("cv/shared/lena.png", false));
+    Mat sampleF32(sample.size(), CV_32FC3);
+    sample.convertTo(sampleF32, sampleF32.type());
+    sampleF32 /= 255;
+    resize(sampleF32, sampleF32, Size(96, 96), 0, 0, INTER_NEAREST);
+
+    Mat inputBlob = blobFromImage(sampleF32);
+
+    net.setInput(inputBlob);
+    Mat out = net.forward();
+
+    Mat outRef = readTorchBlob(_tf("net_openface_output.dat"), true);
+    normAssert(out, outRef);
+}
+
 }
 #endif