opencv_contrib / Commits

Commit a5d0ef52 authored Apr 28, 2017 by Aleksandr Rybnikov

Added statistics functions

parent 9b73fee2

Showing 16 changed files with 412 additions and 0 deletions
modules/dnn/include/opencv2/dnn/dnn.hpp            +46   -0
modules/dnn/src/dnn.cpp                            +138  -0
modules/dnn/src/layers/batch_norm_layer.cpp        +14   -0
modules/dnn/src/layers/convolution_layer.cpp       +30   -0
modules/dnn/src/layers/elementwise_layers.cpp      +39   -0
modules/dnn/src/layers/eltwise_layer.cpp           +11   -0
modules/dnn/src/layers/fully_connected_layer.cpp   +16   -0
modules/dnn/src/layers/lrn_layer.cpp               +29   -0
modules/dnn/src/layers/mvn_layer.cpp               +12   -0
modules/dnn/src/layers/pooling_layer.cpp           +21   -0
modules/dnn/src/layers/prior_box_layer.cpp         +14   -0
modules/dnn/src/layers/scale_layer.cpp             +12   -0
modules/dnn/src/layers/shift_layer.cpp             +14   -0
modules/dnn/src/layers/softmax_layer.cpp           +14   -0
modules/dnn/test/test_caffe_importer.cpp           +1    -0
modules/dnn/test/test_torch_importer.cpp           +1    -0
modules/dnn/include/opencv2/dnn/dnn.hpp

@@ -135,6 +135,8 @@ namespace dnn //! This namespace is used for dnn module functionality.
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const;

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const {(void)inputs; (void)outputs; return 0;}

    CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes.
    CV_PROP String type; //!< Type name which was used for creating layer by layer factory.

@@ -323,6 +325,50 @@ namespace dnn //! This namespace is used for dnn module functionality.
                        const int layerId,
                        std::vector<MatShape>* inLayerShapes,
                        std::vector<MatShape>* outLayerShapes) const;

    /** @brief Computes FLOP for the whole loaded model with the specified input shapes.
     * @param netInputShapes vector of shapes for all net inputs.
     * @returns computed FLOP.
     */
    CV_WRAP int64 getFLOPS(const std::vector<MatShape>& netInputShapes) const;
    /** @overload */
    CV_WRAP int64 getFLOPS(const MatShape& netInputShape) const;
    /** @overload */
    CV_WRAP int64 getFLOPS(const int layerId,
                           const std::vector<MatShape>& netInputShapes) const;
    /** @overload */
    CV_WRAP int64 getFLOPS(const int layerId,
                           const MatShape& netInputShape) const;

    /** @brief Returns the list of types for layers used in the model.
     * @param layersTypes output parameter for returning types.
     */
    CV_WRAP void getLayerTypes(std::vector<String>& layersTypes) const;

    /** @brief Returns count of layers of specified type.
     * @param layerType type.
     * @returns count of layers
     */
    CV_WRAP int getLayersCount(const String& layerType) const;

    /** @brief Computes the number of bytes required to store
     * all weights and intermediate blobs for the model.
     * @param netInputShapes vector of shapes for all net inputs.
     * @param weights output parameter to store resulting bytes for weights.
     * @param blobs output parameter to store resulting bytes for intermediate blobs.
     */
    CV_WRAP void getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
                                      size_t& weights, size_t& blobs) const;
    /** @overload */
    CV_WRAP void getMemoryConsumption(const MatShape& netInputShape,
                                      size_t& weights, size_t& blobs) const;
    /** @overload */
    CV_WRAP void getMemoryConsumption(const int layerId,
                                      const std::vector<MatShape>& netInputShapes,
                                      size_t& weights, size_t& blobs) const;
    /** @overload */
    CV_WRAP void getMemoryConsumption(const int layerId,
                                      const MatShape& netInputShape,
                                      size_t& weights, size_t& blobs) const;

private:
    struct Impl;
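Taken together, these declarations give Net a small profiling surface. A minimal usage sketch, assuming readNetFromCaffe is available at this revision and using placeholder model file names (error handling omitted):

    #include <iostream>
    #include <opencv2/dnn.hpp>

    int main()
    {
        using namespace cv;
        using namespace cv::dnn;

        // Placeholder file names; any importer that yields a Net works here.
        Net net = readNetFromCaffe("model.prototxt", "model.caffemodel");

        // MatShape is a std::vector<int>; describe one NCHW input of 1x3x224x224.
        MatShape inputShape;
        inputShape.push_back(1);
        inputShape.push_back(3);
        inputShape.push_back(224);
        inputShape.push_back(224);

        // FLOP estimate for a full forward pass with this input shape.
        int64 flops = net.getFLOPS(inputShape);
        std::cout << "estimated FLOPs: " << flops << std::endl;
        return 0;
    }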
modules/dnn/src/dnn.cpp

@@ -876,6 +876,144 @@ void Net::getLayerShapes(const Net::Impl::ShapesVec& netInputShapes,
    *outLayerShapes = shapes.out;
}

int64 Net::getFLOPS(const std::vector<MatShape>& netInputShapes) const
{
    int64 flops = 0;
    std::vector<int> ids;
    std::vector<std::vector<MatShape> > inShapes, outShapes;
    getLayersShapes(netInputShapes, &ids, &inShapes, &outShapes);
    CV_Assert(inShapes.size() == outShapes.size());
    CV_Assert(inShapes.size() == ids.size());

    for (int i = 0; i < ids.size(); i++)
    {
        flops += impl->layers[ids[i]].getLayerInstance()->getFLOPS(inShapes[i], outShapes[i]);
    }

    return flops;
}

int64 Net::getFLOPS(const MatShape& netInputShape) const
{
    return getFLOPS(std::vector<MatShape>(1, netInputShape));
}

int64 Net::getFLOPS(const int layerId,
                    const std::vector<MatShape>& netInputShapes) const
{
    Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId);
    CV_Assert(layer != impl->layers.end());

    Impl::LayerShapes shapes;
    impl->getLayerShapes(netInputShapes, layerId, shapes);

    return layer->second.getLayerInstance()->getFLOPS(shapes.in, shapes.out);
}

int64 Net::getFLOPS(const int layerId,
                    const MatShape& netInputShape) const
{
    return getFLOPS(layerId, std::vector<MatShape>(1, netInputShape));
}

void Net::getLayerTypes(std::vector<String>& layersTypes) const
{
    layersTypes.clear();

    std::map<String, int> layers;
    for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
         it != impl->layers.end(); it++)
    {
        if (layers.find(it->second.type) == layers.end())
            layers[it->second.type] = 0;
        layers[it->second.type]++;
    }

    for (std::map<String, int>::iterator it = layers.begin();
         it != layers.end(); it++)
    {
        layersTypes.push_back(it->first);
    }
}

int Net::getLayersCount(const String& layerType) const
{
    int count = 0;
    for (Impl::MapIdToLayerData::iterator it = impl->layers.begin();
         it != impl->layers.end(); it++)
    {
        if (it->second.type == layerType)
            count++;
    }
    return count;
}

void Net::getMemoryConsumption(const int layerId,
                               const std::vector<MatShape>& netInputShapes,
                               size_t& weights, size_t& blobs) const
{
    Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerId);
    CV_Assert(layer != impl->layers.end());

    weights = blobs = 0;

    for (int i = 0; i < layer->second.params.blobs.size(); i++)
    {
        const Mat& weightsBlob = layer->second.params.blobs[i];
        weights += weightsBlob.total()*weightsBlob.elemSize();
    }

    std::vector<MatShape> outLayerShapes;
    getLayerShapes(netInputShapes, layerId, 0, &outLayerShapes);
    for (int i = 0; i < outLayerShapes.size(); i++)
    {
        blobs += total(outLayerShapes[i]) * sizeof(float);
    }
}

void Net::getMemoryConsumption(const std::vector<MatShape>& netInputShapes,
                               size_t& weights, size_t& blobs) const
{
    std::vector<int> layerIds;
    std::vector<std::vector<MatShape> > outLayerShapes;

    getLayersShapes(netInputShapes, &layerIds, 0, &outLayerShapes);

    weights = blobs = 0;
    for (int i = 0; i < layerIds.size(); i++)
    {
        Impl::MapIdToLayerData::iterator layer = impl->layers.find(layerIds[i]);
        CV_Assert(layer != impl->layers.end());

        for (int j = 0; j < layer->second.params.blobs.size(); j++)
        {
            const Mat& weightsBlob = layer->second.params.blobs[j];
            weights += weightsBlob.total()*weightsBlob.elemSize();
        }

        for (int j = 0; j < outLayerShapes[i].size(); j++)
        {
            blobs += total(outLayerShapes[i][j]) * sizeof(float);
        }
    }
}

void Net::getMemoryConsumption(const int layerId,
                               const MatShape& netInputShape,
                               size_t& weights, size_t& blobs) const
{
    getMemoryConsumption(layerId, std::vector<MatShape>(1, netInputShape),
                         weights, blobs);
}

void Net::getMemoryConsumption(const MatShape& netInputShape,
                               size_t& weights, size_t& blobs) const
{
    getMemoryConsumption(std::vector<MatShape>(1, netInputShape),
                         weights, blobs);
}

//////////////////////////////////////////////////////////////////////////

Importer::~Importer() {}
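The memory accounting sums total() * elemSize() over each layer's weight blobs and assumes 4-byte float storage for every intermediate output shape. A short sketch of querying it together with the layer-type helpers, continuing the hypothetical net and inputShape from the sketch above:

    size_t weights = 0, blobs = 0;
    net.getMemoryConsumption(inputShape, weights, blobs);
    std::cout << "weights: " << weights << " bytes, "
              << "intermediate blobs: " << blobs << " bytes" << std::endl;

    // Enumerate layer types and count the layers of each type.
    std::vector<String> types;
    net.getLayerTypes(types);
    for (size_t i = 0; i < types.size(); i++)
        std::cout << types[i] << " x " << net.getLayersCount(types[i]) << std::endl;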
modules/dnn/src/layers/batch_norm_layer.cpp

@@ -10,6 +10,7 @@ Implementation of Batch Normalization layer.
*/

#include "../precomp.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cv
{

@@ -78,6 +79,19 @@ public:
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)outputs; // suppress unused variable warning

        int64 flops = 0;
        for (int i = 0; i < inputs.size(); i++)
        {
            flops += 3*total(inputs[i]);
        }
        return flops;
    }

    bool hasWeights, hasBias;
    float epsilon;
};
modules/dnn/src/layers/convolution_layer.cpp

@@ -224,6 +224,20 @@ public:
                   dilation.height, dilation.width, outH, outW, dstRow.ptr<float>());
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        CV_Assert(inputs.size() == outputs.size());

        int64 flops = 0;
        for (int i = 0; i < inputs.size(); i++)
        {
            flops += total(outputs[i])*(2*kernel.area()*inputs[i][1] + 1);
        }

        return flops;
    }
};

class DeConvolutionLayerImpl : public BaseConvolutionLayerImpl

@@ -339,6 +353,22 @@ public:
               dilation.height, dilation.width, dstImg.ptr<float>(), &ofsbuf[0]);
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        CV_Assert(inputs.size() == outputs.size());

        int64 flops = 0;
        int outChannels = blobs[0].size[0];

        for (int i = 0; i < inputs.size(); i++)
        {
            flops += 2*outChannels*kernel.area()*total(inputs[i]);
        }

        return flops;
    }

    std::vector<int> ofsbuf;
};
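The convolution estimate charges each output element two operations per kernel tap and input channel (multiply and add), plus one bias addition. A worked example under hypothetical shapes:

    // Hypothetical layer: 3x3 kernel, 64 input channels, one output blob
    // of shape [1, 128, 56, 56], so total(outputs[0]) == 401408.
    long long perElement  = 2 * (3 * 3) * 64 + 1;      // 1153 ops per output element
    long long outElements = 1LL * 128 * 56 * 56;       // 401408
    long long flops       = outElements * perElement;  // 462823424, ~0.46 GFLOPs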
modules/dnn/src/layers/elementwise_layers.cpp

@@ -63,6 +63,17 @@ public:
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        long flops = 0;
        for (int i = 0; i < outputs.size(); i++)
        {
            flops += total(outputs[i]) * func.getFLOPSPerElement();
        }
        return flops;
    }

    Func func;
    bool run_parallel;
};

@@ -79,6 +90,8 @@ struct ReLUFunctor
    {
        return (x >= (TFloat)0) ? x : (TFloat)slope * x;
    }

    int64 getFLOPSPerElement() const { return 1; }
};

struct TanHFunctor

@@ -90,6 +103,8 @@ struct TanHFunctor
    {
        return tanh(x);
    }

    int64 getFLOPSPerElement() const { return 1; }
};

struct SigmoidFunctor

@@ -101,6 +116,8 @@ struct SigmoidFunctor
    {
        return (TFloat)1 / ((TFloat)1 + exp(-x));
    }

    int64 getFLOPSPerElement() const { return 3; }
};

struct AbsValFunctor

@@ -112,6 +129,8 @@ struct AbsValFunctor
    {
        return abs(x);
    }

    int64 getFLOPSPerElement() const { return 1; }
};

struct BNLLFunctor

@@ -123,6 +142,8 @@ struct BNLLFunctor
    {
        return log((TFloat)1 + exp(-abs(x)));
    }

    int64 getFLOPSPerElement() const { return 5; }
};

struct PowerFunctor

@@ -141,6 +162,8 @@ struct PowerFunctor
    {
        return pow((TFloat)shift + (TFloat)scale * x, (TFloat)power);
    }

    int64 getFLOPSPerElement() const { return 3; }
};

struct PowerFunctor1

@@ -158,6 +181,8 @@ struct PowerFunctor1
    {
        return (TFloat)shift + (TFloat)scale * x;
    }

    int64 getFLOPSPerElement() const { return 2; }
};

class ChannelsPReLULayerImpl : public ChannelsPReLULayer

@@ -210,6 +235,20 @@ public:
            }
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)inputs; // suppress unused variable warning
        long flops = 0;

        for (int i = 0; i < outputs.size(); i++)
        {
            flops += total(outputs[i]) * 3;
        }

        return flops;
    }
};

#define ACTIVATION_CREATOR_FOR(_Layer, _Functor, ...) \
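The functors report coarse per-element cost heuristics rather than exact instruction counts; the sigmoid, for example, is counted as 3 operations per element. A worked example under an assumed blob shape:

    // Assumed output blob of shape [1, 64, 112, 112] passed through a sigmoid
    // (SigmoidFunctor::getFLOPSPerElement() == 3).
    long long elements = 1LL * 64 * 112 * 112;  // total(outputs[0]) == 802816
    long long flops    = elements * 3;          // 2408448 FLOPs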
modules/dnn/src/layers/eltwise_layer.cpp

@@ -143,6 +143,17 @@ public:
            break;
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)outputs; // suppress unused variable warning
        CV_Assert(inputs.size());

        long flops = inputs.size() * total(inputs[0]);

        return flops;
    }
};

Ptr<EltwiseLayer> EltwiseLayer::create(const LayerParams& params)
modules/dnn/src/layers/fully_connected_layer.cpp

@@ -117,6 +117,22 @@ public:
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)inputs; // suppress unused variable warning
        long flops = 0;

        int innerSize = blobs[0].size[1];
        for (int i = 0; i < outputs.size(); i++)
        {
            flops += 3*innerSize*total(outputs[i]);
        }

        return flops;
    }

    bool bias;
};
modules/dnn/src/layers/lrn_layer.cpp

@@ -171,6 +171,35 @@ public:
            }
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)outputs; // suppress unused variable warning
        CV_Assert(inputs.size() > 0);
        long flops = 0;

        for (int i = 0; i < inputs.size(); i++)
        {
            if (type == CHANNEL_NRM)
            {
                int channels = inputs[i][1];
                int ksize = (size - 1) / 2;

                flops += inputs[i][0]*(std::min(ksize, channels)*2*total(inputs[i], 2) +
                                       channels*4*total(inputs[i], 2));

                if (ksize < channels)
                {
                    flops += (size + 2*(channels - size))*total(inputs[i], 2);
                }
            }
            else
            {
                flops += total(inputs[i])*(2*size*size + 2);
            }
        }
        return flops;
    }
};

Ptr<LRNLayer> LRNLayer::create(const LayerParams& params)
modules/dnn/src/layers/mvn_layer.cpp

@@ -85,6 +85,18 @@ public:
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)outputs; // suppress unused variable warning
        long flops = 0;
        for (int i = 0; i < inputs.size(); i++)
        {
            flops += 6*total(inputs[i]) + 3*total(inputs[i], 0, normVariance ? 2 : 1);
        }
        return flops;
    }
};

Ptr<MVNLayer> MVNLayer::create(const LayerParams& params)
modules/dnn/src/layers/pooling_layer.cpp

@@ -241,6 +241,27 @@ public:
        return false;
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)inputs; // suppress unused variable warning
        long flops = 0;

        for (int i = 0; i < outputs.size(); i++)
        {
            if (type == MAX)
            {
                if (i%2 == 0)
                    flops += total(outputs[i])*kernel.area();
            }
            else
            {
                flops += total(outputs[i])*(kernel.area() + 1);
            }
        }

        return flops;
    }
};

Ptr<PoolingLayer> PoolingLayer::create(const LayerParams& params)
modules/dnn/src/layers/prior_box_layer.cpp

@@ -312,6 +312,20 @@ public:
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)outputs; // suppress unused variable warning
        long flops = 0;

        for (int i = 0; i < inputs.size(); i++)
        {
            flops += total(inputs[i], 2) * _numPriors * 4;
        }

        return flops;
    }

    float _minSize;
    float _maxSize;
modules/dnn/src/layers/scale_layer.cpp

@@ -56,6 +56,18 @@ public:
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)outputs; // suppress unused variable warning
        long flops = 0;
        for (int i = 0; i < inputs.size(); i++)
        {
            flops += 2*total(inputs[i]);
        }
        return flops;
    }

    bool hasBias;
};
modules/dnn/src/layers/shift_layer.cpp

@@ -81,6 +81,20 @@ public:
        }
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const
    {
        (void)outputs; // suppress unused variable warning
        long flops = 0;

        for (int i = 0; i < inputs.size(); i++)
        {
            flops += total(inputs[i]);
        }

        return flops;
    }
};

Ptr<ShiftLayer> ShiftLayer::create(const LayerParams& params)
modules/dnn/src/layers/softmax_layer.cpp

@@ -146,6 +146,20 @@ public:
        }
    }

    int64 getFLOPS(const std::vector<MatShape> &inputs,
                   const std::vector<MatShape> &outputs) const
    {
        (void)outputs; // suppress unused variable warning
        int64 flops = 0;

        for (int i = 0; i < inputs.size(); i++)
        {
            flops += 4*total(inputs[i]);
        }

        return flops;
    }

    int axisRaw;
};
modules/dnn/test/test_caffe_importer.cpp

@@ -41,6 +41,7 @@
#include "test_precomp.hpp"
#include "npy_blob.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cvtest
{
modules/dnn/test/test_torch_importer.cpp

@@ -43,6 +43,7 @@
#include "test_precomp.hpp"
#include "npy_blob.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cvtest
{