opencv_contrib · Commits · e33da71a

Commit e33da71a, authored Jul 28, 2016 by Vitaliy Lyudvichenko
Parent: 909a0022

    Adding of setTo method for Blob, fixing of UMat allocation

Showing 6 changed files with 79 additions and 11 deletions (+79 -11)
modules/dnn/include/opencv2/dnn/blob.hpp            +9   -1
modules/dnn/src/blob.cpp                            +61  -0
modules/dnn/src/layers/convolution_layer.cpp        +3   -5
modules/dnn/src/layers/elementwise_layers.hpp       +4   -0
modules/dnn/src/layers/fully_connected_layer.cpp    +1   -4
modules/dnn/src/layers/lrn_layer.cpp                +1   -1
modules/dnn/include/opencv2/dnn/blob.hpp

@@ -164,13 +164,21 @@ namespace dnn
         /** @brief Creates blob with specified @p shape and @p type. */
         void create(const BlobShape &shape, int type = CV_32F, int allocFlags = ALLOC_MAT);

-        /** @brief Creates blob from cv::Mat or cv::UMat without copying the data */
+        /** @brief Creates blob from Mat or UMat without copying the data.
+          * @details If in is Mat then Mat data is populated, otherwise - UMat.
+          */
         void fill(InputArray in);

         /** @brief Creates blob from user data.
           * @details If @p deepCopy is false then CPU data will not be allocated.
           */
         void fill(const BlobShape &shape, int type, void *data, bool deepCopy = true);

+        /** @brief Sets @p value to the last used data (if @p allocFlags = -1).
+          * @details If @p allocFlags != -1 then destination data (Mat or UMat) is determined by flags from AllocFlag enum like in create().
+          */
+        void setTo(InputArray value, int allocFlags = -1);

         Mat& matRef(bool writeOnly = true);        //!< Returns reference to cv::Mat, containing blob data.
         const Mat& matRefConst() const;            //!< Returns reference to cv::Mat, containing blob data, for read-only purposes.
         UMat& umatRef(bool writeOnly = true);      //!< Returns reference to cv::UMat, containing blob data.
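The new setTo() rounds out the header's allocation story: with the default allocFlags = -1 it writes to whichever buffer last held the data, while an explicit AllocFlag forces the Mat or UMat side. A minimal usage sketch under that reading; Blob, BlobShape, and the ALLOC_* flags all come from this header, and nothing beyond them is assumed:

#include <opencv2/dnn/blob.hpp>

int main()
{
    cv::dnn::Blob b;

    // Allocate a 1x3x4x4 float blob on the Mat (CPU) side only.
    b.create(cv::dnn::BlobShape(1, 3, 4, 4), CV_32F, cv::dnn::Blob::ALLOC_MAT);

    // Default allocFlags = -1: write to the last-used buffer, here the Mat.
    b.setTo(0);

    // Explicit flag: route the write to the UMat side instead,
    // allocating it through umatRef() if it does not exist yet.
    b.setTo(1, cv::dnn::Blob::ALLOC_UMAT);

    return 0;
}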
modules/dnn/src/blob.cpp

@@ -101,6 +101,26 @@ namespace dnn
 #endif
 }

+void Blob::fill(InputArray in)
+{
+#ifdef CV_DNN_UMAT
+    CV_Assert(in.isMat() || in.isUMat());
+    if (in.isMat())
+    {
+        m = in.getMat();
+        state = HEAD_AT_MAT;
+    }
+    else
+    {
+        um = in.getUMat();
+        state = HEAD_AT_UMAT;
+    }
+#else
+    CV_Assert(in.isMat());
+    m = in.getMat();
+#endif
+}
+
 static inline int getMatChannels(const Mat &mat)
 {
     return (mat.dims <= 2) ? mat.channels() : mat.size[0];

@@ -226,6 +246,47 @@ namespace dnn
     CV_DNN_UMAT_ONLY(state = HEAD_AT_MAT);
 }

+void Blob::setTo(InputArray value, int allocFlags)
+{
+#ifdef CV_DNN_UMAT
+    if (allocFlags == -1)
+    {
+        if (state == HEAD_AT_UMAT)
+            um.setTo(value);
+        else if (state == HEAD_AT_MAT)
+            m.setTo(value);
+        else //SYNCED or UNINITIALIZED
+        {
+            um.setTo(value);
+            m.setTo(value);
+            if (state == UNINITIALIZED)
+                state = SYNCED;
+        }
+    }
+    else if (allocFlags == ALLOC_BOTH)
+    {
+        m.setTo(value);
+        um.setTo(value);
+        state = SYNCED;
+    }
+    else if (allocFlags == ALLOC_MAT)
+    {
+        matRef().setTo(value);
+    }
+    else if (allocFlags == ALLOC_UMAT)
+    {
+        umatRef().setTo(value);
+    }
+    else
+    {
+        CV_Error(Error::StsBadArg, "allocFlags sholud be -1 or one of Blob::AllocFlag values");
+    }
+#else
+    m.setTo(value);
+#endif
+}
+
 void Blob::updateMat(bool syncData) const
 {
 #ifdef CV_DNN_UMAT
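For what the new fill(InputArray) buys in practice, here is a small hedged sketch: the blob aliases the caller's buffer instead of copying it, and the head state records which side (Mat or UMat) is current. The names come straight from the diff; the write-through behavior of the final setTo() follows from the HEAD_AT_MAT branch above.

#include <opencv2/core.hpp>
#include <opencv2/dnn/blob.hpp>

int main()
{
    // A 4-D float Mat in NCHW layout.
    const int sz[] = {1, 3, 4, 4};
    cv::Mat data(4, sz, CV_32F, cv::Scalar(0));

    cv::dnn::Blob b;
    b.fill(data);   // no copy: m = in.getMat(), state = HEAD_AT_MAT

    // With state == HEAD_AT_MAT, setTo() with default flags hits m, which
    // shares the buffer of `data`, so the original Mat sees the new values.
    b.setTo(7);

    return 0;
}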
modules/dnn/src/layers/convolution_layer.cpp

@@ -53,8 +53,6 @@ namespace cv
 namespace dnn
 {

-typedef BlobShape Shape;
-
 ConvolutionLayerImpl::ConvolutionLayerImpl()
 {
     tryUseOpenCL = true;

@@ -104,7 +102,7 @@ void ConvolutionLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vecto
         CV_Assert(inputs[i]->rows() == input.rows() && inputs[i]->cols() == input.cols());
     }

-    int allocFlags = useOpenCL ? Blob::ALLOC_BOTH : Blob::ALLOC_MAT;
+    int allocFlags = useOpenCL ? Blob::ALLOC_UMAT : Blob::ALLOC_MAT;

     if (!is1x1())
     {

@@ -114,13 +112,13 @@ void ConvolutionLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vecto
     if (bias)
     {
         biasOnesBlob.create(Shape(1, topH * topW), input.type(), allocFlags);
-        biasOnesBlob.matRef().setTo(1);
+        biasOnesBlob.setTo(1);
     }

     outputs.resize(inputs.size());
     for (size_t i = 0; i < inputs.size(); i++)
     {
-        outputs[i].create(Shape(inputs[i]->num(), topCn, topH, topW));
+        outputs[i].create(Shape(inputs[i]->num(), topCn, topH, topW), input.type(), allocFlags);
     }
 }
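Two things change on the OpenCL path here. First, allocFlags drops ALLOC_BOTH for ALLOC_UMAT, so an OpenCL run no longer allocates a redundant CPU copy of every buffer. Second, the output blobs are now created with the input's type and the layer's allocFlags instead of create()'s defaults (CV_32F, ALLOC_MAT per the header above), which previously forced Mat allocation even when running on OpenCL. A hedged, self-contained sketch of the allocation step; useOpenCL here is a stand-in for the layer's own decision, and the shape values are illustrative only:

#include <opencv2/dnn/blob.hpp>

int main()
{
    using cv::dnn::Blob;
    using cv::dnn::BlobShape;

    bool useOpenCL = true;            // stand-in for the layer's decision
    int allocFlags = useOpenCL ? Blob::ALLOC_UMAT : Blob::ALLOC_MAT;

    // Matching the fixed call: explicit type and allocFlags instead of the
    // defaults, so an OpenCL run allocates only the UMat.
    Blob output;
    output.create(BlobShape(1, 16, 8, 8), CV_32F, allocFlags);

    // And the bias trick: fill a 1 x H*W blob of ones wherever it lives.
    Blob biasOnes;
    biasOnes.create(BlobShape(1, 8 * 8), CV_32F, allocFlags);
    biasOnes.setTo(1);                // replaces matRef().setTo(1)

    return 0;
}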
modules/dnn/src/layers/elementwise_layers.hpp

@@ -86,7 +86,11 @@ public:
     {
         outputs.resize(inputs.size());
         for (size_t i = 0; i < inputs.size(); i++)
+        {
             outputs[i].shareFrom(*inputs[i]); //no data copy
+            //hotfix: shareFrom doesn't provide properly Mat/UMat switching
+            outputs[i].matRef() = inputs[i]->matRefConst();
+        }
     }

     void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
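The hotfix pins the output's Mat side directly to the input's: per the commit's own comment, shareFrom() does not carry the Mat/UMat head state across correctly in this version, so the in-place layers additionally alias the Mat explicitly. The line relies on cv::Mat assignment being reference-semantic, which a tiny standalone demo (plain OpenCV core, nothing dnn-specific) confirms:

#include <opencv2/core.hpp>
#include <cassert>

int main()
{
    // cv::Mat assignment shares data rather than copying, which is the
    // property the hotfix relies on when aliasing the input blob's Mat.
    cv::Mat a(2, 2, CV_32F, cv::Scalar(0));
    cv::Mat b;
    b = a;                              // b now refers to a's buffer
    b.setTo(5);
    assert(a.at<float>(0, 0) == 5.0f);  // the write is visible through a
    return 0;
}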
modules/dnn/src/layers/fully_connected_layer.cpp

@@ -76,10 +76,7 @@ void FullyConnectedLayerImpl::allocate(const std::vector<Blob*> &input, std::vec
     int allocFlags = useOpenCL ? Blob::ALLOC_UMAT : Blob::ALLOC_UMAT;

     biasOnesBlob.create(Shape(outerSize, 1), dtype, allocFlags);
-    if (useOpenCL)
-        biasOnesBlob.getRef<UMat>().setTo(1);
-    else
-        biasOnesBlob.getRef<Mat>().setTo(1);
+    biasOnesBlob.setTo(1);

     output.resize(input.size());
     for (size_t i = 0; i < input.size(); i++)
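The removed branch picked the buffer by hand through the templated getRef<T>() accessor; Blob::setTo() with its default allocFlags = -1 makes the same choice from the blob's own head state, so the call collapses to one line. (Note that the unchanged context line's ternary has two identical ALLOC_UMAT arms, so in this function the head is the UMat either way.) A minimal equivalence sketch under those assumptions, using only names the diff itself shows:

#include <opencv2/dnn/blob.hpp>

int main()
{
    using cv::dnn::Blob;
    using cv::dnn::BlobShape;

    Blob biasOnes;
    biasOnes.create(BlobShape(16, 1), CV_32F, Blob::ALLOC_UMAT);

    // Old style: the caller names the buffer explicitly.
    biasOnes.getRef<cv::UMat>().setTo(1);

    // New style: setTo() routes to the head buffer, the UMat here,
    // since create() was called with ALLOC_UMAT.
    biasOnes.setTo(1);

    return 0;
}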
modules/dnn/src/layers/lrn_layer.cpp

@@ -261,7 +261,7 @@ Ptr<Layer> createLRNLayerFromCaffe(LayerParams &params)
     double alpha = params.get<double>("alpha", 1);
     double beta = params.get<double>("beta", 0.75);

-    return Ptr<Layer>(new LRNLayerImpl(type, size, alpha, beta));
+    return Ptr<Layer>(LRNLayer::create(type, size, alpha, beta));
 }
 }
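Only the construction changes here: the Caffe importer now goes through the public LRNLayer::create() factory rather than new'ing the internal LRNLayerImpl, presumably keeping the implementation class private to the module. A hedged sketch; the CHANNEL_NRM constant is assumed from the module's LRNLayer type enum of this era, the size value is illustrative, and alpha/beta mirror the defaults read just above:

#include <opencv2/dnn.hpp>

int main()
{
    using namespace cv::dnn;

    int type = LRNLayer::CHANNEL_NRM; // assumed enum value; see note above
    int size = 5;                     // illustrative
    double alpha = 1, beta = 0.75;    // defaults from createLRNLayerFromCaffe

    // Same wrapping the commit introduces: the factory result is
    // converted to a Ptr<Layer>.
    cv::Ptr<Layer> layer(LRNLayer::create(type, size, alpha, beta));
    return 0;
}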