ngraph / Commits / 388fb89b

Commit 388fb89b authored Jun 17, 2019 by Adam Procter

Merge remote-tracking branch 'origin/master' into aprocter/dyn-replace-slice

Parents: 0b0fb5b7 ca220f7d
Showing 27 changed files with 700 additions and 41 deletions
CMakeLists.txt                                           +1   -0
src/ngraph/CMakeLists.txt                                +2   -0
src/ngraph/frontend/onnx_import/CMakeLists.txt           +2   -0
src/ngraph/frontend/onnx_import/op/conv_transpose.cpp    +0   -0
src/ngraph/frontend/onnx_import/op/shrink.cpp            +87  -0
src/ngraph/frontend/onnx_import/op/shrink.hpp            +43  -0
src/ngraph/frontend/onnx_import/op/supported_ops.md      +1   -0
src/ngraph/frontend/onnx_import/ops_bridge.cpp           +2   -0
src/ngraph/ngraph.hpp                                    +1   -0
src/ngraph/op/fused/group_conv_transpose.cpp             +0   -0
src/ngraph/op/fused/group_conv_transpose.hpp             +149 -0
src/ngraph/op/fused_op_tbl.hpp                           +1   -0
src/ngraph/op/result.cpp                                 +3   -6
src/ngraph/op/result.hpp                                 +1   -1
src/ngraph/runtime/cpu/builder/dropout.cpp               +8   -4
src/ngraph/runtime/cpu/op/dropout.cpp                    +16  -7
src/ngraph/runtime/cpu/op/dropout.hpp                    +5   -10
src/ngraph/runtime/cpu/pass/cpu_fusion.cpp               +22  -9
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp         +3   -0
src/ngraph/runtime/plaidml/unit_test.manifest            +90  -0
src/ngraph/serializer.cpp                                +47  -2
test/backend_fused_op.in.cpp                             +91  -0
test/models/onnx/shrink_float.prototxt                   +49  -0
test/models/onnx/shrink_int.prototxt                     +49  -0
test/onnx/onnx_import.in.cpp                             +26  -0
test/type_prop.cpp                                       +0   -0
test/util/test_case.hpp                                  +1   -2
CMakeLists.txt
@@ -301,6 +301,7 @@ if (LINUX)
    else()
        set(CMAKE_INSTALL_RPATH "$ORIGIN")
    endif()
    set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
    set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
endif()
src/ngraph/CMakeLists.txt
@@ -310,6 +310,8 @@ set (SRC
op/fused/grn.hpp
op/fused/group_conv.hpp
op/fused/group_conv.cpp
op/fused/group_conv_transpose.hpp
op/fused/group_conv_transpose.cpp
op/fused/leaky_relu.cpp
op/fused/leaky_relu.hpp
op/fused/mvn.cpp
src/ngraph/frontend/onnx_import/CMakeLists.txt
@@ -150,6 +150,8 @@ add_library(onnx_import STATIC
op/selu.hpp
op/shape.hpp
op/shape.cpp
op/shrink.hpp
op/shrink.cpp
op/sigmoid.hpp
op/sign.hpp
op/sin.hpp
src/ngraph/frontend/onnx_import/op/conv_transpose.cpp
This diff is collapsed.
src/ngraph/frontend/onnx_import/op/shrink.cpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <memory>
#include "exceptions.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/op/greater.hpp"
#include "ngraph/op/less.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/subtract.hpp"
#include "shrink.hpp"
namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            namespace set_1
            {
                NodeVector shrink(const Node& node)
                {
                    const auto input = node.get_ng_inputs().at(0);
                    const float bias = node.get_attribute_value<float>("bias", 0.0f);
                    const float lambd = node.get_attribute_value<float>("lambd", 0.5f);

                    ASSERT_VALID_ARGUMENT(node, !(lambd < 0.0f))
                        << " The provided 'lambd' value:" << lambd << " must not be negative.";

                    const auto negative_lambd = ngraph::op::Constant::create(
                        input->get_element_type(), input->get_shape(), {-lambd});

                    const auto positive_lambd = ngraph::op::Constant::create(
                        input->get_element_type(), input->get_shape(), {lambd});

                    const auto bias_tensor = ngraph::op::Constant::create(
                        input->get_element_type(), input->get_shape(), {bias});

                    // Create a mask indicating locations of values that need to be adjusted
                    // by adding and subtracting bias
                    // All other values indicated by 'false' in the masks need to be zeroed out
                    std::shared_ptr<ngraph::Node> values_below_neg_lambd =
                        std::make_shared<ngraph::op::Less>(input, negative_lambd);

                    std::shared_ptr<ngraph::Node> values_above_pos_lambd =
                        std::make_shared<ngraph::op::Greater>(input, positive_lambd);

                    // Convert from bool to the input type to be able to multiply adjusted inputs
                    // by the created masks
                    values_below_neg_lambd = std::make_shared<ngraph::op::Convert>(
                        values_below_neg_lambd, input->get_element_type());

                    values_above_pos_lambd = std::make_shared<ngraph::op::Convert>(
                        values_above_pos_lambd, input->get_element_type());

                    std::shared_ptr<ngraph::Node> input_minus_bias = input - bias_tensor;
                    std::shared_ptr<ngraph::Node> input_plus_bias = input + bias_tensor;

                    // multiply by the corresponding mask to zero-out the values within
                    // the <-lambd;lambd> range and keep the bias-adjusted values from outside of it
                    input_minus_bias = values_above_pos_lambd * input_minus_bias;
                    input_plus_bias = values_below_neg_lambd * input_plus_bias;

                    return {input_plus_bias + input_minus_bias};
                }

            } // namespace set_1

        } // namespace op

    } // namespace onnx_import

} // namespace ngraph
src/ngraph/frontend/onnx_import/op/shrink.hpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "core/node.hpp"
#include "ngraph/node.hpp"
namespace ngraph
{
    namespace onnx_import
    {
        namespace op
        {
            namespace set_1
            {
                /// @brief ONNX Shrink operator
                ///
                /// @note It operates on a single input tensor and two attributes: lambd and bias.
                ///       Input values greater or equal to '-lambd' and less or equal to 'lambd'
                ///       are zeroed-out. 'Bias' is added to the values that are less than '-lambd'
                ///       and subtracted from values greater than 'lambd'.
                NodeVector shrink(const Node& node);

            } // namespace set_1

        } // namespace op

    } // namespace onnx_import

} // namespace ngraph
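For reference, the element-wise rule described in the @note above can be sketched as a small standalone C++ program (an illustration only, not part of this commit; the helper name shrink_element is made up here):

#include <iostream>
#include <vector>

// Hypothetical helper illustrating the ONNX Shrink rule for one element.
float shrink_element(float x, float lambd, float bias)
{
    if (x < -lambd)
        return x + bias; // values below -lambd are shifted up by 'bias'
    if (x > lambd)
        return x - bias; // values above lambd are shifted down by 'bias'
    return 0.0f;         // values inside [-lambd, lambd] are zeroed out
}

int main()
{
    // Same attributes as test/models/onnx/shrink_float.prototxt: lambd = 1.5, bias = 0.5
    for (float v : std::vector<float>{-2.0f, -1.6f, 0.0f, 1.6f, 2.0f})
    {
        std::cout << v << " -> " << shrink_element(v, 1.5f, 0.5f) << "\n";
    }
    // Prints: -2 -> -1.5, -1.6 -> -1.1, 0 -> 0, 1.6 -> 1.1, 2 -> 1.5
    return 0;
}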
src/ngraph/frontend/onnx_import/op/supported_ops.md
@@ -84,6 +84,7 @@ opset versions starting from `1` to `6` and to the latest opset version.
| Relu | 1-6- |
| Selu | 1-6- |
| Shape | 1- |
| Shrink | 1- |
| Sigmoid | 1-6- |
| Sign | 9- |
| Sin | 7- |
src/ngraph/frontend/onnx_import/ops_bridge.cpp
@@ -94,6 +94,7 @@
#include "op/reshape.hpp"
#include "op/selu.hpp"
#include "op/shape.hpp"
#include "op/shrink.hpp"
#include "op/sigmoid.hpp"
#include "op/sign.hpp"
#include "op/sin.hpp"
@@ -311,6 +312,7 @@ namespace ngraph
REGISTER_OPERATOR("Reshape", 1, reshape);
REGISTER_OPERATOR("Selu", 1, selu);
REGISTER_OPERATOR("Shape", 1, shape);
REGISTER_OPERATOR("Shrink", 1, shrink);
REGISTER_OPERATOR("Sigmoid", 1, sigmoid);
REGISTER_OPERATOR("Sign", 1, sign);
REGISTER_OPERATOR("Sin", 1, sin);
src/ngraph/ngraph.hpp
@@ -104,6 +104,7 @@
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/fused/mvn.hpp"
src/ngraph/op/fused/group_conv_transpose.cpp
0 → 100644
This diff is collapsed.
src/ngraph/op/fused/group_conv_transpose.hpp
0 → 100644
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include <cstdlib>
#include <memory>
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/coordinate_diff.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/util/attr_types.hpp"
#include "ngraph/op/util/fused_op.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/strides.hpp"
namespace ngraph
{
    namespace op
    {
        /// \brief Group Transpose Convolution (Deconvolution)
        class GroupConvolutionTranspose : public util::FusedOp
        {
        public:
            ///
            /// \brief      Constructs GroupConvolutionTranspose operation.
            ///
            /// \param[in]  data            The node producing input data.
            /// \param[in]  filters         The node producing filters data.
            /// \param[in]  strides         The strides along each feature axis.
            /// \param[in]  dilations       The dilations along each feature axis.
            /// \param[in]  padding_begin   The padding added at the beginning of each feature axis.
            /// \param[in]  padding_end     The padding added at the end of each feature axis.
            /// \param[in]  output_padding  The zero-padding (adjustment) added to one side of the output.
            /// \param[in]  groups          The number of groups the input channels and output channels
            ///                             are divided into.
            /// \param[in]  pad_type        The provided padding type.
            /// \param[in]  output_shape    The output shape. When provided padding values are
            ///                             automatically inferred.
            ///
            GroupConvolutionTranspose(const std::shared_ptr<Node>& data,
                                      const std::shared_ptr<Node>& filters,
                                      const Strides& strides,
                                      const Strides& dilations,
                                      const CoordinateDiff& padding_begin,
                                      const CoordinateDiff& padding_end,
                                      const CoordinateDiff& output_padding,
                                      const std::size_t groups = 1UL,
                                      const PadType& pad_type = PadType::EXPLICIT,
                                      const Shape& output_shape = Shape{});

            ///
            /// \brief      Constructs GroupConvolutionTranspose operation.
            ///
            /// \param[in]  data     The node producing input data.
            /// \param[in]  filters  The node producing filters data.
            /// \param[in]  groups   The number of groups the input channels and output channels
            ///                      are divided into.
            ///
            GroupConvolutionTranspose(const std::shared_ptr<Node>& data,
                                      const std::shared_ptr<Node>& filters,
                                      const std::size_t groups = 1UL);

            ///
            /// \brief      Constructs GroupConvolutionTranspose operation.
            ///
            /// \param[in]  data            The node producing input data.
            /// \param[in]  filters         The node producing filters data.
            /// \param[in]  strides         The strides along each feature axis.
            /// \param[in]  dilations       The dilations along each feature axis.
            /// \param[in]  output_padding  The zero-padding (adjustment) added to one side of the output.
            /// \param[in]  output_shape    The output shape. When provided padding values are
            ///                             automatically inferred.
            /// \param[in]  groups          The number of groups the input channels and output channels
            ///                             are divided into.
            ///
            GroupConvolutionTranspose(const std::shared_ptr<Node>& data,
                                      const std::shared_ptr<Node>& filters,
                                      const Strides& strides,
                                      const Strides& dilations,
                                      const CoordinateDiff& output_padding,
                                      const Shape& output_shape,
                                      const std::size_t groups = 1UL);

            ///
            /// \brief      Constructs GroupConvolutionTranspose operation.
            ///
            /// \param[in]  data          The node producing input data.
            /// \param[in]  filters       The node producing filters data.
            /// \param[in]  output_shape  The output shape. When provided padding values are
            ///                           automatically inferred.
            /// \param[in]  groups        The number of groups the input channels and output channels
            ///                           are divided into.
            ///
            GroupConvolutionTranspose(const std::shared_ptr<Node>& data,
                                      const std::shared_ptr<Node>& filters,
                                      const Shape& output_shape,
                                      const std::size_t groups = 1UL);

            std::shared_ptr<Node> get_filters() { return get_argument(1); }
            std::shared_ptr<Node> get_data() { return get_argument(0); }
            const Strides& get_strides() const { return m_strides; }
            const Strides& get_dilations() const { return m_dilations; }
            const CoordinateDiff& get_padding_begin() const { return m_padding_begin; }
            const CoordinateDiff& get_padding_end() const { return m_padding_end; }
            const CoordinateDiff& get_output_padding() const { return m_output_padding; }
            std::size_t get_groups() const { return m_groups; }
            const PadType& get_pad_type() const { return m_pad_type; }
            const Shape& get_output_shape() const { return m_output_shape; }

            virtual void pre_validate_and_infer_types() override;
            virtual NodeVector decompose_op() const override;
            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;
            virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                           const NodeVector& deltas) override;

        private:
            ///
            /// \brief      Calculate the shape of the data batch from forward propagation.
            ///
            /// \return     The data batch shape.
            ///
            Shape get_data_batch_shape() const;

            Strides m_strides;
            Strides m_dilations;
            CoordinateDiff m_padding_begin;
            CoordinateDiff m_padding_end;
            CoordinateDiff m_output_padding;
            std::size_t m_groups;
            PadType m_pad_type;
            Shape m_output_shape;
        };
    }
}
src/ngraph/op/fused_op_tbl.hpp
@@ -27,6 +27,7 @@ NGRAPH_OP(FakeQuantize, ngraph::op)
NGRAPH_OP(GRN, ngraph::op)
NGRAPH_OP(Gemm, ngraph::op)
NGRAPH_OP(GroupConvolution, ngraph::op)
NGRAPH_OP(GroupConvolutionTranspose, ngraph::op)
NGRAPH_OP(HardSigmoid, ngraph::op)
NGRAPH_OP(LeakyRelu, ngraph::op)
NGRAPH_OP(MVN, ngraph::op)
src/ngraph/op/result.cpp
@@ -24,8 +24,9 @@
using namespace std;
using namespace ngraph;

-op::Result::Result(const shared_ptr<Node>& arg)
+op::Result::Result(const shared_ptr<Node>& arg, bool needs_default_layout)
    : Op("Result", check_single_output_args({arg}))
+   , m_needs_default_layout(needs_default_layout)
{
    constructor_validate_and_infer_types();

    // always borrow the placement conf even the default one
@@ -44,11 +45,7 @@ shared_ptr<Node> op::Result::copy_with_new_args(const NodeVector& new_args) cons
{
    check_new_args_count(this, new_args);
-   auto res = make_shared<Result>(new_args.at(0));
-   if (res)
-   {
-       res->set_needs_default_layout(m_needs_default_layout);
-   }
+   auto res = make_shared<Result>(new_args.at(0), m_needs_default_layout);
    return std::move(res);
}
src/ngraph/op/result.hpp
@@ -30,7 +30,7 @@ namespace ngraph
/// \brief Allows a value to be used as a function result.
///
/// \param arg Node that produces the input tensor.
-Result(const std::shared_ptr<Node>& arg);
+Result(const std::shared_ptr<Node>& arg, bool needs_default_layout = false);

void validate_and_infer_types() override;
src/ngraph/runtime/cpu/builder/dropout.cpp
@@ -38,13 +38,13 @@ namespace ngraph
auto arg_buffer_index = external_function->get_buffer_index(args[0].get_name());
auto arg1_buffer_index = external_function->get_buffer_index(args[1].get_name());
auto arg4_buffer_index = external_function->get_buffer_index(args[4].get_name());
auto out0_buffer_index = external_function->get_buffer_index(out[0].get_name());
auto out1_buffer_index = external_function->get_buffer_index(out[1].get_name());

size_t element_count = out[0].get_size();
bool use_seed = drop->get_use_seed();
double keep_prob = drop->get_keep_prob();

// Note: for performance optimization in addition to parallel RNG with multiple,
// threads, we create, initialize and advance each msr here in builder instead of
@@ -56,7 +56,7 @@ namespace ngraph
std::vector<std::minstd_rand> vmsr(nthr);
if (use_seed)
{
-   uint32_t seed = drop->get_seed();
+   uint64_t seed = drop->get_seed();
    for (size_t i = 0; i < nthr; i++)
    {
        std::minstd_rand msr;
@@ -72,13 +72,15 @@ namespace ngraph
 element_count,
 arg_buffer_index,
 arg1_buffer_index,
 arg4_buffer_index,
 out0_buffer_index,
 out1_buffer_index,
 keep_prob,
 vmsr,
 use_seed](CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
    bool training = static_cast<bool>(
        static_cast<float*>(ctx->buffer_data[arg1_buffer_index])[0]);
    double keep_prob =
        static_cast<double*>(ctx->buffer_data[arg4_buffer_index])[0];
    runtime::cpu::kernel::generate_dropout(
        static_cast<float*>(ctx->buffer_data[arg_buffer_index]),
        static_cast<float*>(ctx->buffer_data[out0_buffer_index]),
@@ -96,13 +98,15 @@ namespace ngraph
 element_count,
 arg_buffer_index,
 arg1_buffer_index,
 arg4_buffer_index,
 out0_buffer_index,
 out1_buffer_index,
 keep_prob,
 vmsr,
 use_seed](CPURuntimeContext* ctx, CPUExecutionContext* ectx) {
    bool training = static_cast<bool>(
        static_cast<double*>(ctx->buffer_data[arg1_buffer_index])[0]);
    double keep_prob =
        static_cast<double*>(ctx->buffer_data[arg4_buffer_index])[0];
    runtime::cpu::kernel::generate_dropout(
        static_cast<double*>(ctx->buffer_data[arg_buffer_index]),
        static_cast<double*>(ctx->buffer_data[out0_buffer_index]),
src/ngraph/runtime/cpu/op/dropout.cpp
@@ -26,11 +26,9 @@ using namespace ngraph;
op::Dropout::Dropout(const std::shared_ptr<Node>& input,
                     const std::shared_ptr<Node>& gm_const,
                     const std::shared_ptr<Node>& use_seed,
-                    const uint32_t seed,
-                    const double keep_prob)
-    : Op("Dropout", check_single_output_args({input, gm_const, use_seed}))
-    , m_seed(seed)
-    , m_keep_prob(keep_prob)
+                    const std::shared_ptr<Node>& seed,
+                    const std::shared_ptr<Node>& keep_prob)
+    : Op("Dropout", check_single_output_args({input, gm_const, use_seed, seed, keep_prob}))
{
    constructor_validate_and_infer_types();
@@ -41,13 +39,13 @@ op::Dropout::Dropout(const std::shared_ptr<Node>& input,
shared_ptr<Node> op::Dropout::copy_with_new_args(const NodeVector& new_args) const
{
-   if (new_args.size() != 3)
+   if (new_args.size() != 5)
    {
        throw ngraph_error("Incorrect number of new arguments");
    }
-   return make_shared<Dropout>(
-       new_args.at(0), new_args.at(1), new_args.at(2), m_seed, m_keep_prob);
+   return make_shared<Dropout>(
+       new_args.at(0), new_args.at(1), new_args.at(2), new_args.at(3), new_args.at(4));
}

bool op::Dropout::get_use_seed() const
@@ -60,3 +58,14 @@ bool op::Dropout::get_use_seed() const
    }
    return use_seed;
}

uint64_t op::Dropout::get_seed() const
{
    uint64_t seed = 0;
    if (auto const_op = dynamic_pointer_cast<op::Constant>(get_argument(3)))
    {
        auto seed_ptr = static_cast<const uint64_t*>(const_op->get_data_ptr());
        seed = *seed_ptr;
    }
    return seed;
}
src/ngraph/runtime/cpu/op/dropout.hpp
@@ -29,20 +29,15 @@ namespace ngraph
Dropout(const std::shared_ptr<Node>& input,
        const std::shared_ptr<Node>& gm_const,
        const std::shared_ptr<Node>& use_seed,
-       const uint32_t seed,
-       const double keep_prob); // keep_prob = 1 - dropout_prob
+       const std::shared_ptr<Node>& seed,
+       const std::shared_ptr<Node>& keep_prob); // keep_prob = 1 - dropout_prob

bool get_use_seed() const;
-uint32_t get_seed() const { return m_seed; }
-double get_keep_prob() const { return m_keep_prob; }
-void set_seed(uint32_t new_seed) { m_seed = new_seed; }
-void set_keep_prob(double new_keep_prob) { m_keep_prob = new_keep_prob; }
+uint64_t get_seed() const;
+double get_keep_prob() const;

virtual std::shared_ptr<Node>
    copy_with_new_args(const NodeVector& new_args) const override;

-private:
-    uint32_t m_seed;
-    double m_keep_prob;
};
}
}
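A minimal sketch of the new calling convention (not part of this commit): 'seed' and 'keep_prob' are now ordinary graph inputs, typically Constant nodes, rather than scalar attributes. The element types below (u64 for the seed, f64 for keep_prob) and the scalar Shape{} are assumptions based on get_seed()/get_keep_prob() above and on the cpu_fusion change later in this diff; input, gm_const and use_seed stand for nodes created elsewhere:

// Sketch only: construct a Dropout node against the new five-input signature.
auto seed_const =
    ngraph::op::Constant::create(ngraph::element::u64, ngraph::Shape{}, {1234});
auto keep_prob_const =
    ngraph::op::Constant::create(ngraph::element::f64, ngraph::Shape{}, {0.9});
auto dropout = std::make_shared<ngraph::op::Dropout>(
    input, gm_const, use_seed, seed_const, keep_prob_const);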
src/ngraph/runtime/cpu/pass/cpu_fusion.cpp
@@ -923,8 +923,8 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_dropout()
auto x = std::make_shared<pattern::op::Label>(element::f32, shape);
auto x_label = std::make_shared<pattern::op::Label>(x, nullptr, NodeVector{x});

-uint32_t seed = 1234;
-auto seed_label = std::make_shared<pattern::op::Label>(element::u32, Shape{0});
+uint64_t seed = 1234;
+auto seed_label = std::make_shared<pattern::op::Label>(element::u64, Shape{0});

double value = 0.9;
auto value_const = ngraph::op::Constant::create(element::f32, Shape{1, 1, 2, 2}, {value});
@@ -960,15 +960,28 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_dropout()
    NGRAPH_DEBUG << "training argument to GenerateMask must be constant";
    return false;
}

+if (!std::dynamic_pointer_cast<ngraph::op::Constant>(gm->get_argument(2)))
+{
+    NGRAPH_DEBUG << "use_seed argument to GenerateMask must be constant";
+    return false;
+}
+
+if (!std::dynamic_pointer_cast<ngraph::op::Constant>(gm->get_argument(3)))
+{
+    NGRAPH_DEBUG << "seed argument to GenerateMask must be constant";
+    return false;
+}
+
+if (!std::dynamic_pointer_cast<ngraph::op::Constant>(gm->get_argument(4)))
+{
+    NGRAPH_DEBUG << "probability argument to GenerateMask must be constant";
+    return false;
+}

-auto gm_value = gm->get_probability();
-auto gm_seed = gm->get_seed();
-auto training = gm->get_argument(0); //for training purpose this is always going to be 1
-auto use_seed_arg = gm->get_argument(2); // this is the use_seed node

-auto dropout_n = std::make_shared<ngraph::op::Dropout>(
-    pattern_map[x], training, use_seed_arg, gm_seed, gm_value);
+auto dropout_n = std::make_shared<ngraph::op::Dropout>(pattern_map[x],
+                                                       gm->get_argument(0),
+                                                       gm->get_argument(2),
+                                                       gm->get_argument(3),
+                                                       gm->get_argument(4));

auto goe1 = std::make_shared<ngraph::op::GetOutputElement>(dropout_n, 0);
ngraph::replace_node(m.get_match_root(), goe1);
src/ngraph/runtime/intelgpu/intelgpu_backend.cpp
@@ -86,6 +86,7 @@
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/fused/mvn.hpp"
@@ -2063,6 +2064,7 @@ shared_ptr<runtime::Executable>
case OP_TYPEID::GatherND:
case OP_TYPEID::GenerateMask:
case OP_TYPEID::GRN:
case OP_TYPEID::GroupConvolutionTranspose:
case OP_TYPEID::HardSigmoid:
case OP_TYPEID::LeakyRelu:
case OP_TYPEID::MVN:
@@ -2183,6 +2185,7 @@ bool runtime::intelgpu::IntelGPUBackend::is_supported_impl(const Node& node)
case OP_TYPEID::FakeQuantize:
case OP_TYPEID::Gemm:
case OP_TYPEID::GRN:
case OP_TYPEID::GroupConvolutionTranspose:
case OP_TYPEID::LeakyRelu:
case OP_TYPEID::MVN:
case OP_TYPEID::Normalize:
src/ngraph/runtime/plaidml/unit_test.manifest
@@ -259,4 +259,94 @@ backwards_softmax_underflow
backwards_softmax_3d
batch_mat_mul_forward
dot_matrix_2x0_0x2
# dgkutnic ww24.5: these tests are to be triaged by the PlaidML team
convolution_3d_1item_large_5o3i_padded_uneven_filter_uneven_data_dilation_data_dilated
select
product_trivial
product_trivial_5d
product_to_scalar
product_matrix_columns
product_matrix_rows
product_3d_to_matrix_most_sig
product_3d_to_matrix_least_sig
product_3d_to_vector
product_3d_to_scalar
product_2d_to_scalar_int32
product_to_scalar_int32
product_to_scalar_int8
max_trivial
max_trivial_5d
max_to_scalar
max_to_scalar_int8
max_matrix_columns
max_matrix_rows
max_matrix_rows_int32
max_3d_to_matrix_most_sig
max_3d_to_matrix_least_sig
max_3d_to_vector
max_3d_to_scalar
max_3d_to_scalar_int32
min_trivial
min_trivial_5d
min_trivial_5d_int32
min_to_scalar
min_to_scalar_int8
min_matrix_columns
min_matrix_rows
min_matrix_rows_int32
min_3d_to_matrix_most_sig
min_3d_to_matrix_least_sig
min_3d_to_vector
min_3d_to_scalar
min_3d_to_scalar_int32
sum_to_scalar
sum_large_1d_to_scalar
sum_matrix_columns
sum_matrix_6d
sum_matrix_rows
sum_3d_to_matrix_most_sig
sum_3d_to_matrix_least_sig
sum_3d_to_vector
sum_3d_to_scalar
sum_3d_to_scalar_int32
sum_5d_to_scalar
sum_5d_to_scalar_int32
sum_2d_to_scalar_int8
sum_stable_acc
sum_stable_simple_float
divide_python_rounding_int32
any_2x2_to_scalar_true
any_2x2_to_scalar_false
any_2x3_eliminate_col_dim
any_2x3_eliminate_row_dim
any_2x2x3_eliminate_dim_1
any_2x2x3_eliminate_dim_2
any_2x2x3_eliminate_dims_0_1
any_2x2x3_eliminate_dims_0_2
any_2x2x3_eliminate_dims_1_2
any_2x2x3_eliminate_dims_0_1_2
all_trivial
all_2x2_to_scalar_false
all_2x2_to_scalar_true
all_2x3_eliminate_col_dim
all_2x3_eliminate_row_dim
all_2x2x3_eliminate_dim_0
all_2x2x3_eliminate_dim_1
all_2x2x3_eliminate_dim_2
all_2x2x3_eliminate_dims_0_1
all_2x2x3_eliminate_dims_0_2
all_2x2x3_eliminate_dims_1_2
all_2x2x3_eliminate_dims_0_1_2
all_dynamic_axis
all_change_axis
backwards_broadcast0
backwards_broadcast1
backwards_select
backwards_select_nested
backwards_sum_v2s
backwards_sum_m2s
backwards_sum_m2v_0
backwards_sum_m2v_1
backwards_batchmatmul_tensor2_tensor2
src/ngraph/serializer.cpp
@@ -74,6 +74,7 @@
#include "ngraph/op/fused/gemm.hpp"
#include "ngraph/op/fused/grn.hpp"
#include "ngraph/op/fused/group_conv.hpp"
#include "ngraph/op/fused/group_conv_transpose.hpp"
#include "ngraph/op/fused/hard_sigmoid.hpp"
#include "ngraph/op/fused/leaky_relu.hpp"
#include "ngraph/op/fused/mvn.hpp"
@@ -1078,6 +1079,31 @@ static shared_ptr<ngraph::Function>
        pad_type);
    break;
}
case OP_TYPEID::GroupConvolutionTranspose:
{
    auto strides = node_js.at("strides").get<vector<size_t>>();
    auto dilations = node_js.at("dilations").get<vector<size_t>>();
    auto padding_begin = node_js.at("padding_begin").get<vector<ptrdiff_t>>();
    auto padding_end = node_js.at("padding_end").get<vector<ptrdiff_t>>();
    auto output_padding = node_js.at("output_padding").get<vector<ptrdiff_t>>();
    auto groups = node_js.at("groups").get<size_t>();

    op::PadType pad_type = node_js["pad_type"].empty()
                               ? op::PadType::EXPLICIT
                               : static_cast<op::PadType>(node_js.at("pad_type"));

    auto output_shape = node_js.at("output_shape").get<vector<size_t>>();

    node = make_shared<op::GroupConvolutionTranspose>(args[0],
                                                      args[1],
                                                      strides,
                                                      dilations,
                                                      padding_begin,
                                                      padding_end,
                                                      output_padding,
                                                      groups,
                                                      pad_type,
                                                      output_shape);
    break;
}
case OP_TYPEID::LeakyRelu:
{
    node = make_shared<op::LeakyRelu>(args[0], args[1]);
@@ -1417,7 +1443,9 @@ static shared_ptr<ngraph::Function>
}
case OP_TYPEID::Result:
{
-   node = make_shared<op::Result>(args[0]);
+   auto needs_default_layout =
+       get_or_default<bool>(node_js, "needs_default_layout", false);
+   node = make_shared<op::Result>(args[0], needs_default_layout);
    break;
}
case OP_TYPEID::Reverse:
@@ -2089,6 +2117,19 @@ static json write(const Node& n, bool binary_constant_data)
    node["pad_type"] = tmp->get_pad_type();
    break;
}
case OP_TYPEID::GroupConvolutionTranspose:
{
    auto tmp = dynamic_cast<const op::GroupConvolutionTranspose*>(&n);
    node["strides"] = tmp->get_strides();
    node["dilations"] = tmp->get_dilations();
    node["padding_begin"] = tmp->get_padding_begin();
    node["padding_end"] = tmp->get_padding_end();
    node["output_padding"] = tmp->get_output_padding();
    node["groups"] = tmp->get_groups();
    node["pad_type"] = tmp->get_pad_type();
    node["output_shape"] = tmp->get_output_shape();
    break;
}
case OP_TYPEID::LeakyRelu: { break;
}
case OP_TYPEID::Less:
@@ -2341,7 +2382,11 @@ static json write(const Node& n, bool binary_constant_data)
    node["output_shape"] = tmp->get_output_shape();
    break;
}
-case OP_TYPEID::Result: { break;
+case OP_TYPEID::Result:
+{
+    auto tmp = dynamic_cast<const op::Result*>(&n);
+    node["needs_default_layout"] = tmp->needs_default_layout();
+    break;
}
case OP_TYPEID::Reverse:
{
test/backend_fused_op.in.cpp
@@ -1156,3 +1156,94 @@ NGRAPH_TEST(${BACKEND_NAME}, fake_quantize_with_clip_across_channels)
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, group_conv_transpose)
{
    const CoordinateDiff output_padding{1, 1};
    const CoordinateDiff padding_begin{1, 1};
    const CoordinateDiff padding_end{1, 1};
    Strides strides{2, 2};
    Strides dilations{1, 1};
    size_t groups = 1;

    auto data = make_shared<op::Parameter>(element::f32, Shape{1, 1, 3, 3});
    auto filters = make_shared<op::Parameter>(element::f32, Shape{1, 1, 3, 3});

    auto gct = make_shared<op::GroupConvolutionTranspose>(
        data, filters, strides, dilations, padding_begin, padding_end, output_padding, groups);

    auto function = make_shared<Function>(NodeVector{gct}, ParameterVector{data, filters});
    auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");

    // X
    test_case.add_input<float>(vector<float>{0.16857791f,
                                             -0.15161794f,
                                             0.08540368f,
                                             0.1820628f,
                                             -0.21746576f,
                                             0.08245695f,
                                             0.1431433f,
                                             -0.43156421f,
                                             0.30591947f});
    // W
    test_case.add_input<float>({-0.06230065f,
                                0.37932432f,
                                -0.25388849f,
                                0.33878803f,
                                0.43709868f,
                                -0.22477469f,
                                0.04118127f,
                                -0.44696793f,
                                0.06373066f});

    test_case.add_expected_output(
        Shape{1, 1, 6, 6},
        vector<float>{0.07368518f,  -0.08925839f, -0.06627201f, 0.06301362f,  0.03732984f,
                      -0.01919658f, -0.00628807f, -0.02817563f, -0.01472169f, 0.04392925f,
                      -0.00689478f, -0.01549204f, 0.07957941f,  -0.11459791f, -0.09505399f,
                      0.07681622f,  0.03604182f,  -0.01853423f, -0.0270785f,  -0.00680824f,
                      -0.06650258f, 0.08004665f,  0.07918708f,  -0.0724144f,  0.06256775f,
                      -0.17838378f, -0.18863615f, 0.20064656f,  0.133717f,    -0.06876295f,
                      -0.06398046f, -0.00864975f, 0.19289537f,  -0.01490572f, -0.13673618f,
                      0.01949645f});
    test_case.set_tolerance(3);
    test_case.run();
}

NGRAPH_TEST(${BACKEND_NAME}, group_conv_transpose_output_shape)
{
    const CoordinateDiff output_padding{};
    const Shape output_shape{1, 1, 1, 14};
    Strides strides{1, 1};
    Strides dilations{1, 1};
    size_t groups = 1;

    auto data = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 10});
    auto filters = make_shared<op::Parameter>(element::f32, Shape{1, 1, 1, 5});

    auto gct = make_shared<op::GroupConvolutionTranspose>(
        data, filters, strides, dilations, output_padding, output_shape, groups);

    auto function = make_shared<Function>(NodeVector{gct}, ParameterVector{data, filters});
    auto test_case = ngraph::test::NgraphTestCase(function, "${BACKEND_NAME}");

    // X
    test_case.add_input<float>(
        vector<float>{0.0f, 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f});
    // W
    test_case.add_input<float>({1.0f, 2.0f, 3.0f, 2.0f, 1.0f});

    test_case.add_expected_output(
        Shape{1, 1, 1, 14},
        vector<float>{
            0.0f, 1.0f, 4.0f, 10.0f, 18.0f, 27.0f, 36.0f, 45.0f, 54.0f, 63.0f, 62.0f, 50.0f,
            26.0f, 9.0f});
    test_case.run();
}
test/models/onnx/shrink_float.prototxt
0 → 100644
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "Shrink"
attribute {
name: "lambd"
f: 1.5
type: FLOAT
}
attribute {
name: "bias"
f: 0.5
type: FLOAT
}
}
name: "shrink_graph"
input {
name: "x"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 11
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 11
}
}
}
}
}
}
opset_import {
version: 9
}
test/models/onnx/shrink_int.prototxt
0 → 100644
ir_version: 3
producer_name: "nGraph ONNX Importer"
graph {
node {
input: "x"
output: "y"
op_type: "Shrink"
attribute {
name: "lambd"
f: 1.4
type: FLOAT
}
attribute {
name: "bias"
f: 1.5
type: FLOAT
}
}
name: "shrink_graph"
input {
name: "x"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_value: 11
}
}
}
}
}
output {
name: "y"
type {
tensor_type {
elem_type: 6
shape {
dim {
dim_value: 11
}
}
}
}
}
}
opset_import {
version: 9
}
test/onnx/onnx_import.in.cpp
@@ -1456,3 +1456,29 @@ NGRAPH_TEST(onnx_${BACKEND_NAME}, model_hardmax)
    test_case.run();
}

NGRAPH_TEST(onnx_${BACKEND_NAME}, model_shrink_float)
{
    const auto shrink_fn = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/shrink_float.prototxt"));

    auto test_case = ngraph::test::NgraphTestCase(shrink_fn, "${BACKEND_NAME}");
    test_case.add_input<float>(
        {-2.0f, -1.6f, -1.5f, -1.4f, -1.0f, 0.0f, 1.0f, 1.4f, 1.5f, 1.6f, 2.0f});
    test_case.add_expected_output<float>(
        Shape{11}, {-1.5f, -1.1f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.1f, 1.5f});

    test_case.run();
}

NGRAPH_TEST(onnx_${BACKEND_NAME}, model_shrink_int)
{
    const auto shrink_fn = onnx_import::import_onnx_model(
        file_util::path_join(SERIALIZED_ZOO, "onnx/shrink_int.prototxt"));

    auto test_case = ngraph::test::NgraphTestCase(shrink_fn, "${BACKEND_NAME}");
    test_case.add_input<int>({-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5});
    test_case.add_expected_output<int>(Shape{11}, {-4, -3, -2, -1, 0, 0, 0, 1, 2, 3, 4});

    test_case.run();
}
test/type_prop.cpp
This diff is collapsed.
test/util/test_case.hpp
@@ -97,9 +97,8 @@ namespace ngraph
"All function results already have expected outputs."
);
auto
function_output_type
=
results
.
at
(
m_output_index
)
->
get_element_type
();
auto
function_output_shape
=
results
.
at
(
m_output_index
)
->
get_shape
();
m_result_tensors
.
emplace_back
(
m_backend
->
create_tensor
(
function_output_type
,
function_output
_shape
));
m_backend
->
create_tensor
(
function_output_type
,
expected
_shape
));
m_expected_outputs
.
emplace_back
(
std
::
make_shared
<
ngraph
::
op
::
Constant
>
(
function_output_type
,
expected_shape
,
values
));
...
...