ngraph commit da10c2c4 (Unverified)
Authored Feb 26, 2020 by Sang Ik Lee; committed by GitHub on Feb 26, 2020
Remove Fluid helper function. (#4382)

Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>

Parent: c4bcabac
Showing 19 changed files with 0 additions and 1930 deletions (+0 -1930):
CODEOWNERS                                                    +0 -1
src/ngraph/frontend/CMakeLists.txt                            +0 -12
src/ngraph/frontend/fluid/CMakeLists.txt                      +0 -33
src/ngraph/frontend/fluid/README.md                           +0 -1
src/ngraph/frontend/fluid/operators/layout_converter.cpp      +0 -74
src/ngraph/frontend/fluid/operators/layout_converter.hpp      +0 -55
src/ngraph/frontend/fluid/operators/lookup_table.cpp          +0 -125
src/ngraph/frontend/fluid/operators/lookup_table.hpp          +0 -80
src/ngraph/frontend/fluid/operators/matmul.cpp                +0 -339
src/ngraph/frontend/fluid/operators/matmul.hpp                +0 -93
src/ngraph/frontend/fluid/operators/pool.cpp                  +0 -241
src/ngraph/frontend/fluid/operators/pool.hpp                  +0 -134
src/ngraph/frontend/fluid/operators/reduce_sum.cpp            +0 -176
src/ngraph/frontend/fluid/operators/reduce_sum.hpp            +0 -90
src/ngraph/frontend/fluid/operators/test/CMakeLists.txt       +0 -83
src/ngraph/frontend/fluid/operators/test/lookup_table.cpp     +0 -73
src/ngraph/frontend/fluid/operators/test/main.cpp             +0 -62
src/ngraph/frontend/fluid/operators/test/reduce_sum.cpp       +0 -256
src/ngraph/frontend/fluid/operators/test/test.manifest        +0 -2
CODEOWNERS (view file @ da10c2c4)

@@ -41,7 +41,6 @@ project/doc-contributor-README.rst @indie
 /src/ngraph/codegen/ @rkimballn1
 /src/ngraph/distributed.* @wenzhe-nrv @diyessi
 /src/ngraph/frontend/ATen/ @silee2
-/src/ngraph/frontend/fluid/ @silee2
 /src/ngraph/frontend/onnx_import/ @postrational
 /src/ngraph/op/ @diyessi
 /src/ngraph/op/allreduce.*pp @wenzhe-nrv @diyessi
src/ngraph/frontend/CMakeLists.txt (view file @ da10c2c4)

@@ -18,18 +18,6 @@ if (NGRAPH_ONNX_IMPORT_ENABLE)
     add_subdirectory(onnx_import)
 endif()
-
-# FIXME(silee2): Re-enable NORMALIZE_BOOL after implementing a proper fix for #4293
-option(NGRAPH_FLUID_ENABLE "Enable build for PaddlePaddle Fluid support" ON)
-#NORMALIZE_BOOL(NGRAPH_FLUID_ENABLE)
-message(STATUS "NGRAPH_FLUID_ENABLE: ${NGRAPH_FLUID_ENABLE}")
-if (NGRAPH_FLUID_ENABLE)
-    option(NGRAPH_FLUID_TEST_ENABLE "Enable PaddlePaddle Fluid operator tests" OFF)
-    #NORMALIZE_BOOL(NGRAPH_FLUID_TEST_ENABLE)
-    message(STATUS "NGRAPH_FLUID_TEST_ENABLE: ${NGRAPH_FLUID_TEST_ENABLE}")
-    add_subdirectory(fluid)
-endif()
-
 option(NGRAPH_ATEN_ENABLE "Enable build for PyTorch ATen support" OFF)
 #NORMALIZE_BOOL(NGRAPH_ATEN_ENABLE)
 message(STATUS "NGRAPH_ATEN_ENABLE: ${NGRAPH_ATEN_ENABLE}")
src/ngraph/frontend/fluid/CMakeLists.txt
deleted 100644 → 0 (view file @ c4bcabac)

# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

# Add files here
target_sources(ngraph PRIVATE
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/layout_converter.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/layout_converter.hpp
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/lookup_table.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/lookup_table.hpp
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/matmul.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/matmul.hpp
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/pool.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/pool.hpp
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/reduce_sum.cpp
    ${CMAKE_CURRENT_SOURCE_DIR}/operators/reduce_sum.hpp
    )

if (NGRAPH_FLUID_TEST_ENABLE)
    add_subdirectory(operators/test)
endif()
src/ngraph/frontend/fluid/README.md
deleted 100644 → 0 (view file @ c4bcabac)

## PaddlePaddle Fluid helper classes
src/ngraph/frontend/fluid/operators/layout_converter.cpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "ngraph/frontend/fluid/operators/layout_converter.hpp"
#include "ngraph/op/reshape.hpp"

using namespace std;
using namespace ngraph::fluid;

constexpr NodeTypeInfo LayoutConverter::type_info;

LayoutConverter::LayoutConverter(const Output<Node>& x, const int mode)
    : FusedOp({x})
    , m_mode(mode)
{
    constructor_validate_and_infer_types();
}

NodeVector LayoutConverter::decompose_op() const
{
    auto x = input_value(0);
    auto x_shape = get_input_shape(0);
    int mode = get_mode();

    NODE_VALIDATION_CHECK(this, x_shape.size() == 4, "Input rank is not 4");

    AxisVector axis_vec;

    switch (mode)
    {
    case 1: axis_vec = {0, 3, 1, 2}; break;
    case 2: axis_vec = {0, 2, 3, 1}; break;
    default: throw ngraph_error("Unsupported layout convert mode");
    }

    Shape out_shape = x_shape;

    for (size_t i = 0; i < 4; ++i)
    {
        out_shape[i] = x_shape[axis_vec[i]];
    }

    return {make_shared<op::Reshape>(x, axis_vec, out_shape)};
}

shared_ptr<Node> LayoutConverter::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<LayoutConverter>(new_args.at(0), get_mode());
}

void LayoutConverter::pre_validate_and_infer_types()
{
    auto shape = get_input_partial_shape(0);

    if (shape.is_dynamic())
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
    }
}
src/ngraph/frontend/fluid/operators/layout_converter.hpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"

using namespace std;
using namespace ngraph;

namespace ngraph
{
    namespace fluid
    {
        /// \brief Fluid layout converter
        class NGRAPH_API LayoutConverter : public ngraph::op::util::FusedOp
        {
        public:
            static constexpr NodeTypeInfo type_info{"FluidLayoutConverter", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            LayoutConverter() = default;

            /// \brief Constructs a LayoutConverter operation.
            ///
            /// \param x Input x
            /// \param mode : 1. nhwc->nchw, 2. nchw->nhwc
            LayoutConverter(const Output<Node>& x, const int mode);

            virtual NodeVector decompose_op() const override;

            virtual void pre_validate_and_infer_types() override;

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            int get_mode() const { return m_mode; }
        protected:
            int m_mode;
        };
    } // namespace fluid
} // namespace ngraph
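The mode argument selects the axis permutation ({0, 3, 1, 2} for mode 1, {0, 2, 3, 1} for mode 2), and the output shape is the input shape permuted by the same vector. A standalone sketch of that rule in plain C++ (not part of the deleted sources; the shape values are made up for illustration):

#include <array>
#include <cstddef>
#include <iostream>

int main()
{
    // NHWC input shape; mode 1 permutes axes by {0, 3, 1, 2} -> NCHW.
    std::array<std::size_t, 4> x_shape{2, 224, 224, 3};
    std::array<std::size_t, 4> axis_vec{0, 3, 1, 2}; // mode 1 (nhwc -> nchw)

    std::array<std::size_t, 4> out_shape{};
    for (std::size_t i = 0; i < 4; ++i)
    {
        out_shape[i] = x_shape[axis_vec[i]]; // same rule as LayoutConverter::decompose_op
    }

    for (std::size_t d : out_shape)
    {
        std::cout << d << ' '; // prints: 2 3 224 224
    }
    std::cout << '\n';
}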
src/ngraph/frontend/fluid/operators/lookup_table.cpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "ngraph/frontend/fluid/operators/lookup_table.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/gather.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/scatter_add.hpp"
#include "ngraph/util.hpp"

using namespace std;
using namespace ngraph::fluid;

constexpr NodeTypeInfo LookupTable2::type_info;

LookupTable2::LookupTable2(const Output<Node>& w,
                           const Output<Node>& ids,
                           const int64_t padding_idx)
    : FusedOp({w, ids})
    , m_padding_idx(padding_idx)
{
    constructor_validate_and_infer_types();
}

NodeVector LookupTable2::decompose_op() const
{
    auto w = input_value(0);
    auto ids = input_value(1);
    auto padding_idx = get_padding_idx();
    auto table_shape = get_input_shape(0);

    NODE_VALIDATION_CHECK(
        this, table_shape.size() == 2, "The dimension of look up table must be 2");

    auto row_number = table_shape[0];

    auto masked_w = w;

    if (padding_idx != -1)
    {
        vector<size_t> mask(row_number, 1);
        mask[padding_idx] = 0;
        auto mask_node =
            make_shared<op::Constant>(w.get_element_type(), Shape{row_number}, mask);
        auto mask_bcast = make_shared<op::Broadcast>(mask_node, table_shape, AxisSet{1});
        masked_w = w * mask_bcast;
    }

    auto out = make_shared<op::Gather>(masked_w, ids);

    return {out};
}

shared_ptr<Node> LookupTable2::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<LookupTable2>(new_args.at(0), new_args.at(1), get_padding_idx());
}

void LookupTable2::pre_validate_and_infer_types()
{
    auto pshape_w = get_input_partial_shape(0);
    auto pshape_ids = get_input_partial_shape(1);

    if (pshape_w.is_dynamic() || pshape_ids.is_dynamic())
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
    }
}

constexpr NodeTypeInfo LookupTable2Grad::type_info;

LookupTable2Grad::LookupTable2Grad(const Output<Node>& w,
                                   const Output<Node>& ids,
                                   const Output<Node>& dout)
    : FusedOp({w, ids, dout})
{
    constructor_validate_and_infer_types();
}

void LookupTable2Grad::pre_validate_and_infer_types()
{
    if (get_input_partial_shape(0).is_dynamic() || get_input_partial_shape(1).is_dynamic() ||
        get_input_partial_shape(2).is_dynamic())
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
    }
}

shared_ptr<Node> LookupTable2Grad::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<LookupTable2Grad>(new_args.at(0), new_args.at(1), new_args.at(2));
}

NodeVector LookupTable2Grad::decompose_op() const
{
    auto w = input_value(0);
    auto ids = input_value(1);
    auto dout = input_value(2);
    auto shape_w = get_input_shape(0);

    auto w0 = op::Constant::create(dout.get_element_type(), shape_w, {0});
    auto dw = make_shared<op::ScatterAdd>(w0, ids, dout);

    return {dw};
}
src/ngraph/frontend/fluid/operators/lookup_table.hpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"

using namespace std;
using namespace ngraph;

namespace ngraph
{
    namespace fluid
    {
        /// \brief Fluid lookup_table2
        class NGRAPH_API LookupTable2 : public ngraph::op::util::FusedOp
        {
        public:
            static constexpr NodeTypeInfo type_info{"FluidLookupTable2", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            LookupTable2() = default;

            /// \brief Constructs a LookupTable2 operation.
            ///
            /// \param w Input weight table
            /// \param ids look up ids
            /// \param padding_idx Table row to mask out (-1 for none)
            LookupTable2(const Output<Node>& w,
                         const Output<Node>& ids,
                         const int64_t padding_idx);

            virtual NodeVector decompose_op() const override;

            virtual void pre_validate_and_infer_types() override;

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            int64_t get_padding_idx() const { return m_padding_idx; }
        protected:
            int64_t m_padding_idx{-1};
        };

        /// \brief Fluid lookup_table2_grad
        class NGRAPH_API LookupTable2Grad : public ngraph::op::util::FusedOp
        {
        public:
            static constexpr NodeTypeInfo type_info{"FluidLookupTable2Grad", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            LookupTable2Grad() = default;

            /// \brief Constructs a LookupTable2Grad operation.
            ///
            /// \param w Input weight table
            /// \param ids Input lookup ids
            /// \param dout Input delta
            LookupTable2Grad(const Output<Node>& w,
                             const Output<Node>& ids,
                             const Output<Node>& dout);

            virtual NodeVector decompose_op() const override;

            virtual void pre_validate_and_infer_types() override;

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;
        };
    }
}
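The decomposition implements embedding lookup with optional padding: when padding_idx is not -1, the corresponding table row is zeroed via a broadcast mask before the gather. A standalone sketch of those semantics over plain vectors (the table and id values are made up; no nGraph types):

#include <cstdint>
#include <iostream>
#include <vector>

int main()
{
    // 3x2 weight table, as rows; padding_idx = 1 zeroes out row 1 before the gather.
    std::vector<std::vector<float>> w{{10, 11}, {20, 21}, {30, 31}};
    std::int64_t padding_idx = 1;
    std::vector<std::int32_t> ids{0, 1, 2};

    // Mask step: zero the padding row (mirrors the Constant/Broadcast/multiply chain).
    for (std::size_t col = 0; col < w[padding_idx].size(); ++col)
    {
        w[padding_idx][col] = 0;
    }

    // Gather step: output row k is w[ids[k]].
    for (std::int32_t id : ids)
    {
        std::cout << w[id][0] << ' ' << w[id][1] << '\n'; // 10 11 / 0 0 / 30 31
    }
}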
src/ngraph/frontend/fluid/operators/matmul.cpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <memory>
#include <numeric>

#include "ngraph/frontend/fluid/operators/matmul.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/dot.hpp"
#include "ngraph/op/experimental/batch_mat_mul.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/util.hpp"

using namespace std;
using namespace ngraph::fluid;

shared_ptr<Node> broadcast_to_3d(const shared_ptr<Node>& input, size_t axis0)
{
    auto shape = input->get_shape();
    size_t n = shape.size();

    if (n == 2)
    {
        auto output =
            make_shared<op::Broadcast>(input, Shape{axis0, shape[0], shape[1]}, AxisSet{0});
        return output;
    }

    return input;
}

shared_ptr<Node> transpose_and_flatten3d(const shared_ptr<Node>& input,
                                         const bool transpose,
                                         const bool x = true)
{
    auto shape = input->get_shape();
    size_t n = shape.size();
    shared_ptr<Node> output;

    if (n >= 3)
    {
        vector<size_t> order(n);
        iota(begin(order), end(order), 0);
        size_t outer = 1;
        for (size_t i = 0; i < n - 2; i++)
        {
            outer = outer * shape[i];
        }
        vector<size_t> reshape{outer, shape[n - 2], shape[n - 1]};

        if (transpose)
        {
            order[n - 2] = n - 1;
            order[n - 1] = n - 2;
            reshape[2] = shape[n - 2];
            reshape[1] = shape[n - 1];
        }

        output = make_shared<op::Reshape>(input, AxisVector(order), Shape(reshape));
    }
    else
    {
        shared_ptr<Node> temp;
        if (n == 1 && x == true)
        {
            temp = make_shared<op::Reshape>(input, AxisVector{0}, Shape{1, shape[0]});
        }
        else if (n == 1 && x == false)
        {
            temp = make_shared<op::Reshape>(input, AxisVector{0}, Shape{shape[0], 1});
        }
        else
        {
            temp = input;
        }
        auto temp_shape = temp->get_shape();
        if (transpose == true)
        {
            output = make_shared<op::Reshape>(
                temp, AxisVector{1, 0}, Shape{temp_shape[1], temp_shape[0]});
        }
        else
        {
            output = temp;
        }
    }

    return output;
}

shared_ptr<Node> dot_helper(const shared_ptr<Node>& a, const shared_ptr<Node>& b)
{
    shared_ptr<Node> out;

    if (a->get_shape().size() > 2 && b->get_shape().size() > 2)
    {
        out = make_shared<op::BatchMatMul>(a, b);
    }
    else
    {
        out = make_shared<op::Dot>(a, b);
    }

    return out;
}

shared_ptr<Node> reshape_to_original(shared_ptr<Node> input, const Shape& shape)
{
    auto input_shape = input->get_shape();
    return make_shared<op::Reshape>(input, get_default_order(input_shape), shape);
}

constexpr NodeTypeInfo MatMul::type_info;
MatMul::MatMul(const Output<Node>& A,
               const Output<Node>& B,
               const bool transpose_a,
               const bool transpose_b)
    : FusedOp(OutputVector{A, B})
    , m_transpose_a{transpose_a}
    , m_transpose_b{transpose_b}
{
    constructor_validate_and_infer_types();
}

void MatMul::pre_validate_and_infer_types()
{
    element::Type input_element_type = get_input_element_type(0);
    PartialShape pshape_A = get_input_partial_shape(0);
    PartialShape pshape_B = get_input_partial_shape(1);

    NODE_VALIDATION_CHECK(this,
                          input_element_type.is_dynamic() || input_element_type.is_real(),
                          "Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
                          input_element_type,
                          ").");

    if (pshape_A.is_dynamic() || pshape_B.is_dynamic())
    {
        set_output_type(0, input_element_type, PartialShape::dynamic());
    }
}

NodeVector MatMul::decompose_op() const
{
    auto x = input_value(0).get_node_shared_ptr();
    auto y = input_value(1).get_node_shared_ptr();

    auto x_shape = x->get_shape();
    auto y_shape = y->get_shape();

    size_t nx = x_shape.size();
    size_t ny = y_shape.size();

    x = transpose_and_flatten3d(x, m_transpose_a, true);
    y = transpose_and_flatten3d(y, m_transpose_b, false);

    auto y_shape3 = y->get_shape();
    auto x_shape3 = x->get_shape();

    shared_ptr<Node> out;
    Shape out_shape;

    if (nx > 2 || ny > 2)
    {
        Shape out_shape = x_shape;
        if (nx != 3)
        {
            x = broadcast_to_3d(x, y_shape3[0]);
            out_shape = y_shape;
        }
        if (ny != 3)
        {
            y = broadcast_to_3d(y, x_shape3[0]);
            out_shape = x_shape;
        }
        auto nout = out_shape.size();
        auto out3 = make_shared<op::BatchMatMul>(x, y);
        auto out3_shape = out3->get_shape();
        out_shape[nout - 1] = out3_shape[2];
        out_shape[nout - 2] = out3_shape[1];
        out = make_shared<op::Reshape>(out3, AxisVector{0, 1, 2}, out_shape);
    }
    else
    {
        out = make_shared<op::Dot>(x, y);
    }

    out_shape = out->get_shape();
    auto axis_vector = get_default_order(out_shape);
    for (size_t i = out_shape.size() - 1; i > 0; i--)
    {
        if (out_shape[i] == 1)
        {
            out_shape.erase(out_shape.begin() + i);
        }
    }
    auto out_reshaped = make_shared<op::Reshape>(out, axis_vector, out_shape);

    return {out_reshaped};
}

shared_ptr<Node> MatMul::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<MatMul>(new_args.at(0), new_args.at(1), m_transpose_a, m_transpose_b);
}

constexpr NodeTypeInfo MatMulGrad::type_info;
MatMulGrad::MatMulGrad(const Output<Node>& A,
                       const Output<Node>& B,
                       const Output<Node>& Out,
                       const bool transpose_a,
                       const bool transpose_b)
    : FusedOp(OutputVector{A, B, Out})
    , m_transpose_a{transpose_a}
    , m_transpose_b{transpose_b}
{
    constructor_validate_and_infer_types();
}

void MatMulGrad::pre_validate_and_infer_types()
{
    element::Type input_element_type = get_input_element_type(0);

    NODE_VALIDATION_CHECK(this,
                          input_element_type.is_dynamic() || input_element_type.is_real(),
                          "Argument element type must be f16, bf16, f32, f64 or dynamic (got ",
                          input_element_type,
                          ").");

    if (get_input_partial_shape(0).is_dynamic() || get_input_partial_shape(1).is_dynamic() ||
        get_input_partial_shape(2).is_dynamic())
    {
        set_output_type(0, input_element_type, PartialShape::dynamic());
        set_output_type(1, input_element_type, PartialShape::dynamic());
    }
}

NodeVector MatMulGrad::decompose_op() const
{
    auto x = input_value(0).get_node_shared_ptr();
    auto y = input_value(1).get_node_shared_ptr();
    auto dout = input_value(2).get_node_shared_ptr();

    auto dout_shape = dout->get_shape();
    auto x_shape = x->get_shape();
    auto y_shape = y->get_shape();
    size_t nx = x_shape.size();
    size_t ny = y_shape.size();
    size_t ndout = dout_shape.size();

    shared_ptr<Node> x2, y2, dout2;

    x2 = transpose_and_flatten3d(x, false);
    y2 = transpose_and_flatten3d(y, false, false);
    dout2 = transpose_and_flatten3d(dout, false);

    auto x2_shape = x2->get_shape();
    auto y2_shape = y2->get_shape();

    if (nx >= 3 || ny >= 3)
    {
        shared_ptr<Node> dout_temp;
        if (ndout == 2)
        {
            dout_temp = make_shared<op::Reshape>(
                dout, AxisVector{0, 1}, Shape{dout_shape[0], dout_shape[1], 1});
            if (ny < 3)
            {
                dout2 = dout_temp;
            }
            else
            {
                dout2 = transpose_and_flatten3d(dout_temp, true);
            }
        }
        x2 = broadcast_to_3d(x2, y_shape[0]);
        y2 = broadcast_to_3d(y2, x_shape[0]);
    }
    else
    {
        dout2 = transpose_and_flatten3d(dout, false, nx == 1 && m_transpose_a == false);
    }

    if (m_transpose_b == false)
    {
        y2 = transpose_and_flatten3d(y2, true);
    }

    if (m_transpose_a == false)
    {
        x2 = transpose_and_flatten3d(x2, true);
    }

    auto dx = dot_helper(dout2, y2);
    auto dy = dot_helper(x2, dout2);

    if (m_transpose_a == true)
    {
        dx = transpose_and_flatten3d(dx, true);
    }

    if (m_transpose_b == true)
    {
        dy = transpose_and_flatten3d(dy, true);
    }

    if (nx < 3 && ny >= 3)
    {
        dx = make_shared<op::Sum>(dx, AxisSet{0});
    }

    if (ny < 3 && nx >= 3)
    {
        dy = make_shared<op::Sum>(dy, AxisSet{0});
    }

    auto dx_t = reshape_to_original(dx, x_shape);
    auto dy_t = reshape_to_original(dy, y_shape);

    return NodeVector{dx_t, dy_t};
}

shared_ptr<Node> MatMulGrad::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<MatMulGrad>(
        new_args.at(0), new_args.at(1), new_args.at(2), m_transpose_a, m_transpose_b);
}
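The helpers above reduce every input to at most rank 3 before BatchMatMul: all leading dimensions collapse into one batch dimension, and transposition swaps only the trailing two. A standalone sketch of that reshape rule (plain C++, not part of the deleted sources; the shape is illustrative):

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

int main()
{
    // Rank-4 shape: the leading dims collapse into one batch dim, so
    // {2, 5, 3, 4} becomes {10, 3, 4}; with transpose the last two dims swap.
    std::vector<std::size_t> shape{2, 5, 3, 4};
    std::size_t n = shape.size();

    std::size_t outer = 1;
    for (std::size_t i = 0; i < n - 2; i++)
    {
        outer *= shape[i]; // product of all leading (batch) dims
    }

    bool transpose = false;
    std::vector<std::size_t> reshape{outer, shape[n - 2], shape[n - 1]};
    if (transpose)
    {
        std::swap(reshape[1], reshape[2]); // swap the trailing matrix dims
    }

    for (std::size_t d : reshape)
    {
        std::cout << d << ' '; // prints: 10 3 4
    }
    std::cout << '\n';
}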
src/ngraph/frontend/fluid/operators/matmul.hpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"

using namespace std;
using namespace ngraph;

namespace ngraph
{
    namespace fluid
    {
        /// \brief Operator performing Matrix Multiplication.
        class NGRAPH_API MatMul : public op::util::FusedOp
        {
        public:
            static constexpr NodeTypeInfo type_info{"MatMul", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            MatMul() = default;

            /// \brief Constructs a MatMul operation.
            ///
            /// \param A Matrix A
            /// \param B Matrix B
            /// \param transpose_a If matrix A should be transposed.
            /// \param transpose_b If matrix B should be transposed.
            MatMul(const Output<Node>& A,
                   const Output<Node>& B,
                   const bool transpose_a,
                   const bool transpose_b);

            virtual NodeVector decompose_op() const override;

            void pre_validate_and_infer_types() override;

            virtual shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            bool get_transpose_a() const { return m_transpose_a; }
            bool get_transpose_b() const { return m_transpose_b; }
        private:
            bool m_transpose_a{false};
            bool m_transpose_b{false};
        };

        class NGRAPH_API MatMulGrad : public op::util::FusedOp
        {
        public:
            static constexpr NodeTypeInfo type_info{"MatMulGrad", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            MatMulGrad() = default;

            /// \brief Constructs a MatMul Grad operation.
            ///
            /// \param A Matrix A
            /// \param B Matrix B
            /// \param Out Output delta
            /// \param transpose_a If matrix A should be transposed.
            /// \param transpose_b If matrix B should be transposed.
            MatMulGrad(const Output<Node>& A,
                       const Output<Node>& B,
                       const Output<Node>& Out,
                       const bool transpose_a,
                       const bool transpose_b);

            virtual NodeVector decompose_op() const override;

            void pre_validate_and_infer_types() override;

            virtual shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            bool get_transpose_a() const { return m_transpose_a; }
            bool get_transpose_b() const { return m_transpose_b; }
        private:
            bool m_transpose_a{false};
            bool m_transpose_b{false};
        };
    } // namespace fluid
} // namespace ngraph
src/ngraph/frontend/fluid/operators/pool.cpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include "ngraph/frontend/fluid/operators/pool.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "ngraph/op/max_pool.hpp"

using namespace std;
using namespace ngraph::fluid;

static size_t calculate_adaptive(size_t input_dim, size_t window_dim)
{
    return floor(input_dim / window_dim);
}

constexpr NodeTypeInfo Pool::type_info;

Pool::Pool(const Output<Node>& x,
           const Shape& window_shape,
           const Strides& window_movement_strides,
           const Shape& padding,
           const bool global_pooling,
           const bool ceil_mode,
           const bool exclusive,
           const bool adaptive,
           const string pooling_type)
    : FusedOp({x})
    , m_window_shape(window_shape)
    , m_window_movement_strides(window_movement_strides)
    , m_padding(padding)
    , m_global_pooling(global_pooling)
    , m_ceil_mode(ceil_mode)
    , m_exclusive(exclusive)
    , m_adaptive(adaptive)
    , m_pooling_type(pooling_type)
{
    constructor_validate_and_infer_types();
}

NodeVector Pool::decompose_op() const
{
    auto x = input_value(0);
    auto x_shape = get_input_shape(0);

    Shape window_shape = get_window_shape();
    Strides strides = get_window_movement_strides();
    Shape padding = get_padding();
    bool global_pooling = get_global_pooling();
    bool exclusive = get_exclusive();
    bool adaptive = get_adaptive();
    string pooling_type = get_pooling_type();

    NODE_VALIDATION_CHECK(
        this, x_shape.size() - 2 == window_shape.size(), "Supporting 2d pooling only");

    if (global_pooling)
    {
        for (size_t i = 0; i < window_shape.size(); ++i)
        {
            padding[i] = 0;
            window_shape[i] = x_shape[i + 2];
        }
    }

    shared_ptr<Node> pool;

    if (pooling_type == "max")
    {
        pool = make_shared<op::MaxPool>(x, window_shape, strides, padding, padding);
    }
    else if (pooling_type == "avg")
    {
        if (adaptive)
        {
            if (x_shape.size() == 4)
            {
                strides[0] = calculate_adaptive(x_shape[2], window_shape[0]);
                strides[1] = calculate_adaptive(x_shape[3], window_shape[1]);
            }
            pool = make_shared<op::AvgPool>(x, window_shape, strides);
        }
        else
        {
            if (padding[0] == 0 && padding[1] == 0)
            {
                exclusive = false;
            }
            pool = make_shared<op::AvgPool>(
                x, window_shape, strides, padding, padding, !exclusive);
        }
    }
    else
    {
        throw ngraph_error("Unsupported pooling type");
    }

    return {pool};
}

shared_ptr<Node> Pool::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<Pool>(new_args.at(0),
                             get_window_shape(),
                             get_window_movement_strides(),
                             get_padding(),
                             get_global_pooling(),
                             get_ceil_mode(),
                             get_exclusive(),
                             get_adaptive(),
                             get_pooling_type());
}

void Pool::pre_validate_and_infer_types()
{
    auto shape = get_input_partial_shape(0);

    if (shape.is_dynamic())
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
    }
}

constexpr NodeTypeInfo PoolGrad::type_info;

PoolGrad::PoolGrad(const Output<Node>& x,
                   const Output<Node>& output,
                   const Output<Node>& output_delta,
                   const Shape& window_shape,
                   const Strides& window_movement_strides,
                   const Shape& padding,
                   const bool global_pooling,
                   const bool exclusive,
                   const bool adaptive,
                   const string pooling_type)
    : FusedOp({x, output, output_delta})
    , m_window_shape(window_shape)
    , m_window_movement_strides(window_movement_strides)
    , m_padding(padding)
    , m_global_pooling(global_pooling)
    , m_exclusive(exclusive)
    , m_adaptive(adaptive)
    , m_pooling_type(pooling_type)
{
    constructor_validate_and_infer_types();
}

void PoolGrad::pre_validate_and_infer_types()
{
    if (get_input_partial_shape(0).is_dynamic() || get_input_partial_shape(1).is_dynamic() ||
        get_input_partial_shape(2).is_dynamic())
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
    }
}

shared_ptr<Node> PoolGrad::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<PoolGrad>(new_args.at(0),
                                 new_args.at(1),
                                 new_args.at(2),
                                 get_window_shape(),
                                 get_window_movement_strides(),
                                 get_padding(),
                                 get_global_pooling(),
                                 get_exclusive(),
                                 get_adaptive(),
                                 get_pooling_type());
}

NodeVector PoolGrad::decompose_op() const
{
    auto x = input_value(0);
    auto x_shape = get_input_shape(0);
    auto output = input_value(1);
    auto output_delta = input_value(2);

    Shape window_shape = get_window_shape();
    Strides strides = get_window_movement_strides();
    Shape padding = get_padding();
    bool global_pooling = get_global_pooling();
    bool exclusive = get_exclusive();
    bool adaptive = get_adaptive();
    string pooling_type = get_pooling_type();

    NODE_VALIDATION_CHECK(
        this, x_shape.size() - 2 == window_shape.size(), "Supporting 2d pooling only");

    if (global_pooling)
    {
        for (size_t i = 0; i < window_shape.size(); ++i)
        {
            padding[i] = 0;
            window_shape[i] = x_shape[i + 2];
        }
    }

    shared_ptr<Node> pool_grad;

    if (pooling_type == "max")
    {
        pool_grad = make_shared<op::MaxPoolBackprop>(
            x, output_delta, output, window_shape, strides, padding, padding);
    }
    else if (pooling_type == "avg")
    {
        if (adaptive && x_shape.size() == 4)
        {
            strides[0] = calculate_adaptive(x_shape[2], window_shape[0]);
            strides[1] = calculate_adaptive(x_shape[3], window_shape[1]);
        }
        else if (padding[0] == 0 && padding[1] == 0)
        {
            exclusive = false;
        }
        pool_grad = make_shared<op::AvgPoolBackprop>(x.get_shape(),
                                                     output_delta,
                                                     window_shape,
                                                     strides,
                                                     padding,
                                                     padding,
                                                     !exclusive);
    }
    else
    {
        throw ngraph_error("Unsupported pooling type");
    }

    return {pool_grad};
}
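For adaptive average pooling, calculate_adaptive derives each stride from the ratio of spatial extent to window extent, so the windows tile the input. A standalone sketch of the same arithmetic (plain C++, not part of the deleted sources; the sizes are illustrative):

#include <cstddef>
#include <iostream>

// Same rule as the file-static calculate_adaptive above: the stride is the
// quotient of the spatial extent by the window extent (size_t division floors).
static std::size_t calculate_adaptive(std::size_t input_dim, std::size_t window_dim)
{
    return input_dim / window_dim;
}

int main()
{
    // NCHW input 1x1x9x12 with a 3x4 window: strides become {3, 3}, so the
    // 3x4 windows tile the 9x12 spatial dims exactly.
    std::size_t h_stride = calculate_adaptive(9, 3);
    std::size_t w_stride = calculate_adaptive(12, 4);
    std::cout << h_stride << ' ' << w_stride << '\n'; // prints: 3 3
}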
src/ngraph/frontend/fluid/operators/pool.hpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"

using namespace std;
using namespace ngraph;

namespace ngraph
{
    namespace fluid
    {
        /// \brief Fluid pool
        class NGRAPH_API Pool : public ngraph::op::util::FusedOp
        {
        public:
            static constexpr NodeTypeInfo type_info{"FluidPool", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            Pool() = default;

            /// \brief Constructs a Pool operation.
            ///
            /// \param x Input x
            Pool(const Output<Node>& x,
                 const Shape& window_shape,
                 const Strides& window_movement_strides,
                 const Shape& padding,
                 const bool global_pooling,
                 const bool ceil_mode,
                 const bool exclusive,
                 const bool adaptive,
                 const string pooling_type);

            virtual NodeVector decompose_op() const override;

            virtual void pre_validate_and_infer_types() override;

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            const Shape& get_window_shape() const { return m_window_shape; }
            void set_window_shape(const Shape& window_shape) { m_window_shape = window_shape; }
            const Strides& get_window_movement_strides() const
            {
                return m_window_movement_strides;
            }
            void set_window_movement_strides(const Strides& window_movement_strides)
            {
                m_window_movement_strides = window_movement_strides;
            }
            const Shape& get_padding() const { return m_padding; }
            void set_padding(const Shape& padding) { m_padding = padding; }
            bool get_global_pooling() const { return m_global_pooling; }
            bool get_ceil_mode() const { return m_ceil_mode; }
            bool get_exclusive() const { return m_exclusive; }
            bool get_adaptive() const { return m_adaptive; }
            const string get_pooling_type() const { return m_pooling_type; }
        protected:
            Shape m_window_shape;
            Strides m_window_movement_strides;
            Shape m_padding;
            bool m_global_pooling;
            bool m_ceil_mode;
            bool m_exclusive;
            bool m_adaptive;
            string m_pooling_type;
        };

        /// \brief Fluid pool_grad
        class NGRAPH_API PoolGrad : public ngraph::op::util::FusedOp
        {
        public:
            static constexpr NodeTypeInfo type_info{"FluidPoolGrad", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            PoolGrad() = default;

            /// \brief Constructs a PoolGrad operation.
            ///
            /// \param x Input tensor
            PoolGrad(const Output<Node>& x,
                     const Output<Node>& output,
                     const Output<Node>& output_delta,
                     const Shape& window_shape,
                     const Strides& window_movement_strides,
                     const Shape& padding,
                     const bool global_pooling,
                     const bool exclusive,
                     const bool adaptive,
                     const string pooling_type);

            virtual NodeVector decompose_op() const override;

            virtual void pre_validate_and_infer_types() override;

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

            const Shape& get_window_shape() const { return m_window_shape; }
            void set_window_shape(const Shape& window_shape) { m_window_shape = window_shape; }
            const Strides& get_window_movement_strides() const
            {
                return m_window_movement_strides;
            }
            void set_window_movement_strides(const Strides& window_movement_strides)
            {
                m_window_movement_strides = window_movement_strides;
            }
            const Shape& get_padding() const { return m_padding; }
            void set_padding(const Shape& padding) { m_padding = padding; }
            bool get_global_pooling() const { return m_global_pooling; }
            bool get_exclusive() const { return m_exclusive; }
            bool get_adaptive() const { return m_adaptive; }
            const string get_pooling_type() const { return m_pooling_type; }
        protected:
            Shape m_window_shape;
            Strides m_window_movement_strides;
            Shape m_padding;
            bool m_global_pooling;
            bool m_exclusive;
            bool m_adaptive;
            string m_pooling_type;
        };
    }
}
src/ngraph/frontend/fluid/operators/reduce_sum.cpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <cmath>
#include <cstring>
#include <numeric>

#include "ngraph/builder/make_constant.hpp"
#include "ngraph/frontend/fluid/operators/reduce_sum.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/reduce_sum.hpp"
#include "ngraph/op/reshape.hpp"

using namespace std;
using namespace ngraph::fluid;

constexpr NodeTypeInfo ReduceSum::type_info;

ReduceSum::ReduceSum(const Output<Node>& x,
                     const vector<int>& dim,
                     bool reduce_all,
                     bool keep_dim)
    : FusedOp({x})
    , m_dim(dim)
    , m_reduce_all(reduce_all)
    , m_keep_dim(keep_dim)
{
    constructor_validate_and_infer_types();
}

NodeVector ReduceSum::decompose_op() const
{
    auto shape = get_input_partial_shape(0);

    if (shape.is_dynamic())
    {
        throw ngraph_error("Input needs to have static shape to decompose");
    }

    auto input_shape = shape.to_shape();
    int input_rank = static_cast<int>(input_shape.size());
    NodeVector retval;
    vector<size_t> axes;

    // Use reduce_sum v1 to support keep_dim
    if (m_reduce_all)
    {
        for (size_t axis = 0; axis < input_rank; axis++)
        {
            axes.emplace_back(axis);
        }
    }
    else
    {
        for (int axis : m_dim)
        {
            axes.emplace_back(axis < 0 ? static_cast<size_t>(axis + input_rank)
                                       : static_cast<size_t>(axis));
        }
    }

    auto axes_node = make_shared<ngraph::op::Constant>(element::i64, Shape{axes.size()}, axes);
    auto node = make_shared<ngraph::op::v1::ReduceSum>(input_value(0), axes_node, m_keep_dim);
    retval.emplace_back(node);

    return retval;
}

void ReduceSum::pre_validate_and_infer_types()
{
    auto shape = get_input_partial_shape(0);

    if (shape.is_dynamic())
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
    }
}

shared_ptr<Node> ReduceSum::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<ReduceSum>(new_args.at(0), m_dim, m_reduce_all, m_keep_dim);
}

constexpr NodeTypeInfo ReduceSumGrad::type_info;

ReduceSumGrad::ReduceSumGrad(const Output<Node>& x,
                             const Output<Node>& y,
                             const vector<int>& dim,
                             bool reduce_all,
                             bool keep_dim)
    : FusedOp({x, y})
    , m_dim(dim)
    , m_reduce_all(reduce_all)
    , m_keep_dim(keep_dim)
{
    constructor_validate_and_infer_types();
}

NodeVector ReduceSumGrad::decompose_op() const
{
    auto x_shape = get_input_partial_shape(0);
    auto y_shape = get_input_partial_shape(1);

    if (x_shape.is_dynamic() || y_shape.is_dynamic())
    {
        throw ngraph_error("All input needs to have static shape to decompose");
    }

    auto input_shape = x_shape.to_shape();
    int input_rank = static_cast<int>(input_shape.size());
    NodeVector retval;
    vector<size_t> axes;

    if (m_reduce_all)
    {
        for (size_t axis = 0; axis < input_rank; axis++)
        {
            axes.emplace_back(axis);
        }
    }
    else
    {
        for (int axis : m_dim)
        {
            axes.emplace_back(axis < 0 ? static_cast<size_t>(axis + input_rank)
                                       : static_cast<size_t>(axis));
        }
    }

    AxisSet red_axes(axes);
    auto grad = input_value(1);

    // squeeze kept dim in y
    if (m_keep_dim)
    {
        auto grad_shape = y_shape.to_shape();
        AxisVector axis_vec(grad_shape.size());
        iota(axis_vec.begin(), axis_vec.end(), 0);

        for (size_t axis : axes)
        {
            grad_shape[axis] = 0;
        }

        vector<size_t> squeezed;
        for (size_t dim : grad_shape)
        {
            if (dim != 0)
            {
                squeezed.emplace_back(dim);
            }
        }
        Shape squeezed_grad_shape(squeezed);

        grad = make_shared<ngraph::op::v0::Reshape>(grad, axis_vec, squeezed_grad_shape);
    }

    // broadcast the reduced axes
    auto node = make_shared<ngraph::op::v0::Broadcast>(grad, input_shape, red_axes);
    retval.emplace_back(node);

    return retval;
}

void ReduceSumGrad::pre_validate_and_infer_types()
{
    auto shape = get_input_partial_shape(0);

    if (shape.is_dynamic())
    {
        set_output_type(0, get_input_element_type(0), PartialShape::dynamic());
    }
}

shared_ptr<Node> ReduceSumGrad::copy_with_new_args(const NodeVector& new_args) const
{
    check_new_args_count(this, new_args);
    return make_shared<ReduceSumGrad>(
        new_args.at(0), new_args.at(1), m_dim, m_reduce_all, m_keep_dim);
}
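Both ops normalize negative entries of m_dim by adding the input rank, so dim = {-1} means the last axis. A standalone sketch of that normalization (plain C++, not part of the deleted sources; the values are illustrative):

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
    // dim = {-1, 0} on a rank-2 input normalizes to axes {1, 0}, matching
    // the "axis < 0 ? axis + input_rank : axis" rule in decompose_op.
    std::vector<int> dim{-1, 0};
    int input_rank = 2;

    std::vector<std::size_t> axes;
    for (int axis : dim)
    {
        axes.push_back(axis < 0 ? static_cast<std::size_t>(axis + input_rank)
                                : static_cast<std::size_t>(axis));
    }

    for (std::size_t a : axes)
    {
        std::cout << a << ' '; // prints: 1 0
    }
    std::cout << '\n';
}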
src/ngraph/frontend/fluid/operators/reduce_sum.hpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#pragma once

#include "ngraph/node.hpp"
#include "ngraph/op/op.hpp"
#include "ngraph/op/util/fused_op.hpp"

using namespace std;
using namespace ngraph;

namespace ngraph
{
    namespace fluid
    {
        /// \brief Fluid reduce_sum
        class NGRAPH_API ReduceSum : public ngraph::op::util::FusedOp
        {
        public:
            static constexpr NodeTypeInfo type_info{"FluidReduceSum", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            ReduceSum() = default;

            /// \brief Constructs a ReduceSum operation.
            ///
            /// \param data Input tensor
            ReduceSum(const Output<Node>& data,
                      const vector<int>& dim,
                      bool reduce_all,
                      bool keep_dim);

            virtual NodeVector decompose_op() const override;

            virtual void pre_validate_and_infer_types() override;

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

        protected:
            vector<int> m_dim;
            AxisSet m_reduction_axes;
            bool m_reduce_all;
            bool m_keep_dim;
        };

        /// \brief Fluid reduce_sum_grad
        class NGRAPH_API ReduceSumGrad : public ngraph::op::util::FusedOp
        {
        public:
            static constexpr NodeTypeInfo type_info{"FluidReduceSumGrad", 0};
            const NodeTypeInfo& get_type_info() const override { return type_info; }
            ReduceSumGrad() = default;

            /// \brief Constructs a ReduceSumGrad operation.
            ///
            /// \param x Input tensor
            ReduceSumGrad(const Output<Node>& x,
                          const Output<Node>& y,
                          const vector<int>& dim,
                          bool reduce_all,
                          bool keep_dim);

            virtual NodeVector decompose_op() const override;

            virtual void pre_validate_and_infer_types() override;

            virtual std::shared_ptr<Node>
                copy_with_new_args(const NodeVector& new_args) const override;

        protected:
            vector<int> m_dim;
            AxisSet m_reduction_axes;
            bool m_reduce_all;
            bool m_keep_dim;
        };
    }
}
src/ngraph/frontend/fluid/operators/test/CMakeLists.txt
deleted 100644 → 0 (view file @ c4bcabac)

# ******************************************************************************
# Copyright 2017-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************

if (NOT NGRAPH_FLUID_TEST_ENABLE)
    message(STATUS "Fluid tests disabled")
    return()
endif()

if (NOT NGRAPH_CPU_ENABLE)
    message(STATUS "Fluid tests needs CPU enabled")
    return()
endif()

if (NOT NGRAPH_TEST_UTIL_ENABLE)
    message(WARNING "Fluid test: Turning on test util!")
    set(NGRAPH_TEST_UTIL_ENABLE ON)
endif()

message(STATUS "fluid tests enabled")

if (LINUX)
    set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
endif()

if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
    if (CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "4.0.0")
        # gtest has issues with this with v1.8.x
        # gtest issue is supposed to be addressed after v1.8.x
        add_compile_options(-Wno-zero-as-null-pointer-constant)
    endif()
endif()

set(SRC
    main.cpp
    reduce_sum.cpp
    lookup_table.cpp
)

set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/../../../../../../test)

add_executable(fluid-test ${SRC})

target_include_directories(fluid-test PRIVATE "../../../../../../test")

target_link_libraries(fluid-test PRIVATE ngraph_test_util)
target_link_libraries(fluid-test PRIVATE ngraph libgtest)

if (NOT WIN32)
    target_link_libraries(fluid-test PRIVATE pthread)
endif()
target_link_libraries(fluid-test PRIVATE ${CMAKE_DL_LIBS})

if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "^(Apple)?Clang$")
    target_compile_options(fluid-test PRIVATE -Wno-undef -Wno-reserved-id-macro)
endif()

# So many type_prop tests these days that we need to set /bigobj flag for MSVC.
# We should probably split up type_prop.cpp.
if (MSVC)
    target_compile_options(fluid-test PRIVATE "/bigobj")
endif()

# The INTERPRETER backend is required for convolution, and backwards unit tests
target_link_libraries(fluid-test PRIVATE cpu_backend)
target_link_libraries(fluid-test PRIVATE libmkldnn)

target_compile_definitions(fluid-test PRIVATE NGRAPH_CPU_ENABLE)

if (NGRAPH_TBB_ENABLE)
    target_compile_definitions(fluid-test PRIVATE "NGRAPH_TBB_ENABLE")
endif()
src/ngraph/frontend/fluid/operators/test/lookup_table.cpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>

#undef IN_NGRAPH_LIBRARY
#include "gtest/gtest.h"

#include "ngraph/frontend/fluid/operators/lookup_table.hpp"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

static std::mt19937_64 random_generator;

using namespace std;
using namespace ngraph;

static string s_manifest = "test.manifest";

NGRAPH_TEST(CPU, fluid_lookup_table_v2)
{
    Shape params_shape{3, 2};
    Shape indices_shape{2, 2, 3, 4};
    Shape out_shape{2, 2, 3, 4, 2};
    auto P = make_shared<op::Parameter>(element::u8, params_shape);
    auto I = make_shared<op::Parameter>(element::i32, indices_shape);
    auto G = make_shared<fluid::LookupTable2>(P, I, -1);
    auto f = make_shared<Function>(G, ParameterVector{P, I});

    auto backend = runtime::Backend::create("CPU");

    // Create some tensors for input/output
    auto p = backend->create_tensor(element::u8, params_shape);
    copy_data(p, vector<uint8_t>{10, 11, 20, 21, 30, 31});
    auto i = backend->create_tensor(element::i32, indices_shape);
    copy_data(i, vector<int32_t>{0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2,
                                 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2,
                                 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2, 0, 1, 1, 2});
    auto result = backend->create_tensor(element::u8, out_shape);

    auto c = backend->compile(f);
    c->call_with_validate({result}, {p, i});
    EXPECT_TRUE(test::all_close(
        (vector<uint8_t>{10, 11, 20, 21, 20, 21, 30, 31, 10, 11, 20, 21, 20, 21, 30, 31,
                         10, 11, 20, 21, 20, 21, 30, 31, 10, 11, 20, 21, 20, 21, 30, 31,
                         10, 11, 20, 21, 20, 21, 30, 31, 10, 11, 20, 21, 20, 21, 30, 31,
                         10, 11, 20, 21, 20, 21, 30, 31, 10, 11, 20, 21, 20, 21, 30, 31,
                         10, 11, 20, 21, 20, 21, 30, 31, 10, 11, 20, 21, 20, 21, 30, 31,
                         10, 11, 20, 21, 20, 21, 30, 31, 10, 11, 20, 21, 20, 21, 30, 31}),
        read_vector<uint8_t>(result)));
}
src/ngraph/frontend/fluid/operators/test/main.cpp
deleted 100644 → 0 (view file @ c4bcabac)

//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <chrono>
#include <iostream>

#undef IN_NGRAPH_LIBRARY
#include "gtest/gtest.h"

#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/backend_manager.hpp"

using namespace std;

int main(int argc, char** argv)
{
    const string cpath_flag{"--cpath"};
    string cpath;
    const char* exclude = "--gtest_filter=-benchmark.*";

    vector<char*> argv_vector;
    argv_vector.push_back(argv[0]);
    argv_vector.push_back(const_cast<char*>(exclude));
    for (int i = 1; i < argc; i++)
    {
        argv_vector.push_back(argv[i]);
    }
    argc = argv_vector.size();

    ::testing::InitGoogleTest(&argc, argv_vector.data());

    for (int i = 1; i < argc; i++)
    {
        if (cpath_flag == argv[i] && (++i) < argc)
        {
            cpath = argv[i];
        }
    }

    ngraph::runtime::Backend::set_backend_shared_library_search_directory(cpath);

#ifdef NGRAPH_CPU_ENABLE
    ngraph_register_cpu_backend();
#endif

    auto start = std::chrono::system_clock::now();
    int rc = RUN_ALL_TESTS();
    auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now() - start);

    NGRAPH_DEBUG_PRINT(
        "[MAIN] Tests finished: Time: %d ms Exit code: %d", elapsed.count(), rc);

    return rc;
}
src/ngraph/frontend/fluid/operators/test/reduce_sum.cpp
deleted
100644 → 0
View file @
c4bcabac
//*****************************************************************************
// Copyright 2017-2020 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <random>
#include <string>
#undef IN_NGRAPH_LIBRARY
#include "gtest/gtest.h"
#include "ngraph/frontend/fluid/operators/reduce_sum.hpp"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"
static
std
::
mt19937_64
random_generator
;
using
namespace
std
;
using
namespace
ngraph
;
static
string
s_manifest
=
"test.manifest"
;
NGRAPH_TEST(CPU, fluid_reduce_sum_dynamic)
{
    // Build forward ReduceSum and backward ReduceSumGrad graphs over dynamic-shape
    // inputs, with dim = {-1}, reduce_all = false, and keep_dim = false.
    auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto y = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    vector<int> dim = {-1};
    auto sum = make_shared<ngraph::fluid::ReduceSum>(x, dim, false, false);
    auto grad = make_shared<ngraph::fluid::ReduceSumGrad>(x, y, dim, false, false);

    ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
    ASSERT_TRUE(grad->get_output_partial_shape(0).rank().is_dynamic());

    auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x});
    auto g = make_shared<Function>(NodeVector{grad}, ParameterVector{x, y});

    auto backend = runtime::Backend::create("CPU", true);
    auto ex = backend->compile(f);
    auto gex = backend->compile(g);

    auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
    auto t_gr = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    std::vector<Shape> x_shapes{Shape{2, 3}, Shape{5}};
    std::vector<Shape> y_shapes{Shape{2}, Shape{}};
    std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6}, {1, 2, 3, 4, 5}};
    std::vector<std::vector<float>> grads{{1, 2}, {1}};
    std::vector<Shape> expected_result_shapes{Shape{2}, Shape{}};
    std::vector<Shape> expected_gresult_shapes{Shape{2, 3}, Shape{5}};
    std::vector<std::vector<float>> expected_results{{6, 15}, {15}};
    std::vector<std::vector<float>> expected_gresults{{1, 1, 1, 2, 2, 2}, {1, 1, 1, 1, 1}};

    for (size_t i = 0; i < x_shapes.size(); i++)
    {
        auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
        auto t_y = backend->create_tensor(element::f32, y_shapes[i]);
        copy_data(t_x, inputs[i]);
        copy_data(t_y, grads[i]);

        ex->call_with_validate({t_r}, {t_x});
        gex->call_with_validate({t_gr}, {t_x, t_y});

        ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
        ASSERT_EQ(t_gr->get_shape(), expected_gresult_shapes[i]);

        auto results = read_vector<float>(t_r);
        auto gresults = read_vector<float>(t_gr);
        ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
        ASSERT_TRUE(test::all_close_f(gresults, expected_gresults[i], MIN_FLOAT_TOLERANCE_BITS));
    }
}
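// Note on the expected values above: summing the 2x3 input {1,...,6} along its
// last axis gives {6, 15}, and the backward op broadcasts the incoming gradient
// y = {1, 2} back across the reduced axis, yielding {1, 1, 1, 2, 2, 2}. For the
// rank-1 input {1,...,5}, the forward result is the scalar 15 and the scalar
// gradient 1 broadcasts to all five elements.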
NGRAPH_TEST(CPU, fluid_reduce_sum_all_dynamic)
{
    // Build forward ReduceSum and backward ReduceSumGrad graphs over dynamic-shape
    // inputs, with dim = {-1}, reduce_all = true, and keep_dim = false.
    auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto y = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    vector<int> dim = {-1};
    auto sum = make_shared<ngraph::fluid::ReduceSum>(x, dim, true, false);
    auto grad = make_shared<ngraph::fluid::ReduceSumGrad>(x, y, dim, true, false);

    ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
    ASSERT_TRUE(grad->get_output_partial_shape(0).rank().is_dynamic());

    auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x});
    auto g = make_shared<Function>(NodeVector{grad}, ParameterVector{x, y});

    auto backend = runtime::Backend::create("CPU", true);
    auto ex = backend->compile(f);
    auto gex = backend->compile(g);

    auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
    auto t_gr = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    std::vector<Shape> x_shapes{Shape{2, 3}, Shape{5}};
    std::vector<Shape> y_shapes{Shape{}, Shape{}};
    std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6}, {1, 2, 3, 4, 5}};
    std::vector<std::vector<float>> grads{{2}, {1}};
    std::vector<Shape> expected_result_shapes{Shape{}, Shape{}};
    std::vector<Shape> expected_gresult_shapes{Shape{2, 3}, Shape{5}};
    std::vector<std::vector<float>> expected_results{{21}, {15}};
    std::vector<std::vector<float>> expected_gresults{{2, 2, 2, 2, 2, 2}, {1, 1, 1, 1, 1}};

    for (size_t i = 0; i < x_shapes.size(); i++)
    {
        auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
        auto t_y = backend->create_tensor(element::f32, y_shapes[i]);
        copy_data(t_x, inputs[i]);
        copy_data(t_y, grads[i]);

        ex->call_with_validate({t_r}, {t_x});
        gex->call_with_validate({t_gr}, {t_x, t_y});

        ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
        ASSERT_EQ(t_gr->get_shape(), expected_gresult_shapes[i]);

        auto results = read_vector<float>(t_r);
        auto gresults = read_vector<float>(t_gr);
        ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
        ASSERT_TRUE(test::all_close_f(gresults, expected_gresults[i], MIN_FLOAT_TOLERANCE_BITS));
    }
}
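// Note: with reduce_all enabled the reduction collapses every axis, so the forward
// outputs are scalars (1+2+...+6 = 21 and 1+2+...+5 = 15) and the scalar gradients
// {2} and {1} broadcast to every element of the corresponding input.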
NGRAPH_TEST(CPU, fluid_reduce_sum_dynamic_keep_dim)
{
    // Build forward ReduceSum and backward ReduceSumGrad graphs over dynamic-shape
    // inputs, with dim = {-1}, reduce_all = false, and keep_dim = true.
    auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto y = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    vector<int> dim = {-1};
    auto sum = make_shared<ngraph::fluid::ReduceSum>(x, dim, false, true);
    auto grad = make_shared<ngraph::fluid::ReduceSumGrad>(x, y, dim, false, true);

    ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
    ASSERT_TRUE(grad->get_output_partial_shape(0).rank().is_dynamic());

    auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x});
    auto g = make_shared<Function>(NodeVector{grad}, ParameterVector{x, y});

    auto backend = runtime::Backend::create("CPU", true);
    auto ex = backend->compile(f);
    auto gex = backend->compile(g);

    auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
    auto t_gr = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    std::vector<Shape> x_shapes{Shape{2, 3}, Shape{5}};
    std::vector<Shape> y_shapes{Shape{2, 1}, Shape{1}};
    std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6}, {1, 2, 3, 4, 5}};
    std::vector<std::vector<float>> grads{{1, 2}, {1}};
    std::vector<Shape> expected_result_shapes{Shape{2, 1}, Shape{1}};
    std::vector<Shape> expected_gresult_shapes{Shape{2, 3}, Shape{5}};
    std::vector<std::vector<float>> expected_results{{6, 15}, {15}};
    std::vector<std::vector<float>> expected_gresults{{1, 1, 1, 2, 2, 2}, {1, 1, 1, 1, 1}};

    for (size_t i = 0; i < x_shapes.size(); i++)
    {
        auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
        auto t_y = backend->create_tensor(element::f32, y_shapes[i]);
        copy_data(t_x, inputs[i]);
        copy_data(t_y, grads[i]);

        ex->call_with_validate({t_r}, {t_x});
        gex->call_with_validate({t_gr}, {t_x, t_y});

        ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
        ASSERT_EQ(t_gr->get_shape(), expected_gresult_shapes[i]);

        auto results = read_vector<float>(t_r);
        auto gresults = read_vector<float>(t_gr);
        ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
        ASSERT_TRUE(test::all_close_f(gresults, expected_gresults[i], MIN_FLOAT_TOLERANCE_BITS));
    }
}
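// Note: keep_dim retains each reduced axis as a dimension of size 1, so the
// forward outputs here have shapes {2,1} and {1} instead of the {2} and {} seen
// in fluid_reduce_sum_dynamic above; the values themselves are unchanged.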
NGRAPH_TEST(CPU, fluid_reduce_sum_all_dynamic_keep_dim)
{
    // Build forward ReduceSum and backward ReduceSumGrad graphs over dynamic-shape
    // inputs, with dim = {-1}, reduce_all = true, and keep_dim = true.
    auto x = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    auto y = make_shared<op::Parameter>(element::f32, PartialShape::dynamic());
    vector<int> dim = {-1};
    auto sum = make_shared<ngraph::fluid::ReduceSum>(x, dim, true, true);
    auto grad = make_shared<ngraph::fluid::ReduceSumGrad>(x, y, dim, true, true);

    ASSERT_TRUE(sum->get_output_partial_shape(0).rank().is_dynamic());
    ASSERT_TRUE(grad->get_output_partial_shape(0).rank().is_dynamic());

    auto f = make_shared<Function>(NodeVector{sum}, ParameterVector{x});
    auto g = make_shared<Function>(NodeVector{grad}, ParameterVector{x, y});

    auto backend = runtime::Backend::create("CPU", true);
    auto ex = backend->compile(f);
    auto gex = backend->compile(g);

    auto t_r = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());
    auto t_gr = backend->create_dynamic_tensor(element::f32, PartialShape::dynamic());

    std::vector<Shape> x_shapes{Shape{2, 3}, Shape{5}};
    std::vector<Shape> y_shapes{Shape{1, 1}, Shape{1}};
    std::vector<std::vector<float>> inputs{{1, 2, 3, 4, 5, 6}, {1, 2, 3, 4, 5}};
    std::vector<std::vector<float>> grads{{2}, {1}};
    std::vector<Shape> expected_result_shapes{Shape{1, 1}, Shape{1}};
    std::vector<Shape> expected_gresult_shapes{Shape{2, 3}, Shape{5}};
    std::vector<std::vector<float>> expected_results{{21}, {15}};
    std::vector<std::vector<float>> expected_gresults{{2, 2, 2, 2, 2, 2}, {1, 1, 1, 1, 1}};

    for (size_t i = 0; i < x_shapes.size(); i++)
    {
        auto t_x = backend->create_tensor(element::f32, x_shapes[i]);
        auto t_y = backend->create_tensor(element::f32, y_shapes[i]);
        copy_data(t_x, inputs[i]);
        copy_data(t_y, grads[i]);

        ex->call_with_validate({t_r}, {t_x});
        gex->call_with_validate({t_gr}, {t_x, t_y});

        ASSERT_EQ(t_r->get_shape(), expected_result_shapes[i]);
        ASSERT_EQ(t_gr->get_shape(), expected_gresult_shapes[i]);

        auto results = read_vector<float>(t_r);
        auto gresults = read_vector<float>(t_gr);
        ASSERT_TRUE(test::all_close_f(results, expected_results[i], MIN_FLOAT_TOLERANCE_BITS));
        ASSERT_TRUE(test::all_close_f(gresults, expected_gresults[i], MIN_FLOAT_TOLERANCE_BITS));
    }
}
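Taken together, the four tests cover the full 2x2 matrix of the reduce_all and keep_dim flags over the same pair of inputs, checking both the forward sums and the broadcast gradients for each combination.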
src/ngraph/frontend/fluid/operators/test/test.manifest
deleted
100644 → 0
View file @
c4bcabac
# Add test names to disable
# This does not work for now