submodule / ngraph · Commits

Commit 96ae6fdb, authored 7 years ago by Adam Procter, committed by GitHub 7 years ago
Add unparameterized constants, and collapse scalar and tensor constants (#190)
parent 4bec2307
Showing 15 changed files with 353 additions and 105 deletions
src/ngraph/ops/constant.cpp                     +27   -2
src/ngraph/ops/constant.hpp                     +37   -53
src/ngraph/runtime/ngvm/eigen/acos.hpp           +1   -2
src/ngraph/runtime/ngvm/eigen/asin.hpp           +1   -2
src/ngraph/runtime/ngvm/eigen/atan.hpp           +1   -2
src/ngraph/runtime/ngvm/eigen/cos.hpp            +1   -2
src/ngraph/runtime/ngvm/eigen/cosh.hpp           +1   -2
src/ngraph/runtime/ngvm/eigen/sinh.hpp           +1   -2
src/ngraph/runtime/ngvm/eigen/tan.hpp            +1   -2
src/ngraph/runtime/ngvm/eigen/tanh.hpp           +1   -2
src/ngraph/runtime/ngvm/external_function.cpp   +19   -7
src/ngraph/types/element_type.hpp               +49   -0
test/build_graph.cpp                            +14   -13
test/execute.cpp                               +104   -14
test/type_prop.cpp                              +95   -0
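Before the per-file diffs, a quick orientation: the commit folds the old ScalarConstant/TensorConstant pair into a single ParameterizedConstant and adds an unparameterized op::Constant whose value is carried as literal strings. A minimal usage sketch of the new unparameterized form, pieced together from the tests added below (test/execute.cpp, test/type_prop.cpp); it assumes the ngraph headers as of this commit:

    #include <memory>
    #include <string>
    #include <vector>

    #include "ngraph/ngraph.hpp"

    using namespace ngraph;

    int main()
    {
        // A 2x2 Float32 constant described only by element type, shape, and literal strings.
        // The strings are sanity-checked in propagate_types() and parsed by the backend later.
        auto c = std::make_shared<op::Constant>(element::Float32::element_type(),
                                                Shape{2, 2},
                                                std::vector<std::string>{"4.8", "4.7", "-5.3", "0"});

        // Scalar convenience constructor: a single string is replicated across the whole shape.
        auto s = std::make_shared<op::Constant>(element::Float32::element_type(), Shape{}, "4.8");
        return 0;
    }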
src/ngraph/ops/constant.cpp
@@ -16,10 +16,35 @@
 using namespace ngraph::op;
 
-void ScalarConstantBase::propagate_types()
+void ConstantBase::propagate_types()
 {
 }
 
-void TensorConstantBase::propagate_types()
+template <typename ET>
+void check_value_strings(const std::vector<std::string>& value_strings)
 {
+    auto result = ET::read(value_strings);
 }
+
+void Constant::propagate_types()
+{
+    // No actual type propagation is done here; however, we check the number of value strings and
+    // also call check_value_strings just to make sure the result will be parseable at compile
+    // time. (It will throw an exception if not.)
+    auto tvt = std::dynamic_pointer_cast<const TensorViewType>(get_value_type());
+    if (nullptr == tvt)
+    {
+        throw ngraph_error("Constant does not have tensor view type");
+    }
+    auto shape = tvt->get_shape();
+
+    if (ngraph::shape_size(shape) != m_value_strings.size())
+    {
+        throw ngraph_error("Constant does not have the expected number of literals");
+    }
+
+    auto& et = tvt->get_element_type();
+
+    FUNCTION_ON_ELEMENT_TYPE(
+        et, "Constant has unhandled element type", check_value_strings, m_value_strings);
+}
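The FUNCTION_ON_ELEMENT_TYPE dispatch above means a malformed literal is rejected as soon as propagate_types() runs rather than at execution time; the error text comes from the new read() helper in element_type.hpp further down. A small sketch of that failure mode, mirroring the new type_prop tests below (assumes this commit's ngraph headers):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    #include "ngraph/ngraph.hpp"

    using namespace ngraph;

    int main()
    {
        // "grunk" cannot be parsed as a Float32 literal, so type propagation throws.
        auto c = std::make_shared<op::Constant>(element::Float32::element_type(),
                                                Shape{2},
                                                std::vector<std::string>{"1", "grunk"});
        try
        {
            c->propagate_types();
        }
        catch (const ngraph_error& e)
        {
            std::cout << e.what() << "\n"; // "Could not parse literal"
        }
        return 0;
    }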
src/ngraph/ops/constant.hpp
@@ -24,11 +24,11 @@ namespace ngraph
 {
     namespace op
     {
-        // Defines methods to all constant scalars
-        class ScalarConstantBase : public Node
+        // Defines methods to all constants
+        class ConstantBase : public Node
         {
         protected:
-            ScalarConstantBase(const std::shared_ptr<TensorViewType>& type)
+            ConstantBase(const std::shared_ptr<TensorViewType>& type)
                 : Node({}, type)
             {
             }
@@ -36,10 +36,9 @@ namespace ngraph
             virtual void propagate_types() override;
         };
 
-        // Implement a constant scalar for each element type.
-        // The static make method takes a
+        // Implement a constant tensor for each element type.
         template <typename T>
-        class ScalarConstant : public ScalarConstantBase
+        class ParameterizedConstant : public ConstantBase
         {
         public:
             // The ngraph element type
@@ -47,13 +46,15 @@ namespace ngraph
             // The C++ type that holds the element type
             using type = typename T::type;
 
-            ScalarConstant(typename T::type value)
-                : ScalarConstantBase(std::make_shared<TensorViewType>(T::element_type(), Shape{}))
+            ParameterizedConstant(
+                const Shape& shape,
+                typename std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>>& value)
+                : ConstantBase(std::make_shared<TensorViewType>(T::element_type(), shape))
                 , m_value(value)
             {
             }
 
-            virtual std::string description() const override { return "ScalarConstant"; }
+            virtual std::string description() const override { return "ParameterizedConstant"; }
             virtual std::string get_node_id() const override
             {
                 std::stringstream ss;
@@ -61,48 +62,41 @@ namespace ngraph
                 return ss.str();
             }
 
-            type get_value() const { return m_value; }
-
-        protected:
-            typename T::type m_value;
-        };
-
-        using Float32ScalarConstant = ScalarConstant<element::Float32>;
-        using Int8ScalarConstant = ScalarConstant<element::Int8>;
-        using Int32ScalarConstant = ScalarConstant<element::Int32>;
-        using Int64ScalarConstant = ScalarConstant<element::Int64>;
-        using UInt8ScalarConstant = ScalarConstant<element::UInt8>;
-        using UInt32ScalarConstant = ScalarConstant<element::UInt32>;
-        using UInt64ScalarConstant = ScalarConstant<element::UInt64>;
-
-        // Defines methods to all constant tensors
-        class TensorConstantBase : public Node
-        {
-        protected:
-            TensorConstantBase(const std::shared_ptr<TensorViewType>& type)
-                : Node({}, type)
+            typename std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> get_value() const
             {
+                return m_value;
             }
 
             virtual void propagate_types() override;
-        };
 
-        // Implement a constant tensor for each element type.
-        template <typename T>
-        class TensorConstant : public TensorConstantBase
+        protected:
+            std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> m_value;
+        };
+
+        using Float32Constant = ParameterizedConstant<element::Float32>;
+        using Int8Constant = ParameterizedConstant<element::Int8>;
+        using Int32Constant = ParameterizedConstant<element::Int32>;
+        using Int64Constant = ParameterizedConstant<element::Int64>;
+        using UInt8Constant = ParameterizedConstant<element::UInt8>;
+        using UInt32Constant = ParameterizedConstant<element::UInt32>;
+        using UInt64Constant = ParameterizedConstant<element::UInt64>;
+
+        class Constant : public ConstantBase
         {
         public:
-            // The ngraph element type
-            using element_type = T;
-            // The C++ type that holds the element type
-            using type = typename T::type;
-
-            TensorConstant(const Shape& shape)
-                : TensorConstantBase(std::make_shared<TensorViewType>(T::element_type(), shape))
-                , m_value(ngraph::runtime::make_tensor<T>(shape))
+            Constant(const element::Type& et,
+                     const Shape& shape,
+                     const std::vector<std::string>& value_strings)
+                : ConstantBase(std::make_shared<TensorViewType>(et, shape))
+                , m_value_strings(value_strings)
+            {
+            }
+
+            Constant(const element::Type& et, const Shape& shape, const std::string& value_string)
+                : ConstantBase(std::make_shared<TensorViewType>(et, shape))
+                , m_value_strings(ngraph::shape_size(shape), value_string)
             {
             }
 
-            virtual std::string description() const override { return "TensorConstant"; }
+            virtual std::string description() const override { return "Constant"; }
             virtual std::string get_node_id() const override
             {
                 std::stringstream ss;
@@ -110,21 +104,11 @@ namespace ngraph
                 return ss.str();
             }
 
-            typename std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> get_value() const
-            {
-                return m_value;
-            }
+            const std::vector<std::string>& get_value_strings() const { return m_value_strings; }
 
             virtual void propagate_types() override;
 
         protected:
-            std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> m_value;
+            const std::vector<std::string> m_value_strings;
         };
-
-        using Float32TensorConstant = TensorConstant<element::Float32>;
-        using Int8TensorConstant = TensorConstant<element::Int8>;
-        using Int32TensorConstant = TensorConstant<element::Int32>;
-        using Int64TensorConstant = TensorConstant<element::Int64>;
-        using UInt8TensorConstant = TensorConstant<element::UInt8>;
-        using UInt32TensorConstant = TensorConstant<element::UInt32>;
-        using UInt64TensorConstant = TensorConstant<element::UInt64>;
     }
 }
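The renamed ParameterizedConstant keeps the old behaviour: it wraps a ParameterizedTensorView that the caller builds and fills first. A short sketch based on the updated test/build_graph.cpp below (assumes this commit's headers):

    #include <memory>
    #include <vector>

    #include "ngraph/ngraph.hpp"

    using namespace ngraph;

    int main()
    {
        // Create and fill the backing tensor view, then hand it to the constant node.
        auto t = runtime::make_tensor<element::Float32>(Shape{2, 3});
        (*t) = std::vector<float>{1, 2, 3, 4, 5, 6};

        // op::Float32Constant is the new alias for op::ParameterizedConstant<element::Float32>.
        auto c = std::make_shared<op::Float32Constant>(Shape{2, 3}, t);

        // The node returns the same tensor view it was given.
        return c->get_value()->get_vector().size() == 6 ? 0 : 1;
    }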
src/ngraph/runtime/ngvm/eigen/acos.hpp
@@ -51,4 +51,4 @@ namespace ngraph
 }
 }
 }
-}
\ No newline at end of file
+}
src/ngraph/runtime/ngvm/eigen/asin.hpp
@@ -51,4 +51,4 @@ namespace ngraph
 }
 }
 }
-}
\ No newline at end of file
+}
src/ngraph/runtime/ngvm/eigen/atan.hpp
@@ -51,4 +51,4 @@ namespace ngraph
 }
 }
 }
-}
\ No newline at end of file
+}
src/ngraph/runtime/ngvm/eigen/cos.hpp
@@ -51,4 +51,4 @@ namespace ngraph
 }
 }
 }
-}
\ No newline at end of file
+}
src/ngraph/runtime/ngvm/eigen/cosh.hpp
@@ -51,4 +51,4 @@ namespace ngraph
 }
 }
 }
-}
\ No newline at end of file
+}
src/ngraph/runtime/ngvm/eigen/sinh.hpp
@@ -51,4 +51,4 @@ namespace ngraph
 }
 }
 }
-}
\ No newline at end of file
+}
src/ngraph/runtime/ngvm/eigen/tan.hpp
@@ -51,4 +51,4 @@ namespace ngraph
 }
 }
 }
-}
\ No newline at end of file
+}
src/ngraph/runtime/ngvm/eigen/tanh.hpp
@@ -51,4 +51,4 @@ namespace ngraph
 }
 }
 }
-}
\ No newline at end of file
+}
src/ngraph/runtime/ngvm/external_function.cpp
@@ -315,15 +315,10 @@ ExternalFunction::ExternalFunction(const std::shared_ptr<ngraph::Function>& func
 #define REGISTER_CONSTANT_INSTRUCTIONS(T)                                                \
     {                                                                                    \
         REGISTER_INSTRUCTION(                                                            \
-            op::ScalarConstant<T>,                                                       \
-            eigen::ConstantInstruction<T>,                                               \
-            std::vector<T::type>{dynamic_cast<const op::ScalarConstant<T>*>(n)->get_value()}, \
-            out[0]);                                                                     \
-        REGISTER_INSTRUCTION(                                                            \
-            op::TensorConstant<T>,                                                       \
+            op::ParameterizedConstant<T>,                                                \
             eigen::ConstantInstruction<T>,                                               \
             std::vector<T::type>{                                                        \
-                dynamic_cast<const op::TensorConstant<T>*>(n)->get_value()->get_vector()},        \
+                dynamic_cast<const op::ParameterizedConstant<T>*>(n)->get_value()->get_vector()}, \
             out[0]);                                                                     \
     }
@@ -371,6 +366,23 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
         REGISTER_NUMERIC_BINOP(op::Multiply, eigen::MultiplyInstruction);
         REGISTER_NUMERIC_BINOP(op::Subtract, eigen::SubtractInstruction);
 
+        REGISTER_TO_OP_MAP(op::Constant)
+        {
+            auto c = static_cast<const op::Constant*>(n);
+            auto c_tensor_type = dynamic_pointer_cast<const TensorViewType>(c->get_value_type());
+            assert(nullptr != c_tensor_type);
+            auto& c_element_type = c_tensor_type->get_element_type();
+            auto c_value_strings = c->get_value_strings();
+
+#define M_REGISTER_POLYMORPHIC_CONSTANT(ET)                                              \
+    ef->get_instructions()->push_back(                                                   \
+        make_shared<eigen::ConstantInstruction<ET>>(ET::read(c_value_strings), out[0]));
+
+            DO_ON_ELEMENT_TYPE(
+                c_element_type, "Constant has unhandled element type", M_REGISTER_POLYMORPHIC_CONSTANT);
+        };
+
         REGISTER_POLYMORPHIC_BINOP(op::Equal, eigen::EqualInstruction);
         REGISTER_POLYMORPHIC_BINOP(op::NotEqual, eigen::NotEqualInstruction);
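Two changes meet in this file: the per-element-type registration macro now registers only ParameterizedConstant (the separate ScalarConstant/TensorConstant registrations are gone), and the unparameterized op::Constant gets its own handler that parses the stored literal strings when the function is compiled. For a Float32 constant, the DO_ON_ELEMENT_TYPE dispatch above effectively runs the following (a sketch of the macro expansion, not literal code from the commit; DO_ON_ELEMENT_TYPE itself is defined elsewhere and not shown in this diff):

    // M_REGISTER_POLYMORPHIC_CONSTANT(element::Float32): parse the literal strings into a
    // std::vector<float> and emit a ConstantInstruction that writes them to the output tensor.
    ef->get_instructions()->push_back(make_shared<eigen::ConstantInstruction<element::Float32>>(
        element::Float32::read(c_value_strings), out[0]));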
src/ngraph/types/element_type.hpp
@@ -116,6 +116,35 @@ namespace ngraph
         {
             return std::make_shared<runtime::ParameterizedTensorView<TraitedType<T>>>(shape);
         }
+
+        static T read(const std::string& s)
+        {
+            T result;
+            std::stringstream ss;
+
+            ss << s;
+            ss >> result;
+
+            // Check that (1) parsing succeeded and (2) the entire string was used.
+            if (ss.fail() || ss.rdbuf()->in_avail() != 0)
+            {
+                throw ngraph_error("Could not parse literal");
+            }
+
+            return result;
+        }
+
+        static std::vector<T> read(const std::vector<std::string>& ss)
+        {
+            std::vector<T> result;
+
+            for (auto s : ss)
+            {
+                result.push_back(read(s));
+            }
+
+            return result;
+        }
     };
 
     NGRAPH_DEFINE_TRAITED_TYPE_NAME(char)
@@ -143,3 +172,23 @@ namespace ngraph
     using UInt64 = TraitedType<uint64_t>;
 }
 }
+
+//
+// Utility macro for dispatching an element type-templated function at runtime.
+//
+// clang-format off
+// Sorry, but you really don't want to see what clang-format does to this thing. :)
+#define FUNCTION_ON_ELEMENT_TYPE(et, err_msg, f, ...)                                     \
+    (                                                                                     \
+        ((et) == element::Bool::element_type()) ? (f<element::Bool>(__VA_ARGS__)) :       \
+        ((et) == element::Float32::element_type()) ? (f<element::Float32>(__VA_ARGS__)) : \
+        ((et) == element::Int8::element_type()) ? (f<element::Int8>(__VA_ARGS__)) :       \
+        ((et) == element::Int32::element_type()) ? (f<element::Int32>(__VA_ARGS__)) :     \
+        ((et) == element::Int64::element_type()) ? (f<element::Int64>(__VA_ARGS__)) :     \
+        ((et) == element::UInt8::element_type()) ? (f<element::UInt8>(__VA_ARGS__)) :     \
+        ((et) == element::UInt32::element_type()) ? (f<element::UInt32>(__VA_ARGS__)) :   \
+        ((et) == element::UInt64::element_type()) ? (f<element::UInt64>(__VA_ARGS__)) :   \
+        (throw ngraph_error(err_msg))                                                     \
+    )
+// clang-format on
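The read() helpers above reject any literal that does not parse cleanly or does not consume the whole string, and FUNCTION_ON_ELEMENT_TYPE maps a runtime element type onto a template instantiation with a chain of ternaries. A standalone, compilable illustration of both ideas (plain C++, not ngraph code; the names Tag, ON_TAG, read_literal and check_all are invented for this demo):

    #include <iostream>
    #include <sstream>
    #include <stdexcept>
    #include <string>
    #include <vector>

    // Strict literal parse: succeed only if extraction works and the whole string is consumed,
    // mirroring TraitedType<T>::read above.
    template <typename T>
    T read_literal(const std::string& s)
    {
        T result;
        std::stringstream ss;
        ss << s;
        ss >> result;
        if (ss.fail() || ss.rdbuf()->in_avail() != 0)
        {
            throw std::runtime_error("Could not parse literal");
        }
        return result;
    }

    template <typename T>
    void check_all(const std::vector<std::string>& strings)
    {
        for (const auto& s : strings)
        {
            read_literal<T>(s);
        }
    }

    // Runtime tag -> template instantiation, in the same chained-ternary style as
    // FUNCTION_ON_ELEMENT_TYPE; an unhandled tag turns into an exception.
    enum class Tag { f32, i32 };

    #define ON_TAG(tag, err_msg, f, ...)                                      \
        ((tag) == Tag::f32   ? (f<float>(__VA_ARGS__))                        \
         : (tag) == Tag::i32 ? (f<int>(__VA_ARGS__))                          \
                             : (throw std::runtime_error(err_msg)))

    int main()
    {
        std::vector<std::string> literals{"1", "2.5"};
        ON_TAG(Tag::f32, "unhandled element type", check_all, literals); // both parse as float
        try
        {
            ON_TAG(Tag::i32, "unhandled element type", check_all, literals); // "2.5" is not an int
        }
        catch (const std::runtime_error& e)
        {
            std::cout << e.what() << "\n"; // "Could not parse literal"
        }
        return 0;
    }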
test/build_graph.cpp
@@ -80,23 +80,22 @@ TEST(build_graph, node_comparison)
 TEST(build_graph, literal)
 {
     // float scalar from a float
-    //auto float0 = FloatScalarConstant::make(3.0);
-    auto float0 = make_shared<op::Float32ScalarConstant>(3.0);
+    //auto float0 = FloatConstant::make(3.0);
+    auto float_t = ngraph::runtime::make_tensor<element::Float32>(Shape{});
+    (*float_t) = std::vector<float>{3.0};
+    auto float0 = make_shared<op::Float32Constant>(Shape{}, float_t);
     auto float_scalar_type = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
-    ASSERT_EQ(float0->get_value(), 3.0);
+    ASSERT_EQ(float0->get_value()->get_vector(), std::vector<float>{3.0});
     ASSERT_EQ(*float0->get_value_type(), *float_scalar_type);
     auto d = make_shared<op::Dot>(float0, float0);
     ASSERT_EQ(d->get_arguments().at(0), float0);
     ASSERT_EQ(d->get_arguments().at(1), float0);
 
-    // float scalar from an int
-    auto float1 = make_shared<op::Float32ScalarConstant>(3);
-    ASSERT_EQ(float1->get_value(), 3);
-    ASSERT_EQ(*float1->get_value_type(), *float_scalar_type);
-
-    auto int32_0 = make_shared<op::Int32ScalarConstant>(3.0);
+    auto int32_t = ngraph::runtime::make_tensor<element::Int32>(Shape{});
+    (*int32_t) = std::vector<int>{3};
+    auto int32_0 = make_shared<op::Int32Constant>(Shape{}, int32_t);
     auto int32_scalar_type = make_shared<TensorViewType>(element::Int32::element_type(), Shape{});
-    ASSERT_EQ(int32_0->get_value(), 3);
+    ASSERT_EQ(int32_0->get_value()->get_vector(), std::vector<int>{3});
     ASSERT_EQ(*int32_0->get_value_type(), *int32_scalar_type);
     ASSERT_NE(*int32_0->get_value_type(), *float_scalar_type);
 }
@@ -104,8 +103,9 @@ TEST(build_graph, literal)
 TEST(build_graph, tensor)
 {
     // float scalar from a float
-    //auto float0 = FloatScalarConstant::make(3.0);
-    auto float0 = make_shared<op::Float32TensorConstant>(Shape{2, 3});
+    //auto float0 = FloatConstant::make(3.0);
+    auto float_t = ngraph::runtime::make_tensor<element::Float32>(Shape{2, 3});
+    auto float0 = make_shared<op::Float32Constant>(Shape{2, 3}, float_t);
     auto float_tensor_type = make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 3});
     ASSERT_EQ(*float0->get_value_type(), *float_tensor_type);
@@ -113,7 +113,8 @@ TEST(build_graph, tensor)
     ASSERT_EQ(d->get_arguments().at(0), float0);
     ASSERT_EQ(d->get_arguments().at(1), float0);
 
-    auto int32_0 = make_shared<op::Int32TensorConstant>(Shape{3, 5});
+    auto int32_t = ngraph::runtime::make_tensor<element::Int32>(Shape{3, 5});
+    auto int32_0 = make_shared<op::Int32Constant>(Shape{3, 5}, int32_t);
     auto int32_tensor_type = make_shared<TensorViewType>(element::Int32::element_type(), Shape{3, 5});
     ASSERT_EQ(*int32_0->get_value_type(), *int32_tensor_type);
test/execute.cpp
@@ -997,7 +997,9 @@ TEST(execute, subtract)
 TEST(execute, scalar_constant)
 {
     auto shape = Shape{};
-    auto A = make_shared<op::ScalarConstant<element::Float32>>(-3.0f);
+    auto t = ngraph::runtime::make_tensor<element::Float32>(shape);
+    (*t) = std::vector<float>{-3.0f};
+    auto A = make_shared<op::ParameterizedConstant<element::Float32>>(shape, t);
     auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
     auto f = make_shared<Function>(A, rt, op::Parameters{});
@@ -1016,8 +1018,9 @@ TEST(execute, scalar_constant)
 TEST(execute, tensor_constant)
 {
     auto shape = Shape{2, 2, 2};
-    auto A = make_shared<op::TensorConstant<element::Float32>>(shape);
-    A->get_value()->get_vector() = {1, 2, 3, 4, 5, 6, 7, 8};
+    auto t = ngraph::runtime::make_tensor<element::Float32>(shape);
+    (*t) = std::vector<float>{1, 2, 3, 4, 5, 6, 7, 8};
+    auto A = make_shared<op::ParameterizedConstant<element::Float32>>(shape, t);
     auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
     auto f = make_shared<Function>(A, rt, op::Parameters{});
@@ -1036,8 +1039,9 @@ TEST(execute, tensor_constant)
 TEST(execute, tensor_constant_with_op)
 {
     auto shape = Shape{2, 2, 2};
-    auto A = make_shared<op::TensorConstant<element::Float32>>(shape);
-    A->get_value()->get_vector() = {-1, 2, 3, -4, 5, -6, -7, 8};
+    auto t = ngraph::runtime::make_tensor<element::Float32>(shape);
+    (*t) = std::vector<float>{-1, 2, 3, -4, 5, -6, -7, 8};
+    auto A = make_shared<op::ParameterizedConstant<element::Float32>>(shape, t);
     auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
     auto f = make_shared<Function>(make_shared<op::Abs>(A), rt, op::Parameters{});
@@ -1882,7 +1886,7 @@ TEST(execute, sin)
     auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
 
     std::transform(
-        input.begin(), input.end(), input.begin(), [](float f) -> float { return sinf(f); });
+        input.begin(), input.end(), input.begin(), [](float x) -> float { return sinf(x); });
 
     (*cf)({a}, {result});
     ASSERT_EQ(input, result->get_vector());
@@ -1908,7 +1912,7 @@ TEST(execute, cos)
     auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
 
     std::transform(
-        input.begin(), input.end(), input.begin(), [](float f) -> float { return cosf(f); });
+        input.begin(), input.end(), input.begin(), [](float x) -> float { return cosf(x); });
 
     (*cf)({a}, {result});
     ASSERT_EQ(input, result->get_vector());
@@ -1934,7 +1938,7 @@ TEST(execute, tan)
     auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
 
     std::transform(
-        input.begin(), input.end(), input.begin(), [](float f) -> float { return tanf(f); });
+        input.begin(), input.end(), input.begin(), [](float x) -> float { return tanf(x); });
 
     (*cf)({a}, {result});
     ASSERT_EQ(input, result->get_vector());
@@ -1959,7 +1963,7 @@ TEST(execute, asin)
     auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
 
     std::transform(
-        input.begin(), input.end(), input.begin(), [](float f) -> float { return asinf(f); });
+        input.begin(), input.end(), input.begin(), [](float x) -> float { return asinf(x); });
 
     (*cf)({a}, {result});
     ASSERT_EQ(input, result->get_vector());
@@ -1984,7 +1988,7 @@ TEST(execute, acos)
     auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
 
     std::transform(
-        input.begin(), input.end(), input.begin(), [](float f) -> float { return acosf(f); });
+        input.begin(), input.end(), input.begin(), [](float x) -> float { return acosf(x); });
 
     (*cf)({a}, {result});
     ASSERT_EQ(input, result->get_vector());
@@ -2009,7 +2013,7 @@ TEST(execute, atan)
     auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
 
     std::transform(
-        input.begin(), input.end(), input.begin(), [](float f) -> float { return atanf(f); });
+        input.begin(), input.end(), input.begin(), [](float x) -> float { return atanf(x); });
 
     (*cf)({a}, {result});
     ASSERT_EQ(input, result->get_vector());
@@ -2034,7 +2038,7 @@ TEST(execute, sinh)
     auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
 
     std::transform(
-        input.begin(), input.end(), input.begin(), [](float f) -> float { return sinhf(f); });
+        input.begin(), input.end(), input.begin(), [](float x) -> float { return sinhf(x); });
 
     (*cf)({a}, {result});
     ASSERT_EQ(input, result->get_vector());
@@ -2059,7 +2063,7 @@ TEST(execute, cosh)
     auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
 
     std::transform(
-        input.begin(), input.end(), input.begin(), [](float f) -> float { return coshf(f); });
+        input.begin(), input.end(), input.begin(), [](float x) -> float { return coshf(x); });
 
     (*cf)({a}, {result});
     ASSERT_EQ(input, result->get_vector());
@@ -2084,7 +2088,7 @@ TEST(execute, tanh)
     auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
 
     std::transform(
-        input.begin(), input.end(), input.begin(), [](float f) -> float { return tanhf(f); });
+        input.begin(), input.end(), input.begin(), [](float x) -> float { return tanhf(x); });
 
     (*cf)({a}, {result});
     ASSERT_EQ(input, result->get_vector());
@@ -2184,3 +2188,89 @@ TEST(execute, slice_vector)
     (*cf)({a}, {result});
     ASSERT_EQ((vector<float>{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}), result->get_vector());
 }
+
+TEST(execute, scalar_constant_float32)
+{
+    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
+    auto r = make_shared<op::Constant>(element::Float32::element_type(), Shape{}, "4.8");
+    auto f = make_shared<Function>(r, rt, op::Parameters{});
+
+    auto manager = runtime::Manager::get("NGVM");
+    auto external = manager->compile(f);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    // Create some tensors for input/output
+    auto result = ngraph::runtime::make_tensor<element::Float32>(Shape{});
+
+    (*cf)({}, {result});
+    ASSERT_EQ(vector<float>{std::strtof("4.8", NULL)}, result->get_vector());
+}
+
+TEST(execute, scalar_constant_int64)
+{
+    auto rt = make_shared<TensorViewType>(element::Int64::element_type(), Shape{});
+    auto r = make_shared<op::Constant>(element::Int64::element_type(), Shape{}, "2112");
+    auto f = make_shared<Function>(r, rt, op::Parameters{});
+
+    auto manager = runtime::Manager::get("NGVM");
+    auto external = manager->compile(f);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    // Create some tensors for input/output
+    auto result = ngraph::runtime::make_tensor<element::Int64>(Shape{});
+
+    (*cf)({}, {result});
+    ASSERT_EQ(vector<element::Int64::type>{std::strtol("2112", NULL, 10)}, result->get_vector());
+}
+
+TEST(execute, tensor_constant_float32)
+{
+    auto shape = Shape{2, 2};
+    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
+    auto r = make_shared<op::Constant>(element::Float32::element_type(),
+                                       shape,
+                                       std::vector<std::string>{"4.8", "4.7", "-5.3", "0"});
+    auto f = make_shared<Function>(r, rt, op::Parameters{});
+
+    auto manager = runtime::Manager::get("NGVM");
+    auto external = manager->compile(f);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    // Create some tensors for input/output
+    auto result = ngraph::runtime::make_tensor<element::Float32>(shape);
+
+    (*cf)({}, {result});
+    ASSERT_EQ((vector<float>{std::strtof("4.8", NULL),
+                             std::strtof("4.7", NULL),
+                             std::strtof("-5.3", NULL),
+                             std::strtof("0", NULL)}),
+              result->get_vector());
+}
+
+TEST(execute, tensor_constant_int64)
+{
+    auto shape = Shape{2, 2};
+    auto rt = make_shared<TensorViewType>(element::Int64::element_type(), shape);
+    auto r = make_shared<op::Constant>(element::Int64::element_type(),
+                                       shape,
+                                       std::vector<std::string>{"2112", "1848", "1776", "1964"});
+    auto f = make_shared<Function>(r, rt, op::Parameters{});
+
+    auto manager = runtime::Manager::get("NGVM");
+    auto external = manager->compile(f);
+    auto backend = manager->allocate_backend();
+    auto cf = backend->make_call_frame(external);
+
+    // Create some tensors for input/output
+    auto result = ngraph::runtime::make_tensor<element::Int64>(shape);
+
+    (*cf)({}, {result});
+    ASSERT_EQ((vector<element::Int64::type>{std::strtol("2112", NULL, 10),
+                                            std::strtol("1848", NULL, 10),
+                                            std::strtol("1776", NULL, 10),
+                                            std::strtol("1964", NULL, 10)}),
+              result->get_vector());
+}
test/type_prop.cpp
@@ -1517,3 +1517,98 @@ TEST(type_prop, slice_deduce_matrix_upper_extra)
         FAIL() << "Deduced type check failed for unexpected reason";
     }
 }
+
+TEST(type_prop, scalar_constant_deduce_float32)
+{
+    auto c = make_shared<op::Constant>(element::Float32::element_type(), Shape{}, "208");
+    c->propagate_types();
+    ASSERT_EQ(*(c->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{}));
+}
+
+TEST(type_prop, scalar_constant_deduce_bool)
+{
+    auto c = make_shared<op::Constant>(element::Bool::element_type(), Shape{}, "1");
+    c->propagate_types();
+    ASSERT_EQ(*(c->get_value_type()), TensorViewType(element::Bool::element_type(), Shape{}));
+}
+
+TEST(type_prop, tensor_constant_deduce_float32)
+{
+    auto c = make_shared<op::Constant>(element::Float32::element_type(),
+                                       Shape{2, 2},
+                                       std::vector<std::string>{"208", "208", "208", "208"});
+    c->propagate_types();
+    ASSERT_EQ(*(c->get_value_type()), TensorViewType(element::Float32::element_type(), Shape{2, 2}));
+}
+
+TEST(type_prop, tensor_constant_deduce_bool)
+{
+    auto c = make_shared<op::Constant>(element::Bool::element_type(),
+                                       Shape{2, 2},
+                                       std::vector<std::string>{"1", "1", "1", "1"});
+    c->propagate_types();
+    ASSERT_EQ(*(c->get_value_type()), TensorViewType(element::Bool::element_type(), Shape{2, 2}));
+}
+
+TEST(type_prop, tensor_constant_bad_parse)
+{
+    auto c = make_shared<op::Constant>(element::Bool::element_type(),
+                                       Shape{2, 2},
+                                       std::vector<std::string>{"1", "grunk", "1", "1"});
+    try
+    {
+        c->propagate_types();
+        // Should have thrown, so fail if it didn't
+        FAIL() << "Bad literal parse not detected";
+    }
+    catch (const ngraph_error& error)
+    {
+        EXPECT_EQ(error.what(), std::string("Could not parse literal"));
+    }
+    catch (...)
+    {
+        FAIL() << "Deduced type check failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, tensor_constant_bad_parse_float_for_int)
+{
+    auto c = make_shared<op::Constant>(element::Int32::element_type(),
+                                       Shape{2, 2},
+                                       std::vector<std::string>{"1", "2.7", "1", "1"});
+    try
+    {
+        c->propagate_types();
+        // Should have thrown, so fail if it didn't
+        FAIL() << "Bad literal parse not detected";
+    }
+    catch (const ngraph_error& error)
+    {
+        EXPECT_EQ(error.what(), std::string("Could not parse literal"));
+    }
+    catch (...)
+    {
+        FAIL() << "Deduced type check failed for unexpected reason";
+    }
+}
+
+TEST(type_prop, tensor_constant_bad_count)
+{
+    auto c = make_shared<op::Constant>(element::Bool::element_type(),
+                                       Shape{2, 2},
+                                       std::vector<std::string>{"1", "1", "1"});
+    try
+    {
+        c->propagate_types();
+        // Should have thrown, so fail if it didn't
+        FAIL() << "Incorrect number of literals not detected";
+    }
+    catch (const ngraph_error& error)
+    {
+        EXPECT_EQ(error.what(), std::string("Constant does not have the expected number of literals"));
+    }
+    catch (...)
+    {
+        FAIL() << "Deduced type check failed for unexpected reason";
+    }
+}