Commit eea59f20 authored by Jaikrishnan Menon

CPU: Implement op::Constant

Ops are now being implemented in priority order based on
MNIST MLP requirements
parent 83c638fa
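
For context, a rough sketch of what the new EmitConstant emitter (first hunk below) appends to the generated translation unit (TU) for the scalar Float32 constant exercised in the tests: it assigns the constant's value strings directly into the output tensor view's backing vector. The type name ngraph::element::Float32 and the tensor-view index 0 are illustrative assumptions; the real values come from element_type_names[TI(...)] and outputs[0].get_index() at emit time.

    // Hypothetical emitted code for op::Constant(Float32, Shape{}, "4.8"),
    // assuming element_type_names maps Float32 to "ngraph::element::Float32"
    // and the output tensor view sits at index 0:
    {
        call_frame->get_parameterized_tensor_view<ngraph::element::Float32>(0)->get_vector() =
            std::vector<ngraph::element::Float32::type>{4.8};
    }
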
@@ -906,3 +906,28 @@ void Emitter::EMITTER_DECL(EmitConvert)
".template cast<typename " + element_type_names[TI(result_element_type)] + "::type>();\n"
" }\n";
}
+void Emitter::EMITTER_DECL(EmitConstant)
+{
+    auto c = static_cast<const op::Constant*>(n);
+    auto c_tensor_type = dynamic_pointer_cast<const TensorViewType>(c->get_value_type());
+    assert(c_tensor_type);
+    auto& c_element_type = c_tensor_type->get_element_type();
+    auto c_value_strings = c->get_value_strings();
+    TU +=
+        " {\n"
+        " call_frame->get_parameterized_tensor_view<" +
+        element_type_names[TI(c_element_type)] + ">(" + to_string(outputs[0].get_index()) +
+        ")->get_vector() = std::vector<" + element_type_names[TI(c_element_type)] +
+        "::type>{";
+    for (size_t i = 0; i < c_value_strings.size(); i++)
+    {
+        if (i)
+            TU += ", ";
+        TU += c_value_strings[i];
+    }
+    TU += "};\n }\n";
+}
@@ -75,6 +75,7 @@ namespace ngraph
void EMITTER_DECL(EmitParameterizedConstantUInt64);
void EMITTER_DECL(EmitBroadcast);
void EMITTER_DECL(EmitConvert);
+void EMITTER_DECL(EmitConstant);
};
}
}
@@ -105,6 +105,7 @@ static const OpMap dispatcher{
&Emitter::EmitParameterizedConstantUInt64},
{TI(ngraph::op::Broadcast), &Emitter::EmitBroadcast},
{TI(ngraph::op::Convert), &Emitter::EmitConvert},
+{TI(ngraph::op::Constant), &Emitter::EmitConstant},
};
#undef TI
@@ -2218,14 +2218,15 @@ TEST(execute, slice_vector)
(*cf)({a}, {result});
ASSERT_EQ((vector<float>{2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}), result->get_vector());
}
+*/
-TEST(execute, scalar_constant_float32)
+TEST(cpu, scalar_constant_float32)
{
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto r = make_shared<op::Constant>(element::Float32::element_type(), Shape{}, "4.8");
auto f = make_shared<Function>(r, rt, op::Parameters{});
auto manager = runtime::Manager::get("NGVM");
auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
@@ -2237,13 +2238,13 @@ TEST(execute, scalar_constant_float32)
ASSERT_EQ(vector<float>{std::strtof("4.8", NULL)}, result->get_vector());
}
-TEST(execute, scalar_constant_int64)
+TEST(cpu, scalar_constant_int64)
{
auto rt = make_shared<TensorViewType>(element::Int64::element_type(), Shape{});
auto r = make_shared<op::Constant>(element::Int64::element_type(), Shape{}, "2112");
auto f = make_shared<Function>(r, rt, op::Parameters{});
-auto manager = runtime::Manager::get("NGVM");
+auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
@@ -2255,7 +2256,7 @@ TEST(execute, scalar_constant_int64)
ASSERT_EQ(vector<element::Int64::type>{std::strtol("2112", NULL, 10)}, result->get_vector());
}
-TEST(execute, tensor_constant_float32)
+TEST(cpu, tensor_constant_float32)
{
auto shape = Shape{2, 2};
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
@@ -2264,7 +2265,7 @@ TEST(execute, tensor_constant_float32)
std::vector<std::string>{"4.8", "4.7", "-5.3", "0"});
auto f = make_shared<Function>(r, rt, op::Parameters{});
-auto manager = runtime::Manager::get("NGVM");
+auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
@@ -2280,7 +2281,7 @@ TEST(execute, tensor_constant_float32)
result->get_vector());
}
-TEST(execute, tensor_constant_int64)
+TEST(cpu, tensor_constant_int64)
{
auto shape = Shape{2, 2};
auto rt = make_shared<TensorViewType>(element::Int64::element_type(), shape);
@@ -2289,7 +2290,7 @@ TEST(execute, tensor_constant_int64)
std::vector<std::string>{"2112", "1848", "1776", "1964"});
auto f = make_shared<Function>(r, rt, op::Parameters{});
-auto manager = runtime::Manager::get("NGVM");
+auto manager = runtime::Manager::get("CPU");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
@@ -2305,6 +2306,7 @@ TEST(execute, tensor_constant_int64)
result->get_vector());
}
+/*
// Trivial case with no summed axes.
TEST(execute, sum_trivial)
{