Commit 1572d31f authored by Adam Rogowiec, committed by Scott Cyphers

Return 0 or 1 when converting to bool (#2552)

* Update CPU backend.

* Update INTERPRETER backend.

* Update unit tests.

* Don't use std:: prefix.

* Convert to bool for GPU backend in DEX mode.

* Handle boolean elements in Intel GPU backend.

* Disable convert to bool UT for IGPU.

* Update quoting style to fix errors raised by an updated version of flake8.
parent 54db6552
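
Reviewer note (not part of the original commit message): the bug being fixed is that converting a numeric value to the boolean element type used to copy the raw value into the byte storage instead of normalizing it to 0 or 1. A minimal standalone C++ sketch of the before/after behavior; none of the names below come from ngraph:

#include <iostream>

int main()
{
    float in = 0.12352f;

    // Old behavior: casting straight to the byte storage type
    // truncates 0.12352f to 0, so a nonzero input reads back as "false".
    char old_result = static_cast<char>(in);

    // New behavior: collapse to bool first, so any nonzero input
    // becomes exactly 1 and zero stays 0.
    char new_result = static_cast<char>(static_cast<bool>(in));

    std::cout << static_cast<int>(old_result) << "\n"; // prints 0 (wrong)
    std::cout << static_cast<int>(new_result) << "\n"; // prints 1 (expected)
}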
@@ -42,7 +42,7 @@ namespace ngraph
     if (out[0].get_element_type() == element::boolean)
     {
         SELECT_KERNEL(
-            kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i8);
+            kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_bool);
     }
     else if (out[0].get_element_type() == element::f32)
     {
...
@@ -104,6 +104,12 @@ namespace ngraph
     {
         convert<InputElementType, uint64_t>(input, output, count, arena);
     }
+
+    template <typename InputElementType>
+    void convert_to_bool(void* input, void* output, size_t count, int arena)
+    {
+        convert<InputElementType, bool>(input, output, count, arena);
+    }
 }
 }
 }
...
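A side observation on the CPU kernel above (my reading, not stated in the diff): routing through convert<InputElementType, bool> writes bool values directly into the boolean tensor's byte buffer, which presumes a one-byte bool whose true stores as 1. A quick standalone check of that assumption:

#include <cstring>
#include <iostream>

int main()
{
    // The kernel writes bool results straight into byte storage,
    // so it relies on bool occupying a single byte.
    static_assert(sizeof(bool) == 1, "one-byte bool assumed");

    bool b = 42;              // any nonzero value collapses to true
    unsigned char raw = 0;
    std::memcpy(&raw, &b, 1); // inspect the stored byte pattern

    // Prints 1 on mainstream ABIs; the stored value is technically
    // implementation-defined, which is exactly the assumption here.
    std::cout << static_cast<int>(raw) << "\n";
}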
@@ -61,7 +61,15 @@ namespace ngraph
     {
         dtypes.push_back(arg.get_type());
     }
-    dtypes.push_back(out[0].get_type());
+    // Special case for bool data type.
+    if (out[0].get_element_type() == element::boolean)
+    {
+        dtypes.push_back("bool");
+    }
+    else
+    {
+        dtypes.push_back(out[0].get_type());
+    }
     auto ew_index = cuda_emitter->build_elementwise<T>(dtypes, out[0].get_shape());
     return compiled_function->add_to_runtime(ew_index, function_name, args, out);
...
@@ -43,6 +43,7 @@ string runtime::intelgpu::get_opencl_type_name(const element::Type& ngraph_type)
     case element::Type_t::u16: return "ushort";
     case element::Type_t::i8: return "char";
     case element::Type_t::u8: return "uchar";
+    case element::Type_t::boolean: return "bool";
     }
     return ngraph_type.c_type_string();
...
@@ -23,6 +23,8 @@ shape_of_matrix
 shape_of_scalar
 shape_of_vector
 floor_int32
+convert_int32_bool
+convert_float32_bool
 # Unsupported extra padding modes
 pad_edge_1d
...
@@ -524,7 +524,7 @@ private:
     switch (type.get_type_enum())
     {
     case element::Type_t::boolean:
-        reference::convert<T>(
+        reference::convert_to_bool<T>(
             args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<char>(), element_count);
         break;
     case element::Type_t::f32:
...
@@ -27,11 +27,20 @@ namespace ngraph
     template <typename TI, typename TO>
     void convert(const TI* arg, TO* out, size_t count)
     {
-        for (size_t i = 0; i < count; i++)
+        for (size_t i = 0; i < count; ++i)
         {
             out[i] = static_cast<TO>(arg[i]);
         }
     }
+
+    template <typename T>
+    void convert_to_bool(const T* arg, char* out, size_t count)
+    {
+        for (size_t i = 0; i < count; ++i)
+        {
+            out[i] = static_cast<char>(static_cast<bool>(arg[i]));
+        }
+    }
 }
 }
 }
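
To sanity-check the reference kernel above against the updated unit-test expectations, here is a small standalone harness; the template is copied inline for illustration rather than included from the ngraph header:

#include <cstddef>
#include <iostream>
#include <limits>
#include <vector>

// Inline copy of the reference kernel above, for a self-contained check.
template <typename T>
void convert_to_bool(const T* arg, char* out, size_t count)
{
    for (size_t i = 0; i < count; ++i)
    {
        out[i] = static_cast<char>(static_cast<bool>(arg[i]));
    }
}

int main()
{
    // Same inputs as the convert_float32_bool test below.
    std::vector<float> in{0.f,
                          1.5745f,
                          0.12352f,
                          0.f,
                          std::numeric_limits<float>::lowest(),
                          std::numeric_limits<float>::max(),
                          std::numeric_limits<float>::min(),
                          std::numeric_limits<float>::infinity(),
                          -std::numeric_limits<float>::infinity()};
    std::vector<char> out(in.size());
    convert_to_bool(in.data(), out.data(), in.size());

    // Expected output: 0 1 1 0 1 1 1 1 1
    for (char c : out)
        std::cout << static_cast<int>(c) << ' ';
    std::cout << '\n';
}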
@@ -1467,40 +1467,49 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32)
 NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool)
 {
-    Shape shape{2, 2};
+    Shape shape{2, 3};
     auto A = make_shared<op::Parameter>(element::i32, shape);
     auto f =
         make_shared<Function>(make_shared<op::Convert>(A, element::boolean), ParameterVector{A});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    int32_t lowest = std::numeric_limits<int32_t>::lowest();
+    int32_t max = std::numeric_limits<int32_t>::max();

     // Create some tensors for input/output
     auto a = backend->create_tensor(element::i32, shape);
-    copy_data(a, vector<int32_t>{1, 2, 3, 4});
+    copy_data(a, vector<int32_t>{0, 12, 23, 0, lowest, max});
     auto result = backend->create_tensor(element::boolean, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{1, 2, 3, 4}), read_vector<char>(result));
+    EXPECT_EQ((vector<char>{0, 1, 1, 0, 1, 1}), read_vector<char>(result));
 }

 NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bool)
 {
-    Shape shape{2, 2};
+    Shape shape{3, 3};
     auto A = make_shared<op::Parameter>(element::f32, shape);
     auto f =
         make_shared<Function>(make_shared<op::Convert>(A, element::boolean), ParameterVector{A});
     auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    float lowest = std::numeric_limits<float>::lowest();
+    float max = std::numeric_limits<float>::max();
+    float min = std::numeric_limits<float>::min();
+    float pos_inf = std::numeric_limits<float>::infinity();
+    float neg_inf = -std::numeric_limits<float>::infinity();

     // Create some tensors for input/output
     auto a = backend->create_tensor(element::f32, shape);
-    copy_data(a, vector<float>{1, 2, 3, 4});
+    copy_data(a, vector<float>{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf});
     auto result = backend->create_tensor(element::boolean, shape);

     auto handle = backend->compile(f);
     handle->call_with_validate({result}, {a});
-    EXPECT_EQ((vector<char>{1, 2, 3, 4}), read_vector<char>(result));
+    EXPECT_EQ((vector<char>{0, 1, 1, 0, 1, 1, 1, 1, 1}), read_vector<char>(result));
 }

 NGRAPH_TEST(${BACKEND_NAME}, slice_scalar)
...