Commit 1572d31f authored by Adam Rogowiec's avatar Adam Rogowiec Committed by Scott Cyphers

Return 0 or 1 when converting to bool (#2552)

* Update CPU backend.

* Update INTERPRETER backend.

* Update unit tests.

* Don't use std:: prefix.

* Convert to bool for GPU backend in DEX mode.

* Handle boolean elements in Intel GPU backend.

* Disable convert to bool UT for IGPU.

* Update quoting style to fix errors raised by updated version of flake8.
parent 54db6552
......@@ -42,7 +42,7 @@ namespace ngraph
if (out[0].get_element_type() == element::boolean)
{
SELECT_KERNEL(
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_i8);
kernel, args[0].get_element_type(), runtime::cpu::kernel::convert_to_bool);
}
else if (out[0].get_element_type() == element::f32)
{
......
......@@ -104,6 +104,12 @@ namespace ngraph
{
convert<InputElementType, uint64_t>(input, output, count, arena);
}
// Convert `count` elements of type InputElementType at `input` into
// boolean storage at `output` by delegating to the generic convert
// kernel with bool as the destination type, so every stored value is
// exactly 0 or 1.
// NOTE(review): `arena` is forwarded untouched; presumably a CPU
// allocation/scratch arena index — confirm against the convert()
// implementation, which is not fully visible here.
template <typename InputElementType>
void convert_to_bool(void* input, void* output, size_t count, int arena)
{
convert<InputElementType, bool>(input, output, count, arena);
}
}
}
}
......
......@@ -61,7 +61,15 @@ namespace ngraph
{
dtypes.push_back(arg.get_type());
}
dtypes.push_back(out[0].get_type());
// Special case for bool data type.
if (out[0].get_element_type() == element::boolean)
{
dtypes.push_back("bool");
}
else
{
dtypes.push_back(out[0].get_type());
}
auto ew_index = cuda_emitter->build_elementwise<T>(dtypes, out[0].get_shape());
return compiled_function->add_to_runtime(ew_index, function_name, args, out);
......
......@@ -43,6 +43,7 @@ string runtime::intelgpu::get_opencl_type_name(const element::Type& ngraph_type)
case element::Type_t::u16: return "ushort";
case element::Type_t::i8: return "char";
case element::Type_t::u8: return "uchar";
case element::Type_t::boolean: return "bool";
}
return ngraph_type.c_type_string();
......
......@@ -23,6 +23,8 @@ shape_of_matrix
shape_of_scalar
shape_of_vector
floor_int32
convert_int32_bool
convert_float32_bool
# Unsupported extra padding modes
pad_edge_1d
......
......@@ -524,7 +524,7 @@ private:
switch (type.get_type_enum())
{
case element::Type_t::boolean:
reference::convert<T>(
reference::convert_to_bool<T>(
args[0]->get_data_ptr<const T>(), out[0]->get_data_ptr<char>(), element_count);
break;
case element::Type_t::f32:
......
......@@ -27,11 +27,20 @@ namespace ngraph
/// \brief Element-wise cast of `count` values from `arg` to `out`.
///
/// Each source element of type TI is converted to destination type TO
/// with static_cast semantics (truncation for float->int, etc.).
///
/// \param arg   Source buffer holding at least `count` TI elements.
/// \param out   Destination buffer with room for `count` TO elements.
/// \param count Number of elements to convert.
// Fix: the scraped diff had merged both the removed (`i++`) and added
// (`++i`) loop headers into two consecutive `for` lines, which is not
// valid C++; keep only the post-commit `++i` form.
template <typename TI, typename TO>
void convert(const TI* arg, TO* out, size_t count)
{
    for (size_t i = 0; i < count; ++i)
    {
        out[i] = static_cast<TO>(arg[i]);
    }
}
/// \brief Collapse `count` arbitrary values into canonical booleans.
///
/// Every element of `arg` that converts to true is stored as 1 and
/// every element that converts to false as 0, so downstream code sees
/// only the canonical 0/1 encoding in the char buffer `out`.
///
/// \param arg   Source buffer holding at least `count` T elements.
/// \param out   Destination char buffer with room for `count` bytes.
/// \param count Number of elements to convert.
template <typename T>
void convert_to_bool(const T* arg, char* out, size_t count)
{
    // Walk both buffers with pointers; the end sentinel marks completion.
    const T* const end = arg + count;
    while (arg != end)
    {
        *out++ = static_cast<bool>(*arg++) ? 1 : 0;
    }
}
}
}
}
......@@ -1467,40 +1467,49 @@ NGRAPH_TEST(${BACKEND_NAME}, convert_uint16_float32)
// Verify that Convert(i32 -> boolean) canonicalizes every non-zero
// input (including INT32 lowest/max extremes) to 1 and zero to 0.
// Fix: the scraped diff had merged the removed lines (Shape{2, 2}, the
// old 4-element copy_data and EXPECT_EQ) with the added ones, yielding
// redefinitions of `shape` and duplicate statements; keep only the
// post-commit (added) version.
NGRAPH_TEST(${BACKEND_NAME}, convert_int32_bool)
{
    Shape shape{2, 3};
    auto A = make_shared<op::Parameter>(element::i32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Convert>(A, element::boolean), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    int32_t lowest = std::numeric_limits<int32_t>::lowest();
    int32_t max = std::numeric_limits<int32_t>::max();

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i32, shape);
    copy_data(a, vector<int32_t>{0, 12, 23, 0, lowest, max});
    auto result = backend->create_tensor(element::boolean, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<char>{0, 1, 1, 0, 1, 1}), read_vector<char>(result));
}
// Verify that Convert(f32 -> boolean) canonicalizes every non-zero
// input (fractions, float lowest/max/min, +/-infinity) to 1 and zero
// to 0.
// Fix: the scraped diff had merged the removed lines (Shape{2, 2}, the
// old 4-element copy_data and EXPECT_EQ) with the added ones, yielding
// redefinitions of `shape` and duplicate statements; keep only the
// post-commit (added) version.
NGRAPH_TEST(${BACKEND_NAME}, convert_float32_bool)
{
    Shape shape{3, 3};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto f =
        make_shared<Function>(make_shared<op::Convert>(A, element::boolean), ParameterVector{A});

    auto backend = runtime::Backend::create("${BACKEND_NAME}");

    float lowest = std::numeric_limits<float>::lowest();
    float max = std::numeric_limits<float>::max();
    float min = std::numeric_limits<float>::min();
    float pos_inf = std::numeric_limits<float>::infinity();
    float neg_inf = -std::numeric_limits<float>::infinity();

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::f32, shape);
    copy_data(a, vector<float>{0.f, 1.5745f, 0.12352f, 0.f, lowest, max, min, pos_inf, neg_inf});
    auto result = backend->create_tensor(element::boolean, shape);

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<char>{0, 1, 1, 0, 1, 1, 1, 1, 1}), read_vector<char>(result));
}
NGRAPH_TEST(${BACKEND_NAME}, slice_scalar)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment