Commit cf220930 authored by Jaikrishnan Menon

CPU Direct Execution: Implement And

parent 05545092
@@ -80,6 +80,7 @@
 #include "ngraph/runtime/cpu/cpu_op_annotations.hpp"
 #include "ngraph/runtime/cpu/kernel/abs.hpp"
 #include "ngraph/runtime/cpu/kernel/add.hpp"
+#include "ngraph/runtime/cpu/kernel/and.hpp"
 #include "ngraph/runtime/cpu/kernel/broadcast.hpp"
 #include "ngraph/runtime/cpu/kernel/ceil.hpp"
 #include "ngraph/runtime/cpu/kernel/cwise_pow.hpp"
@@ -181,6 +182,40 @@ namespace ngraph
                 BUILD_BINARY_ELEMWISE_FUNCTOR(runtime::cpu::kernel::less_eq);
             }
+            template <>
+            void Builder::BUILDER_DECL(ngraph::op::And)
+            {
+                auto& functors = external_function->get_functors();
+                auto& tensor_data = external_function->get_tensor_data();
+
+                auto element_count = out[0].get_size();
+                auto& arg0_tensor = tensor_data[args[0].get_name()];
+                auto& arg1_tensor = tensor_data[args[1].get_name()];
+                auto& out0_tensor = tensor_data[out[0].get_name()];
+
+                auto functor = [&, element_count](CPURuntimeContext* ctx) {
+                    runtime::cpu::kernel::logical_and(
+                        arg0_tensor, arg1_tensor, out0_tensor, element_count);
+                };
+                functors.emplace_back(functor);
+            }
+
+            template <>
+            void Builder::BUILDER_DECL(ngraph::op::Or)
+            {
+                auto& functors = external_function->get_functors();
+                auto& tensor_data = external_function->get_tensor_data();
+
+                auto element_count = out[0].get_size();
+                auto& arg0_tensor = tensor_data[args[0].get_name()];
+                auto& arg1_tensor = tensor_data[args[1].get_name()];
+                auto& out0_tensor = tensor_data[out[0].get_name()];
+
+                auto functor = [&, element_count](CPURuntimeContext* ctx) {
+                    runtime::cpu::kernel::logical_or(
+                        arg0_tensor, arg1_tensor, out0_tensor, element_count);
+                };
+                functors.emplace_back(functor);
+            }
+
             template <>
             void Builder::BUILDER_DECL(ngraph::op::Maximum)
             {
@@ -322,6 +357,8 @@ namespace ngraph
             REGISTER_OP_BUILDER(LessEq);
             REGISTER_OP_BUILDER(Maximum);
             REGISTER_OP_BUILDER(Minimum);
+            REGISTER_OP_BUILDER(And);
+            REGISTER_OP_BUILDER(Or);
         }
     }
 }
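For orientation, here is a minimal sketch of the direct-execution flow these builders feed into. This is an illustration under assumptions, not ngraph's actual call-frame code: each builder specialization appends a closure to the external function's functor list, and a driver later invokes the functors in graph order, passing the runtime context to each one. Apart from CPURuntimeContext, the names below (Functor, run_functors) are hypothetical.

#include <functional>
#include <vector>

struct CPURuntimeContext; // opaque here; defined elsewhere in the CPU backend

using Functor = std::function<void(CPURuntimeContext*)>;

// Hypothetical driver loop: run each registered functor in order.
void run_functors(const std::vector<Functor>& functors, CPURuntimeContext* ctx)
{
    for (const auto& f : functors)
    {
        f(ctx); // e.g. the And functor above calls runtime::cpu::kernel::logical_and
    }
}

The commit also adds the kernel header included above (ngraph/runtime/cpu/kernel/and.hpp), shown next.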
/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>
#include "ngraph/runtime/cpu/kernel/eigen_thread_pool.hpp"
namespace ngraph
{
    namespace runtime
    {
        namespace cpu
        {
            namespace kernel
            {
                void logical_and(void* input0, void* input1, void* output, size_t count)
                {
                    Eigen::array<Eigen::Index, 1> out_dims, in_dims;

                    out_dims[0] = in_dims[0] = count;

                    Eigen::TensorMap<Eigen::Tensor<char, 1, Eigen::RowMajor>> out(
                        static_cast<char*>(output), out_dims);
                    Eigen::TensorMap<Eigen::Tensor<char, 1, Eigen::RowMajor>> in0(
                        static_cast<char*>(input0), in_dims);
                    Eigen::TensorMap<Eigen::Tensor<char, 1, Eigen::RowMajor>> in1(
                        static_cast<char*>(input1), in_dims);

                    out.device(eigen::global_thread_pool_device) =
                        (in0 && in1).template cast<char>();
                }
            }
        }
    }
}
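Below is a standalone sketch (not part of the commit) of the same Eigen pattern the kernel uses, assuming Eigen's unsupported Tensor module is available. It substitutes a locally constructed thread-pool device for ngraph's eigen::global_thread_pool_device so it can run on its own; the buffer names and thread count are arbitrary.

#define EIGEN_USE_THREADS
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main()
{
    const int count = 4;
    char in0[count] = {1, 0, 1, 0};
    char in1[count] = {1, 1, 0, 0};
    char out[count] = {0, 0, 0, 0};

    // Local thread-pool device standing in for eigen::global_thread_pool_device.
    Eigen::ThreadPool pool(2);
    Eigen::ThreadPoolDevice device(&pool, 2);

    Eigen::array<Eigen::Index, 1> dims;
    dims[0] = count;

    Eigen::TensorMap<Eigen::Tensor<char, 1, Eigen::RowMajor>> t_out(out, dims);
    Eigen::TensorMap<Eigen::Tensor<char, 1, Eigen::RowMajor>> t_in0(in0, dims);
    Eigen::TensorMap<Eigen::Tensor<char, 1, Eigen::RowMajor>> t_in1(in1, dims);

    // Element-wise logical AND over char buffers, same expression as the kernel.
    t_out.device(device) = (t_in0 && t_in1).cast<char>();

    for (int i = 0; i < count; i++)
    {
        std::cout << static_cast<int>(out[i]) << " "; // expected: 1 0 0 0
    }
    std::cout << std::endl;
    return 0;
}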