Commit e2064cc2 authored by Jaikrishnan Menon, committed by Robert Kimball

CPU Direct Execution: Implement Pad (#1320)

* CPU Direct Execution: Implement Pad

* Add Pad builder to the build script

* Add missed changes during commit
parent f1c3e4ab
......@@ -41,6 +41,7 @@ set(SRC
builder/max_pool.cpp
builder/min.cpp
builder/relu.cpp
builder/pad.cpp
builder/product.cpp
builder/reshape.cpp
builder/reverse.cpp
......
/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <cstring>
#include "ngraph/op/pad.hpp"
#include "ngraph/runtime/cpu/cpu_builder.hpp"
#include "ngraph/runtime/cpu/kernel/pad.hpp"
#include "ngraph/shape.hpp"
using namespace std;
using namespace ngraph;
namespace ngraph
{
namespace runtime
{
namespace cpu
{
// Builder specialization for Pad: compiles the node into a ready-to-run
// functor and appends it to the external function's functor list.
//
// Two execution strategies are selected at build time:
//   * no interior padding  -> rank-specialized Eigen kernel (fast path)
//   * interior padding set -> generic reference::pad-based kernel
template <>
void Builder::BUILDER_DECL(ngraph::op::Pad)
{
    auto& functors = external_function->get_functors();
    auto& tensor_data = external_function->get_tensor_data();

    // References into the tensor-data table; the lambdas below capture
    // these by reference so they observe pointers patched in at runtime.
    auto& arg_tensor = tensor_data[args[0].get_name()];
    auto& padding_value = tensor_data[args[1].get_name()];
    auto& out_tensor = tensor_data[out[0].get_name()];

    const auto* pad_node = static_cast<const ngraph::op::Pad*>(node);

    auto arg_shape = args[0].get_shape();
    auto out_shape = out[0].get_shape();
    auto padding_below = pad_node->get_padding_below();
    auto padding_above = pad_node->get_padding_above();
    auto padding_interior = pad_node->get_padding_interior();

    // An all-zero interior shape (Shape(n) value-initializes n zeros)
    // means plain edge padding only.
    if (padding_interior == Shape(arg_shape.size()))
    {
        // Fast path: rank-specialized Eigen pad kernel.
        std::function<decltype(runtime::cpu::kernel::pad<float, 1>)> kernel;

        SELECT_KERNEL_BY_RANK(kernel,
                              args[0].get_element_type(),
                              arg_shape.size(),
                              runtime::cpu::kernel::pad);

        // Shapes are captured by value (they are build-time constants);
        // tensor slots are captured by reference via the default capture.
        auto functor = [&, kernel, arg_shape, out_shape, padding_below, padding_above](
            CPURuntimeContext* ctx) {
            kernel(arg_tensor,
                   out_tensor,
                   padding_value,
                   arg_shape,
                   out_shape,
                   padding_below,
                   padding_above);
        };
        functors.emplace_back(functor);
    }
    else
    {
        // Generic path: interior (dilation-style) padding requires the
        // reference implementation.
        std::function<decltype(runtime::cpu::kernel::pad<float>)> kernel;

        SELECT_KERNEL(kernel, args[0].get_element_type(), runtime::cpu::kernel::pad);

        auto functor = [&,
                        kernel,
                        arg_shape,
                        out_shape,
                        padding_below,
                        padding_above,
                        padding_interior](CPURuntimeContext* ctx) {
            kernel(arg_tensor,
                   padding_value,
                   out_tensor,
                   arg_shape,
                   out_shape,
                   padding_below,
                   padding_above,
                   padding_interior);
        };
        functors.emplace_back(functor);
    }
}
REGISTER_OP_BUILDER(Pad);
}
}
}
......@@ -3505,7 +3505,7 @@ namespace ngraph
{
writer << "cpu::kernel::pad_4d_float32(" << args[0].get_name() << ",\n"
<< " " << out[0].get_name() << ",\n"
<< " *(" << args[1].get_name() << "),\n"
<< " " << args[1].get_name() << ",\n"
<< " {" << join(arg0_shape) << "},\n"
<< " {" << join(result_shape) << "},\n"
<< " {" << join(pad->get_padding_below())
......
......@@ -136,7 +136,7 @@ namespace ngraph
{
void pad_4d_float32(float* input,
float* output,
float pad_value,
float* pad_value,
const Shape& input_shape,
const Shape& output_shape,
const Shape& padding_below,
......
......@@ -26,7 +26,7 @@ namespace ngraph
{
void pad_4d_float32(float* input,
float* output,
float pad_value,
float* pad_value,
const Shape& input_shape,
const Shape& output_shape,
const Shape& padding_below,
......
......@@ -20,6 +20,7 @@
#include <unsupported/Eigen/CXX11/Tensor>
#include "ngraph/runtime/cpu/kernel/eigen_thread_pool.hpp"
#include "ngraph/runtime/reference/pad.hpp"
#include "ngraph/shape.hpp"
namespace ngraph
......@@ -31,9 +32,9 @@ namespace ngraph
namespace kernel
{
template <typename ElementType, unsigned int Rank>
void pad(ElementType* input,
ElementType* output,
ElementType pad_value,
void pad(void* input,
void* output,
void* pad_value,
const Shape& input_shape,
const Shape& output_shape,
const Shape& padding_below,
......@@ -49,11 +50,32 @@ namespace ngraph
padding[i] = {padding_below[i], padding_above[i]};
}
Eigen::TensorMap<Eigen::Tensor<ElementType, Rank, Eigen::RowMajor>> out(
output, out_dims);
Eigen::TensorMap<Eigen::Tensor<ElementType, Rank, Eigen::RowMajor>> in(input,
in_dims);
static_cast<ElementType*>(output), out_dims);
Eigen::TensorMap<Eigen::Tensor<ElementType, Rank, Eigen::RowMajor>> in(
static_cast<ElementType*>(input), in_dims);
out.device(eigen::global_thread_pool_device) = in.pad(padding, pad_value);
out.device(eigen::global_thread_pool_device) =
in.pad(padding, *static_cast<ElementType*>(pad_value));
}
// Generic (any-rank, interior-padding-capable) pad kernel.
// Thin type-erased wrapper: recovers the element type from the void
// pointers and forwards everything to the reference implementation.
//
// arg0             input tensor data
// arg1             scalar pad value
// out              output tensor data
// padding_interior per-axis spacing inserted between input elements
template <typename ElementType>
void pad(const void* arg0,
         const void* arg1,
         void* out,
         const Shape& arg0_shape,
         const Shape& out_shape,
         const Shape& padding_below,
         const Shape& padding_above,
         const Shape& padding_interior)
{
    const auto* input = static_cast<const ElementType*>(arg0);
    const auto* pad_value = static_cast<const ElementType*>(arg1);
    auto* output = static_cast<ElementType*>(out);

    reference::pad(input,
                   pad_value,
                   output,
                   arg0_shape,
                   out_shape,
                   padding_below,
                   padding_above,
                   padding_interior);
}
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment