Commit 60ae9e8c authored by Jaikrishnan Menon's avatar Jaikrishnan Menon

CPU Direct Execution: Implement common reduction builder and Max

Also modify existing kernel so it works within the builder framework
parent 2d543ee4
/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <cstring>
#include "ngraph/op/max.hpp"
#include "ngraph/runtime/cpu/cpu_builder.hpp"
#include "ngraph/runtime/cpu/kernel/reduce_max.hpp"
#include "reduction.hpp"
using namespace std;
using namespace ngraph;
namespace ngraph
{
namespace runtime
{
namespace cpu
{
// Direct-execution builder specialization for the Max reduction op.
// BUILD_REDUCTION_FUNCTOR(Max, max) expands to the common reduction
// builder machinery — presumably defined in reduction.hpp (included
// above) — which selects among the reduce_max_* kernels in
// ngraph/runtime/cpu/kernel/reduce_max.hpp based on element type,
// rank, and number of reduction axes; TODO confirm against the macro
// definition.
template <>
void Builder::BUILDER_DECL(ngraph::op::Max)
{
BUILD_REDUCTION_FUNCTOR(Max, max);
}
// Registers this builder so the CPU backend dispatches op::Max to it.
REGISTER_OP_BUILDER(Max);
}
}
}
This diff is collapsed.
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <unsupported/Eigen/CXX11/Tensor> #include <unsupported/Eigen/CXX11/Tensor>
#include "ngraph/runtime/cpu/kernel/eigen_thread_pool.hpp" #include "ngraph/runtime/cpu/kernel/eigen_thread_pool.hpp"
#include "ngraph/runtime/reference/max.hpp"
#include "ngraph/shape.hpp" #include "ngraph/shape.hpp"
namespace ngraph namespace ngraph
...@@ -31,8 +32,8 @@ namespace ngraph ...@@ -31,8 +32,8 @@ namespace ngraph
namespace kernel namespace kernel
{ {
template <typename ElementType, unsigned int Rank> template <typename ElementType, unsigned int Rank>
void reduce_max_all(ElementType* input, void reduce_max_all(void* input,
ElementType* output, void* output,
const Shape& input_shape, const Shape& input_shape,
const Shape& output_shape) const Shape& output_shape)
{ {
...@@ -44,16 +45,16 @@ namespace ngraph ...@@ -44,16 +45,16 @@ namespace ngraph
in_dims[i] = input_shape[i]; in_dims[i] = input_shape[i];
} }
Eigen::TensorMap<Eigen::Tensor<ElementType, 0, Eigen::RowMajor>> out(output, Eigen::TensorMap<Eigen::Tensor<ElementType, 0, Eigen::RowMajor>> out(
out_dims); static_cast<ElementType*>(output), out_dims);
Eigen::TensorMap<Eigen::Tensor<ElementType, Rank, Eigen::RowMajor>> in(input, Eigen::TensorMap<Eigen::Tensor<ElementType, Rank, Eigen::RowMajor>> in(
in_dims); static_cast<ElementType*>(input), in_dims);
out.device(eigen::global_thread_pool_device) = in.maximum(); out.device(eigen::global_thread_pool_device) = in.maximum();
} }
template <typename ElementType, unsigned int Rank, unsigned int ReductionDims> template <typename ElementType, unsigned int Rank, unsigned int ReductionDims>
void reduce_max(ElementType* input, void reduce_max(void* input,
ElementType* output, void* output,
const Shape& input_shape, const Shape& input_shape,
const Shape& output_shape, const Shape& output_shape,
const AxisSet& reduction_axes) const AxisSet& reduction_axes)
...@@ -80,11 +81,69 @@ namespace ngraph ...@@ -80,11 +81,69 @@ namespace ngraph
Eigen::TensorMap< Eigen::TensorMap<
Eigen::Tensor<ElementType, Rank - ReductionDims, Eigen::RowMajor>> Eigen::Tensor<ElementType, Rank - ReductionDims, Eigen::RowMajor>>
out(output, out_dims); out(static_cast<ElementType*>(output), out_dims);
Eigen::TensorMap<Eigen::Tensor<ElementType, Rank, Eigen::RowMajor>> in(input, Eigen::TensorMap<Eigen::Tensor<ElementType, Rank, Eigen::RowMajor>> in(
in_dims); static_cast<ElementType*>(input), in_dims);
out.device(eigen::global_thread_pool_device) = in.maximum(reduction_dims); out.device(eigen::global_thread_pool_device) = in.maximum(reduction_dims);
} }
/// Dispatch helper: max-reduction over exactly one reduction axis.
/// Thin forwarder so the builder can bind a functor with the
/// ReductionDims template argument fixed to 1.
template <typename ElementType, unsigned int Rank>
void reduce_max_1rd(void* input,
                    void* output,
                    const Shape& input_shape,
                    const Shape& output_shape,
                    const AxisSet& reduction_axes)
{
    reduce_max<ElementType, Rank, 1>(input, output, input_shape, output_shape, reduction_axes);
}
/// Dispatch helper: max-reduction of a rank-3 tensor over two axes.
/// Forwards to the generic kernel with Rank = 3, ReductionDims = 2.
template <typename ElementType>
void reduce_max_3d_2rd(void* input,
                       void* output,
                       const Shape& input_shape,
                       const Shape& output_shape,
                       const AxisSet& reduction_axes)
{
    reduce_max<ElementType, 3, 2>(input, output, input_shape, output_shape, reduction_axes);
}
/// Dispatch helper: max-reduction of a rank-4 tensor over two axes.
/// Forwards to the generic kernel with Rank = 4, ReductionDims = 2.
template <typename ElementType>
void reduce_max_4d_2rd(void* input,
                       void* output,
                       const Shape& input_shape,
                       const Shape& output_shape,
                       const AxisSet& reduction_axes)
{
    reduce_max<ElementType, 4, 2>(input, output, input_shape, output_shape, reduction_axes);
}
/// Dispatch helper: max-reduction of a rank-5 tensor over two axes.
/// Forwards to the generic kernel with Rank = 5, ReductionDims = 2.
template <typename ElementType>
void reduce_max_5d_2rd(void* input,
                       void* output,
                       const Shape& input_shape,
                       const Shape& output_shape,
                       const AxisSet& reduction_axes)
{
    reduce_max<ElementType, 5, 2>(input, output, input_shape, output_shape, reduction_axes);
}
/// Fallback path: delegates to the non-Eigen reference kernel
/// (ngraph/runtime/reference/max.hpp) for shapes/ranks the
/// specialized Eigen kernels above do not cover.
/// The void* arguments follow the builder framework's type-erased
/// buffer convention and are cast back to ElementType here.
template <typename ElementType>
void max(void* arg,
         void* out,
         const Shape& in_shape,
         const Shape& out_shape,
         const AxisSet& reduction_axes)
{
    ElementType* in_buffer = static_cast<ElementType*>(arg);
    ElementType* out_buffer = static_cast<ElementType*>(out);
    reference::max(in_buffer, out_buffer, in_shape, out_shape, reduction_axes);
}
} }
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment