Commit cec89708 authored by Robert Kimball's avatar Robert Kimball Committed by Scott Cyphers

Include cleanup (#583)

* cleanup

* cleanup

* fix all headers to be standalone as far as includes go

* include cleanup

* cleanup includes

* cleanup

* include tester

* wip

* cleanup

* cleanup

* cleanup
parent ca54c986
......@@ -20,6 +20,7 @@
#include "ngraph/builder/numpy_transpose.hpp"
#include "ngraph/except.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/util.hpp"
namespace ngraph
{
......
......@@ -17,12 +17,7 @@
#pragma once
#include "ngraph/axis_vector.hpp"
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/types/type.hpp"
namespace ngraph
{
......
......@@ -19,7 +19,7 @@
#include "ngraph/axis_set.hpp"
#include "ngraph/builder/autobroadcast.hpp"
#include "ngraph/builder/reduce_ops.hpp"
#include "ngraph/ops/add.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/divide.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/power.hpp"
......
......@@ -17,12 +17,7 @@
#pragma once
#include "ngraph/axis_set.hpp"
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/types/type.hpp"
namespace ngraph
{
......
......@@ -19,8 +19,6 @@
#include <sstream>
#include <string>
#include "ngraph/log.hpp"
namespace ngraph
{
namespace codegen
......
......@@ -16,7 +16,6 @@
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <vector>
......
......@@ -16,6 +16,7 @@
#pragma once
#include <functional>
#include <memory>
#include "ngraph/codegen/compiler.hpp"
......
......@@ -14,7 +14,6 @@
* limitations under the License.
*******************************************************************************/
#include <algorithm>
#include <cstdio>
#include <iostream>
#include <sstream>
......@@ -89,8 +88,10 @@ CoordinateTransform::CoordinateTransform(const Shape& source_shape,
}
AxisVector all_axes(m_n_axes);
size_t n = 0;
std::generate(all_axes.begin(), all_axes.end(), [&n]() -> size_t { return n++; });
for (size_t i = 0; i < all_axes.size(); i++)
{
all_axes[i] = i;
}
if (!std::is_permutation(all_axes.begin(), all_axes.end(), source_axis_order.begin()))
{
......
......@@ -16,11 +16,6 @@
#pragma once
#include <cassert>
#include <cstdio>
#include <iostream>
#include <vector>
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate.hpp"
#include "ngraph/coordinate_diff.hpp"
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace descriptor
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cassert>
#include <memory>
#include "ngraph/descriptor/buffer.hpp"
......
......@@ -21,9 +21,6 @@
#include "ngraph/types/type.hpp"
using namespace ngraph;
using ngraph::Shape;
using ngraph::descriptor::TensorView;
using ngraph::TensorViewType;
descriptor::layout::DenseTensorViewLayout::DenseTensorViewLayout(const TensorView& tensor_view)
: TensorViewLayout(tensor_view)
......
......@@ -17,7 +17,6 @@
#pragma once
#include <memory>
#include <tuple>
#include <vector>
#include "ngraph/descriptor/buffer_pos.hpp"
......
......@@ -20,9 +20,6 @@
#include "ngraph/descriptor/tensor.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/log.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/types/type.hpp"
namespace ngraph
{
......@@ -30,9 +27,6 @@ namespace ngraph
namespace descriptor
{
class Tensor;
class TensorViewLayout;
/// @brief A PrimaryTensorView owns the tensor. All other views are the result
/// of some index operation on the primary view.
class PrimaryTensorView : public TensorView
......
......@@ -23,12 +23,7 @@
#include <string>
#include <vector>
#include "ngraph/descriptor/output.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
#include "ngraph/ops/op.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/parameter_vector.hpp"
#include "ngraph/types/type.hpp"
......
......@@ -14,13 +14,9 @@
* limitations under the License.
*******************************************************************************/
#include <algorithm>
#include <cassert>
#include <deque>
#include <forward_list>
#include <iomanip>
#include <iterator>
#include <map>
#include <unordered_map>
#include <unordered_set>
#include <vector>
......
......@@ -16,27 +16,18 @@
#pragma once
#include <algorithm>
#include <chrono>
#include <deque>
#include <functional>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
#include "ngraph/placement.hpp"
namespace ngraph
{
class Node;
class Function;
namespace descriptor
{
class Input;
......
......@@ -16,14 +16,15 @@
#pragma once
#include <memory>
#include <vector>
#include "ngraph/ops/parameter.hpp"
namespace ngraph
{
namespace op
{
class Parameter;
/// \brief Zero or more nodes.
class ParameterVector : public std::vector<std::shared_ptr<op::Parameter>>
{
......
......@@ -14,10 +14,11 @@
* limitations under the License.
*******************************************************************************/
#include "ngraph/ops/reverse.hpp"
#include "ngraph/function.hpp"
#include <algorithm>
#include <sstream>
#include "ngraph/function.hpp"
#include "ngraph/ops/reverse.hpp"
using namespace std;
using namespace ngraph;
......
......@@ -16,6 +16,7 @@
#include "ngraph/ops/select_and_scatter.hpp"
#include "ngraph/function.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/util.hpp"
using namespace std;
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <memory>
#include <sstream>
#include "ngraph/ops/xla_get_tuple_element.hpp"
#include "ngraph/ops/xla_tuple.hpp"
using namespace std;
using namespace ngraph;
op::XLAGetTupleElement::XLAGetTupleElement(const std::shared_ptr<Node>& arg, size_t n)
    : XLANode("XLAGetTupleElement", {arg})
    , m_n{n}
{
    // The argument must be an XLA node that evaluates to a tuple; keep the
    // downcast result so later accessors can reach the tuple elements.
    m_arg = dynamic_pointer_cast<XLANode>(arg);
    bool arg_is_tuple_view = (m_arg != nullptr) && (m_arg->get_tuple_value() != nullptr);
    if (!arg_is_tuple_view)
    {
        throw ngraph_error("Argument must be a tuple view");
    }
    // Bounds-check the requested 0-based element index against the tuple arity.
    if (m_n >= m_arg->get_tuple_elements().size())
    {
        throw ngraph_error("Indexing tuple beyond its size");
    }
}
Nodes op::XLAGetTupleElement::get_input_ops() //const
{
    // The sole input is the tuple-producing argument node captured at
    // construction time.
    Nodes inputs{m_arg};
    return inputs;
}
shared_ptr<const op::XLATuple> op::XLAGetTupleElement::get_tuple_value() const
{
    // Look up the n'th element of the argument tuple and view it as a tuple.
    // dynamic_pointer_cast yields nullptr when that element is not itself an
    // XLATuple; .at() throws if m_n is out of range.
    const Nodes& elements = m_arg->get_tuple_elements();
    return dynamic_pointer_cast<const op::XLATuple>(elements.at(m_n));
}
const Nodes& op::XLAGetTupleElement::get_tuple_elements() const
{
    // Delegate to the selected element, which is expected to itself be a
    // tuple. NOTE(review): get_tuple_value() can return nullptr when the
    // element is not an XLATuple — presumably callers only invoke this on
    // nested tuples; confirm that invariant holds.
    auto nested_tuple = get_tuple_value();
    return nested_tuple->get_tuple_elements();
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/ops/xla_node.hpp"
namespace ngraph
{
    namespace op
    {
        /// \brief Operation that extracts a single element from a tuple.
        ///
        /// ## Parameters
        ///
        /// |     | Description                                                        |
        /// | --- | ------------------------------------------------------------------ |
        /// | `n` | The position of the element (0-based) to get from the input tuple. |
        ///
        /// ## Inputs
        ///
        /// |        | Type                                                         | Description                                |
        /// | ------ | ------------------------------------------------------------ | ------------------------------------------ |
        /// | `arg`  | \f$(T_1,\dots,T_{n-1},T_n,T_{n+1},\dots,T_m)~(m \geq 1)\f$   | An input tuple with at least `n` elements. |
        ///
        /// ## Output
        ///
        /// | Type      | Description                           |
        /// | --------- | ------------------------------------- |
        /// | \f$T_n\f$ | The `n`th element of the input tuple. |
        class XLAGetTupleElement : public XLANode
        {
        public:
            /// \brief Constructs a get-tuple-element operation.
            ///
            /// \param arg The input tuple.
            /// \param n The index of the tuple element to get.
            XLAGetTupleElement(const std::shared_ptr<Node>& arg, size_t n);

            virtual std::shared_ptr<Node> copy_with_new_args(
                const std::vector<std::shared_ptr<Node>>& new_args) const override
            {
                // A get-tuple-element node has exactly one input.
                if (new_args.size() == 1)
                {
                    return std::make_shared<XLAGetTupleElement>(new_args.at(0), m_n);
                }
                throw ngraph_error("Incorrect number of new arguments");
            }

            virtual Nodes get_input_ops() override; //const;
            virtual std::shared_ptr<const XLATuple> get_tuple_value() const override;
            virtual const Nodes& get_tuple_elements() const override;

            /// \return The index of the tuple element to get.
            size_t get_n() const { return m_n; }

        protected:
            std::shared_ptr<XLANode> m_arg; // argument viewed as an XLA (tuple) node
            size_t m_n;                     // 0-based element index
        };
    }
}
......@@ -16,6 +16,7 @@
#include "inliner.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ops/function_call.hpp"
std::vector<std::shared_ptr<ngraph::op::FunctionCall>>
......
......@@ -17,7 +17,6 @@
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/pattern/matcher.hpp"
#include "ngraph/pattern/op/pattern.hpp"
namespace ngraph
......
......@@ -17,7 +17,6 @@
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/pattern/matcher.hpp"
#include "ngraph/pattern/op/pattern.hpp"
namespace ngraph
......
......@@ -20,7 +20,6 @@
#include "ngraph/node.hpp"
#include "ngraph/pass/graph_rewrite.hpp"
#include "ngraph/pattern/matcher.hpp"
namespace ngraph
{
......
......@@ -14,6 +14,8 @@
* limitations under the License.
*******************************************************************************/
#include <memory>
#include "ngraph/runtime/aligned_buffer.hpp"
using namespace ngraph;
......
......@@ -17,7 +17,6 @@
#pragma once
#include <cstddef>
#include <memory>
namespace ngraph
{
......
......@@ -18,7 +18,6 @@
#include <memory>
#include "ngraph/log.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/types/element_type.hpp"
......
......@@ -19,7 +19,6 @@
#include <memory>
#include <vector>
#include "ngraph/function.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
......
......@@ -17,7 +17,6 @@
#pragma once
#include <cstdint>
#include <list>
#include <string>
#include <vector>
......
......@@ -15,6 +15,8 @@
*******************************************************************************/
#include "matmul_bias.hpp"
#include "ngraph/log.hpp"
#include "ngraph/util.hpp"
std::shared_ptr<ngraph::Node>
ngraph::op::MatmulBias::copy_with_new_args(const NodeVector& new_args) const
......
......@@ -16,11 +16,7 @@
#pragma once
#include "ngraph/node.hpp"
#include "ngraph/ops/util/requires_tensor_view_args.hpp"
#include "ngraph/util.hpp"
#include <memory>
namespace ngraph
{
......
......@@ -25,6 +25,7 @@
#include "cpu_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ops/add.hpp"
#include "ngraph/ops/avg_pool.hpp"
#include "ngraph/ops/convolution.hpp"
......
......@@ -17,9 +17,6 @@
#pragma once
#include <memory>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include "ngraph/function.hpp"
......
......@@ -16,16 +16,11 @@
#pragma once
#include <functional>
#include <memory>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include "ngraph/function.hpp"
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/external_function.hpp"
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/interpreter/int_call_frame.hpp"
namespace ngraph
{
......
......@@ -16,7 +16,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -16,7 +16,6 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <numeric>
#include <vector>
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -119,10 +119,10 @@ namespace ngraph
}
AxisVector input_batch_transform_axis_order(2 + n_spatial_dimensions);
size_t n = 0;
std::generate(input_batch_transform_axis_order.begin(),
input_batch_transform_axis_order.end(),
[&n]() -> size_t { return n++; });
for (size_t i = 0; i < input_batch_transform_axis_order.size(); i++)
{
input_batch_transform_axis_order[i] = i;
}
CoordinateTransform input_batch_transform(
arg0_shape,
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
#include <stdexcept>
#include <type_traits>
namespace ngraph
......
......@@ -19,6 +19,8 @@
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -16,7 +16,6 @@
#pragma once
#include <algorithm>
#include <cmath>
#include <numeric>
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -19,6 +19,8 @@
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wfloat-equal"
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -20,7 +20,6 @@
#include "ngraph/axis_vector.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/util.hpp"
namespace ngraph
{
......
......@@ -16,6 +16,9 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <functional>
#include "ngraph/coordinate_transform.hpp"
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <functional>
#include "ngraph/coordinate_transform.hpp"
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <functional>
#include "ngraph/coordinate_transform.hpp"
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -16,6 +16,8 @@
#pragma once
#include <cstddef>
namespace ngraph
{
namespace runtime
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -17,6 +17,7 @@
#pragma once
#include <cmath>
#include <cstddef>
namespace ngraph
{
......
......@@ -16,11 +16,9 @@
#pragma once
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <vector>
namespace ngraph
{
......
......@@ -23,7 +23,6 @@
#include "ngraph/shape.hpp"
#include "ngraph/strides.hpp"
#include "ngraph/types/element_type.hpp"
#include "ngraph/util.hpp"
namespace ngraph
{
......
......@@ -55,6 +55,7 @@
#include "ngraph/ops/not_equal.hpp"
#include "ngraph/ops/one_hot.hpp"
#include "ngraph/ops/pad.hpp"
#include "ngraph/ops/parameter.hpp"
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/product.hpp"
#include "ngraph/ops/reduce.hpp"
......
......@@ -17,7 +17,6 @@
#pragma once
#include <memory>
#include <unordered_map>
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
......
......@@ -17,7 +17,6 @@
#pragma once
#include <cstdio>
#include <iostream>
#include <vector>
#include "ngraph/axis_set.hpp"
......
......@@ -16,6 +16,7 @@
#pragma once
#include <cstddef>
#include <vector>
namespace ngraph
......
......@@ -14,13 +14,8 @@
* limitations under the License.
*******************************************************************************/
#include <cassert>
#include <cmath>
#include <iostream>
#include <vector>
#include "ngraph/log.hpp"
#include "ngraph/log.hpp"
#include "ngraph/types/element_type.hpp"
using namespace ngraph;
......
......@@ -20,15 +20,12 @@
#pragma once
#include <map>
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <type_traits>
#include <vector>
#include "ngraph/except.hpp"
#include "ngraph/log.hpp"
namespace ngraph
{
......
......@@ -16,8 +16,6 @@
#include <memory>
#include "ngraph/except.hpp"
#include "ngraph/log.hpp"
#include "ngraph/types/type.hpp"
#include "ngraph/util.hpp"
......
......@@ -16,18 +16,12 @@
#pragma once
#include <algorithm>
#include <chrono>
#include <deque>
#include <functional>
#include <iostream>
#include <list>
#include <map>
#include <memory>
#include <sstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace ngraph
......@@ -208,40 +202,6 @@ namespace ngraph
return result;
}
template <class InputIt, class BinaryOp>
typename std::iterator_traits<InputIt>::value_type
reduce(InputIt first, InputIt last, BinaryOp op)
{
typename std::iterator_traits<InputIt>::value_type result;
if (first == last)
{
result = {};
}
else
{
result = *first++;
while (first != last)
{
result = op(result, *first);
first++;
}
}
return result;
}
template <typename T>
T plus(const T& a, const T& b)
{
return a + b;
}
template <typename T>
T mul(const T& a, const T& b)
{
return a * b;
}
template <typename T>
T ceil_div(const T& x, const T& y)
{
......
......@@ -21,6 +21,8 @@
#include <iomanip>
#include <iostream>
#include <random>
#include <sstream>
#include <string>
static std::mt19937_64 random_generator;
......
......@@ -40,6 +40,7 @@ set (SRC
main.cpp
op.cpp
graph_partition.cpp
includes.cpp
pass_liveness.cpp
pass_manager.cpp
pass_memory_layout.cpp
......@@ -150,6 +151,8 @@ include_directories(".")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCURDIR=\\\"${CMAKE_CURRENT_SOURCE_DIR}\\\"")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DJSON_INCLUDES=\\\"${JSON_INCLUDE_DIR}\\\"")
if(NGRAPH_ADDRESS_SANITIZER)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -fsanitize=address -fno-omit-frame-pointer")
endif()
......
This diff is collapsed.
......@@ -143,42 +143,6 @@ TEST(util, contains)
EXPECT_FALSE(contains(v1, 8));
}
TEST(util, remove_from)
{
}
TEST(util, reduce)
{
{
std::vector<size_t> x = {};
size_t actual =
ngraph::reduce(x.begin(), x.end(), [](size_t a, size_t b) { return a + b; });
EXPECT_EQ(actual, 0);
}
{
std::vector<size_t> x = {10};
size_t actual =
ngraph::reduce(x.begin(), x.end(), [](size_t a, size_t b) { return a + b; });
EXPECT_EQ(actual, 10);
}
{
std::vector<size_t> x = {1, 2, 3, 4, 5, 6};
size_t actual =
ngraph::reduce(x.begin(), x.end(), [](size_t a, size_t b) { return a + b; });
EXPECT_EQ(actual, 21);
}
{
std::vector<size_t> x = {1, 2, 3, 4, 5, 6};
size_t actual = ngraph::reduce(x.begin(), x.end(), ngraph::plus<size_t>);
EXPECT_EQ(actual, 21);
}
{
std::vector<size_t> x = {1, 2, 3, 4, 5, 6};
size_t actual = ngraph::reduce(x.begin(), x.end(), ngraph::mul<size_t>);
EXPECT_EQ(actual, 720);
}
}
TEST(util, all_close)
{
auto manager = runtime::Manager::get("INTERPRETER");
......
......@@ -22,6 +22,7 @@
#include "ngraph/runtime/manager.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "random.hpp"
std::multimap<size_t, std::string>
......
......@@ -14,6 +14,10 @@
* limitations under the License.
*******************************************************************************/
#pragma once
#include <map>
#include <ngraph/runtime/call_frame.hpp>
#include "test_tools.hpp"
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment