Commit f8a0f784 authored by Adam Procter

Merge remote-tracking branch 'origin/master' into r0.10

parents cb4efed3 403a09ce
......@@ -105,6 +105,7 @@ option(NGRAPH_ONNX_IMPORT_ENABLE "Enable ONNX importer" FALSE)
option(NGRAPH_DEX_ONLY "Build CPU DEX without codegen" FALSE)
option(NGRAPH_CODE_COVERAGE_ENABLE "Enable code coverage data collection" FALSE)
option(NGRAPH_LIB_VERSIONING_ENABLE "Enable shared library versioning" FALSE)
option(NGRAPH_PYTHON_BUILD_ENABLE "Enable build nGraph python package wheel" FALSE)
message(STATUS "NGRAPH_UNIT_TEST_ENABLE: ${NGRAPH_UNIT_TEST_ENABLE}")
message(STATUS "NGRAPH_TOOLS_ENABLE: ${NGRAPH_TOOLS_ENABLE}")
......
......@@ -924,9 +924,9 @@ def batch_norm(eps, # type: float
# type: (...) -> Node
"""Return batch normalization node."""
    if mean is None and variance is None:
-       return BatchNormTraining(eps, gamma, beta, data)
+       return BatchNormTraining(data, gamma, beta, eps)
    else:
-       return BatchNormInference(eps, gamma, beta, data, mean, variance)
+       return BatchNormInference(data, gamma, beta, mean, variance, eps)
@nameable_op
......
......@@ -30,10 +30,10 @@ void regclass_pyngraph_op_BatchNormTraining(py::module m)
batch_norm_training(m, "BatchNormTraining");
batch_norm_training.doc() =
"ngraph.impl.op.BatchNormTraining wraps ngraph::op::BatchNormTraining";
-    batch_norm_training.def(py::init<double,
+    batch_norm_training.def(py::init<const std::shared_ptr<ngraph::Node>&,
                                      const std::shared_ptr<ngraph::Node>&,
                                      const std::shared_ptr<ngraph::Node>&,
-                                     const std::shared_ptr<ngraph::Node>&>());
+                                     double>());
}
void regclass_pyngraph_op_BatchNormInference(py::module m)
......@@ -45,12 +45,12 @@ void regclass_pyngraph_op_BatchNormInference(py::module m)
batch_norm_inference.doc() =
"ngraph.impl.op.BatchNormInference wraps ngraph::op::BatchNormInference";
-    batch_norm_inference.def(py::init<double,
+    batch_norm_inference.def(py::init<const std::shared_ptr<ngraph::Node>&,
                                       const std::shared_ptr<ngraph::Node>&,
                                       const std::shared_ptr<ngraph::Node>&,
                                       const std::shared_ptr<ngraph::Node>&,
                                       const std::shared_ptr<ngraph::Node>&,
-                                      const std::shared_ptr<ngraph::Node>&>());
+                                      double>());
}
void regclass_pyngraph_op_BatchNormTrainingBackprop(py::module m)
......@@ -61,11 +61,11 @@ void regclass_pyngraph_op_BatchNormTrainingBackprop(py::module m)
batch_norm_training_backprop(m, "BatchNormTrainingBackprop");
batch_norm_training_backprop.doc() =
"ngraph.impl.op.BatchNormTrainingBackprop wraps ngraph::op::BatchNormTrainingBackprop";
-    batch_norm_training_backprop.def(py::init<double,
+    batch_norm_training_backprop.def(py::init<const std::shared_ptr<ngraph::Node>&,
                                               const std::shared_ptr<ngraph::Node>&,
                                               const std::shared_ptr<ngraph::Node>&,
                                               const std::shared_ptr<ngraph::Node>&,
                                               const std::shared_ptr<ngraph::Node>&,
                                               const std::shared_ptr<ngraph::Node>&,
-                                              const std::shared_ptr<ngraph::Node>&>());
+                                              double>());
}
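The net effect of the wrapper and binding changes above is that the Python-facing batch-norm constructors now take the input nodes first and epsilon last. A minimal sketch follows; it is not part of this patch, assumes a wheel built from this branch, and uses illustrative shapes only:

    from ngraph.impl import Shape, Type
    from ngraph.impl.op import BatchNormInference, BatchNormTraining, Parameter

    # Illustrative graph inputs; the channel count of 3 is arbitrary.
    data = Parameter(Type.f32, Shape([2, 3, 4, 5]))
    gamma = Parameter(Type.f32, Shape([3]))
    beta = Parameter(Type.f32, Shape([3]))
    mean = Parameter(Type.f32, Shape([3]))
    variance = Parameter(Type.f32, Shape([3]))
    eps = 1e-5

    # New argument order: nodes first, epsilon last.
    bn_train = BatchNormTraining(data, gamma, beta, eps)                   # was (eps, gamma, beta, data)
    bn_infer = BatchNormInference(data, gamma, beta, mean, variance, eps)  # was (eps, gamma, beta, data, mean, variance)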
......@@ -352,7 +352,7 @@ with open(os.path.join(PYNGRAPH_SOURCE_DIR, 'requirements.txt')) as req:
requirements = req.read().splitlines()
setup(
-    name='ngraph',
+    name='ngraph-core',
version=__version__,
author='Intel',
author_email='intelnervana@intel.com',
......
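With the setup.py change above, the wheel is now published under the distribution name ngraph-core; the import name is assumed to remain ngraph. A quick sanity check after installing (hypothetical session):

    # pip install ngraph-core   <- distribution renamed from "ngraph"
    import ngraph
    print(ngraph.__version__)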
......@@ -60,10 +60,10 @@ namespace ngraph
void replace_output(Output& output);
protected:
-            /// \return the tensor view for the connected output
+            /// \return the tensor for the connected output
             std::shared_ptr<const Tensor> get_tensor_ptr() const;
-            /// \return the tensor view for the connected output
+            /// \return the tensor for the connected output
             std::shared_ptr<Tensor> get_tensor_ptr();
public:
......
......@@ -32,7 +32,7 @@ namespace ngraph
{
namespace layout
{
-        /// \brief Interface for describing implementations of tensor views.
+        /// \brief Interface for describing implementations of tensors.
///
/// Kernel selection will need to pay attention to the layout.
class TensorLayout
......@@ -44,7 +44,7 @@ namespace ngraph
public:
virtual ~TensorLayout() {}
-            /// Extent of this view in buffer.
+            /// Extent of this tensor in buffer.
///
/// When we support non-linear buffers, this will need to be something other than size_t.
size_t get_size() const;
......
......@@ -39,7 +39,7 @@ namespace ngraph
public:
/// \param node Node that owns this output.
/// \param index Position of the output tensor in all output tensors
-        /// \param tensor The view of this tensor; where the value will be written
+        /// \param tensor The tensor where the value will be written
Output(Node* node, size_t index, const std::shared_ptr<Tensor>& tensor);
std::shared_ptr<Node> get_node() const;
......
......@@ -35,7 +35,7 @@ namespace ngraph
class TensorLayout;
}
-    /// \brief Compile-time descriptor of a first-class value that is a view of a tensor.
+    /// \brief Compile-time descriptor of a first-class value that is a tensor.
class Tensor
{
Tensor(const Tensor&) = delete;
......
......@@ -19,7 +19,6 @@
#include <onnx-ml.pb.h>
#include "ngraph/except.hpp"
#include "tensor.hpp"
namespace ngraph
......
......@@ -16,14 +16,12 @@
#pragma once
#include <onnx-ml.pb.h>
#include <string>
#include <vector>
#include <onnx-ml.pb.h>
#include "ngraph/parameter_vector.hpp"
#include "model.hpp"
#include "ngraph/parameter_vector.hpp"
#include "operator_set.hpp"
#include "value_info.hpp"
#include "weight.hpp"
......
......@@ -17,7 +17,6 @@
#pragma once
#include <onnx-ml.pb.h>
#include <ostream>
#include <string>
#include <unordered_map>
......
......@@ -21,7 +21,6 @@
#include <unordered_map>
#include "ngraph/node_vector.hpp"
#include "node.hpp"
namespace ngraph
......
......@@ -16,9 +16,8 @@
#pragma once
#include <vector>
#include <onnx-ml.pb.h>
#include <vector>
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
......
......@@ -22,7 +22,6 @@
#include "ngraph/parameter_vector.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
#include "node.hpp"
#include "tensor.hpp"
#include "weight.hpp"
......
......@@ -16,12 +16,10 @@
#include <fstream>
#include "ngraph/except.hpp"
#include "core/graph.hpp"
#include "core/model.hpp"
#include "core/node.hpp"
#include "ngraph/except.hpp"
#include "onnx.hpp"
#include "ops_bridge.hpp"
......
......@@ -19,10 +19,9 @@
#include <iostream>
#include <string>
#include "ngraph/function.hpp"
#include "core/operator_set.hpp"
#include "core/weight.hpp"
#include "ngraph/function.hpp"
namespace ngraph
{
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/abs.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/acos.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/add.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/and.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/asin.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/atan.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -17,7 +17,6 @@
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "utils/convpool.hpp"
namespace ngraph
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -17,11 +17,10 @@
#include <cstdint>
#include <memory>
#include "ngraph/node_vector.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/frontend/onnx_import/exceptions.hpp"
#include "ngraph/frontend/onnx_import/op/batch_norm.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/batch_norm.hpp"
namespace ngraph
{
......@@ -54,11 +53,11 @@ namespace ngraph
mean = inputs.at(3);
var = inputs.at(4);
                    return {std::make_shared<ngraph::op::BatchNormInference>(
-                        epsilon, scale, bias, x, mean, var)};
+                        x, scale, bias, mean, var, epsilon)};
                }
                return {
-                    std::make_shared<ngraph::op::BatchNormTraining>(epsilon, scale, bias, x)};
+                    std::make_shared<ngraph::op::BatchNormTraining>(x, scale, bias, epsilon)};
}
} // namespace set_1
......
......@@ -15,14 +15,12 @@
//*****************************************************************************
#include <memory>
#include <onnx-ml.pb.h>
#include "ngraph/op/convert.hpp"
#include "ngraph/type/element_type.hpp"
#include "cast.hpp"
#include "exceptions.hpp"
#include "ngraph/op/convert.hpp"
#include "ngraph/type/element_type.hpp"
namespace ngraph
{
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/ceiling.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -17,17 +17,14 @@
#include <limits>
#include <memory>
#include "clip.hpp"
#include "core/node.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/minimum.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
#include "clip.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -15,7 +15,6 @@
//*****************************************************************************
#include "concat.hpp"
#include "ngraph/op/concat.hpp"
namespace ngraph
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -18,18 +18,16 @@
#include <memory>
#include <vector>
#include "op/conv.hpp"
#include "ngraph/frontend/onnx_import/exceptions.hpp"
#include "ngraph/frontend/onnx_import/op/conv.hpp"
#include "ngraph/frontend/onnx_import/utils/broadcasting.hpp"
#include "ngraph/frontend/onnx_import/utils/convpool.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/concat.hpp"
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/frontend/onnx_import/exceptions.hpp"
#include "ngraph/frontend/onnx_import/op/conv.hpp"
#include "ngraph/frontend/onnx_import/utils/broadcasting.hpp"
#include "ngraph/frontend/onnx_import/utils/convpool.hpp"
#include "op/conv.hpp"
namespace ngraph
{
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/cos.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/divide.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -18,7 +18,6 @@
#include <vector>
#include "ngraph/node.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
......@@ -27,7 +26,6 @@
#include "ngraph/op/minimum.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/subtract.hpp"
#include "utils/broadcasting.hpp"
#include "elu.hpp"
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/equal.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/exp.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -15,7 +15,6 @@
//*****************************************************************************
#include "flatten.hpp"
#include "exceptions.hpp"
#include "utils/reshape.hpp"
......
......@@ -18,9 +18,8 @@
#include <memory>
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/floor.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -15,17 +15,15 @@
//*****************************************************************************
#include "op/gemm.hpp"
#include "ngraph/frontend/onnx_import/exceptions.hpp"
#include "ngraph/frontend/onnx_import/utils/broadcasting.hpp"
#include "ngraph/frontend/onnx_import/utils/reshape.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/dot.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/frontend/onnx_import/exceptions.hpp"
#include "ngraph/frontend/onnx_import/utils/broadcasting.hpp"
#include "ngraph/frontend/onnx_import/utils/reshape.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -18,9 +18,8 @@
#include <memory>
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -17,7 +17,6 @@
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/avg_pool.hpp"
#include "utils/convpool.hpp"
namespace ngraph
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -17,7 +17,6 @@
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/max_pool.hpp"
#include "utils/convpool.hpp"
namespace ngraph
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/greater.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -16,19 +16,16 @@
#include <memory>
#include "core/node.hpp"
#include "hard_sigmoid.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/minimum.hpp"
#include "ngraph/op/multiply.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
#include "hard_sigmoid.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,21 +16,18 @@
#include <memory>
#include "exceptions.hpp"
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/maximum.hpp"
#include "ngraph/op/multiply.hpp"
#include "exceptions.hpp"
#include "ngraph/shape.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
#include "leaky_relu.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
{
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/less.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/log.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -18,13 +18,12 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/frontend/onnx_import/op/softmax.hpp"
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/log.hpp"
#include "core/node.hpp"
#include "ngraph/frontend/onnx_import/op/softmax.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -16,10 +16,9 @@
#include <memory>
#include "ngraph/op/lrn.hpp"
#include "core/node.hpp"
#include "lrn.hpp"
#include "ngraph/op/lrn.hpp"
namespace ngraph
{
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -24,6 +24,8 @@
#include <unordered_map>
#include <vector>
#include "exceptions.hpp"
#include "lstm.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/concat.hpp"
......@@ -34,9 +36,6 @@
#include "ngraph/op/tanh.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
#include "exceptions.hpp"
#include "lstm.hpp"
#include "utils/broadcasting.hpp"
#include "utils/common.hpp"
#include "utils/reshape.hpp"
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -19,6 +19,8 @@
#include <memory>
#include <vector>
#include "exceptions.hpp"
#include "matmul.hpp"
#include "ngraph/coordinate.hpp"
#include "ngraph/log.hpp"
#include "ngraph/op/concat.hpp"
......@@ -26,9 +28,6 @@
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/shape.hpp"
#include "exceptions.hpp"
#include "matmul.hpp"
#include "utils/broadcasting.hpp"
#include "utils/reshape.hpp"
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/maximum.hpp"
#include "core/node.hpp"
#include "utils/variadic.hpp"
namespace ngraph
......
......@@ -17,7 +17,6 @@
#include "ngraph/op/max_pool.hpp"
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "utils/convpool.hpp"
namespace ngraph
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -14,11 +14,10 @@
// limitations under the License.
//*****************************************************************************
#include "mean.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/divide.hpp"
#include "mean.hpp"
#include "utils/variadic.hpp"
namespace ngraph
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/minimum.hpp"
#include "core/node.hpp"
#include "utils/variadic.hpp"
namespace ngraph
......
......@@ -16,11 +16,10 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/multiply.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -16,11 +16,10 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/negative.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/not.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/or.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -16,10 +16,9 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/power.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
......
......@@ -18,6 +18,7 @@
#include <iterator>
#include <memory>
#include "core/node.hpp"
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/add.hpp"
......@@ -28,8 +29,6 @@
#include "ngraph/op/less.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/reshape.hpp"
#include "core/node.hpp"
#include "prelu.hpp"
#include "utils/broadcasting.hpp"
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -20,7 +20,6 @@
#include "ngraph/op/constant.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/shape.hpp"
#include "utils/broadcasting.hpp"
#include "reciprocal.hpp"
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -22,7 +22,6 @@
#include "ngraph/op/constant.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/shape.hpp"
#include "reduce.hpp"
#include "utils/broadcasting.hpp"
......
......@@ -18,6 +18,7 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/exp.hpp"
......@@ -28,8 +29,6 @@
#include "ngraph/op/product.hpp"
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/sum.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
#include "utils/reduction.hpp"
......
......@@ -16,11 +16,10 @@
#pragma once
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/relu.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -18,12 +18,11 @@
#include <memory>
#include <vector>
#include "exceptions.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/shape.hpp"
#include "exceptions.hpp"
#include "reshape.hpp"
#include "utils/reshape.hpp"
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -17,10 +17,9 @@
#include <memory>
#include <vector>
#include "core/node.hpp"
#include "ngraph/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/constant.hpp"
......@@ -29,11 +28,9 @@
#include "ngraph/op/minimum.hpp"
#include "ngraph/op/multiply.hpp"
#include "ngraph/op/subtract.hpp"
#include "core/node.hpp"
#include "utils/broadcasting.hpp"
#include "ngraph/shape.hpp"
#include "selu.hpp"
#include "utils/broadcasting.hpp"
namespace ngraph
{
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -17,11 +17,10 @@
#include <memory>
#include "ngraph/node.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/shape.hpp"
#include "ngraph/type/element_type.hpp"
#include "ngraph/op/constant.hpp"
#include "shape.hpp"
namespace ngraph
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/sigmoid.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -18,11 +18,10 @@
#include <memory>
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
#include "ngraph/op/sin.hpp"
#include "core/node.hpp"
namespace ngraph
{
namespace onnx_import
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -20,7 +20,6 @@
#include "ngraph/node.hpp"
#include "ngraph/op/slice.hpp"
#include "slice.hpp"
#include "utils/common.hpp"
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -16,9 +16,8 @@
#include <numeric>
#include "ngraph/op/softmax.hpp"
#include "exceptions.hpp"
#include "ngraph/op/softmax.hpp"
#include "softmax.hpp"
namespace ngraph
......
......@@ -17,12 +17,10 @@
#include <memory>
#include "ngraph/node.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/constant.hpp"
#include "ngraph/op/exp.hpp"
#include "ngraph/op/log.hpp"
#include "utils/broadcasting.hpp"
#include "softplus.hpp"
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
......@@ -22,7 +22,6 @@
#include "ngraph/op/constant.hpp"
#include "ngraph/op/divide.hpp"
#include "ngraph/shape.hpp"
#include "utils/broadcasting.hpp"
#include "softsign.hpp"
......
......@@ -16,9 +16,8 @@
#pragma once
#include "ngraph/node_vector.hpp"
#include "core/node.hpp"
#include "ngraph/node_vector.hpp"
namespace ngraph
{
......
[20 additional file diffs collapsed and not shown]