Unverified Commit 21e73812 authored by Adam Procter, committed by GitHub

Merge branch 'master' into krovatkin/ml_reuse_debug

parents 1f0c8d31 ec86e1a8
@@ -6,11 +6,11 @@
/cmake/ @rkimballn1 @silee2
/.ci/ @aslepko @crlishka
/.ci/travis/ @postrational
/.ci/onnx/ @postrational
/contrib/docker/ @aslepko @crlishka
/.travis.yml @postrational
/.ci/ @aslepko
/.ci/travis/ @aslepko @postrational
/.ci/onnx/ @aslepko @postrational
/contrib/docker/ @aslepko
/.travis.yml @aslepko @postrational
/.clang-format @rkimballn1
/.gitattributes @rkimballn1
@@ -42,3 +42,7 @@ add_custom_command(
)
add_custom_target(python_wheel DEPENDS ngraph ${CMAKE_BINARY_DIR}/python/dist/)
if (NGRAPH_CPU_ENABLE)
add_dependencies(python_wheel ext_mkldnn)
endif()
@@ -15,8 +15,12 @@
# ******************************************************************************
"""ngraph module namespace, exposing factory functions for all ops and other classes."""
from pkg_resources import get_distribution
__version__ = get_distribution('ngraph-core').version
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution('ngraph-core').version
except DistributionNotFound:
__version__ = '0.0.0-dev'
from ngraph.ops import absolute
from ngraph.ops import absolute as abs
@@ -374,6 +374,10 @@ class BuildExt(build_ext):
build_ext.build_extensions(self)
if sys.platform == 'darwin':
# This turns out to be needed when building using Anaconda python on macOS.
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
with open(os.path.join(PYNGRAPH_ROOT_DIR, 'requirements.txt')) as req:
requirements = req.read().splitlines()
@@ -99,6 +99,14 @@ namespace ngraph
return op_list;
}
bool is_operator_supported(const std::string& op_name,
std::int64_t version,
const std::string& domain)
{
return OperatorsBridge::is_operator_registered(
op_name, version, domain == "ai.onnx" ? "" : domain);
}
} // namespace onnx_import
} // namespace ngraph
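A quick usage sketch for reviewers (not part of this PR): probing importer coverage through the new entry point before attempting a model load. The include path is an assumption, not taken from this diff:

```cpp
#include "ngraph/frontend/onnx_import/onnx.hpp" // assumed include path

#include <iostream>

int main()
{
    // "ai.onnx" (the default argument) is normalized to the importer's
    // internal default domain "" before the registry lookup.
    const bool ok = ngraph::onnx_import::is_operator_supported("Add", 9);
    std::cout << "Add@9 supported: " << std::boolalpha << ok << '\n';
    return ok ? 0 : 1;
}
```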
@@ -52,6 +52,18 @@ namespace ngraph
std::set<std::string> get_supported_operators(std::int64_t version,
const std::string& domain);
/// \brief Determines whether an ONNX operator is supported.
///
/// \param[in] op_name The ONNX operator name.
/// \param[in] version The ONNX operator set version.
/// \param[in] domain The domain the ONNX operator is registered to.
///
/// \return true if the operator is supported, false otherwise.
///
bool is_operator_supported(const std::string& op_name,
std::int64_t version,
const std::string& domain = "ai.onnx");
/// \brief Convert an ONNX model to an nGraph function.
/// The function translates a serialized ONNX model into an nGraph function.
/// The serialized ONNX model is read from an input stream.
@@ -21,6 +21,7 @@
#include <unordered_map>
#include "core/attribute.hpp"
#include "ngraph/log.hpp"
#include "op/abs.hpp"
#include "op/acos.hpp"
#include "op/add.hpp"
@@ -102,20 +103,19 @@ namespace ngraph
{
namespace detail
{
const Operator& find(const std::string& name,
std::int64_t version,
const std::string& domain,
const std::map<std::int64_t, Operator>& map)
std::map<std::int64_t, Operator>::const_iterator
find(std::int64_t version, const std::map<std::int64_t, Operator>& map)
{
// start at end() so the not-found case compares safely against std::end(map)
auto it = std::end(map);
while (version > 0)
{
const auto it = map.find(version--);
it = map.find(version--);
if (it != std::end(map))
{
return it->second;
return it;
}
}
throw error::UnsupportedVersion{name, version, domain};
return it;
}
}
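A self-contained sketch of the fallback search above, with `std::string` standing in for `Operator`; requesting version 9 when only 1 and 7 are registered walks downward and resolves to 7:

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

using OpMap = std::map<std::int64_t, std::string>; // stand-in for the Operator map

OpMap::const_iterator find_op(std::int64_t version, const OpMap& map)
{
    auto it = std::end(map); // returned unchanged if no version matches
    while (version > 0)
    {
        it = map.find(version--);
        if (it != std::end(map))
        {
            return it;
        }
    }
    return it;
}

int main()
{
    const OpMap versions{{1, "set_1"}, {7, "set_7"}};
    const auto it = find_op(9, versions); // 9 and 8 miss, 7 hits
    std::cout << (it != std::end(versions) ? it->second : "unsupported") << '\n';
}
```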
@@ -136,13 +136,51 @@ namespace ngraph
{
throw error::UnknownDomain{domain};
}
if (version > OperatorsBridge::LATEST_SUPPORTED_OPSET_VERSION)
{
NGRAPH_WARN << "Operator set version " << version
<< " is not supported; falling back to version "
<< OperatorsBridge::LATEST_SUPPORTED_OPSET_VERSION;
}
for (const auto& op : dm->second)
{
result.emplace(op.first, detail::find(op.first, version, domain, op.second));
const auto& it = detail::find(version, op.second);
if (it == std::end(op.second))
{
throw error::UnsupportedVersion{op.first, version, domain};
}
result.emplace(op.first, it->second);
}
return result;
}
bool OperatorsBridge::_is_operator_registered(const std::string& name,
std::int64_t version,
const std::string& domain)
{
// search for domain
auto dm_map = m_map.find(domain);
if (dm_map == std::end(m_map))
{
return false;
}
// search for name
auto op_map = dm_map->second.find(name);
if (op_map == std::end(dm_map->second))
{
return false;
}
// search for version
return detail::find(version, op_map->second) != std::end(op_map->second);
}
#define REGISTER_OPERATOR(name_, ver_, fn_) \
m_map[""][name_].emplace(ver_, std::bind(op::set_##ver_::fn_, std::placeholders::_1))
@@ -62,6 +62,8 @@ namespace ngraph
class OperatorsBridge
{
public:
static constexpr const int LATEST_SUPPORTED_OPSET_VERSION = ONNX_OPSET_VERSION;
OperatorsBridge(const OperatorsBridge&) = delete;
OperatorsBridge& operator=(const OperatorsBridge&) = delete;
OperatorsBridge(OperatorsBridge&&) = delete;
@@ -80,6 +82,13 @@ namespace ngraph
instance()._register_operator(name, version, domain, std::move(fn));
}
static bool is_operator_registered(const std::string& name,
std::int64_t version,
const std::string& domain)
{
return instance()._is_operator_registered(name, version, domain);
}
private:
std::unordered_map<std::string,
std::unordered_map<std::string, std::map<std::int64_t, Operator>>>
@@ -98,6 +107,9 @@ namespace ngraph
const std::string& domain,
Operator fn);
OperatorSet _get_operator_set(std::int64_t version, const std::string& domain);
bool _is_operator_registered(const std::string& name,
std::int64_t version,
const std::string& domain);
};
} // namespace onnx_import
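`OperatorsBridge` follows a common idiom: a Meyers singleton with deleted copy and move, reached only through static forwarding functions like `is_operator_registered` above. A minimal one-file sketch of the pattern:

```cpp
#include <iostream>

class Registry
{
public:
    Registry(const Registry&) = delete;
    Registry& operator=(const Registry&) = delete;

    static bool is_registered(int id) { return instance()._is_registered(id); }

private:
    Registry() = default;

    static Registry& instance()
    {
        static Registry inst; // constructed once; thread-safe since C++11
        return inst;
    }

    bool _is_registered(int id) const { return id == 42; }
};

int main()
{
    std::cout << std::boolalpha << Registry::is_registered(42) << '\n'; // true
}
```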
@@ -29,22 +29,30 @@
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/dequantize.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/pad.hpp"
#include "ngraph/op/quantize.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/slice.hpp"
#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
#include "ngraph/pattern/op/label.hpp"
#include "ngraph/util.hpp"
using namespace ngraph;
extern template ngraph::AxisVector
ngraph::apply_permutation<ngraph::AxisVector>(ngraph::AxisVector input,
ngraph::AxisVector order);
extern template ngraph::Shape ngraph::apply_permutation<ngraph::Shape>(ngraph::Shape input,
ngraph::AxisVector order);
using ReshapeMap = std::unordered_map<std::shared_ptr<Node>, std::shared_ptr<op::Reshape>>;
static std::string describe_reshape(std::shared_ptr<Node> node)
{
std::stringstream ss;
auto reshape = std::dynamic_pointer_cast<op::Reshape>(node);
ss << reshape->get_name()
<< " ( axis order = " << ngraph::vector_to_string(reshape->get_input_order())
<< " , shape = " << vector_to_string(reshape->get_shape()) << " ) "
<< " , child = " << reshape->get_argument(0)->get_name();
return ss.str();
}
static std::shared_ptr<op::Reshape> combine_reshapes(std::shared_ptr<op::Reshape> r1,
std::shared_ptr<op::Reshape> r2)
@@ -64,18 +72,6 @@ static void
target->get_inputs().at(input_index).replace_output(new_reshape->get_outputs().at(0));
}
std::string describe_reshape(std::shared_ptr<Node> node)
{
std::stringstream ss;
auto reshape = std::dynamic_pointer_cast<op::Reshape>(node);
ss << reshape->get_name()
<< " ( axis order = " << ngraph::vector_to_string(reshape->get_input_order())
<< " , shape = " << vector_to_string(reshape->get_shape()) << " ) "
<< " , child = " << reshape->get_argument(0)->get_name();
return ss.str();
}
static void delete_reshape(std::shared_ptr<Node> reshape)
{
NGRAPH_DEBUG << "Removing reshape " << reshape->get_name();
@@ -256,6 +252,7 @@ static void sink_reshape(std::shared_ptr<op::Reshape> reshape,
mark_reshape_for_deletion(orig_reshape, reshapes_to_delete);
// replace reshape with the combined one
ngraph::replace_node(reshape, new_reshape);
mark_reshape_for_deletion(new_reshape, reshapes_to_delete);
reorders[new_reshape] = new_reshape;
NGRAPH_DEBUG << "Combining " << describe_reshape(orig_reshape) << " and"
<< describe_reshape(reshape) << " into " << describe_reshape(new_reshape);
@@ -309,6 +306,61 @@ static void sink_binary(std::shared_ptr<op::util::BinaryElementwiseArithmetic> b
}
}
static void sink_slice(std::shared_ptr<op::Slice> n,
ReshapeMap& reorders,
std::set<std::shared_ptr<Node>>& reshapes_to_delete)
{
auto arg_reshape = reorders.at(n->get_argument(0));
auto order = arg_reshape->get_input_order();
// We need the correct input shape to produce the right output shape,
// so we create a label with that input shape; the new slice built on it
// will then have the right output shape.
auto def_order = ngraph::get_permutation_to_default_order(order);
auto input_shape = ngraph::apply_permutation(arg_reshape->get_shape(), def_order);
auto dummy_correct_shape =
std::make_shared<pattern::op::Label>(arg_reshape->get_element_type(), input_shape);
auto new_lower = ngraph::apply_permutation(n->get_lower_bounds(), def_order);
auto new_upper = ngraph::apply_permutation(n->get_upper_bounds(), def_order);
auto new_strides = ngraph::apply_permutation(n->get_strides(), def_order);
auto new_slice =
std::make_shared<op::Slice>(dummy_correct_shape, new_lower, new_upper, new_strides);
ngraph::replace_node(dummy_correct_shape, n->get_argument(0));
NGRAPH_DEBUG << "Replacing " << n->get_name() << " with " << new_slice->get_name();
ngraph::replace_node(n, new_slice);
auto new_reshape = std::make_shared<op::Reshape>(new_slice, order, n->get_shape());
NGRAPH_DEBUG << "Propagating " << describe_reshape(new_reshape) << " for " << n->get_name();
reorders[new_slice] = new_reshape;
}
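The bound remapping in `sink_slice` (and `sink_pad` below) hinges on `get_permutation_to_default_order`. A numeric sketch with simplified stand-ins for the ngraph helpers, using the NHWC/NCHW axis orders from the tests at the end of this PR:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

using AxisVector = std::vector<std::size_t>;

// Simplified stand-in for ngraph::get_permutation_to_default_order.
AxisVector permutation_to_default_order(const AxisVector& order)
{
    AxisVector def(order.size());
    for (std::size_t i = 0; i < order.size(); ++i)
    {
        def[order[i]] = i; // inverse permutation
    }
    return def;
}

// Simplified stand-in for ngraph::apply_permutation: output[i] = input[order[i]].
template <typename T>
T apply_permutation(const T& input, const AxisVector& order)
{
    T output(input.size());
    for (std::size_t i = 0; i < order.size(); ++i)
    {
        output[i] = input[order[i]];
    }
    return output;
}

int main()
{
    const AxisVector to_nchw{0, 3, 1, 2};                       // reshape's input order
    const auto def = permutation_to_default_order(to_nchw);     // {0, 2, 3, 1}
    const std::vector<std::size_t> lower_nchw{0, 0, 1, 1};      // slice bounds in NCHW
    const auto lower_nhwc = apply_permutation(lower_nchw, def); // {0, 1, 1, 0}
    for (auto v : lower_nhwc)
    {
        std::cout << v << ' ';
    }
    std::cout << '\n';
}
```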
static void sink_pad(std::shared_ptr<op::Pad> n,
ReshapeMap& reorders,
std::set<std::shared_ptr<Node>>& reshapes_to_delete)
{
auto arg_reshape = reorders.at(n->get_argument(0));
auto order = arg_reshape->get_input_order();
// We need the correct input shape to produce the right output shape,
// so we create a label with that input shape; the new pad built on it
// will then have the right output shape.
auto def_order = ngraph::get_permutation_to_default_order(order);
auto input_shape = ngraph::apply_permutation(arg_reshape->get_shape(), def_order);
auto dummy_correct_shape =
std::make_shared<pattern::op::Label>(arg_reshape->get_element_type(), input_shape);
auto new_lower = ngraph::apply_permutation(n->get_padding_below(), def_order);
auto new_upper = ngraph::apply_permutation(n->get_padding_above(), def_order);
auto new_interior = ngraph::apply_permutation(n->get_padding_interior(), def_order);
auto new_pad = std::make_shared<op::Pad>(
dummy_correct_shape, n->get_argument(1), new_lower, new_upper, new_interior);
ngraph::replace_node(dummy_correct_shape, n->get_argument(0));
NGRAPH_DEBUG << "Replacing " << n->get_name() << " with " << new_pad->get_name();
ngraph::replace_node(n, new_pad);
auto new_reshape = std::make_shared<op::Reshape>(new_pad, order, n->get_shape());
NGRAPH_DEBUG << "Propagating " << describe_reshape(new_reshape) << " for " << n->get_name();
reorders[new_pad] = new_reshape;
}
static void sink_quantize(std::shared_ptr<op::Quantize> quantize,
ReshapeMap& reorders,
std::set<std::shared_ptr<Node>>& reshapes_to_delete)
@@ -419,6 +471,14 @@ bool ngraph::pass::ReshapeSinking::run_on_function(std::shared_ptr<ngraph::Funct
{
sink_dequantize(dequantize, reorders, reshapes_to_delete);
}
else if (auto slice = std::dynamic_pointer_cast<op::Slice>(n))
{
sink_slice(slice, reorders, reshapes_to_delete);
}
else if (auto pad = std::dynamic_pointer_cast<op::Pad>(n))
{
sink_pad(pad, reorders, reshapes_to_delete);
}
else
{
materialize_shapes(n, reorders, reshapes_to_delete);
@@ -17,6 +17,7 @@
#pragma once
#include "ngraph/pass/pass.hpp"
#include "ngraph/util.hpp"
namespace ngraph
{
@@ -29,3 +30,17 @@ namespace ngraph
};
}
}
extern template ngraph::AxisVector
ngraph::apply_permutation<ngraph::AxisVector>(ngraph::AxisVector input,
ngraph::AxisVector order);
extern template ngraph::Coordinate
ngraph::apply_permutation<ngraph::Coordinate>(ngraph::Coordinate input,
ngraph::AxisVector order);
extern template ngraph::Strides
ngraph::apply_permutation<ngraph::Strides>(ngraph::Strides input, ngraph::AxisVector order);
extern template ngraph::Shape ngraph::apply_permutation<ngraph::Shape>(ngraph::Shape input,
ngraph::AxisVector order);
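For reference, the extern-template pattern at work here: the header declares the instantiations so every includer suppresses its own copy, and util.cpp (see the hunk below) provides the single explicit instantiation. A compilable one-file sketch of the idiom, with both halves in one translation unit:

```cpp
#include <iostream>

template <typename T>
T twice(T v)
{
    return v + v;
}

// Header side: declare the instantiation, suppressing implicit ones.
extern template int twice<int>(int);

// Source side (util.cpp in this PR): the one explicit instantiation.
template int twice<int>(int);

int main()
{
    std::cout << twice(21) << '\n'; // 42
}
```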
@@ -145,3 +145,8 @@ vector<runtime::PerformanceCounter>
}
return rc;
}
bool runtime::cpu::CPU_Backend::is_supported(const Node& op) const
{
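// Currently reports every op as supported on the CPU backend.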
return true;
}
@@ -58,6 +58,8 @@ namespace ngraph
std::vector<PerformanceCounter>
get_performance_data(std::shared_ptr<Function> func) const override;
bool is_supported(const Node& node) const override;
private:
class FunctionInstance
{
@@ -1982,10 +1982,10 @@ void runtime::cpu::CPU_ExternalFunction::build()
file_util::path_join(s_debug_dir, m_function_name + "_debug.txt");
std::stringstream ss;
ss << "EXECUTION PLAN:\n";
ss << "\nEXECUTION PLAN:\n";
for (size_t i = 0; i < functors.size(); i++)
{
ss << op_names.at(i) << "will be executed with the following inputs:\n";
ss << op_names.at(i) << " will be executed with the following inputs:\n";
for (auto is : this->m_op_attrs.at(i).Inputs)
{
ss << "\t" << is << " = " << this->get_tensor_data(is) << std::endl;
@@ -18,13 +18,10 @@
#include "ngraph/graph_util.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/runtime/gpu/gpu_backend.hpp"
#include "ngraph/runtime/gpu/gpu_tensor.hpp"
#include "ngraph/runtime/host_tensor.hpp"
#include "ngraph/runtime/hybrid/hybrid_util.hpp"
#include "ngraph/runtime/hybrid/pass/assign_placement.hpp"
#include "ngraph/runtime/hybrid/pass/fix_get_output_element.hpp"
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "ngraph/runtime/tensor.hpp"
using namespace ngraph;
@@ -205,32 +202,6 @@ bool runtime::hybrid::HybridBackend::is_supported(const Node& node) const
return true;
}
string runtime::hybrid::HybridBackend::get_placement_name(const runtime::Tensor* t)
{
string rc;
if (dynamic_cast<const runtime::HostTensor*>(t) != nullptr)
{
rc = "HostTensor";
}
else if (dynamic_cast<const runtime::gpu::GPUTensor*>(t) != nullptr)
{
rc = "GPUTensor";
}
return rc;
}
string runtime::hybrid::HybridBackend::get_placement_name(const runtime::Backend* t)
{
string rc;
if (dynamic_cast<const runtime::interpreter::INTBackend*>(t) != nullptr)
{
rc = "INTBackend";
}
else if (dynamic_cast<const runtime::gpu::GPU_Backend*>(t) != nullptr)
{
rc = "GPU_Backend";
}
return rc;
}
size_t runtime::hybrid::HybridBackend::get_placement(const runtime::Tensor* t)
{
size_t index = 0;
@@ -70,7 +70,5 @@ private:
std::map<std::shared_ptr<ngraph::Function>, FunctionInstance> m_function_map;
std::vector<std::shared_ptr<runtime::Backend>> m_backend_list;
std::string get_placement_name(const runtime::Tensor* t);
std::string get_placement_name(const runtime::Backend* t);
size_t get_placement(const runtime::Tensor* t);
};
@@ -478,6 +478,10 @@ T ngraph::apply_permutation(T input, AxisVector order)
template AxisVector ngraph::apply_permutation<AxisVector>(AxisVector input, AxisVector order);
template Shape ngraph::apply_permutation<Shape>(Shape input, AxisVector order);
template ngraph::Coordinate ngraph::apply_permutation<ngraph::Coordinate>(ngraph::Coordinate input,
ngraph::AxisVector order);
template ngraph::Strides ngraph::apply_permutation<ngraph::Strides>(ngraph::Strides input,
ngraph::AxisVector order);
AxisVector ngraph::get_default_order(const Shape& shape)
{
@@ -65,13 +65,6 @@ set(SRC
set_source_files_properties(includes.cpp PROPERTIES COMPILE_DEFINITIONS
NGRAPH_INCLUDES="${PROJECT_SOURCE_DIR}/src/ngraph")
if (NGRAPH_ONNX_IMPORT_ENABLE)
list(APPEND SRC onnx_import.cpp)
if (NGRAPH_ONNXIFI_ENABLE)
list(APPEND SRC onnxifi.cpp onnxifi_span.cpp)
endif()
endif()
if (NGRAPH_INTERPRETER_ENABLE)
list(APPEND SRC
backend_debug_api.cpp
@@ -140,13 +133,22 @@ set(MULTI_TEST_SRC
backend_unary_elementwise.in.cpp
convolution_test.in.cpp
)
if(NGRAPH_DISTRIBUTED_ENABLE)
list(APPEND MULTI_TEST_SRC distributed.in.cpp)
endif()
if (NGRAPH_CPU_ENABLE)
list(APPEND MULTI_TEST_SRC backend_graph_comparison.in.cpp)
endif()
if (NGRAPH_ONNX_IMPORT_ENABLE)
list(APPEND MULTI_TEST_SRC onnx_import.in.cpp)
if (NGRAPH_ONNXIFI_ENABLE)
list(APPEND SRC onnxifi.cpp onnxifi_span.cpp)
endif()
endif()
foreach(BACKEND_NAME ${ACTIVE_BACKEND_LIST})
# Some---but not all---autodiff tests go through multiple iterations with
# different random seeds. On the CPU backend this is currently very slow
@@ -163,3 +163,43 @@ TEST(reshape_sinking, nasnet_pooladd)
size_t before_after = count_ops_of_type<op::Reshape>(func);
ASSERT_LE(before_after, before_count);
}
TEST(reshape_sinking, slice_pad)
{
Shape shape_a{100, 8, 8, 1};
AxisVector to_nhwc{0, 2, 3, 1};
AxisVector to_nchw{0, 3, 1, 2};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
auto pad_value = op::Constant::create<float>(element::f32, Shape{}, std::vector<float>{0.0f});
Shape padding_below{0, 0, 0, 0};
Shape padding_above{0, 1, 1, 0};
Shape padding_interior{0, 0, 0, 0};
auto reshape1 = make_shared<op::Reshape>(A, to_nchw, Shape{100, 1, 8, 8});
auto maxpool =
make_shared<op::MaxPool>(reshape1, Shape{1, 1}, Strides{2, 2}, Shape{0, 0}, Shape{0, 0});
auto reshape2 = make_shared<op::Reshape>(maxpool, to_nhwc, Shape{100, 4, 4, 1});
auto pad =
make_shared<op::Pad>(reshape2, pad_value, padding_below, padding_above, padding_interior);
auto slice = make_shared<op::Slice>(
pad, Coordinate{0, 1, 1, 0}, Coordinate{100, 5, 5, 1}, Strides{1, 1, 1, 1});
auto reshape3 = make_shared<op::Reshape>(slice, to_nchw, Shape{100, 1, 4, 4});
auto avgpool = make_shared<op::AvgPool>(reshape3, Shape{1, 1}, Strides{2, 2});
auto reshape4 = make_shared<op::Reshape>(avgpool, to_nhwc, Shape{100, 1, 2, 2});
auto f = make_shared<Function>(reshape4, ParameterVector{A});
pass::Manager pass_manager;
size_t before_count = count_ops_of_type<op::Reshape>(f);
pass_manager.register_pass<pass::VisualizeTree>("before.pdf");
pass_manager.register_pass<pass::ReshapeSinking>();
pass_manager.register_pass<pass::ReshapeElimination>();
pass_manager.register_pass<pass::CommonSubexpressionElimination>();
pass_manager.register_pass<pass::VisualizeTree>("after.pdf");
pass_manager.run_passes(f);
size_t before_after = count_ops_of_type<op::Reshape>(f);
ASSERT_LE(before_after, before_count);
}