Unverified Commit af6b218c authored by mchrusci, committed by GitHub

Merge branch 'master' into mchrusci/onnx_ci_fix

parents a32857e9 76a0e185
......@@ -18,11 +18,6 @@ add_executable(mnist_mlp mnist_loader.cpp mnist_mlp.cpp)
add_dependencies(mnist_mlp ngraph cpu_backend)
target_link_libraries(mnist_mlp ngraph cpu_backend)
if (NGRAPH_DISTRIBUTED_ENABLE)
find_package(MPI REQUIRED)
add_definitions(-DNGRAPH_DISTRIBUTED)
include_directories(SYSTEM ${MPI_C_INCLUDE_PATH} ${MPI_CXX_INCLUDE_PATH})
link_directories(${MPI_C_LIBRARIES} ${MPI_CXX_LIBRARIES})
link_libraries(${MPI_CXX_LIBRARIES})
add_executable(dist_mnist_mlp mnist_loader.cpp dist_mnist_mlp.cpp)
add_dependencies(dist_mnist_mlp ngraph cpu_backend)
target_link_libraries(dist_mnist_mlp ngraph cpu_backend)
......
......@@ -20,13 +20,13 @@
#include <list>
#include <math.h>
#include <memory>
#include <mpi.h>
#include <random>
#include <set>
#include <stdexcept>
#include <string>
#include <ngraph/autodiff/adjoints.hpp>
#include <ngraph/distributed.hpp>
#include <ngraph/graph_util.hpp>
#include <ngraph/ngraph.hpp>
......@@ -109,7 +109,7 @@ float test_accuracy(MNistDataLoader& loader,
int main(int argc, const char* argv[])
{
MPI::Init();
ngraph::Distributed dist;
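// RAII: the Distributed destructor finalizes MPI, so no explicit
// MPI::Finalize() call is needed at the end of main().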
size_t epochs = 5;
size_t batch_size = 128;
......@@ -291,7 +291,5 @@ int main(int argc, const char* argv[])
}
}
MPI::Finalize();
return 0;
}
......@@ -22,9 +22,8 @@ To deploy data-parallel training on backends supported by nGraph API, the
:lines: 180-196
:emphasize-lines: 9-12
Also since we are using OpenMPI in this example, we need to initialize and
finalize MPI with ``MPI::Init();`` and ``MPI::Finalize();`` at the beginning
and the end of the code used to deploy to devices; see the `full raw code`_.
We need to initialize and finalize distributed training with a ``Distributed`` object;
see the `full raw code`_.
Finally, to run the training using two nGraph devices, invoke :command:`mpirun`.
This will launch two nGraph CPU backends.
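As a hedged sketch of the pattern this merge introduces (not part of the diff itself): the ``Distributed`` object added here is an RAII handle, so a program that previously called ``MPI::Init()`` and ``MPI::Finalize()`` explicitly now only needs to construct one object near the top of ``main()``:

.. code-block:: cpp

   #include <iostream>
   #include <ngraph/distributed.hpp>

   int main()
   {
       // Construction initializes MPI if no prior MPI_Init has run;
       // the destructor finalizes MPI when `dist` goes out of scope.
       ngraph::Distributed dist;

       // Rank/size queries (backed by MPI_COMM_WORLD) let each process
       // pick its shard of the training data.
       std::cout << "rank " << dist.get_rank() << " of "
                 << dist.get_size() << std::endl;

       // ... build the model and run data-parallel training here ...
       return 0;
   }

Launched with something like ``mpirun -np 2 dist_mnist_mlp`` (exact flags depend on your MPI installation), each of the two CPU backends sees its own rank.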
......@@ -36,4 +35,4 @@ This will launch two nGraph CPU backends.
.. _OpenMPI: https://www.open-mpi.org/software/ompi/v3.1
.. _full raw code: https://github.com/NervanaSystems/ngraph/blob/master/doc/examples/mnist_mlp/dist_mnist_mlp.cpp
\ No newline at end of file
.. _full raw code: https://github.com/NervanaSystems/ngraph/blob/master/doc/examples/mnist_mlp/dist_mnist_mlp.cpp
......@@ -84,7 +84,6 @@ set (SRC
op/reduce.cpp
op/reduce_window.cpp
op/relu.cpp
op/remainder.cpp
op/replace_slice.cpp
op/reshape.cpp
op/result.cpp
......@@ -151,23 +150,16 @@ set (SRC
cpio.cpp
)
add_subdirectory(frontend)
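# When distributed training is enabled, pull in MPI and compile the
# Distributed wrapper (distributed.cpp) into the library.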
if(NGRAPH_DISTRIBUTED_ENABLE)
find_package(MPI REQUIRED)
add_definitions(-DNGRAPH_DISTRIBUTED)
include_directories(SYSTEM ${MPI_C_INCLUDE_PATH} ${MPI_CXX_INCLUDE_PATH})
link_directories(${MPI_C_LIBRARIES} ${MPI_CXX_LIBRARIES})
link_libraries(${MPI_CXX_LIBRARIES})
set (SRC distributed.cpp ${SRC})
endif()
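# Scan the op headers for class declarations and write the collected op
# class names (skipping std bases and the Op base class itself) to ops_list.txt.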
message(STATUS ${CMAKE_CURRENT_SOURCE_DIR}/op)
file(GLOB OPS "${CMAKE_CURRENT_SOURCE_DIR}/op/" "${CMAKE_CURRENT_SOURCE_DIR}/op/*.hpp")
foreach(OP ${OPS})
file(STRINGS ${OP} OP_CLASS REGEX "class [A-Za-z0-9_]+ :")
foreach(LINE ${OP_CLASS})
string(REGEX REPLACE ".*class ([A-Za-z0-9_]+) : public ([A-Za-z0-9_]+).*" "\\1" CLASS_FOUND ${LINE})
string(REGEX REPLACE ".*class ([A-Za-z0-9_]+) : public ([A-Za-z0-9_]+).*" "\\2" BASE_FOUND ${LINE})
if (NOT ${BASE_FOUND} STREQUAL "std" AND NOT ${CLASS_FOUND} STREQUAL "Op")
set(OP_CLASS_LIST ${OP_CLASS_LIST} ${CLASS_FOUND})
endif()
endforeach(LINE ${OP_CLASS})
endforeach()
message(STATUS "${CMAKE_CURRENT_BINARY_DIR}/ops_list.txt")
string(REPLACE ";" "\n" OP_CLASS_LINES "${OP_CLASS_LIST}")
file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/ops_list.txt" "${OP_CLASS_LINES}")
add_subdirectory(frontend)
find_package(Graphviz QUIET)
if (GRAPHVIZ_FOUND)
......
/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#ifdef NGRAPH_DISTRIBUTED
#include "ngraph/distributed.hpp"
#include <mpi.h>
using namespace ngraph;
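// RAII wrapper around the MPI runtime: construction initializes MPI,
// guarding against double initialization via MPI_Initialized.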
ngraph::Distributed::Distributed()
{
int flag = 0;
MPI_Initialized(&flag);
if (!flag)
{
MPI_Init(NULL, NULL);
}
}
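// Finalize MPI when the object goes out of scope.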
ngraph::Distributed::~Distributed()
{
MPI_Finalize();
}
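// Size and rank are reported for MPI_COMM_WORLD.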
int ngraph::Distributed::get_size() const
{
int size;
MPI_Comm_size(MPI_COMM_WORLD, &size);
return size;
}
int ngraph::Distributed::get_rank() const
{
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
return rank;
}
#endif
/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
namespace ngraph
{
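/// \brief RAII handle for the MPI runtime: the constructor initializes
/// MPI (if not already initialized) and the destructor finalizes it.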
class Distributed
{
public:
Distributed();
~Distributed();
int get_size() const;
int get_rank() const;
};
}
......@@ -107,7 +107,6 @@
#include "ngraph/op/reduce.hpp"
#include "ngraph/op/reduce_window.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/remainder.hpp"
#include "ngraph/op/replace_slice.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/reverse.hpp"
......
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "ngraph/op/remainder.hpp"
using namespace std;
using namespace ngraph;
op::Remainder::Remainder(const shared_ptr<Node>& arg0, const shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic("Remainder", arg0, arg1)
{
constructor_validate_and_infer_types();
}
shared_ptr<Node> op::Remainder::copy_with_new_args(const NodeVector& new_args) const
{
check_new_args_count(this, new_args);
return make_shared<Remainder>(new_args.at(0), new_args.at(1));
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
namespace ngraph
{
namespace op
{
/// \brief (NOT IMPLEMENTED) Elementwise remainder operation.
///
/// (TODO: Get a bit more clarity on this: is it just "mod"? What about negative numbers and floats?)
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \mod \texttt{arg1}[i_1,\dots,i_n]\f$ |
class Remainder : public util::BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a remainder operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Remainder(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
};
}
}
......@@ -46,7 +46,6 @@
#include "ngraph/op/power.hpp"
#include "ngraph/op/product.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/remainder.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/sigmoid.hpp"
#include "ngraph/op/sign.hpp"
......@@ -162,7 +161,6 @@ static std::unordered_map<std::type_index,
{TI(op::Minimum), cse_binarywise},
{TI(op::Multiply), cse_binarywise},
{TI(op::Power), cse_binarywise},
//{TI(op::Remainder), cse_binarywise},
{TI(op::Subtract), cse_binarywise},
{TI(op::Sum), cse_reduction},
{TI(op::Product), cse_reduction},
......
......@@ -74,7 +74,6 @@
#include "ngraph/op/reduce.hpp"
#include "ngraph/op/reduce_window.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/remainder.hpp"
#include "ngraph/op/replace_slice.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/result.hpp"
......
......@@ -97,7 +97,6 @@
#include "ngraph/op/reduce.hpp"
#include "ngraph/op/reduce_window.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/remainder.hpp"
#include "ngraph/op/replace_slice.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/result.hpp"
......
......@@ -1373,7 +1373,8 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_bounded_relu()
auto pattern_map = m.get_pattern_map();
if (!std::dynamic_pointer_cast<op::Constant>(pattern_map[alpha]))
{
throw ngraph_error("alpha must be constant for bounded relu");
NGRAPH_DEBUG << "alpha must be constant for bounded relu";
return false;
}
// we won't fuse if the alpha and the Relu output element types are not the same
......
......@@ -75,7 +75,6 @@
#include "ngraph/op/reduce.hpp"
#include "ngraph/op/reduce_window.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/remainder.hpp"
#include "ngraph/op/replace_slice.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/result.hpp"
......
......@@ -79,7 +79,6 @@
#include "ngraph/op/reduce.hpp"
#include "ngraph/op/reduce_window.hpp"
#include "ngraph/op/relu.hpp"
#include "ngraph/op/remainder.hpp"
#include "ngraph/op/replace_slice.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/result.hpp"
......
......@@ -310,11 +310,6 @@ TEST(copy, reduce)
EXPECT_TRUE(axes == node_cast->get_reduction_axes());
}
TEST(copy, remainder)
{
ASSERT_TRUE(check_binary<op::Remainder>());
}
TEST(copy, reshape)
{
Shape shape_in{2, 3, 4};
......
......@@ -36,12 +36,11 @@ TEST(distributed_${BACKEND_NAME}, allreduce)
auto f = make_shared<Function>(make_shared<op::AllReduce>(A), op::ParameterVector{A});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
auto v = vector<float>{1, 2, 3, 4};
int comm_size;
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
auto v = vector<float>{1, 2, 3, 4};
auto a = backend->create_tensor(element::f32, shape);
copy_data(a, vector<float>{1, 2, 3, 4});
......
......@@ -103,7 +103,6 @@ TEST(DISABLED_include, complete)
"ngraph/op/reduce.hpp",
"ngraph/op/reduce_window.hpp",
"ngraph/op/relu.hpp",
"ngraph/op/remainder.hpp",
"ngraph/op/replace_slice.hpp",
"ngraph/op/reshape.hpp",
"ngraph/op/reverse.hpp",
......
......@@ -23,28 +23,14 @@
using namespace std;
#ifdef NGRAPH_DISTRIBUTED
#include <mpi.h>
class MpiEnvironment : public ::testing::Environment
{
protected:
virtual void SetUp()
{
int flag = 0;
MPI_Initialized(&flag);
if (!flag)
{
MPI::Init();
}
}
virtual void TearDown() { MPI::Finalize(); }
virtual ~MpiEnvironment() {}
};
#include "ngraph/distributed.hpp"
#endif
int main(int argc, char** argv)
{
#ifdef NGRAPH_DISTRIBUTED
ngraph::Distributed dist;
#endif
const char* exclude = "--gtest_filter=-benchmark.*";
vector<char*> argv_vector;
argv_vector.push_back(argv[0]);
......@@ -56,9 +42,6 @@ int main(int argc, char** argv)
argc++;
::testing::InitGoogleTest(&argc, argv_vector.data());
#ifdef NGRAPH_DISTRIBUTED
::testing::AddGlobalTestEnvironment(new MpiEnvironment);
#endif
int rc = RUN_ALL_TESTS();
return rc;
......