Unverified Commit 621df65a authored by Ashok Emani's avatar Ashok Emani Committed by GitHub

Merge branch 'master' into examples

parents 7d160dad 607bcbc4
......@@ -208,19 +208,8 @@ void codegen::StaticCompiler::initialize()
}
// Enable various target features
// Most of these are for Eigen
auto& TO = m_compiler->getInvocation().getTargetOpts();
TO.CPU = sys::getHostCPUName();
TO.FeaturesAsWritten.emplace_back("+sse");
TO.FeaturesAsWritten.emplace_back("+sse2");
TO.FeaturesAsWritten.emplace_back("+sse3");
TO.FeaturesAsWritten.emplace_back("+ssse3");
TO.FeaturesAsWritten.emplace_back("+sse4.1");
TO.FeaturesAsWritten.emplace_back("+sse4.2");
TO.FeaturesAsWritten.emplace_back("+avx");
TO.FeaturesAsWritten.emplace_back("+avx2");
TO.FeaturesAsWritten.emplace_back("+fma");
}
codegen::StaticCompiler::~StaticCompiler()
......
......@@ -144,42 +144,6 @@ void Node::set_name(const string& name)
}
}
// Verifies that this node's argument list matches `b` element-for-element
// (shared_ptr identity, the same comparison the original index loop made).
// On mismatch, dumps both lists to stdout for debugging and throws.
//
// NOTE(review): the thrown value is a string literal, not a std::exception
// subclass; kept as-is so any existing catch sites keep working.
void Node::assert_argument_list_equivalency(const Nodes& b)
{
    // vector::operator== performs the size check plus element-wise
    // shared_ptr comparison in one step, with early exit on first mismatch.
    if (this->m_arguments == b)
    {
        return;
    }

    // Mismatch: print both argument lists to aid debugging.
    std::cout << "node = " << this->get_name() << std::endl;
    std::cout << "m_arguments" << std::endl;
    for (const auto& arg : this->m_arguments)
    {
        std::cout << "arg = " << arg->get_name() << std::endl;
    }
    std::cout << "results" << std::endl;
    for (const auto& arg : b)
    {
        std::cout << "arg = " << arg->get_name() << std::endl;
    }

    throw "Arguments aren't equal";
}
std::shared_ptr<Node> Node::get_input_op(size_t index)
{
for (auto arg : m_arguments)
......@@ -201,7 +165,10 @@ Nodes Node::get_input_ops() //const
result.push_back(i.get_output().get_node());
}
}
assert_argument_list_equivalency(result);
if (m_arguments != result)
{
throw ngraph_error("Arguments aren't equal: different values");
}
return result;
}
......
......@@ -170,8 +170,6 @@ namespace ngraph
protected:
void add_output(const element::Type& element_type, const Shape& shape);
void assert_argument_list_equivalency(const Nodes& b);
bool test_identical(const Node&) const;
std::string m_node_type;
std::multiset<Node*> m_users;
......
This diff is collapsed.
......@@ -24,12 +24,12 @@
#include "ngraph/runtime/cpu/cpu_external_function.hpp"
#include "ngraph/runtime/cpu/cpu_tensor_view_wrapper.hpp"
// Expands to the parameter list shared by every CPU emitter entry point:
// the external function being compiled, the code writer accumulating the
// generated source, the graph node being emitted, and the wrapped
// input/output tensor views.
#define EMITTER_DECL(E) \
E(ngraph::runtime::cpu::CPU_ExternalFunction* external_function, \
codegen::CodeWriter& writer, \
const ngraph::Node* node, \
const std::vector<ngraph::runtime::cpu::TensorViewWrapper>& args, \
const std::vector<ngraph::runtime::cpu::TensorViewWrapper>& out)
// Replacement form: declares the templated emit<op_name> overload with the
// same argument list (shorter type names rely on the enclosing namespaces).
// NOTE(review): this redefines EMITTER_DECL without an intervening #undef,
// which compilers warn about -- the first definition looks like the
// pre-refactor form retained by this diff; confirm only one survives.
#define EMITTER_DECL(op_name) \
emit<op_name>(CPU_ExternalFunction * external_function, \
codegen::CodeWriter & writer, \
const ngraph::Node* node, \
const std::vector<TensorViewWrapper>& args, \
const std::vector<TensorViewWrapper>& out)
namespace ngraph
{
......@@ -40,72 +40,25 @@ namespace ngraph
class CPU_Emitter
{
public:
static void EMITTER_DECL(EmitNop);
static void EMITTER_DECL(EmitAdd);
#ifdef NGRAPH_DISTRIBUTED
static void EMITTER_DECL(EmitAllReduce);
#endif
static void EMITTER_DECL(EmitDot);
static void EMITTER_DECL(EmitMultiply);
static void EMITTER_DECL(EmitGetOutputElement);
static void EMITTER_DECL(EmitXLAGetTupleElement);
static void EMITTER_DECL(EmitTuple);
static void EMITTER_DECL(EmitAbs);
static void EMITTER_DECL(EmitConcat);
static void EMITTER_DECL(EmitDivide);
static void EMITTER_DECL(EmitEqual);
static void EMITTER_DECL(EmitGreater);
static void EMITTER_DECL(EmitGreaterEq);
static void EMITTER_DECL(EmitLess);
static void EMITTER_DECL(EmitLessEq);
static void EMITTER_DECL(EmitLog);
static void EMITTER_DECL(EmitMaximum);
static void EMITTER_DECL(EmitMinimum);
static void EMITTER_DECL(EmitNegative);
static void EMITTER_DECL(EmitNotEqual);
static void EMITTER_DECL(EmitSelect);
static void EMITTER_DECL(EmitSubtract);
static void EMITTER_DECL(EmitBroadcast);
static void EMITTER_DECL(EmitMatmulBias);
static void EMITTER_DECL(EmitConvert);
static void EMITTER_DECL(EmitConstant);
static void EMITTER_DECL(EmitReshape);
static void EMITTER_DECL(EmitFunctionCall);
static void EMITTER_DECL(EmitReduce);
static void EMITTER_DECL(EmitSign);
static void EMITTER_DECL(EmitSlice);
static void EMITTER_DECL(EmitSum);
static void EMITTER_DECL(EmitExp);
static void EMITTER_DECL(EmitSin);
static void EMITTER_DECL(EmitSinh);
static void EMITTER_DECL(EmitCos);
static void EMITTER_DECL(EmitCosh);
static void EMITTER_DECL(EmitTan);
static void EMITTER_DECL(EmitTanh);
static void EMITTER_DECL(EmitAsin);
static void EMITTER_DECL(EmitAcos);
static void EMITTER_DECL(EmitAtan);
static void EMITTER_DECL(EmitPower);
static void EMITTER_DECL(EmitReplaceSlice);
static void EMITTER_DECL(EmitOneHot);
static void EMITTER_DECL(EmitFloor);
static void EMITTER_DECL(EmitCeiling);
static void EMITTER_DECL(EmitSqrt);
static void EMITTER_DECL(EmitConvolution);
static void EMITTER_DECL(EmitConvolutionBackpropFilters);
static void EMITTER_DECL(EmitConvolutionBackpropData);
static void EMITTER_DECL(EmitNot);
static void EMITTER_DECL(EmitMaxPool);
static void EMITTER_DECL(EmitReverse);
static void EMITTER_DECL(EmitReduceWindow);
static void EMITTER_DECL(EmitSelectAndScatter);
static void EMITTER_DECL(EmitAvgPool);
static void EMITTER_DECL(EmitAvgPoolBackprop);
static void EMITTER_DECL(EmitPad);
static void EMITTER_DECL(EmitBatchNorm);
static void EMITTER_DECL(EmitMaxPoolBackprop);
template <typename OP>
static void emit(CPU_ExternalFunction* external_function,
codegen::CodeWriter& writer,
const ngraph::Node* node,
const std::vector<TensorViewWrapper>& args,
const std::vector<TensorViewWrapper>& out)
{
throw std::runtime_error("Unimplemented op in CPU emitter");
}
static void EmitMKLDNNPreamble(codegen::CodeWriter& writer);
// No-op emitter: dispatched for ops that require no generated code.
// All arguments are intentionally ignored; the body is deliberately empty.
static void nop(CPU_ExternalFunction* external_function,
codegen::CodeWriter& writer,
const ngraph::Node* node,
const std::vector<TensorViewWrapper>& args,
const std::vector<TensorViewWrapper>& out)
{
}
static void emit_mkldnn_preamble(codegen::CodeWriter& writer);
private:
static std::string emit_vector(const TensorViewWrapper&,
......
......@@ -74,6 +74,18 @@ TEST(benchmark, mxnet_seq2seq_backward)
run_benchmark(json_path, "CPU", 10);
}
// Benchmarks the forward pass of the serialized Sockeye seq2seq model on
// the CPU backend for 10 iterations.
TEST(benchmark, mxnet_sockeye_seq2seq_forward)
{
    const string model_path =
        file_util::path_join(SERIALIZED_ZOO, "mxnet/Sockeye_Seq2Seq_forward.json");
    run_benchmark(model_path, "CPU", 10);
}
// Benchmarks the backward pass of the serialized Sockeye seq2seq model on
// the CPU backend for 10 iterations.
TEST(benchmark, mxnet_sockeye_seq2seq_backward)
{
    const string model_path =
        file_util::path_join(SERIALIZED_ZOO, "mxnet/Sockeye_Seq2Seq_backward.json");
    run_benchmark(model_path, "CPU", 10);
}
//
// Benchmarks a graph that concatenates six 32x1x200 arrays along the middle axis.
//
......
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment