Commit b2259d88 authored by Robert Kimball's avatar Robert Kimball Committed by Scott Cyphers

get backend working again (#2547)

parent bd869cb8
...@@ -26,7 +26,7 @@ if (NGRAPH_GENERIC_CPU_ENABLE) ...@@ -26,7 +26,7 @@ if (NGRAPH_GENERIC_CPU_ENABLE)
VERSION ${NGRAPH_VERSION} VERSION ${NGRAPH_VERSION}
SOVERSION ${NGRAPH_API_VERSION}) SOVERSION ${NGRAPH_API_VERSION})
endif() endif()
target_link_libraries(gcpu_backend PRIVATE ngraph libeigen hybrid_base interpreter_backend) target_link_libraries(gcpu_backend PRIVATE ngraph libeigen)
target_compile_options(gcpu_backend PUBLIC -fopenmp) target_compile_options(gcpu_backend PUBLIC -fopenmp)
install(TARGETS gcpu_backend install(TARGETS gcpu_backend
......
...@@ -44,6 +44,7 @@ ...@@ -44,6 +44,7 @@
#include "ngraph/op/min.hpp" #include "ngraph/op/min.hpp"
#include "ngraph/op/one_hot.hpp" #include "ngraph/op/one_hot.hpp"
#include "ngraph/op/pad.hpp" #include "ngraph/op/pad.hpp"
#include "ngraph/op/passthrough.hpp"
#include "ngraph/op/product.hpp" #include "ngraph/op/product.hpp"
#include "ngraph/op/quantize.hpp" #include "ngraph/op/quantize.hpp"
#include "ngraph/op/replace_slice.hpp" #include "ngraph/op/replace_slice.hpp"
...@@ -126,7 +127,6 @@ ...@@ -126,7 +127,6 @@
#include "ngraph/runtime/reference/softmax.hpp" #include "ngraph/runtime/reference/softmax.hpp"
#include "ngraph/runtime/reference/sqrt.hpp" #include "ngraph/runtime/reference/sqrt.hpp"
#include "ngraph/runtime/reference/subtract.hpp" #include "ngraph/runtime/reference/subtract.hpp"
#include "ngraph/runtime/reference/sum.hpp"
#include "ngraph/runtime/reference/tan.hpp" #include "ngraph/runtime/reference/tan.hpp"
#include "ngraph/runtime/reference/tanh.hpp" #include "ngraph/runtime/reference/tanh.hpp"
#include "ngraph/runtime/reference/topk.hpp" #include "ngraph/runtime/reference/topk.hpp"
...@@ -929,6 +929,11 @@ private: ...@@ -929,6 +929,11 @@ private:
pad->get_padding_interior()); pad->get_padding_interior());
break; break;
} }
case OP_TYPEID::Passthrough:
{
const op::Passthrough* passthrough = static_cast<const op::Passthrough*>(&node);
throw unsupported_op{"Unsupported operation language: " + passthrough->language()};
}
case OP_TYPEID::Power: case OP_TYPEID::Power:
{ {
size_t element_count = shape_size(node.get_output_shape(0)); size_t element_count = shape_size(node.get_output_shape(0));
...@@ -1003,8 +1008,7 @@ private: ...@@ -1003,8 +1008,7 @@ private:
case OP_TYPEID::QuantizedConvolution: case OP_TYPEID::QuantizedConvolution:
case OP_TYPEID::QuantizedMaxPool: case OP_TYPEID::QuantizedMaxPool:
{ {
throw unsupported_op("Unsupported op '" + node.description() + throw unsupported_op("Unsupported op '" + node.description() + "'.");
"' in Interpreter back end.");
} }
case OP_TYPEID::Relu: case OP_TYPEID::Relu:
{ {
......
...@@ -33,6 +33,7 @@ namespace ngraph ...@@ -33,6 +33,7 @@ namespace ngraph
{ {
namespace kernel namespace kernel
{ {
#ifdef PARALLEL
static std::tuple<size_t, size_t> get_start_finish(size_t size) static std::tuple<size_t, size_t> get_start_finish(size_t size)
{ {
const size_t nthreads = omp_get_num_threads(); const size_t nthreads = omp_get_num_threads();
...@@ -41,6 +42,7 @@ namespace ngraph ...@@ -41,6 +42,7 @@ namespace ngraph
const size_t finish = (ithread + 1) * size / nthreads; const size_t finish = (ithread + 1) * size / nthreads;
return std::make_tuple(start, finish); return std::make_tuple(start, finish);
} }
#endif
template <typename T> template <typename T>
void broadcast_2d(const T* in, void broadcast_2d(const T* in,
T* out, T* out,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment