Unverified Commit 05a404a8 authored by Robert Kimball, committed by GitHub

Change Backend::create to return std::unique_ptr<Backend> (#1909)

* create unique_ptr backend

* unit test cleanup

* address more code that was recently added

* change from reference to pointer when passing backend to reduce the number of lines changed.

* fix build error

* fix python wrapper

* style

* more specific treatment for unique_ptr
parent b5beac87
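
Note (not part of the diff): a minimal sketch of what the change means for call sites. create() now hands back exclusive ownership; callers that still need shared ownership can move the result into a shared_ptr, because shared_ptr has a converting constructor taking unique_ptr&&.

    // Hedged sketch of the new call-site pattern; assumes the relevant
    // ngraph headers and <memory> are included.
    std::unique_ptr<ngraph::runtime::Backend> owner =
        ngraph::runtime::Backend::create("CPU");

    // Where shared ownership is still required (e.g. the pointer is copied
    // into stored functors), transfer ownership into a shared_ptr:
    std::shared_ptr<ngraph::runtime::Backend> shared =
        ngraph::runtime::Backend::create("CPU");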
@@ -25,7 +25,7 @@ namespace py = pybind11;
 void regclass_pyngraph_runtime_Backend(py::module m)
 {
-    py::class_<ngraph::runtime::Backend, std::shared_ptr<ngraph::runtime::Backend>> backend(
+    py::class_<ngraph::runtime::Backend, std::unique_ptr<ngraph::runtime::Backend>> backend(
         m, "Backend");
     backend.doc() = "ngraph.impl.runtime.Backend wraps ngraph::runtime::Backend";
     backend.def_static("create", &ngraph::runtime::Backend::create);
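
The holder type in the py::class_ declaration above must match how the bound factory returns objects. A standalone sketch of the same pattern (Widget and make_widget are illustrative names, not nGraph APIs):

    #include <memory>
    #include <pybind11/pybind11.h>
    namespace py = pybind11;

    struct Widget {};
    std::unique_ptr<Widget> make_widget() { return std::unique_ptr<Widget>(new Widget()); }

    PYBIND11_MODULE(example, m)
    {
        // std::unique_ptr<T> is pybind11's default holder; spelling it out
        // documents that ownership transfers to Python on return.
        py::class_<Widget, std::unique_ptr<Widget>>(m, "Widget");
        m.def("make_widget", &make_widget);
    }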
@@ -29,7 +29,7 @@ runtime::Backend::~Backend()
 {
 }
 
-shared_ptr<runtime::Backend> runtime::Backend::create(const string& type)
+unique_ptr<runtime::Backend> runtime::Backend::create(const string& type)
 {
     return BackendManager::create_backend(type);
 }
@@ -43,9 +43,9 @@ public:
     /// \brief Create a new Backend object
     /// \param type The name of a registered backend, such as "CPU" or "GPU".
     ///             To select a subdevice use "GPU:N" where `N` is the subdevice number.
-    /// \returns shared_ptr to a new Backend or nullptr if the named backend
+    /// \returns unique_ptr to a new Backend or nullptr if the named backend
     ///          does not exist.
-    static std::shared_ptr<Backend> create(const std::string& type);
+    static std::unique_ptr<Backend> create(const std::string& type);
 
     /// \brief Query the list of registered devices
     /// \returns A vector of all registered devices.
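
Per the doc comment above, a hedged caller sketch checking the nullptr contract (note that BackendManager::create_backend below can also throw runtime_error for misbehaving backends, so defensive callers may want both checks):

    auto backend = ngraph::runtime::Backend::create("GPU:0");
    if (!backend)
    {
        // the named backend is not registered
    }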
@@ -66,9 +66,9 @@ vector<string> runtime::BackendManager::get_registered_backends()
     return rc;
 }
 
-shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::string& config)
+unique_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::string& config)
 {
-    shared_ptr<runtime::Backend> rc;
+    runtime::Backend* backend = nullptr;
     string type = config;
 
     // strip off attributes, IE:CPU becomes IE
@@ -83,7 +83,7 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
     if (it != registry.end())
     {
         new_backend_t new_backend = it->second;
-        rc = shared_ptr<runtime::Backend>(new_backend(config.c_str()));
+        backend = new_backend(config.c_str());
     }
     else
     {
@@ -111,21 +111,9 @@ shared_ptr<runtime::Backend> runtime::BackendManager::create_backend(const std::
             throw runtime_error("Backend '" + type + "' does not implement new_backend");
         }
 
-        function<void(runtime::Backend*)> delete_backend =
-            reinterpret_cast<void (*)(runtime::Backend*)>(DLSYM(handle, "delete_backend"));
-        if (!delete_backend)
-        {
-            CLOSE_LIBRARY(handle);
-            throw runtime_error("Backend '" + type + "' does not implement delete_backend");
-        }
-
-        runtime::Backend* backend = new_backend(config.c_str());
-        rc = shared_ptr<runtime::Backend>(backend, [=](runtime::Backend* b) {
-            delete_backend(b);
-            // CLOSE_LIBRARY(handle);
-        });
+        backend = new_backend(config.c_str());
     }
 
-    return rc;
+    return unique_ptr<runtime::Backend>(backend);
 }
 
 // This doodad finds the full path of the containing shared library
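
Design note on the hunk above: the old code looked up a per-library delete_backend symbol and bound it into the shared_ptr's type-erased deleter. A unique_ptr's deleter is part of its type, so keeping that scheme would have changed the public signature, roughly as in the hypothetical sketch below (aliases are illustrative, not in this PR). Dropping the custom deleter means the Backend is now destroyed with a plain delete in the loading binary, which assumes the plugin and the host share the same C++ runtime and heap.

    // Hypothetical alternative preserving a custom deleter; the deleter
    // becomes part of the return type, so every caller would have to change.
    using backend_deleter_t = void (*)(ngraph::runtime::Backend*);
    using backend_ptr = std::unique_ptr<ngraph::runtime::Backend, backend_deleter_t>;

    backend_ptr wrap_backend(ngraph::runtime::Backend* raw, backend_deleter_t del)
    {
        return backend_ptr(raw, del);
    }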
@@ -58,7 +58,7 @@ public:
     static std::vector<std::string> get_registered_backends();
 
 private:
-    static std::shared_ptr<runtime::Backend> create_backend(const std::string& type);
+    static std::unique_ptr<runtime::Backend> create_backend(const std::string& type);
     static std::unordered_map<std::string, new_backend_t>& get_registry();
     static std::unordered_map<std::string, new_backend_t> s_registered_backend;
@@ -33,7 +33,7 @@ namespace ngraph
 {
     auto function_call = static_cast<const ngraph::op::FunctionCall*>(node);
     auto function = function_call->get_functions()[0];
-    auto backend = runtime::Backend::create("CPU");
+    shared_ptr<runtime::Backend> backend = runtime::Backend::create("CPU");
 
     auto& functors = external_function->get_functors();
     auto& callees = external_function->get_callees();
@@ -36,7 +36,7 @@ namespace ngraph
     auto select_function = select_and_scatter->get_functions()[0];
     auto scatter_function = select_and_scatter->get_functions()[1];
-    auto backend = runtime::Backend::create("CPU");
+    shared_ptr<runtime::Backend> backend = runtime::Backend::create("CPU");
 
     auto& functors = external_function->get_functors();
     auto& callees = external_function->get_callees();
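
Why the two builders above now spell out shared_ptr instead of auto: auto would deduce unique_ptr from the new return type, and these builders presumably copy the pointer into stored functors, which requires shared ownership. The conversion compiles because shared_ptr is constructible from a unique_ptr rvalue:

    // Sketch: moving exclusive ownership into shared ownership.
    std::shared_ptr<ngraph::runtime::Backend> backend =
        ngraph::runtime::Backend::create("CPU"); // unique_ptr&& -> shared_ptr
    auto copy = backend; // copies are now legal, e.g. into captured lambdas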
@@ -41,11 +41,6 @@ extern "C" runtime::Backend* new_backend(const char* configuration_string)
     return new runtime::interpreter::INTBackend();
 }
 
-extern "C" void delete_backend(runtime::Backend* backend)
-{
-    delete backend;
-}
-
 shared_ptr<runtime::Tensor>
     runtime::interpreter::INTBackend::create_tensor(const element::Type& type, const Shape& shape)
 {
[diff collapsed; contents not shown]
@@ -35,7 +35,7 @@ TEST(INTERPRETER, nan_check_input)
     auto B = make_shared<op::Parameter>(element::f32, shape);
     auto f = make_shared<Function>(make_shared<op::Divide>(A, B), op::ParameterVector{A, B});
 
-    auto backend = runtime::Backend::create("INTERPRETER");
+    shared_ptr<runtime::Backend> backend = runtime::Backend::create("INTERPRETER");
     shared_ptr<runtime::interpreter::INTBackend> ibackend =
         static_pointer_cast<runtime::interpreter::INTBackend>(backend);
@@ -58,7 +58,7 @@ TEST(INTERPRETER, nan_check_output)
     auto B = make_shared<op::Parameter>(element::f32, shape);
     auto f = make_shared<Function>(make_shared<op::Divide>(A, B), op::ParameterVector{A, B});
 
-    auto backend = runtime::Backend::create("INTERPRETER");
+    shared_ptr<runtime::Backend> backend = runtime::Backend::create("INTERPRETER");
     shared_ptr<runtime::interpreter::INTBackend> ibackend =
         static_pointer_cast<runtime::interpreter::INTBackend>(backend);
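
The tests above take the shared_ptr route for a second reason: static_pointer_cast is defined for shared_ptr, so downcasting to INTBackend requires one. A minimal sketch:

    std::shared_ptr<ngraph::runtime::Backend> backend =
        ngraph::runtime::Backend::create("INTERPRETER");
    auto ibackend =
        std::static_pointer_cast<ngraph::runtime::interpreter::INTBackend>(backend);
    // ibackend and backend now share ownership of the same object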
[diff collapsed; contents not shown]
@@ -50,7 +50,7 @@ TEST(debugger, add_breakpoint)
     auto f = make_shared<Function>(neg, op::ParameterVector{A, B});
 
-    auto backend = runtime::Backend::create("CPU");
+    shared_ptr<runtime::Backend> backend = runtime::Backend::create("CPU");
 
     shared_ptr<runtime::Tensor> a = backend->create_tensor(element::i32, shape);
     shared_ptr<runtime::Tensor> b = backend->create_tensor(element::i32, shape);
@@ -86,7 +86,7 @@ TEST(debugger, stepping)
     auto f = make_shared<Function>(neg, op::ParameterVector{A, B});
 
-    auto backend = runtime::Backend::create("CPU");
+    shared_ptr<runtime::Backend> backend = runtime::Backend::create("CPU");
 
     shared_ptr<runtime::Tensor> a = backend->create_tensor(element::i32, shape);
     shared_ptr<runtime::Tensor> b = backend->create_tensor(element::i32, shape);
@@ -123,7 +123,7 @@ TEST(debugger, delete_breakpoint)
     auto f = make_shared<Function>(neg, op::ParameterVector{A, B});
 
-    auto backend = runtime::Backend::create("CPU");
+    shared_ptr<runtime::Backend> backend = runtime::Backend::create("CPU");
 
     shared_ptr<runtime::Tensor> a = backend->create_tensor(element::i32, shape);
     shared_ptr<runtime::Tensor> b = backend->create_tensor(element::i32, shape);
@@ -163,7 +163,7 @@ TEST(debugger, while_stepping)
     auto f = make_shared<Function>(neg, op::ParameterVector{A, B});
 
-    auto backend = runtime::Backend::create("CPU");
+    shared_ptr<runtime::Backend> backend = runtime::Backend::create("CPU");
 
     shared_ptr<runtime::Tensor> a = backend->create_tensor(element::i32, shape);
     shared_ptr<runtime::Tensor> b = backend->create_tensor(element::i32, shape);
@@ -201,7 +201,7 @@ TEST(debugger, resume)
     auto f = make_shared<Function>(neg, op::ParameterVector{A, B});
 
-    auto backend = runtime::Backend::create("CPU");
+    shared_ptr<runtime::Backend> backend = runtime::Backend::create("CPU");
 
     shared_ptr<runtime::Tensor> a = backend->create_tensor(element::i32, shape);
     shared_ptr<runtime::Tensor> b = backend->create_tensor(element::i32, shape);
[diff collapsed; contents not shown]
@@ -249,7 +249,7 @@ NGRAPH_TEST (${BACKEND_NAME}, %s)
     backend->call_with_validate(function, {result}, {a, b});
     EXPECT_TRUE(test::all_close<float>(vector<float>{expected_result}, read_vector<float>(result), 1.0e-4f, 1.0e-6f));
 
     // only test backprop for certain cases as it takes significant compute resources
-    %sEXPECT_TRUE(autodiff_numeric_compare<float>(backend, make_graph, {a, b}, .01f, .01f));
+    %sEXPECT_TRUE(autodiff_numeric_compare<float>(backend.get(), make_graph, {a, b}, .01f, .01f));
 }
'''
 f.write (template % (test_name,
@@ -46,7 +46,7 @@ namespace ngraph
         {
             template <typename T>
             std::vector<std::shared_ptr<runtime::Tensor>>
-                get_autodiff(const std::shared_ptr<runtime::Backend>& backend,
+                get_autodiff(runtime::Backend* backend,
                              std::shared_ptr<Function>& df,
                              const std::vector<std::shared_ptr<runtime::Tensor>>& df_input_args,
                              const std::vector<std::shared_ptr<op::Parameter>>& indep_params)
@@ -126,7 +126,7 @@ namespace ngraph
             template <typename T>
             std::vector<std::shared_ptr<runtime::Tensor>>
-                backprop_derivative(const std::shared_ptr<runtime::Backend>& backend,
+                backprop_derivative(runtime::Backend* backend,
                                     const std::shared_ptr<Function>& f,
                                     const std::vector<std::shared_ptr<runtime::Tensor>>& f_input_args,
                                     const std::vector<std::shared_ptr<op::Parameter>>& indep_params)
@@ -25,7 +25,7 @@
 // derivative does not work with int types
 // TODO: Always compute the numerical derivatives in double
 template <typename T>
-bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& backend,
+bool autodiff_numeric_compare(ngraph::runtime::Backend* backend,
                               std::shared_ptr<ngraph::Function> f,
                               std::shared_ptr<ngraph::Function> g,
                               const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& args,
@@ -55,7 +55,7 @@ bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& b
         interpreter_args.push_back(interpreter_arg);
     }
     auto results_num = ngraph::autodiff::numeric_derivative<T>(
-        interpreter_backend, f, interpreter_args, delta, f->get_parameters());
+        interpreter_backend.get(), f, interpreter_args, delta, f->get_parameters());
 
     // Use the backend being tested to compute symbolic derivatives
     auto results_sym =
@@ -75,7 +75,7 @@ bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& b
 }
 
 template <typename T>
-bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& backend,
+bool autodiff_numeric_compare(ngraph::runtime::Backend* backend,
                               std::function<std::shared_ptr<ngraph::Function>()> make_graph,
                               const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& args,
                               T rtol,
@@ -86,7 +86,7 @@ bool autodiff_numeric_compare(const std::shared_ptr<ngraph::runtime::Backend>& b
 template <typename T>
 bool autodiff_numeric_compare_selective(
-    const std::shared_ptr<ngraph::runtime::Backend>& backend,
+    ngraph::runtime::Backend* backend,
     std::shared_ptr<ngraph::Function> f,
     std::shared_ptr<ngraph::Function> g,
     const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& args,
@@ -128,7 +128,7 @@ bool autodiff_numeric_compare_selective(
         interpreter_args.push_back(interpreter_arg);
     }
     auto results_num = ngraph::autodiff::numeric_derivative<T>(
-        interpreter_backend, f, interpreter_args, .001f, f_indep_params);
+        interpreter_backend.get(), f, interpreter_args, .001f, f_indep_params);
 
     // Use the backend being tested to compute symbolic derivatives
     std::vector<std::shared_ptr<ngraph::op::Parameter>> g_indep_params;
@@ -161,7 +161,7 @@ bool autodiff_numeric_compare_selective(
 template <typename T>
 bool autodiff_numeric_compare_selective(
-    const std::shared_ptr<ngraph::runtime::Backend>& backend,
+    ngraph::runtime::Backend* backend,
     std::function<std::shared_ptr<ngraph::Function>()> make_graph,
     const std::vector<std::shared_ptr<ngraph::runtime::Tensor>>& args,
     T rtol,
@@ -34,7 +34,7 @@ namespace ngraph
         /// \returns vector of dy/dvar, where each dy/dvar's shape is concat(y.shape(), var.shape())
         template <typename T>
         std::vector<std::shared_ptr<runtime::Tensor>>
-            numeric_derivative(const std::shared_ptr<runtime::Backend>& backend,
+            numeric_derivative(runtime::Backend* backend,
                                const std::shared_ptr<Function>& f,
                                const std::vector<std::shared_ptr<runtime::Tensor>>& args,
                                T delta,
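
Taken together, the autodiff changes above switch the test helpers from shared_ptr parameters to a non-owning raw Backend*; owners pass .get() and keep the smart pointer alive for the duration of the call. A hedged sketch of that convention (run_helper is an illustrative name, not an nGraph API):

    void run_helper(ngraph::runtime::Backend* backend); // non-owning parameter

    std::unique_ptr<ngraph::runtime::Backend> owner =
        ngraph::runtime::Backend::create("INTERPRETER");
    run_helper(owner.get()); // owner outlives the call, so the raw pointer is safe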