Unverified Commit 47626835 authored by Robert Kimball, committed by GitHub

Merge branch 'master' into bob/nbench_db

parents d6633ec4 a509de7b
......@@ -32,7 +32,7 @@ int main()
auto t1 = std::make_shared<op::Multiply>(t0, c);
// Make the function
auto f = std::make_shared<Function>(NodeVector{t1},
auto f = std::make_shared<Function>(OutputVector{t1},
ParameterVector{a, b, c});
// Create the backend
......
......@@ -31,7 +31,7 @@ int main()
auto t1 = (a + b) * c;
// Make the function
auto f = std::make_shared<Function>(NodeVector{t1},
auto f = std::make_shared<Function>(OutputVector{t1},
ParameterVector{a, b, c});
// Get the backend
......
......@@ -175,8 +175,8 @@ int main(int argc, char* argv[])
auto delta = -learning_rate * loss;
// Updates
ngraph::autodiff::Adjoints adjoints(NodeVector{loss},
NodeVector{delta});
ngraph::autodiff::Adjoints adjoints(OutputVector{loss},
OutputVector{delta});
auto grad_W0 = adjoints.backprop_node(W0);
auto grad_b0 = adjoints.backprop_node(b0);
auto grad_W1 = adjoints.backprop_node(W1);
......@@ -231,7 +231,7 @@ int main(int argc, char* argv[])
NodeMap train_node_map;
auto train_function = clone_function(
Function(
NodeVector{loss, softmax, W0_next, b0_next, W1_next, b1_next},
OutputVector{loss, softmax, W0_next, b0_next, W1_next, b1_next},
ParameterVector{X, Y, N, learning_rate, W0, b0, W1, b1}),
train_node_map);
auto train_exec = backend->compile(train_function);
......@@ -240,7 +240,7 @@ int main(int argc, char* argv[])
// X, W0, b0, W1, b1 -> softmax
NodeMap inference_node_map;
auto inference_function = clone_function(
Function(NodeVector{softmax}, ParameterVector{X, W0, b0, W1, b1}),
Function(OutputVector{softmax}, ParameterVector{X, W0, b0, W1, b1}),
inference_node_map);
auto inference_exec = backend->compile(inference_function);
......
......@@ -172,8 +172,8 @@ int main(int argc, const char* argv[])
auto delta = -learning_rate * loss;
// Updates
ngraph::autodiff::Adjoints adjoints(NodeVector{loss},
NodeVector{delta});
ngraph::autodiff::Adjoints adjoints(OutputVector{loss},
OutputVector{delta});
auto W0_next = W0 + adjoints.backprop_node(W0);
auto b0_next = b0 + adjoints.backprop_node(b0);
auto W1_next = W1 + adjoints.backprop_node(W1);
......@@ -218,7 +218,7 @@ int main(int argc, const char* argv[])
NodeMap train_node_map;
auto train_function = clone_function(
Function(
NodeVector{loss, softmax, W0_next, b0_next, W1_next, b1_next},
OutputVector{loss, softmax, W0_next, b0_next, W1_next, b1_next},
ParameterVector{X, Y, N, learning_rate, W0, b0, W1, b1}),
train_node_map);
auto train_exec = backend->compile(train_function);
......@@ -227,7 +227,7 @@ int main(int argc, const char* argv[])
// X, W0, b0, W1, b1 -> softmax
NodeMap inference_node_map;
auto inference_function = clone_function(
Function(NodeVector{softmax}, ParameterVector{X, W0, b0, W1, b1}),
Function(OutputVector{softmax}, ParameterVector{X, W0, b0, W1, b1}),
inference_node_map);
auto inference_exe = backend->compile(inference_function);
......
......@@ -99,8 +99,8 @@ Once the graph is built, we need to package it in a ``Function``:
:lines: 35-36
The first argument to the constructor specifies the nodes that the function will
return; in this case, the product. A ``NodeVector`` is a vector of shared
pointers of ``op::Node``. The second argument specifies the parameters of the
return; in this case, the product. An ``OutputVector`` is a vector of references to
outputs of ``op::Node``. The second argument specifies the parameters of the
function, in the order they are to be passed to the compiled function. A
``ParameterVector`` is a vector of shared pointers to ``op::Parameter``.
......
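Condensing the example above into a hedged, self-contained sketch of the new ``OutputVector`` constructor (the function name and shapes are illustrative, not taken from the docs):

```cpp
#include <ngraph/ngraph.hpp>
using namespace ngraph;

// Builds (a + b) * c; a minimal sketch, assuming the standard ngraph headers.
std::shared_ptr<Function> make_abc()
{
    auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
    auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
    auto c = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
    auto t1 = std::make_shared<op::Multiply>(std::make_shared<op::Add>(a, b), c);
    // First argument: the function's outputs (Output<Node> values);
    // second argument: the parameters, in the order callers will pass them.
    return std::make_shared<Function>(OutputVector{t1}, ParameterVector{a, b, c});
}
```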
......@@ -23,6 +23,8 @@
namespace py = pybind11;
static const char* CAPSULE_NAME = "ngraph_function";
void regclass_pyngraph_Function(py::module m)
{
py::class_<ngraph::Function, std::shared_ptr<ngraph::Function>> function(m, "Function");
......@@ -49,4 +51,41 @@ void regclass_pyngraph_Function(py::module m)
py::cast(self.get_output_shape(0)).attr("__str__")().cast<std::string>();
return "<" + class_name + ": '" + self.get_friendly_name() + "' (" + shape + ")>";
});
function.def_static("from_capsule", [](py::object* capsule) {
// get the underlying PyObject* which is a PyCapsule pointer
auto* pybind_capsule_ptr = capsule->ptr();
// extract the pointer stored in the PyCapsule under the name CAPSULE_NAME
auto* capsule_ptr = PyCapsule_GetPointer(pybind_capsule_ptr, CAPSULE_NAME);
auto* ngraph_function = static_cast<std::shared_ptr<ngraph::Function>*>(capsule_ptr);
if (ngraph_function)
{
return *ngraph_function;
}
else
{
throw std::runtime_error("The provided capsule does not contain an ngraph::Function");
}
});
function.def_static("to_capsule", [](std::shared_ptr<ngraph::Function>& ngraph_function) {
// create a shared pointer on the heap before putting it in the capsule
// this secures the lifetime of the object transferred by the capsule
auto* sp_copy = new std::shared_ptr<ngraph::Function>(ngraph_function);
// a destructor callback that will delete the heap allocated shared_ptr
// when the capsule is destructed
auto sp_deleter = [](PyObject* capsule) {
auto* capsule_ptr = PyCapsule_GetPointer(capsule, CAPSULE_NAME);
auto* function_sp = static_cast<std::shared_ptr<ngraph::Function>*>(capsule_ptr);
if (function_sp)
{
delete function_sp;
}
};
// put the shared_ptr in a new capsule under the same name as in "from_capsule"
auto pybind_capsule = py::capsule(sp_copy, CAPSULE_NAME, sp_deleter);
return pybind_capsule;
});
}
......@@ -51,11 +51,6 @@ OutputVector make_zeros(std::shared_ptr<Node> x)
return zeros;
}
autodiff::Adjoints::Adjoints(const NodeVector& ys, const NodeVector& cs)
: Adjoints(OutputVector(ys.begin(), ys.end()), OutputVector(cs.begin(), cs.end()))
{
}
autodiff::Adjoints::Adjoints(const OutputVector& ys, const OutputVector& cs)
{
if (ys.size() != cs.size())
......
......@@ -46,8 +46,6 @@ namespace ngraph
/// \param c An expression for where to evaluate the derivatives
Adjoints(const OutputVector& y, const OutputVector& c);
Adjoints(const NodeVector& y, const NodeVector& c);
Adjoints(const Adjoints& adjoints) = default;
Adjoints& operator=(const Adjoints& adjoints) = default;
Adjoints() = default;
......
......@@ -41,6 +41,30 @@ Function::Function(const ResultVector& results,
init();
}
Function::Function(const OutputVector& results,
const ParameterVector& parameters,
const std::string& name)
: m_results(results.size())
, m_parameters(parameters)
, m_temporary_pool_size(0)
, m_instance_id(m_next_instance_id.fetch_add(1))
, m_name(name)
, m_unique_name("Function_" + to_string(m_instance_id))
{
if (std::any_of(results.cbegin(), results.cend(), [](Output<Node> n) {
return std::dynamic_pointer_cast<op::Result>(n.get_node_shared_ptr());
}))
{
throw ngraph_error(
" Results already contain op::Results. Use a c-tor that takes a ResultVector");
}
std::transform(results.begin(), results.end(), m_results.begin(), [](Output<Node> n) {
return std::make_shared<op::Result>(n);
});
init();
}
Function::Function(const NodeVector& results,
const ParameterVector& parameters,
const std::string& name)
......@@ -208,6 +232,11 @@ shared_ptr<Node> Function::get_output_op(size_t i) const
return m_results.at(i);
}
Output<Node> Function::output(size_t i) const
{
return m_results.at(i);
}
shared_ptr<Node> Function::get_result() const
{
if (m_results.size() != 1)
......
......@@ -37,6 +37,10 @@ namespace ngraph
const ParameterVector& parameters,
const std::string& name = "");
Function(const OutputVector& results,
const ParameterVector& parameters,
const std::string& name = "");
Function(const std::shared_ptr<Node>& result,
const ParameterVector& parameters,
const std::string& name = "");
......@@ -55,6 +59,8 @@ namespace ngraph
/// Return the op that generates output i
std::shared_ptr<Node> get_output_op(size_t i) const;
Output<Node> output(size_t i) const;
/// Return the element type of output i
const element::Type& get_output_element_type(size_t i) const;
......
......@@ -34,7 +34,7 @@ namespace ngraph
/// \param arg_pad_value The node producing the scalar value to be inserted for padding.
/// \param padding_below The padding-below widths.
/// \param padding_above The padding-above widths.
/// \param pad_mode The padding mode: CONSTANT(default), EDGE or REFLECT.
/// \param pad_mode The padding mode: CONSTANT(default), EDGE, REFLECT or SYMMETRIC.
Pad(const std::shared_ptr<Node>& arg,
const std::shared_ptr<Node>& arg_pad_value,
const CoordinateDiff& padding_below,
......
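A hedged sketch of the difference between the two mirroring modes (tensor values are illustrative; the SYMMETRIC row matches the pad_symmetric test added below, the REFLECT row is the usual edge-excluded reflection):

```cpp
// Pad a 1-D tensor {1, 2, 3} with two elements below and above.
auto arg = std::make_shared<op::Parameter>(element::f32, Shape{3});
auto pad_value = std::make_shared<op::Parameter>(element::f32, Shape{}); // used only by CONSTANT mode
auto sym = std::make_shared<op::Pad>(
    arg, pad_value, CoordinateDiff{2}, CoordinateDiff{2}, op::PadMode::SYMMETRIC);
// SYMMETRIC repeats the edge element: {2, 1, 1, 2, 3, 3, 2}
// REFLECT (edge excluded) would give: {3, 2, 1, 2, 3, 2, 1}
```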
......@@ -3027,7 +3027,9 @@ namespace ngraph
case ngraph::op::PadMode::REFLECT:
pad_mode_string = "ngraph::op::PadMode::REFLECT";
break;
case ngraph::op::PadMode::SYMMETRIC: throw ngraph_error("Unsupported PadMode");
case ngraph::op::PadMode::SYMMETRIC:
pad_mode_string = "ngraph::op::PadMode::SYMMETRIC";
break;
}
writer << "reference::pad<" << out[0].get_type() << ">(" << args[0].get_name()
<< ",\n";
......
......@@ -123,6 +123,7 @@ pad_reflect_1d_bottom_neg_bigger_than_tensor
pad_reflect_1d_multi_reflect
pad_reflect_2d
pad_reflect_2d_with_neg
pad_symmetric
# Quantized operators are not supported on gpu backend
model_dequantize_linear
......
......@@ -40,6 +40,7 @@ pad_reflect_1d_bottom_neg_bigger_than_tensor
pad_reflect_1d_multi_reflect
pad_reflect_2d
pad_reflect_2d_with_neg
pad_symmetric
# Not implemented
batch_mat_mul_forward
......
......@@ -137,6 +137,7 @@ pad_reflect_2d_with_neg
pad_negative_exterior_2d
pad_negative_exterior_2d_all_negative
pad_negative_exterior_4d
pad_symmetric
max_trivial_int8
max_trivial_5d_int32
max_3d_to_scalar_double
......
......@@ -164,8 +164,31 @@ namespace ngraph
}
case op::PadMode::SYMMETRIC:
{
// TODO: Add support for Symmetric mode
throw ngraph_error("Symmetric mode padding not supported");
Coordinate c = in_coord; // have to copy because in_coord is const
// Mirror coordinates that fall in a padding region back onto the source
// tensor; unlike REFLECT, SYMMETRIC includes the edge element in the mirror.
for (size_t i = 0; i < c.size(); i++)
{
ptrdiff_t pos = padding_below[i] - (c[i] + 1);
if (pos >= 0)
{
// c[i] lies in the below-padding region
c[i] = static_cast<size_t>(pos + padding_below[i]);
}
else
{
pos = -(pos + 1);
ptrdiff_t src_dim = static_cast<ptrdiff_t>(arg0_shape[i]);
if (pos < src_dim)
{
// c[i] lies inside the source tensor; leave it where it is
c[i] = static_cast<size_t>(pos + padding_below[i]);
}
else
{
// c[i] lies in the above-padding region
c[i] = static_cast<size_t>(padding_below[i] + src_dim +
padding_above[i] - pos);
}
}
}
v = arg0[input_transform.index(c)];
break;
}
}
......
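For reference, here is how the mapping above plays out on one axis of the pad_symmetric test added below (padding_below = 2, padding_above = 2, source row {1, 2, 3}), assuming in_coord iterates over the padded coordinate space:

    padded coordinate:   0  1  2  3  4  5  6
    mapped source index: 1  0  0  1  2  2  1
    resulting value:     2  1  1  2  3  3  2

This matches the first row of the expected output in the new test.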
......@@ -733,8 +733,8 @@ NGRAPH_TEST(${BACKEND_NAME}, batch_norm_bprop_n4c3h2w2)
auto C = std::make_shared<op::Parameter>(element::f32, shape_r);
auto zero = ngraph::make_zero(bn_dgamma->get_element_type(), bn_dgamma->get_shape());
ngraph::autodiff::Adjoints adjoints(NodeVector{bn_dx, bn_dgamma, bn_dbeta},
NodeVector{C, zero, zero});
ngraph::autodiff::Adjoints adjoints(OutputVector{bn_dx, bn_dgamma, bn_dbeta},
OutputVector{C, zero, zero});
auto dinput = adjoints.backprop_node(input);
auto dgamma = adjoints.backprop_node(gamma);
......
......@@ -257,10 +257,10 @@ NGRAPH_TEST(${BACKEND_NAME}, divide_adjoint_stability)
auto B = make_shared<op::Parameter>(element::f32, shape);
auto f = make_shared<Function>(make_shared<op::Divide>(A, B), ParameterVector{A, B});
auto Y_out = f->get_output_op(0);
auto Y_out = f->output(0);
auto Xs = f->get_parameters();
auto C = std::make_shared<op::Parameter>(Y_out->get_element_type(), Y_out->get_shape());
ngraph::autodiff::Adjoints adjoints(NodeVector{Y_out}, NodeVector{C});
auto C = std::make_shared<op::Parameter>(Y_out.get_element_type(), Y_out.get_shape());
ngraph::autodiff::Adjoints adjoints(OutputVector{Y_out}, OutputVector{C});
std::vector<std::shared_ptr<Node>> dYdXs(Xs.size());
transform(
Xs.begin(), Xs.end(), dYdXs.begin(), [C, &adjoints](const std::shared_ptr<Node>& X) {
......
......@@ -939,3 +939,36 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym)
read_vector<float>(result),
MIN_FLOAT_TOLERANCE_BITS));
}
NGRAPH_TEST(${BACKEND_NAME}, pad_symmetric)
{
Shape shape_a{2, 3};
auto A = make_shared<op::Parameter>(element::f32, shape_a);
Shape shape_b{};
auto B = make_shared<op::Parameter>(element::f32, shape_b);
Shape shape_r{4, 7};
CoordinateDiff padding_below{1, 2};
CoordinateDiff padding_above{1, 2};
auto f = make_shared<Function>(
make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::SYMMETRIC),
ParameterVector{A, B});
auto backend = runtime::Backend::create("${BACKEND_NAME}");
// Create some tensors for input/output
auto a = backend->create_tensor(element::f32, shape_a);
copy_data(a, test::NDArray<float, 2>({{1, 2, 3}, {4, 5, 6}}).get_vector());
auto b = backend->create_tensor(element::f32, shape_b);
copy_data(b, vector<float>{2112});
auto result = backend->create_tensor(element::f32, shape_r);
auto handle = backend->compile(f);
handle->call_with_validate({result}, {a, b});
EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{2, 1, 1, 2, 3, 3, 2},
{2, 1, 1, 2, 3, 3, 2},
{5, 4, 4, 5, 6, 6, 5},
{5, 4, 4, 5, 6, 6, 5}})
.get_vector()),
read_vector<float>(result),
MIN_FLOAT_TOLERANCE_BITS));
}
......@@ -507,7 +507,8 @@ TEST(cpu_fusion, conv_bias_bprop_n1c1h3w3)
auto f = make_shared<Function>(
convolution_bias, ParameterVector{conv_test.data, conv_test.weights, conv_test.bias});
ngraph::autodiff::Adjoints adjoints(NodeVector{convolution_bias}, NodeVector{conv_test.delta});
ngraph::autodiff::Adjoints adjoints(OutputVector{convolution_bias},
OutputVector{conv_test.delta});
auto d_data = adjoints.backprop_node(conv_test.data);
auto d_weights = adjoints.backprop_node(conv_test.weights);
......@@ -546,7 +547,7 @@ TEST(cpu_fusion, conv_bias_bprop)
pass_manager.register_pass<pass::VisualizeTree>("conv_bias_bprop_fusion.png");
auto f = make_shared<Function>(conv_bias, ParameterVector{data_batch, filters, bias});
ngraph::autodiff::Adjoints adjoints(NodeVector{conv_bias}, NodeVector{delta});
ngraph::autodiff::Adjoints adjoints(OutputVector{conv_bias}, OutputVector{delta});
auto d_data = adjoints.backprop_node(data_batch);
auto d_weights = adjoints.backprop_node(filters);
......@@ -1452,7 +1453,7 @@ TEST(cpu_fusion, max_pool_with_indices)
auto max_pool = std::make_shared<op::MaxPool>(input, window_shape);
auto C = std::make_shared<op::Parameter>(element::f32, max_pool->get_shape());
ngraph::autodiff::Adjoints adjoints(NodeVector{max_pool}, NodeVector{C});
ngraph::autodiff::Adjoints adjoints(ngraph::OutputVector{max_pool}, ngraph::OutputVector{C});
auto dinput = adjoints.backprop_node(input);
......@@ -1789,14 +1790,14 @@ static std::shared_ptr<ngraph::Function> make_forward_function()
return std::make_shared<Function>(NodeVector{max_pool, neg, absn}, ParameterVector{input});
}
static std::pair<std::shared_ptr<ngraph::Function>, std::vector<std::shared_ptr<ngraph::Node>>>
static std::pair<std::shared_ptr<ngraph::Function>, OutputVector>
make_backward_function(std::shared_ptr<ngraph::Function> f)
{
// get parameters
std::vector<std::shared_ptr<ngraph::op::Parameter>> back_parameters = f->get_parameters();
ngraph::NodeVector adjoints;
ngraph::NodeVector outputs;
ngraph::OutputVector adjoints;
ngraph::OutputVector outputs;
for (auto Y : f->get_results())
{
// Get the output
......@@ -1809,7 +1810,7 @@ static std::pair<std::shared_ptr<ngraph::Function>, std::vector<std::shared_ptr<
ngraph::autodiff::Adjoints adjoint{outputs, adjoints};
// Perform autodiff
std::vector<std::shared_ptr<Node>> dYdXs(back_parameters.size());
OutputVector dYdXs(back_parameters.size());
transform(back_parameters.begin(),
back_parameters.end(),
dYdXs.begin(),
......@@ -1818,7 +1819,8 @@ static std::pair<std::shared_ptr<ngraph::Function>, std::vector<std::shared_ptr<
// create the backward function
std::vector<std::shared_ptr<ngraph::op::Parameter>> param_adjoints;
for (auto n : adjoints)
param_adjoints.push_back(std::dynamic_pointer_cast<ngraph::op::Parameter>(n));
param_adjoints.push_back(
std::dynamic_pointer_cast<ngraph::op::Parameter>(n.get_node_shared_ptr()));
back_parameters.insert(back_parameters.begin(), param_adjoints.begin(), param_adjoints.end());
return {std::make_shared<ngraph::Function>(dYdXs, back_parameters), adjoints};
......@@ -2703,7 +2705,7 @@ void sigmoid_multiply_fusion_backward_compute(runtime::Backend* backend,
auto sigmoid_mul =
make_shared<op::SigmoidMultiply>(input_0_alt, input_1_alt, input_0_type, input_1_type);
ngraph::autodiff::Adjoints adjoints(NodeVector{sigmoid_mul}, NodeVector{delta_param});
ngraph::autodiff::Adjoints adjoints(OutputVector{sigmoid_mul}, OutputVector{delta_param});
auto d_input_0 = adjoints.backprop_node(input_0_adjoint);
auto d_input_1 = adjoints.backprop_node(input_1_adjoint);
auto df = make_shared<Function>(NodeVector{d_input_0, d_input_1}, back_params);
......
......@@ -144,7 +144,7 @@ namespace ngraph
// df/dX*
std::vector<std::shared_ptr<Node>> df_output_params;
Adjoints adjoints(NodeVector{f->get_output_op(0)}, NodeVector{c_param});
Adjoints adjoints(OutputVector{f->output(0)}, OutputVector{c_param});
// for each x "of interest"
for (auto x : indep_params)
......
......@@ -32,10 +32,10 @@ using namespace ngraph;
std::shared_ptr<Function> autodiff::backprop_function(const std::shared_ptr<Function>& f)
{
auto Y_out = f->get_output_op(0);
auto Y_out = f->output(0);
auto Xs = f->get_parameters();
auto C = std::make_shared<op::Parameter>(Y_out->get_element_type(), Y_out->get_shape());
Adjoints adjoints(NodeVector{Y_out}, NodeVector{C});
auto C = std::make_shared<op::Parameter>(Y_out.get_element_type(), Y_out.get_shape());
Adjoints adjoints(OutputVector{Y_out}, OutputVector{C});
std::vector<std::shared_ptr<Node>> dYdXs(Xs.size());
transform(Xs.begin(), Xs.end(), dYdXs.begin(), [C, &adjoints](const std::shared_ptr<Node>& X) {
return adjoints.backprop_node(X);
......