Unverified Commit 9f1e8f06 authored by Scott Cyphers, committed by GitHub

Make sure examples compile (#3981)

* Make sure examples compile

* Resolve doc build error due to opset versioning and align dynamic tensor doc to cpp example

* Add latest rc

* Remove deprecated API

* Update brief link summary

* Dist example

* update doc for cpp code examples folder

* Fix typo and toc index

* Build config for example, deprecation in dist test

* style
parent b1851f7a
@@ -621,6 +621,7 @@ endif()

 add_subdirectory(src)
 add_subdirectory(test)
+add_subdirectory(doc/examples)

 if (NGRAPH_DOC_BUILD_ENABLE)
     add_subdirectory(doc)
@@ -17,6 +17,7 @@
 if (NGRAPH_CPU_ENABLE)
     add_subdirectory(abc)
     add_subdirectory(abc_operator)
+    add_subdirectory(dynamic_tensor)
     add_subdirectory(mnist_mlp)
     add_subdirectory(update)
 endif()
@@ -50,9 +50,9 @@ int main()
     float v_b[2][3] = {{7, 8, 9}, {10, 11, 12}};
     float v_c[2][3] = {{1, 0, -1}, {-1, 1, 2}};

-    t_a->write(&v_a, 0, sizeof(v_a));
-    t_b->write(&v_b, 0, sizeof(v_b));
-    t_c->write(&v_c, 0, sizeof(v_c));
+    t_a->write(&v_a, sizeof(v_a));
+    t_b->write(&v_b, sizeof(v_b));
+    t_c->write(&v_c, sizeof(v_c));

     // Invoke the function
     auto exec = backend->compile(f);
@@ -60,7 +60,7 @@ int main()
     // Get the result
     float r[2][3];
-    t_result->read(&r, 0, sizeof(r));
+    t_result->read(&r, sizeof(r));

     std::cout << "[" << std::endl;
     for (size_t i = 0; i < s[0]; ++i)
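Most of the mechanical churn in this commit is the runtime::Tensor::write/read
signature change: the byte-offset argument is gone, and both calls now copy
from the start of the tensor buffer. A minimal before/after sketch (tensor
names follow the abc example above; it assumes tensors created by a compiled
CPU-backend function, as in that example):

    #include <memory>
    #include <ngraph/ngraph.hpp>

    // Sketch of the two-argument API; only the dropped offset argument
    // differs from the pre-commit calls.
    void roundtrip(std::shared_ptr<ngraph::runtime::Tensor> t_a,
                   std::shared_ptr<ngraph::runtime::Tensor> t_result)
    {
        float v_a[2][3] = {{1, 2, 3}, {4, 5, 6}};
        t_a->write(&v_a, sizeof(v_a));  // was: t_a->write(&v_a, 0, sizeof(v_a))

        float r[2][3];
        t_result->read(&r, sizeof(r));  // was: t_result->read(&r, 0, sizeof(r))
    }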
@@ -49,9 +49,9 @@ int main()
     float v_b[2][3] = {{7, 8, 9}, {10, 11, 12}};
     float v_c[2][3] = {{1, 0, -1}, {-1, 1, 2}};

-    t_a->write(&v_a, 0, sizeof(v_a));
-    t_b->write(&v_b, 0, sizeof(v_b));
-    t_c->write(&v_c, 0, sizeof(v_c));
+    t_a->write(&v_a, sizeof(v_a));
+    t_b->write(&v_b, sizeof(v_b));
+    t_c->write(&v_c, sizeof(v_c));

     // Invoke the function
     auto exec = backend->compile(f);
@@ -59,7 +59,7 @@ int main()
     // Get the result
     float r[2][3];
-    t_result->read(&r, 0, sizeof(r));
+    t_result->read(&r, sizeof(r));

     std::cout << "[" << std::endl;
     for (size_t i = 0; i < s[0]; ++i)
New file (CMakeLists.txt for the dynamic_tensor example):

+# ******************************************************************************
+# Copyright 2017-2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ******************************************************************************
+
+add_executable(partial_shape partial_shape.cpp)
+add_dependencies(partial_shape ngraph cpu_backend)
+target_link_libraries(partial_shape ngraph cpu_backend)
@@ -15,11 +15,19 @@
 //*****************************************************************************

 #include <iostream>
+#include <numeric>
+#include <vector>

 #include <ngraph/ngraph.hpp>

+using namespace std;
 using namespace ngraph;

+void execute(shared_ptr<runtime::Backend> be,
+             shared_ptr<runtime::Executable> ex,
+             shared_ptr<runtime::Tensor> t_out,
+             uint32_t n);
+
 int main()
 {
     // Create and compile a graph where the provided info of shape of x is
@@ -27,42 +35,46 @@ int main()
     auto x_shape_info = PartialShape{2, Dimension::dynamic()};
     auto x = make_shared<op::Parameter>(element::i32, x_shape_info);
     auto a = x + x;
-    auto f = make_shared<Function>({a}, {x});
-    auto be = runtime::backend::create();
+    auto f = make_shared<Function>(OutputVector{a}, ParameterVector{x});
+    auto be = runtime::Backend::create("CPU", true);
     auto ex = be->compile(f);

     // Create a dynamic tensor of shape (2,?)
     auto t_out = be->create_dynamic_tensor(element::i32, x_shape_info);

-    // Call the graph to write a value with shape (2,3) to t_out
-    auto t_in = be->create_tensor(element::i32, Shape{2, 3});
-    t_in->write();
-    ex->call({t_out}, {t_in})
-
-    // Call the graph again, to write a value with a different shape to
-    // t_out.
-    t_in = be->create_tensor(element::i32, Shape{2, 20});
-    t_in->write();
-    ex->call({t_out}, {t_in})
-
-    // Get the result. At this point t_out->get_shape() would return
-    // Shape{2,20},
-    // but t_out->get_partial_shape() would return "(2,?)"
-    float r[2][3];
-    t_result->read(&r, 0, sizeof(r));
-
-    std::cout << "[" << std::endl;
-    for (size_t i = 0; i < s[0]; ++i)
-    {
-        std::cout << " [";
-        for (size_t j = 0; j < s[1]; ++j)
-        {
-            std::cout << r[i][j] << ' ';
-        }
-        std::cout << ']' << std::endl;
-    }
-    std::cout << ']' << std::endl;
+    execute(be, ex, t_out, 3);
+    execute(be, ex, t_out, 11);
+    execute(be, ex, t_out, 20);
+
     return 0;
 }
+
+void execute(shared_ptr<runtime::Backend> be,
+             shared_ptr<runtime::Executable> ex,
+             shared_ptr<runtime::Tensor> t_out,
+             uint32_t n)
+{
+    // Initialize input of shape (2, n)
+    auto t_in = be->create_tensor(element::i32, Shape{2, n});
+    {
+        vector<int32_t> t_val(2 * n);
+        iota(t_val.begin(), t_val.end(), 0);
+        t_in->write(&t_val[0], t_val.size() * sizeof(t_val[0]));
+    }
+
+    // Get the result
+    ex->call({t_out}, {t_in});
+
+    auto s = t_out->get_shape();
+    vector<int32_t> r(s[0] * s[1]);
+    t_out->read(&r[0], r.size() * sizeof(r[0]));
+
+    cout << "[" << endl;
+    for (size_t i = 0; i < s[0]; ++i)
+    {
+        cout << " [";
+        for (size_t j = 0; j < s[1]; ++j)
+        {
+            cout << r[i * s[1] + j] << ' ';
+        }
+        cout << ']' << endl;
+    }
+    cout << ']' << endl;
+}
@@ -17,9 +17,8 @@
 add_executable(mnist_mlp mnist_loader.cpp mnist_mlp.cpp)
 add_dependencies(mnist_mlp ngraph cpu_backend)
 target_link_libraries(mnist_mlp ngraph cpu_backend)

-if (NGRAPH_DISTRIBUTED_ENABLE)
 add_executable(dist_mnist_mlp mnist_loader.cpp dist_mnist_mlp.cpp)
 target_compile_definitions(dist_mnist_mlp PRIVATE NGRAPH_DISTRIBUTED_ENABLE)
-target_include_directories(dist_mnist_mlp SYSTEM PRIVATE libmlsl)
-target_link_libraries(dist_mnist_mlp ngraph cpu_backend libmlsl)
-endif()
+add_dependencies(dist_mnist_mlp ngraph cpu_backend)
+target_link_libraries(dist_mnist_mlp ngraph cpu_backend)
@@ -90,10 +90,8 @@ float test_accuracy(MNistDataLoader& loader,
 {
     loader.load();
     t_X->write(loader.get_image_floats(),
-               0,
                loader.get_image_batch_size() * sizeof(float));
     t_Y->write(loader.get_label_floats(),
-               0,
                loader.get_label_batch_size() * sizeof(float));
     exec->call({t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
     size_t acc = accuracy_count(t_softmax, t_Y);
@@ -106,8 +104,6 @@ float test_accuracy(MNistDataLoader& loader,
 int main(int argc, char* argv[])
 {
-    ngraph::Distributed dist;
-
     size_t epochs = 5;
     size_t batch_size = 128;
     size_t output_size = 10;
@@ -254,10 +250,8 @@ int main(int argc, char* argv[])
     {
         train_loader.load();
         t_X->write(train_loader.get_image_floats(),
-                   0,
                    train_loader.get_image_batch_size() * sizeof(float));
         t_Y->write(train_loader.get_label_floats(),
-                   0,
                    train_loader.get_label_batch_size() * sizeof(float));
         train_exec->call(
             {t_loss,
@@ -89,10 +89,8 @@ float test_accuracy(MNistDataLoader& loader,
 {
     loader.load();
     t_X->write(loader.get_image_floats(),
-               0,
                loader.get_image_batch_size() * sizeof(float));
     t_Y->write(loader.get_label_floats(),
-               0,
                loader.get_label_batch_size() * sizeof(float));
     exec->call({t_softmax}, {t_X, t_W0, t_b0, t_W1, t_b1});
     size_t acc = accuracy_count(t_softmax, t_Y);
@@ -232,7 +230,7 @@ int main(int argc, const char* argv[])
         clone_function(Function(OutputVector{softmax},
                                 ParameterVector{X, W0, b0, W1, b1}),
                        inference_node_map);
-    auto inference_exe = backend->compile(inference_function);
+    auto inference_exec = backend->compile(inference_function);

     set_scalar(t_learning_rate, .03f);
@@ -241,10 +239,8 @@ int main(int argc, const char* argv[])
     {
         train_loader.load();
         t_X->write(train_loader.get_image_floats(),
-                   0,
                    train_loader.get_image_batch_size() * sizeof(float));
         t_Y->write(train_loader.get_label_floats(),
-                   0,
                    train_loader.get_label_batch_size() * sizeof(float));
         train_exec->call(
             {t_loss,
@@ -264,7 +260,7 @@ int main(int argc, const char* argv[])
         {
             last_epoch = train_loader.get_epoch();
             std::cout << "Test accuracy: " << test_accuracy(test_loader,
-                                                            exec,
+                                                            inference_exec,
                                                             t_X,
                                                             t_Y,
                                                             t_softmax,
@@ -49,7 +49,7 @@ void randomize(std::function<T()> rand,
     {
         temp.push_back(rand());
     }
-    t->write(&temp[0], 0, element_count * sizeof(T));
+    t->write(&temp[0], element_count * sizeof(T));
 }

 // Get a scalar value from a tensor, optionally at an element offset
@@ -58,7 +58,7 @@ T get_scalar(const std::shared_ptr<ngraph::runtime::Tensor>& t,
              size_t element_offset = 0)
 {
     T result;
-    t->read(&result, element_offset * sizeof(T), sizeof(T));
+    t->read(&result + (element_offset * sizeof(T)), sizeof(T));
     return result;
 }
@@ -68,7 +68,7 @@ void set_scalar(const std::shared_ptr<ngraph::runtime::Tensor>& t,
              T value,
              size_t element_offset = 0)
 {
-    t->write(&value, element_offset * sizeof(T), sizeof(T));
+    t->write(&value + (element_offset * sizeof(T)), sizeof(T));
 }

 // Show a shape
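With the offset parameter removed, get_scalar can no longer ask read() to
start at a byte offset. An alternative, hedged sketch, assuming the
two-argument read() always copies from the tensor's first byte: read the
leading elements and keep the last one. get_scalar_prefix is an illustrative
name, not part of this commit:

    #include <memory>
    #include <vector>
    #include <ngraph/ngraph.hpp>

    // Hypothetical helper (not from this commit): fetch elements
    // 0..element_offset and return the last one.
    template <typename T>
    T get_scalar_prefix(const std::shared_ptr<ngraph::runtime::Tensor>& t,
                        size_t element_offset = 0)
    {
        std::vector<T> prefix(element_offset + 1);
        t->read(prefix.data(), prefix.size() * sizeof(T));
        return prefix.back();
    }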
@@ -9,6 +9,7 @@
 <dt>{{ _('Recent Versions') }}<i class="fa fa-terminal"></i></dt>
 <dd><!-- Until our https://docs.ngraph.ai/ publishing is set up, we link to GitHub -->
     <ul>
+        <li><a href="https://github.com/NervanaSystems/ngraph/releases/tag/v0.27.0-rc.1">0.27.0</a></li>
         <li><a href="https://github.com/NervanaSystems/ngraph/releases/tag/v0.26.0">0.26.0</a></li>
         <li><a href="https://github.com/NervanaSystems/ngraph/releases/tag/v0.25.1-rc.10">0.25.1</a></li>
         <li><a href="https://github.com/NervanaSystems/ngraph/releases/tag/v0.25.0">0.25.0</a></li>
@@ -11,10 +11,3 @@ appropriately to import symbols referenced from outside the library,
 and to export them from within the library. See any of the
 ``${backend}_backend_visibility`` header files for an example; see
 also :ref:`what_is_backend`
-
-..
-
-.. doxygenclass:: ngraph::runtime::Backend
-   :project: ngraph
-   :members:
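The visibility headers mentioned in the context above follow the usual
export/import macro pattern. A hedged sketch (macro names are illustrative,
not nGraph's actual ones): the same header expands to "export" while the
backend library itself is being compiled, and to "import" for code that links
against it.

    // Illustrative visibility header; all names here are hypothetical.
    #if defined(_WIN32)
    #define MY_BACKEND_EXPORT __declspec(dllexport)
    #define MY_BACKEND_IMPORT __declspec(dllimport)
    #else
    #define MY_BACKEND_EXPORT __attribute__((visibility("default")))
    #define MY_BACKEND_IMPORT
    #endif

    #ifdef MY_BACKEND_DLL_EXPORTS // defined only when building the backend
    #define MY_BACKEND_API MY_BACKEND_EXPORT
    #else
    #define MY_BACKEND_API MY_BACKEND_IMPORT
    #endif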
@@ -9,7 +9,6 @@ Backend APIs

    backend-api/index
    dynamicbackend-api/index
    plaidml-ng-api/index
-   executable-api/index

 As of version ``0.15``, there is a new backend API to work with functions that
Deleted file (backends/executable-api/index.rst):

-.. backends/executable-api/index.rst:
-
-Executable
-==========
-
-The ``compile`` function on an ``Executable`` has more direct methods to
-actions such as ``validate``, ``call``, ``get_performance_data``, and so on.
-
-.. doxygenclass:: ngraph::runtime::Executable
-   :project: ngraph
-   :members:
@@ -82,20 +82,20 @@ weights and bias:
 .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp
    :language: cpp
-   :lines: 127-135
+   :lines: 123-134

 Repeat the process for the next layer,

 .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp
    :language: cpp
-   :lines: 138-146
+   :lines: 137-144

 and normalize everything with a ``softmax``.

 .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp
    :language: cpp
-   :lines: 148-150
+   :lines: 146-147

 .. _loss:
@@ -109,7 +109,7 @@ underflow.
 .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp
    :language: cpp
-   :lines: 154-166
+   :lines: 150-164

 .. _backprop:
@@ -125,7 +125,7 @@ allows the calculations for the adjustments to be further optimized.
 .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp
    :language: cpp
-   :lines: 169-172
+   :lines: 167-170

 For any node ``N``, if the update for ``loss`` is ``delta``, the
@@ -138,7 +138,7 @@ update computation for ``N`` will be given by the node
 .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp
    :language: cpp
-   :lines: 177-181
+   :lines: 177-179

 The different update nodes will share intermediate computations. So to
@@ -147,7 +147,7 @@ get the updated values for the weights as computed with the specified
 .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp
    :language: cpp
-   :lines: 182-215
+   :lines: 184-212

 .. _update:
@@ -167,5 +167,5 @@ compile clones of the nodes.
 .. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp
    :language: cpp
-   :lines: 216-226
+   :lines: 217-224
@@ -41,7 +41,7 @@ the new code is highlighted below:
 .. literalinclude:: ../../../../examples/mnist_mlp/dist_mnist_mlp.cpp
    :language: cpp
-   :lines: 178-194
+   :lines: 174-189
    :emphasize-lines: 8-11

 See the `full code`_ in the ``examples`` folder ``/doc/examples/mnist_mlp/dist_mnist_mlp.cpp``.
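The emphasized lines in the distributed walkthrough presumably wrap each
backprop delta in an AllReduce before it feeds the weight update; that is the
one graph-level change a data-parallel run needs. A hedged sketch of the
pattern (op::AllReduce is documented elsewhere in this commit, but the
single-argument constructor shown here is an assumption, and allreduce_delta
is an illustrative name):

    #include <memory>
    #include <ngraph/ngraph.hpp>
    using namespace ngraph;

    // Illustrative only: sum-reduce a gradient across workers by wrapping
    // the delta node in an AllReduce before it feeds the update.
    std::shared_ptr<Node> allreduce_delta(const std::shared_ptr<Node>& delta)
    {
        return std::make_shared<op::AllReduce>(delta);
    }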
@@ -270,16 +270,15 @@ programmatically or manually) in order to successfully retreive shape data.
 * :ref:`create_dyn_tensor`
 * :ref:`call_graph_vw_`
-* :ref:`call_graph_vwnew`
+* :ref:`dyn_ten_result`
 * :ref:`kpsh`

-Create and compile a graph for ``f(x) = x + x`` where the provided info
-of shape ``x`` is ``(2,?)``:
+Create and compile a graph where the provided info of shape ``x`` is ``(2,?)``:

 .. literalinclude:: ../../../../examples/dynamic_tensor/partial_shape.cpp
    :language: cpp
-   :lines: 27-32
+   :lines: 35-40

 .. _create_dyn_tensor:
@@ -291,7 +290,7 @@ Create a dynamic tensor of shape ``(2,?)``
 .. literalinclude:: ../../../../examples/dynamic_tensor/partial_shape.cpp
    :language: cpp
-   :lines: 35
+   :lines: 43-46

 At this point, ``t_out->get_shape()`` would throw an exception, while
 ``t_out->get_partial_shape()`` would return ``"(2,?)"``.
@@ -299,29 +298,25 @@ At this point, ``t_out->get_shape()`` would throw an exception, while
 .. _call_graph_vw_:

-Write shape
------------
-
-Call the graph to write a value with shape (2,3) to t_out
+Initialize input of shape
+-------------------------

 .. literalinclude:: ../../../../examples/dynamic_tensor/partial_shape.cpp
    :language: cpp
-   :lines: 38-40
+   :lines: 57-62

 At this point, ``t_out->get_shape()`` would return ``Shape{2,3}``,
 while ``t_out->get_partial_shape()`` would return ``"(2,?)"``.

-.. _call_graph_vwnew:
+.. _dyn_ten_result:

-Write new shape
----------------
-
-Call the graph again, to write a value with a different shape to ``t_out``.
+Get the result
+--------------

 .. literalinclude:: ../../../../examples/dynamic_tensor/partial_shape.cpp
    :language: cpp
-   :lines: 44-45
+   :lines: 64-80

 At this point, ``t_out->get_shape()`` would return ``Shape{2,20}``,
 while ``t_out->get_partial_shape()`` would return ``"(2,?)"``.
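The three "At this point" notes above condense into one hedged sketch. It
uses the same calls as partial_shape.cpp; the comments restate the documented
behavior rather than adding new guarantees:

    #include <memory>
    #include <vector>
    #include <ngraph/ngraph.hpp>
    using namespace ngraph;

    int main()
    {
        auto shape_info = PartialShape{2, Dimension::dynamic()};
        auto x = std::make_shared<op::Parameter>(element::i32, shape_info);
        auto f = std::make_shared<Function>(OutputVector{x + x}, ParameterVector{x});
        auto be = runtime::Backend::create("CPU", true); // dynamic backend
        auto ex = be->compile(f);

        auto t_out = be->create_dynamic_tensor(element::i32, shape_info);
        // Here get_shape() throws; get_partial_shape() reports (2,?).

        auto t_in = be->create_tensor(element::i32, Shape{2, 3});
        std::vector<int32_t> v(2 * 3, 1);
        t_in->write(v.data(), v.size() * sizeof(int32_t));
        ex->call({t_out}, {t_in});
        // Now get_shape() == Shape{2,3}; get_partial_shape() is still (2,?).
        return 0;
    }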
@@ -59,7 +59,6 @@ nGraph Compiler Stack Documentation

    core/fusion/index.rst
    nGraph Core Ops <ops/index.rst>
    provenance/index.rst
-   Graph Execution API <backends/executable-api/index.rst>
    core/quantization.rst
    dynamic/index.rst
@@ -54,6 +54,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Abs
+.. doxygenclass:: ngraph::op::v0::Abs
    :project: ngraph
    :members:

@@ -53,6 +53,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Acos
+.. doxygenclass:: ngraph::op::v0::Acos
    :project: ngraph
    :members:

@@ -42,6 +42,6 @@ Outputs
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::All
+.. doxygenclass:: ngraph::op::v0::All
    :project: ngraph
    :members:

@@ -41,6 +41,6 @@ Outputs
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::AllReduce
+.. doxygenclass:: ngraph::op::v0::AllReduce
    :project: ngraph
    :members:

@@ -52,6 +52,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Asin
+.. doxygenclass:: ngraph::op::v0::Asin
    :project: ngraph
    :members:

@@ -54,6 +54,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Atan
+.. doxygenclass:: ngraph::op::v0::Atan
    :project: ngraph
    :members:

@@ -73,7 +73,7 @@ and by two scaling attributes: :math:`\gamma` and :math:`\beta`.
 C++ Interface
 ==============

-.. doxygenclass:: ngraph::op::BatchNormInference
+.. doxygenclass:: ngraph::op::v0::BatchNormInference
    :project: ngraph
    :members:

@@ -91,7 +91,7 @@ Backprop
 C++ Interface
 ==============

-.. doxygenclass:: ngraph::op::BatchNormTraining
+.. doxygenclass:: ngraph::op::v0::BatchNormTraining
    :project: ngraph
    :members:

@@ -101,7 +101,7 @@ We work backwards
 C++ Interface
 ==============

-.. doxygenclass:: ngraph::op::BatchNormTrainingBackprop
+.. doxygenclass:: ngraph::op::v0::BatchNormTrainingBackprop
    :project: ngraph
    :members:

@@ -41,6 +41,6 @@ Outputs (in place)
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::BroadcastDistributed
+.. doxygenclass:: ngraph::op::v0::BroadcastDistributed
    :project: ngraph
    :members:

@@ -56,6 +56,6 @@ to define a backprop.
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Ceiling
+.. doxygenclass:: ngraph::op::v0::Ceiling
    :project: ngraph
    :members:

@@ -74,6 +74,6 @@ We slice the backprop value into the backprops associated with the inputs.
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Concat
+.. doxygenclass:: ngraph::op::v0::Concat
    :project: ngraph
    :members:

@@ -55,6 +55,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Convert
+.. doxygenclass:: ngraph::op::v0::Convert
    :project: ngraph
    :members:

@@ -54,6 +54,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Cos
+.. doxygenclass:: ngraph::op::v0::Cos
    :project: ngraph
    :members:

@@ -54,6 +54,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Cosh
+.. doxygenclass:: ngraph::op::v0::Cosh
    :project: ngraph
    :members:

@@ -82,6 +82,6 @@ To be documented.
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Dot
+.. doxygenclass:: ngraph::op::v0::Dot
    :project: ngraph
    :members:

@@ -54,6 +54,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Exp
+.. doxygenclass:: ngraph::op::v0::Exp
    :project: ngraph
    :members:

@@ -55,6 +55,6 @@ to define a backprop.
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Floor
+.. doxygenclass:: ngraph::op::v0::Floor
    :project: ngraph
    :members:

@@ -44,6 +44,6 @@ Outputs
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::GetOutputElement
+.. doxygenclass:: ngraph::op::v0::GetOutputElement
    :project: ngraph
    :members:

@@ -55,6 +55,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Log
+.. doxygenclass:: ngraph::op::v0::Log
    :project: ngraph
    :members:

@@ -54,6 +54,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Negative
+.. doxygenclass:: ngraph::op::v0::Negative
    :project: ngraph
    :members:

@@ -102,6 +102,6 @@ Mathematical Definition
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Quantize
+.. doxygenclass:: ngraph::op::v0::Quantize
    :project: ngraph
    :members:

@@ -40,6 +40,6 @@ Mathematical Definition
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Relu
+.. doxygenclass:: ngraph::op::v0::Relu
    :project: ngraph
    :members:

@@ -46,6 +46,6 @@ Mathematical Definition
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::ShapeOf
+.. doxygenclass:: ngraph::op::v0::ShapeOf
    :project: ngraph
    :members:

@@ -47,6 +47,6 @@ Mathematical Definition
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Sigmoid
+.. doxygenclass:: ngraph::op::v0::Sigmoid
    :project: ngraph
    :members:

@@ -45,6 +45,6 @@ Mathematical Definition
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Sign
+.. doxygenclass:: ngraph::op::v0::Sign
    :project: ngraph
    :members:

@@ -52,6 +52,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Sin
+.. doxygenclass:: ngraph::op::v0::Sin
    :project: ngraph
    :members:

@@ -52,6 +52,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Sinh
+.. doxygenclass:: ngraph::op::v0::Sinh
    :project: ngraph
    :members:

@@ -52,6 +52,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Sqrt
+.. doxygenclass:: ngraph::op::v0::Sqrt
    :project: ngraph
    :members:

@@ -58,6 +58,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Subtract
+.. doxygenclass:: ngraph::op::v0::Subtract
    :project: ngraph
    :members:

@@ -52,6 +52,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Tan
+.. doxygenclass:: ngraph::op::v0::Tan
    :project: ngraph
    :members:

@@ -53,6 +53,6 @@ Backprop
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Tanh
+.. doxygenclass:: ngraph::op::v0::Tanh
    :project: ngraph
    :members:

@@ -69,6 +69,6 @@ Not yet implemented.
 C++ Interface
 =============

-.. doxygenclass:: ngraph::op::Transpose
+.. doxygenclass:: ngraph::op::v0::Transpose
    :project: ngraph
    :members:
@@ -57,8 +57,7 @@ static void test_allreduce_common(reduction::Type reduce_type)
     {
     case reduction::Type::SUM:
         copy_data(a, v);
-        std::transform(
-            v.begin(), v.end(), v.begin(), std::bind1st(std::multiplies<float>(), comm_size));
+        std::transform(v.begin(), v.end(), v.begin(), [=](float x) { return x * comm_size; });
         break;
     case reduction::Type::PROD:
         copy_data(a, v);
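This hunk swaps a deprecated binder for a lambda: std::bind1st was deprecated
in C++11 and removed in C++17. The lambda performs the same element-wise
scaling, as the standalone-runnable example below shows:

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main()
    {
        std::vector<float> v{1.f, 2.f, 3.f};
        const float comm_size = 4; // stand-in for the test's communicator size
        // Equivalent to std::bind1st(std::multiplies<float>(), comm_size)
        std::transform(v.begin(), v.end(), v.begin(),
                       [=](float x) { return x * comm_size; });
        for (float x : v)
            std::cout << x << ' '; // prints: 4 8 12
    }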