Commit b8056257 authored by Leona C, committed by Scott Cyphers

Documentation updates for 0.21.0 (#3010)

* WIP more guides and tutorials for the docs

* Update Guides and structure

* Update Guides and structure

+ Test documentation build on Clear Linux OS;
+ Fix a few links and redirs.
+ Note rc-naming conventions in doc may be tag-generalized.

* documentation supports various rc-naming conventions

* Add instructions on building nGraph bridge from src

* Update ngtf bridge versions to 0.14.1 and TensorFlow's bazel dependency to 0.24.1

* Use consistent file naming on all BE indexes

* Fix comma

* Templatize versioning for releases starting 0.20 and add placeholders for changes by components

* Remove typo

* Minor formatting fixes

* Document how to find or display built version of nGraph

* Start a r0.21 for doc that also fixes broken link and typo

* Correct use of console command context for version check

* Minor fixes to doc-contributor-README

* Docs for version 0.21

* Ensure docs build without error

* Assign ownership of project README and intro for new ngraph.ai site to Straw

* Note deprecated Tensor API in doc

* Update to generic versioning on core doc for importing via built wheels

* Resolve merge conflicts leftover from v20 version request changes

* Remove typo and suggested use of prebuilt LLVM, since newer OSes do not have it available

* Update to latest

* Update with review from PR and ngraph bridge team

* Update right section with review from PR and ngraph bridge team

* Ensure doc example folders have owners from the component

* Push edit of tensorflow_connect to v0.22 docs

* Update Python wheel building instructions to newer version
parent 6c8b5650
......@@ -19,10 +19,15 @@
/doc/ @indie
/doc/examples/mnist_mlp/dist_* @wenzhe-nrv @indie
/README.md @indie
/doc/*/*/frameworks/tensorflow_connect.rst @shresthamalik @avijit-nervana @sayantan-nervana
/doc/*/*/backends/plaidml-ng-api/ @flaub @brianretford @dgkutnic
/doc/*/*/inspection/ @aproctor
/doc/examples/onnx/ @arogowie-intel @tsocha
/README.md @adstraw
project/introduction.rst @adstraw
project/governance.rst @adstraw
/changes.md @indie @diyessi
/INSTALL.md @indie
/CONTRIB.md @indie
project/doc-contributor-README.rst @indie
/licenses/ @diyessi @raramer01 @aslepko
/LICENSE @diyessi @raramer01 @aslepko
......
......@@ -73,11 +73,13 @@ author = 'Intel Corporation'
# built documents.
#
# The short X.Y version.
version = '0.20'
version = '0.22'
# The Documentation full version, including alpha/beta/rc tags. Some features
# available in the latest code will not necessarily be documented first
release = '0.20.0'
# available in the latest code will not necessarily be documented first.
# rc syntax may be tagged; this documentation supports various rc-naming conventions
release = '0.22.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
......@@ -121,14 +123,15 @@ else:
# borrow this from the zephyr docs theme
html_context = {
# 'show_license': html_show_license, we have a custom footer to attribute
# 'show_license': html_show_license, we have custom footers to attribute
# RTD, WTD, and Sphinx contributors; so we do not enable this
'docs_title': docs_title,
'is_release': is_release,
'theme_logo_only': False,
'current_version': version,
'versions': ( ("latest", "../"),
("0.19.0", "/0.19.0/"), #not yet sure how we'll do this
("0.20.0", "/0.20.0/"), #not yet sure how we'll do this
("0.19.0", "/0.19.0/"),
("0.18.0", "/0.18.0/"),
("0.17.0", "/0.17.0/"),
("0.16.0", "/0.16.0/"),
......@@ -167,7 +170,7 @@ html_sidebars = {
# list of tuples (old_url, new_url) for pages to redirect
# (URLs should be relative to document root, only)
html_redirect_pages = [
('backends', 'backend-support/index'),
('backend-support', 'backends/index'),
('core/core', 'core/overview.rst'),
('core/fusion', 'core/fusion/index'),
('frameworks/mxnet', 'frameworks/mxnet_intg.rst'),
......@@ -177,6 +180,7 @@ html_redirect_pages = [
('inspection/inspection', 'inspection/index'),
('releases/release-notes', 'releases/index'),
# ('getting_started/getting_starting', 'getting_started/index'),
# mv to framework-specific helper directory
('project/project', 'project/index'),
('python_api/', 'python_api/index'),
]
......
......@@ -6,9 +6,10 @@
</span>
<div class="rst-other-versions">
<dl>
<dt>{{ _('Previous Verified Versions') }}</dt>
<dt>{{ _('Previous Versions') }}</dt>
<dd><!-- Until our https://docs.ngraph.ai/ publishing is set up, we link to GitHub -->
<ul>
<li><a href="https://github.com/NervanaSystems/ngraph/releases/tag/v0.20.0">0.20.0-rc.0</a></li>
<li><a href="https://github.com/NervanaSystems/ngraph/releases/tag/v0.19.0-rc.2">0.19.0-rc.2</a></li>
<li><a href="https://github.com/NervanaSystems/ngraph/releases/tag/v0.18.1">0.18.1</a></li>
<li><a href="https://github.com/NervanaSystems/ngraph/releases/tag/v0.17.0-rc.1">0.17.0-rc.1</a></li>
......
......@@ -2407,9 +2407,10 @@ div[class^='highlight'] pre {
}
.rst-content h1 .headerlink:after, .rst-content h2 .headerlink:after, .rst-content .toctree-wrapper p.caption .headerlink:after, .rst-content h3 .headerlink:after, .rst-content h4 .headerlink:after, .rst-content h5 .headerlink:after, .rst-content h6 .headerlink:after, .rst-content dl dt .headerlink:after, .rst-content p.caption .headerlink:after {
visibility: visible;
content: "";
font-family: "NeoSansIntel";
display: inline-block;
background-color: #27ae41;
font-size: 0.93em;
}
.rst-content h1:hover .headerlink, .rst-content h2:hover .headerlink, .rst-content .toctree-wrapper p.caption:hover .headerlink, .rst-content h3:hover .headerlink, .rst-content h4:hover .headerlink, .rst-content h5:hover .headerlink, .rst-content h6:hover .headerlink, .rst-content dl dt:hover .headerlink, .rst-content p.caption:hover .headerlink {
display: inline-block;
......@@ -2727,7 +2728,6 @@ span[id*='MathJax-Span'] {
}
.wy-menu-horiz:before, .wy-menu-horiz:after {
display: table;
content: "";
}
.wy-menu-horiz:after {
clear: both;
......
format_release_notes.rst
Release Notes
#############
This is the `Release Notes` template for the latest nGraph Compiler stack
release.
We are pleased to announce the release of version |version|.
What's new?
-----------
Additional functionality included with this release:
What's updated?
---------------
The following sections provide detailed lists of major updates/removals
by component:
Core
~~~~
Frameworks
~~~~~~~~~~
Backends
~~~~~~~~
Visualization Tools
~~~~~~~~~~~~~~~~~~~
Other
~~~~~
.. backend-support/tensor-api/index.rst:
Tensor
======
.. doxygenclass:: ngraph::runtime::Tensor
:project: ngraph
:members:
.. backend-support/backend-api/index.rst:
.. backends/backend-api/index.rst:
Backend
......
.. backend-support/cpp-api.rst:
.. backends/cpp-api.rst:
Backend APIs
############
......@@ -10,14 +10,13 @@ Backend APIs
executable-api/index
hosttensor-api/index
plaidml-ng-api/index
tensor-api/index
As of version ``0.15``, there is a new backend API to work with functions that
can be compiled as a runtime ``Executable``. Where previously ``Backend`` used a
``shared_ptr<Function>`` as the handle passed to the ``call`` method to execute
a compiled object, the addition of the ``shared_ptr<Executable>`` object has
more direct methods to actions such as ``validate``, ``call``, ``get_performance_data``, and so on. This new API permits any executable to be saved or loaded *into* or
more direct methods for actions such as ``validate``, ``call``, ``get_performance_data``,
and so on. This new API permits any executable to be saved or loaded *into* or
*out of* storage and makes it easier to distinguish when a Function is compiled,
thus making the internals of the ``Backend`` and ``Executable`` easier to
implement.
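The sketch below illustrates this compile-and-call pattern. It is illustrative
only: the ``run`` helper, shapes, and backend name are placeholders, and exact
signatures may vary slightly between releases.

.. code-block:: cpp

   #include <ngraph/ngraph.hpp>

   using namespace ngraph;

   // Sketch: compile a Function into an Executable, then call it.
   void run(std::shared_ptr<Function> f)
   {
       // The backend is selected by name at runtime.
       auto backend = runtime::Backend::create("CPU");

       // compile() returns the Executable handle described above.
       auto exec = backend->compile(f);

       // Tensors are still created through the Backend.
       auto input = backend->create_tensor(element::f32, Shape{2, 2});
       auto output = backend->create_tensor(element::f32, Shape{2, 2});

       // validate and call are now methods on the Executable itself.
       exec->call_with_validate({output}, {input});
   }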
.. backend-support/executable-api/index.rst:
.. backends/executable-api/index.rst:
Executable
......
.. backend-support/hosttensor-api/index.rst:
.. backends/hosttensor-api/index.rst:
HostTensor
......
.. backend-support/index.rst
.. backends/index.rst
About backends
##############
#################################
Developer Resources for Backends
#################################
* :ref:`what_is_backend`
* :ref:`how_to_use`
......
.. plaidml-ng-api/index.rst:
.. backends/plaidml-ng-api/index.rst:
PlaidML from nGraph
......
......@@ -73,11 +73,11 @@ author = 'Intel Corporation'
# built documents.
#
# The short X.Y version.
version = '0.20'
version = '0.21'
# The Documentation full version, including alpha/beta/rc tags. Some features
# available in the latest code will not necessarily be documented first
release = '0.20.0'
release = '0.21.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
......@@ -105,6 +105,8 @@ todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
html_title = "Documentation for the nGraph Library and Compiler stack"
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
......@@ -114,7 +116,7 @@ html_theme = 'ngraph_theme'
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
html_logo = '../ngraph_theme/static/favicon.ico'
html_logo = '../ngraph_theme/static/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
......@@ -152,11 +154,11 @@ htmlhelp_basename = 'IntelnGraphlibrarydoc'
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
......@@ -171,11 +173,10 @@ latex_elements = {
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'nGraphCompilerStack.tex', 'nGraph Compiler Stack Documentation',
'Intel Corporation', 'manual'),
(master_doc, 'nGraphCompilerStack.tex', u'nGraph Compiler Stack Documentation',
u'Intel Corporation', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
......@@ -201,23 +202,7 @@ breathe_projects = {
}
rst_epilog = u"""
.. |codename| replace:: Intel nGraph
.. |project| replace:: Intel nGraph Library
.. |InG| replace:: Intel® nGraph
.. |nGl| replace:: nGraph library
.. |copy| unicode:: U+000A9 .. COPYRIGHT SIGN
:ltrim:
.. |deg| unicode:: U+000B0 .. DEGREE SIGN
:ltrim:
.. |plusminus| unicode:: U+000B1 .. PLUS-MINUS SIGN
:rtrim:
.. |micro| unicode:: U+000B5 .. MICRO SIGN
:rtrim:
.. |trade| unicode:: U+02122 .. TRADEMARK SIGN
:ltrim:
.. |reg| unicode:: U+000AE .. REGISTERED TRADEMARK SIGN
:ltrim:
.. include:: /replacements.txt
"""
# -- autodoc Extension configuration --------------------------------------
......
......@@ -143,7 +143,7 @@ update computation for ``N`` will be given by the node
The different update nodes will share intermediate computations. So to
get the updated values for the weights as computed with the specified
:doc:`backend <../../backend-support/index>`:
:doc:`backend <../../backends/index>`:
.. literalinclude:: ../../../../examples/mnist_mlp/mnist_mlp.cpp
:language: cpp
......
......@@ -126,7 +126,7 @@ There are two backends for the CPU: the optimized ``"CPU"`` backend, which uses
the `Intel MKL-DNN`_, and the ``"INTERPRETER"`` backend, which runs reference
versions of kernels that favor implementation clarity over speed. The
``"INTERPRETER"`` backend can be slow, and is primarily intended for testing.
See the documentation on :doc:`runtime options for various backends <../../backend-support/index>`
See the documentation on :doc:`runtime options for various backends <../../backends/index>`
for additional details.
To continue with our original example and select the ``"CPU_Backend"``:
......@@ -175,7 +175,7 @@ the three parameters and the return value.
Each tensor is a shared pointer to a :term:`Tensorview`, which is the interface
backends implement for tensor use. When there are no more references to the
tensor view, it will be freed when convenient for the backend. See the
:doc:`../../backend-support/cpp-api` documentation for details on how to work
:doc:`../../backends/cpp-api` documentation for details on how to work
with ``Tensor``.
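For a quick sense of the read/write pattern on a ``Tensor``, here is a minimal,
illustrative sketch. It assumes an existing ``runtime::Backend`` and placeholder
shapes; note that the ``offset`` argument to reads and writes is removed in
newer releases, so exact signatures may differ on older versions.

.. code-block:: cpp

   #include <ngraph/ngraph.hpp>

   #include <vector>

   using namespace ngraph;

   // Sketch: copy host data into a tensor and read it back out.
   void roundtrip(runtime::Backend& backend)
   {
       std::vector<float> in{1.0f, 2.0f, 3.0f, 4.0f};
       std::vector<float> out(4);

       auto t = backend.create_tensor(element::f32, Shape{2, 2});
       t->write(in.data(), in.size() * sizeof(float));
       t->read(out.data(), out.size() * sizeof(float));
   }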
......
......@@ -45,7 +45,7 @@ everything at once; if an `ngraph_dist` is already installed on your system,
skip ahead to the next section, :ref:`install_ngonnx`.
#. Install prerequisites for the system and install nGraph as ``ngraph_dist``:
#. Install prerequisites for the system and install nGraph as ``ngraph_dist``.
.. code-block:: console
......@@ -54,7 +54,7 @@ skip ahead to the next section, :ref:`install_ngonnx`.
$ apt install build-essential cmake curl clang-3.9 git zlib1g zlib1g-dev libtinfo-dev
$ git clone https://github.com/NervanaSystems/ngraph.git
$ cd ngraph && mkdir build
$ cd build && cmake ../ -DCMAKE_INSTALL_PREFIX=~/ngraph_dist -DNGRAPH_USE_PREBUILT_LLVM=TRUE
$ cd build && cmake ../ -DCMAKE_INSTALL_PREFIX=~/ngraph_dist
$ make install
#. Build the Python package (binary wheel) for ngraph and set up an env for ONNX;
......@@ -77,8 +77,10 @@ skip ahead to the next section, :ref:`install_ngonnx`.
.. code-block:: console
(onnx)$ pip install -U python/dist/ngraph-0.9.0-cp36-cp36m-linux_x86_64.whl
(onnx)$ pip install -U python/dist/ngraph-[version]-cp36-cp36m-linux_x86_64.whl
Where ``[version]`` is the version number of the nGraph Python module
you see in that directory.
#. Confirm ``ngraph`` is properly installed through a Python interpreter:
......
......@@ -12,7 +12,7 @@ Pattern matcher
The nGraph Compiler is an optimizing compiler. As such, it provides a way to
capture a given :term:`function graph` and perform a series of optimization
passes over that graph. The result is a semantically-equivalent graph that, when
executed using any :doc:`backend <../../backend-support/index>`, has optimizations
executed using any :doc:`backend <../../backends/index>`, has optimizations
inherent at the hardware level: superior runtime characteristics to increase
training performance or reduce inference latency.
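For orientation, applying passes to a function graph follows a register-and-run
pattern roughly like the sketch below. This is illustrative only; ``CoreFusion``
is just one example pass, and the available passes vary by release.

.. code-block:: cpp

   #include <ngraph/ngraph.hpp>
   #include <ngraph/pass/core_fusion.hpp>
   #include <ngraph/pass/manager.hpp>

   using namespace ngraph;

   // Sketch: rewrite a Function in place with a fusion pass.
   void optimize(std::shared_ptr<Function> f)
   {
       pass::Manager pass_manager;
       pass_manager.register_pass<pass::CoreFusion>();
       pass_manager.run_passes(f);
   }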
......
......@@ -6,7 +6,7 @@ Overview: Optimize graphs with nGraph Compiler fusions
The nGraph Compiler is an optimizing compiler. As such, it provides a way to
capture a given :term:`function graph` and perform a series of optimization
passes over that graph. The result is a semantically-equivalent graph that, when
executed using any :doc:`backend <../../backend-support/index>`, has
executed using any :doc:`backend <../../backends/index>`, has
hardware-agnostic *and* hardware-specific optimizations, providing superior
runtime characteristics to increase training performance or reduce inference
latency.
......
......@@ -15,8 +15,9 @@ each of these briefly to get started.
A framework bridge interfaces with the "frontend" Core API. A framework bridge
is a component that sits between a framework like TensorFlow or MXNet, and the
nGraph Core frontend API. A framework bridge does two things: first, it
translates a framework's operations into graphs in nGraph’s in-memory :abbr:`Intermediary Representation (IR)`. Second, it executes the nGraph IR graphs via
the backend execution interface.
translates a framework's operations into graphs in nGraph’s in-memory
:abbr:`Intermediate Representation (IR)`. Second, it executes the nGraph IR
graphs via the backend execution interface.
The details of bridge implementation vary from framework to framework, but there
are some common patterns: a fairly typical example for a graph-based framework
......@@ -59,16 +60,14 @@ descriptions:
.. csv-table::
:header: "Namespace", "Description", "Location in Repo", "Docs"
:widths: 23, 53, 13, 23
:escape: ~
``ngraph``, The Intel nGraph C++ API, `ngraph`_, Implicit namespace omitted from most API documentation
``ngraph``, The Intel nGraph C++ API, ngraph, Implicit namespace omitted from most API documentation
``builder``, "Convenience functions that create additional graph nodes to implement commonly-used recipes; for example, auto-broadcast", `builder`_, Coming Soon
``descriptor``, Descriptors are compile-time representations of objects that will appear at run-time, `descriptor`_, Coming Soon
``op``, Ops used in graph construction, `op`_, :doc:`../ops/index`
``runtime``, The objects and methods used for executing the graph, `runtime`_, :doc:`../backend-support/cpp-api`
``runtime``, The objects and methods used for executing the graph, `runtime`_, :doc:`../backends/cpp-api`
.. _ngraph: https://github.com/NervanaSystems/ngraph/tree/master/src/ngraph
.. _builder: https://github.com/NervanaSystems/ngraph/tree/master/src/ngraph/builder
.. _descriptor: https://github.com/NervanaSystems/ngraph/tree/master/src/ngraph/descriptor
.. _op: https://github.com/NervanaSystems/ngraph/tree/master/src/ngraph/op
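For orientation, the tiny illustrative graph below touches the ``ngraph`` and
``op`` namespaces directly (placeholder shapes; not tied to any example in the
repo).

.. code-block:: cpp

   #include <ngraph/ngraph.hpp>

   using namespace ngraph;

   // Sketch: op:: supplies graph nodes; the top-level ngraph namespace
   // supplies Function, Shape, element types, and ParameterVector.
   std::shared_ptr<Function> make_add_graph()
   {
       auto a = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
       auto b = std::make_shared<op::Parameter>(element::f32, Shape{2, 2});
       auto sum = std::make_shared<op::Add>(a, b);
       return std::make_shared<Function>(sum, ParameterVector{a, b});
   }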
......
......@@ -19,6 +19,15 @@ something like:
export LD_LIBRARY_PATH=path/to/ngraph_dist/lib/
Find or display nGraph Version
-------------------------------
.. code-block:: console
python3 -c "import ngraph as ng; print('nGraph version: ', ng.__version__)"
FMV
---
......
.. frameworks/index.rst:
Current framework integrations
==============================
Connect a framework
===================
.. toctree::
:maxdepth: 1
......@@ -12,7 +12,6 @@ Current framework integrations
paddle_integ.rst
validated/testing-latency.rst
A framework is "supported" when there is a framework :term:`bridge` that can be
cloned from one of our GitHub repos and built to connect to nGraph device
backends while maintaining the framework's programmatic or user interface. Bridges
......
......@@ -13,33 +13,39 @@
.. limitations under the License.
.. ---------------------------------------------------------------------------
.. This documentation is available online at
.. https://ngraph.nervanasys.com/docs/latest
######################
nGraph Compiler stack
######################
nGraph is an open-source graph compiler for :abbr:`Artificial Neural Networks (ANNs)`.
The nGraph Compiler stack provides an inherently efficient graph-based compilation
infrastructure designed to be compatible with many upcoming
:abbr:`Application-Specific Integrated Circuits (ASICs)`, like the Intel® Nervana™
Neural Network Processor (Intel® Nervana™ NNP), while also unlocking a massive
performance boost on any existing hardware targets for your neural network: both
GPUs and CPUs. Using its flexible infrastructure, you will find it becomes much
easier to create Deep Learning (DL) models that can adhere to the "write once,
run anywhere" mantra that enables your AI solutions to easily go from concept to
production to scale.
.. _ngraph_home:
.. only:: release
nGraph Compiler stack documentation for version |version|.
Documentation for the latest (master) development branch can be found
at https://ngraph.nervanasys.com/docs/latest
.. https://docs.ngraph.ai/
.. only:: (development or daily)
nGraph Compiler stack documentation for the master tree under development
(version |version|).
For information about the releases, see the :doc:`../project/release-notes`.
The nGraph Library and Compiler stack are provided under the `Apache 2.0 license`_
(found in the LICENSE file in the project's `GitHub repo`_). It may also import
or reference packages, scripts, and other files that carry their own licensing.
.. _Apache 2.0 license: https://github.com/NervanaSystems/ngraph/blob/master/LICENSE
.. _GitHub repo: https://github.com/NervanaSystems/ngraph
Frameworks using nGraph to execute workloads have shown `up to 45X`_ performance
boost compared to native implementations. For a high-level overview, see the
:doc:`project/introduction` and our latest :doc:`project/release-notes`.
.. toctree::
:maxdepth: 1
:caption: Connecting Frameworks
:caption: Getting Started
frameworks/index.rst
frameworks/validated/list.rst
......@@ -57,6 +63,7 @@ boost compared to native implementations. For a high-level overview, see the
core/constructing-graphs/index.rst
core/passes/passes.rst
.. toctree::
:maxdepth: 1
:caption: nGraph Python API
......@@ -66,10 +73,10 @@ boost compared to native implementations. For a high-level overview, see the
.. toctree::
:maxdepth: 1
:caption: Backend Support
:caption: Backend Developers
backend-support/index.rst
backend-support/cpp-api.rst
backends/index.rst
backends/cpp-api.rst
.. toctree::
......@@ -91,14 +98,10 @@ boost compared to native implementations. For a high-level overview, see the
project/extras.rst
glossary.rst
.. only:: html
Indices and tables
==================
* :ref:`search`
* :ref:`genindex`
.. nGraph: https://www.ngraph.ai
.. _up to 45X: https://ai.intel.com/ngraph-compiler-stack-beta-release/
\ No newline at end of file
......@@ -20,22 +20,26 @@ Contributing to documentation
.. important:: Read this for changes affecting **anything** in ``ngraph/doc``
For updates to the Intel® nGraph Library ``/doc`` repo, please submit a PR with
For updates to the nGraph Library ``/doc`` repo, please submit a PR with
any changes or ideas you'd like integrated. This helps us maintain trackability
with respect to additions or feature requests.
with respect to changes made, additions, deletions, and feature requests.
If you prefer to use a containerized application, like Jupyter\* notebooks,
Google Docs\*, or MS Word\* to explain, write, or share documentation contributions,
you can convert the ``doc/sphinx/source/*.rst`` files to another format with a tool
like ``pypandoc`` and share a link to your docs on our `wiki`_.
Google Docs\*, the GitHub\* GUI, or MS Word\* to explain, write, or share
documentation contributions, you can convert the ``doc/sphinx/source/*.rst``
files to another format with a tool like ``pypandoc`` and share a link
to your efforts on our `wiki`_.
Another option is to fork the `ngraph repo`_, essentially snapshotting it at
that point in time, and to build a Jupyter\* notebook or other set of docs around
it for a specific use case; then share a link with the community on our wiki.
it for a specific use case. Add a note on our wiki to show us what you
did; new and novel applications may have their projects highlighted on an
upcoming `ngraph.ai`_ release.
.. note:: Please do not submit Jupyter* notebook code to the Intel nGraph library
.. note:: Please do not submit Jupyter\* notebook code to the nGraph Library
or core repos; best practice is to maintain any project-specific examples,
tests, or walk-throughs separately.
tests, or walk-throughs in a separate repository.
Documenting source code examples
......@@ -43,7 +47,7 @@ Documenting source code examples
When **verbosely** documenting functionality of specific sections of code -- whether
they are entire code blocks within a file, or code strings that are **outside**
the Intel nGraph `documentation repo`_, here is an example of best practice:
the nGraph Library's `documentation repo`_ -- here is an example of best practice:
Say a file has some interesting functionality that could benefit from more
explanation about one or more of the pieces in context. To keep the "in context"
......@@ -68,7 +72,6 @@ the code. The trick here is to add the file you want to reference relative to
the folder containing the ``Makefile`` that generates the documentation you're
writing.
See the **note** at the bottom of this page for more detail about how
this works in the current |version| version of Intel nGraph library
documentation.
......@@ -136,7 +139,7 @@ To build documentation locally, run:
Then point your browser at ``localhost:8000``.
To build documentation in a python3 virtualenv, run:
To build documentation in a python3 virtualenv, try:
.. code-block:: console
......@@ -159,6 +162,7 @@ For tips on writing reStructuredText-formatted documentation, see the `sphinx`_
stable reST documentation.
.. _ngraph repo: https://github.com/NervanaSystems/ngraph/
.. _ngraph.ai: https://www.ngraph.ai
.. _documentation repo: https://github.com/NervanaSystems/ngraph/tree/master/doc
.. _sphinx: http://www.sphinx-doc.org/en/stable/rest.html
.. _wiki: https://github.com/NervanaSystems/ngraph/wiki/
......
.. introduction:
.. project/introduction.rst:
############
Introduction
############
#######
Summary
#######
nGraph is an open-source graph compiler for :abbr:`Artificial Neural Networks (ANNs)`.
The nGraph Compiler stack provides an inherently efficient graph-based compilation
infrastructure designed to be compatible with many upcoming
:abbr:`Application-Specific Integrated Circuits (ASICs)`, like the Intel® Nervana™
Neural Network Processor (Intel® Nervana™ NNP), while also unlocking a massive
performance boost on any existing hardware targets for your neural network: both
GPUs and CPUs. Using its flexible infrastructure, you will find it becomes much
easier to create Deep Learning (DL) models that can adhere to the "write once,
run anywhere" mantra that enables your AI solutions to easily go from concept to
production to scale.
Frameworks using nGraph to execute workloads have shown `up to 45X`_ performance
boost compared to native implementations.
For a detailed overview, see below; for a more historical perspective, see
our `arXiv`_ paper.
Motivations
===========
Developers working to craft solutions with :abbr:`Artificial Intelligence (AI)`
face a steep learning curve in taking their concepts from design to
......@@ -49,10 +68,6 @@ larger datasets, data that must be encrypted, and data-in-motion. Our solution
also addresses the scalability issue with kernel libraries, the current
popular solution to accelerating deep learning performance.
Motivations
===========
The current state-of-the-art software solution for speeding up deep learning
computation is to integrate kernel libraries like Intel® Math Kernel Library
for Deep Neural Networks (Intel® MKL DNN) and Nvidia\*'s CuDNN into deep
......@@ -151,7 +166,7 @@ final product of required kernels is increasing exponentially.
PlaidML addresses the kernel explosion problem in a manner that lifts a heavy
burden off kernel developers. It automatically lowers networks from nGraph
into Tile, a :abbr:Domain-Specific Language (DSL) designed for deep learning
into Tile, a :abbr:`Domain-Specific Language (DSL)` designed for deep learning
that allows developers to express how an operation should calculate tensors in
an intuitive, mathematical form via `Stripe`_. Integration of PlaidML with
nGraph means extra flexibility to support newer deep learning models in the
......@@ -187,6 +202,7 @@ will make life easier for many kinds of developers:
#. New DL accelerator developers creating an end-to-end software stack from
a deep learning framework to their silicon.
.. _arXiv: https://arxiv.org/abs/1801.08058
.. _up to 45X: https://ai.intel.com/ngraph-compiler-stack-beta-release/
.. _more transistors on denser and denser circuits: https://www.intel.com/content/www/us/en/silicon-innovations/moores-law-technology.html
.. _Stripe: https://arxiv.org/abs/1903.06498
.. ngraph/release-notes:
.. project/release-notes.rst:
Release Notes
#############
|version|
nGraph is provided as source code, APIs, build scripts, and some binary formats
for various Compiler stack configurations and use cases.
This page includes additional documentation updates.
We are pleased to announce the release of version |version|-doc.
0.21-doc
--------
Documentation updates
~~~~~~~~~~~~~~~~~~~~~
Summary of documentation-related changes:
+ Updated :doc:`doc-contributor-README` for new community-based contributions.
+ Added instructions on how to test or display the installed nGraph version.
+ Added instructions on building the nGraph bridge (ngraph-bridge).
+ Updated the Backend Developer Guides and ToC structure.
+ Tested the documentation build on Clear Linux OS; it works.
+ Fixed a few links and redirects affected by filename changes.
+ Made some coding adjustments for options to render math symbols, so they can be
  documented more clearly and without excessive JS (see replacements.txt).
+ Used consistent file naming on all backend indexes.
+ Removed the deprecated Tensor API.
Core updates for |version|
~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ The offset argument in tensor reads and writes has been removed
+ Save/load API
+ More ONNX ops
+ Better tensor creation
+ More shape support
+ Provenance improvements
+ The offset argument for tensor creation is deprecated
+ Static linking support
For downloads formatted as ``.zip`` and ``tar.gz``, see https://github.com/NervanaSystems/ngraph/releases;
.. important:: Pre-releases (``-rc-0.*``) have newer features and are less stable.
Changelog
=========
Changelog on Previous Releases
==============================
For downloads formatted as ``.zip`` and ``tar.gz``, see
https://github.com/NervanaSystems/ngraph/releases.
0.20
----
+ Save/load API
+ More ONNX ops
+ Better tensor creation
+ More shape support
+ Provenance improvements
0.19
----
......@@ -58,7 +109,8 @@ Changelog
+ Add code generation for some quantized ops
+ Preliminary dynamic shape support
+ Initial distributed ops
+ Pad op takes CoordinateDiff instead of Shape pad values to allow for negative padding.
+ Pad op takes CoordinateDiff instead of Shape pad values to allow for negative
padding.
0.16
......@@ -77,6 +129,7 @@ Changelog
+ Concat optimization
+ Doc updates
.. _0.20.0-rc.0: https://github.com/NervanaSystems/ngraph/releases/tag/v0.20.0-rc.0
.. _0.19.0-rc.2: https://github.com/NervanaSystems/ngraph/releases/tag/v0.19.0-rc.2
.. _0.18.1: https://github.com/NervanaSystems/ngraph/releases/tag/v0.18.1
.. _0.17.0-rc.1: https://github.com/NervanaSystems/ngraph/releases/tag/v0.17.0-rc.1
......
.. |codename| replace:: nGraph
.. |project| replace:: nGraph Compiler stack
.. |Intel| replace:: Intel®
.. Replacement strings for commonly-used math symbols; renders in default font
.. |lcsigma| unicode:: U+03C3 .. sigma σ
.. |sigma| unicode:: U+03A3 .. Sigma Σ
.. |lcdelta| unicode:: U+03B4 .. delta δ
.. |delta| unicode:: U+0394 .. Delta Δ
.. |lcgamma| unicode:: U+03B3 .. gamma γ
.. |gamma| unicode:: U+0393 .. Gamma Γ
.. |lcphi| unicode:: U+03C6 .. phi φ
.. |sphi| unicode:: U+03D5 .. ϕ
.. |phi| unicode:: U+03A6 .. Phi Φ
.. |lcpi| unicode:: U+03C0 .. pi π
.. |pi| unicode:: U+03A0 .. Pi Π
.. |lctheta| unicode:: U+03B8 .. theta θ
.. |theta| unicode:: U+03F4 .. Theta ϴ
.. |lclambda| unicode:: U+03BB .. lambda λ
.. |lambda| unicode:: U+039B .. Lambda Λ
.. |vmlt| unicode:: U+22D8 .. very much less than ⋘
.. |vmgt| unicode:: U+22D9 .. very much greater than ⋙
.. Miscellaneous IP-related substitutions
.. |trade| unicode:: U+02122 .. TRADEMARK SIGN
:ltrim:
.. |reg| unicode:: U+000AE .. REGISTERED TRADEMARK SIGN
:ltrim:
.. |copy| unicode:: U+000A9 .. COPYRIGHT SIGN
:ltrim:
.. Default substitutions
.. |bullet| unicode:: U+2022 .. BULLET
.. |bulletop| unicode:: U+2219 .. BULLET OPERATOR
.. |micro| unicode:: U+000B5 .. MICRO SIGN
:rtrim:
.. |sup2| unicode:: U+00B2 .. SUPERSCRIPT TWO
:ltrim:
.. |deg| unicode:: U+000B0 .. DEGREE SIGN
:ltrim:
.. |plusminus| unicode:: U+000B1 .. PLUS-MINUS SIGN
:rtrim:
\ No newline at end of file
:orphan:
.. _sitemap.rst:
.. toctree::
:includehidden:
:caption: Sitemap
:maxdepth: 1
index.rst
frameworks/index
python_api/index
inspection/index
core/overview
backends/index
project/index
......@@ -2,26 +2,27 @@
## Building nGraph Python Wheels
[nGraph's build instructions][ngraph_build] give detailed instructions on building nGraph on different operating systems. Please make sure you specify the options `-DNGRAPH_PYTHON_BUILD_ENABLE=ON` and `-DNGRAPH_ONNX_IMPORT_ENABLE=ON` when building nGraph. Use the `make python_wheel` command to build nGraph and create Python packages.
If you want to try a newer version of nGraph's Python API than is available from
PyPI, you can build your own latest version from the source code. This
process is very similar to what is outlined in our [ngraph_build] instructions
with two important differences:
Basic build procedure on an Ubuntu system:
1. You must specify: `-DNGRAPH_PYTHON_BUILD_ENABLE=ON` and `-DNGRAPH_ONNX_IMPORT_ENABLE=ON`
when running `cmake`.
# apt-get install build-essential cmake clang-3.9 clang-format-3.9 git curl zlib1g zlib1g-dev libtinfo-dev unzip autoconf automake libtool
# apt-get install python3 python3-dev python python-dev python-virtualenv
2. Instead of running `make`, use the command `make python_wheel`.
$ git clone https://github.com/NervanaSystems/ngraph.git
$ cd ngraph/
$ mkdir build
$ cd build/
$ cmake ../ -DNGRAPH_PYTHON_BUILD_ENABLE=ON -DNGRAPH_ONNX_IMPORT_ENABLE=ON -DNGRAPH_USE_PREBUILT_LLVM=ON
$ make python_wheel
`$ cmake ../ -DNGRAPH_PYTHON_BUILD_ENABLE=ON -DNGRAPH_ONNX_IMPORT_ENABLE=ON -DNGRAPH_USE_PREBUILT_LLVM=ON`
After this procedure completes, the `ngraph/build/python/dist` directory should contain Python packages.
`$ make python_wheel`
After this procedure completes, the `ngraph/build/python/dist` directory should
contain the Python packages of the version you cloned. For example, if you
checked out and built `0.21`, you may see something like:
$ ls python/dist/
ngraph-core-0.10.0.tar.gz
ngraph_core-0.10.0-cp27-cp27mu-linux_x86_64.whl
ngraph_core-0.10.0-cp35-cp35m-linux_x86_64.whl
ngraph-core-0.21.0rc0.tar.gz
ngraph_core-0.21.0rc0-cp37-cp37m-linux_x86_64.whl
### Using a virtualenv (optional)
......@@ -35,7 +36,7 @@ You may wish to use a virutualenv for your installation.
You may wish to use a virtualenv for your installation.
(venv) $ pip install ngraph/build/python/dist/ngraph_core-0.10.0-cp35-cp35m-linux_x86_64.whl
(venv) $ pip install ngraph/build/python/dist/ngraph_core-0.21.0rc0-cp37-cp37m-linux_x86_64.whl
## Running tests
......