Commit 2522ae5e authored by Jayaram Bobba's avatar Jayaram Bobba

Merge remote-tracking branch 'origin/master' into jbobba/batchnorm-layout

parents d7bd9bb1 2feefb92
# Environment to build and unit-test private-ngraph-cpp on centos74
# with gcc 4.8.5
# with python 2.7
# with cmake3
# LLVM/clang will be built from source
FROM centos:7

# Added install for perl Data::Dumper to avoid a compile error.
# BUGFIX: 'libtinfo-dev' (removed from this list) is a Debian/Ubuntu package
# name that does not exist in the CentOS repositories; the terminfo headers
# and libraries are provided by 'ncurses-devel', which is installed below.
RUN yum -y update && \
    yum -y --enablerepo=extras install epel-release && \
    yum -y install \
        gcc gcc-c++ \
        cmake3 make \
        git \
        wget patch diffutils zlib-devel ncurses-devel \
        python python-devel python-setuptools \
        doxygen \
        which \
        'perl(Data::Dumper)'

# CentOS installs cmake3 under the name 'cmake3'; the build expects 'cmake'
RUN ln -s /usr/bin/cmake3 /usr/bin/cmake

# Sanity-check the toolchain (also records versions in the build log)
RUN cmake --version
RUN make --version
RUN gcc --version
RUN c++ --version

# pip is not packaged for python 2.7 on centos7 base; bootstrap via easy_install
RUN easy_install pip
RUN pip install virtualenv

# Install some pip packages
RUN pip install numpy

# need to use sphinx version 1.6 to build docs
# installing with the distro python-sphinx package gives sphinx 1.3.6 only
# installed sphinx with pip to get the updated version 1.6.5
# allows for make html build under the doc/source directory as an interim build process
RUN pip install sphinx

# breathe package required to build documentation (doxygen XML -> sphinx)
RUN pip install breathe

WORKDIR /home
# Build and unit-test environment for private-ngraph-cpp (clang on Ubuntu 16.04)
FROM ubuntu:16.04

# Core toolchain, clang 3.9 (compiler plus clang-format), and doc tooling
RUN apt-get update && apt-get install -y \
    build-essential cmake \
    clang-3.9 clang-format-3.9 \
    git \
    wget patch diffutils zlib1g-dev libtinfo-dev \
    doxygen python-pip

# Drop the apt caches to keep the image lean
RUN apt-get clean autoclean && \
    apt-get autoremove -y

RUN pip install --upgrade pip

# The documentation build needs sphinx >= 1.6; Ubuntu's python-sphinx package
# only ships 1.3.6, so the newer release (1.6.5) is pulled from pip instead.
# This allows 'make html' under the doc/source directory as an interim build
# process.
RUN pip install sphinx
# breathe bridges the doxygen XML output into sphinx
RUN pip install breathe

WORKDIR /home
# Environment to build and unit-test private-ngraph-cpp with gcc 4.8
FROM ubuntu:16.04

RUN apt-get update && apt-get install -y \
    build-essential cmake \
    git \
    wget patch diffutils zlib1g-dev libtinfo-dev \
    doxygen python-pip

# Report the default compilers before switching (|| true: may not change exit)
RUN which gcc && gcc --version || true
RUN which c++ && c++ --version || true

# BUGFIX: the C++ compiler package is 'g++-4.8' — 'gcc++-4.8' does not exist,
# which made this apt-get install (and therefore the image build) fail.
RUN apt-get install -y gcc-4.8 g++-4.8

RUN which gcc-4.8 && gcc-4.8 --version || true
RUN which g++-4.8 && g++-4.8 --version || true

# Make gcc-4.8 / g++-4.8 the default 'gcc' and 'c++' drivers.
# BUGFIX: the C++ driver is 'c++' (or 'g++'), not 'cc++', and the link source
# must be the real binary /usr/bin/g++-4.8.
RUN ln -s /usr/bin/gcc-4.8 /usr/bin/gcc || true
RUN ln -s /usr/bin/g++-4.8 /usr/bin/c++ || true

RUN which gcc && gcc --version || true
RUN which c++ && c++ --version || true

#RUN apt-get clean autoclean && \
#    apt-get autoremove -y

RUN pip install --upgrade pip

# need to use sphinx version 1.6 to build docs
# installing with apt-get install python-sphinx installs sphinx version 1.3.6 only
# added install for python-pip above and
# installed sphinx with pip to get the updated version 1.6.5
# allows for make html build under the doc/source directory as an interim build process
RUN pip install sphinx
# breathe package required to build documentation
RUN pip install breathe

WORKDIR /home
# Basic Makefile for contrib/docker. This can be expanded later as more targets
# are added.
# Default is to build with -j for parallel builds. Turn off with
# make PARALLEL=
# Default is to build with -j for parallel cmake/make. Turn off with
# make PARALLEL=
PARALLEL=-j
# DIR is an internal variable that serves as an anchor to this cloned git
......@@ -28,13 +28,27 @@ DBUILD_DIR = ${DIR}/contrib/docker/.build-${DBUILD_VERSION}
# Enable additional options to be added on the command line
ifndef CMAKE_OPTIONS_EXTRA
CMAKE_OPTIONS_EXTRA = ""
CMAKE_OPTIONS_EXTRA=
endif
# OS set to 'ubuntu1604' by default
# can be overridden on the command line with 'make <target> OS=centos74'
ifndef OS
OS="ubuntu1604"
endif
ifeq ("$(shell echo ${OS} | grep centos)","centos74")
RUN_AS_USER_SCRIPT=${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_centos_user.sh
DOCKERFILE=Dockerfile.ngraph_cpp.centos74_cmake3
else
DOCKERFILE ?= "Dockerfile.ngraph_cpp"
RUN_AS_USER_SCRIPT ?= ${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_ubuntu_user.sh
endif
# For gcc builds, we do NOT regard warnings as errors
# For clang builds, we DO make warnings into errors
CMAKE_OPTIONS_COMMON=-DNGRAPH_BUILD_DOXYGEN_DOCS=ON -DNGRAPH_BUILD_SPHINX_DOCS=ON -DCMAKE_BUILD_TYPE=RelWithDebInfo $(CMAKE_OPTIONS_EXTRA)
CMAKE_OPTIONS_GCC=$(CMAKE_OPTIONS_COMMON) -DNGRAPH_INSTALL_PREFIX=${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-GCC/ngraph_dist -DNGRAPH_USE_PREBUILT_LLVM=TRUE
CMAKE_OPTIONS_GCC=$(CMAKE_OPTIONS_COMMON) -DNGRAPH_INSTALL_PREFIX=${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-GCC/ngraph_dist
# BUGFIX: was '$(MAKE_OPTIONS_COMMON)-DNGRAPH_INSTALL_PREFIX=...' —
# MAKE_OPTIONS_COMMON is a typo for CMAKE_OPTIONS_COMMON; the undefined
# variable expanded to empty, silently dropping all common cmake options
# (doxygen/sphinx docs, build type, extras) from clang builds. Also adds the
# missing space separator after the variable reference.
CMAKE_OPTIONS_CLANG=$(CMAKE_OPTIONS_COMMON) -DNGRAPH_INSTALL_PREFIX=${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-CLANG/ngraph_dist -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 -DNGRAPH_WARNINGS_AS_ERRORS=ON -DNGRAPH_USE_PREBUILT_LLVM=TRUE
CALLER_UID := $(shell id -u)
......@@ -66,9 +80,12 @@ all: check_gcc check_clang
# Docker actions
expand_dockerfile_templates:
@echo "OS=${OS}"
@echo "DOCKERFILE=${DOCKERFILE}"
@echo "RUN_AS_USER_SCRIPT=${RUN_AS_USER_SCRIPT}"
cd "${DIR}"/contrib/docker
mkdir "${DBUILD_DIR}" || true
sed -e 's/\(FROM ngraph.*\)/\1:${DBUILD_VERSION}/' Dockerfile.ngraph_cpp > "${DBUILD_DIR}"/Dockerfile.build_ngraph_cpp
sed -e 's/\(FROM ngraph.*\)/\1:${DBUILD_VERSION}/' ${DOCKERFILE} > "${DBUILD_DIR}"/Dockerfile.build_ngraph_cpp
build_docker_image: expand_dockerfile_templates
$(DOCKER_BUILD) -f="${DBUILD_DIR}"/Dockerfile.build_ngraph_cpp --build-arg python_version="${PYTHON_VERSION}" -t=build_ngraph_cpp:"${DBUILD_VERSION}" .
......@@ -85,11 +102,11 @@ sphinx_doc: build_docker_image
# sphinx html docs build
docker run --rm --tty \
${VOLUME} \
${DOCKER_RUN_ENV} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/doc/sphinx; env VERBOSE=1 make html 2>&1 | tee make_sphinx_html.log" \
"build_ngraph_cpp:${DBUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}"
# Build
build_all: build_gcc build_clang
......@@ -105,11 +122,11 @@ build_gcc: build_docker_image
${DOCKER_RUN_ENV} \
--env GTEST_OUTPUT="xml:${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-GCC/unit-test-results.xml" \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-GCC; cmake ${CMAKE_OPTIONS_GCC} .. 2>&1 | tee cmake_gcc.log ; env VERBOSE=1 make ${PARALLEL} 2>&1 | tee make_gcc.log" \
--env RUN_CMD="set -x; set -e ; set -o pipefail ; if [ -f "/etc/centos-release" ]; then cat /etc/centos-release; fi; if [ -f "/etc/lsb-release" ]; then cat /etc/lsb-release; fi; uname -a ; cat /etc/os-release || true; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-GCC; cmake ${CMAKE_OPTIONS_GCC} .. 2>&1 | tee cmake_gcc.log ; env VERBOSE=1 make ${PARALLEL} 2>&1 | tee make_gcc.log" \
"build_ngraph_cpp:${DBUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}"
build_clang:
build_clang: build_docker_image
# Remove old distribution directory if present
( test -d "${DIR}"/BUILD-CLANG/ngraph_dist && rm -fr "${DIR}"/BUILD-CLANG/ngraph_dist && echo "Removed old ${DIR}/BUILD-CLANG/ngraph_dist directory" ) || echo "Previous ngraph_dist directory not found"
# Make BUILD-CLANG directory as user
......@@ -120,9 +137,9 @@ build_clang:
${DOCKER_RUN_ENV} \
--env GTEST_OUTPUT="xml:${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-CLANG/unit-test-results.xml" \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-CLANG; cmake ${CMAKE_OPTIONS_CLANG} .. 2>&1 | tee cmake_clang.log ; env VERBOSE=1 make ${PARALLEL} 2>&1 | tee make_clang.log" \
--env RUN_CMD="set -e ; set -o pipefail ; if [ -f "/etc/centos-release" ]; then cat /etc/centos-release; fi; if [ -f "/etc/lsb-release" ]; then cat /etc/lsb-release; fi; uname -a ; cat /etc/os-release || true; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-CLANG; cmake ${CMAKE_OPTIONS_CLANG} .. 2>&1 | tee cmake_clang.log ; env VERBOSE=1 make ${PARALLEL} 2>&1 | tee make_clang.log" \
"build_ngraph_cpp:${DBUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}"
# Check (run unit-tests)
......@@ -134,9 +151,9 @@ check_gcc: build_gcc
${DOCKER_RUN_ENV} \
--env GTEST_OUTPUT="xml:${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-GCC/unit-test-results.xml" \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-GCC; env VERBOSE=1 make check 2>&1 | tee make_check_gcc.log ; sed -E -e 's/classname\=\"[a-zA-Z0-9_]+/&1_gcc/' unit-test-results.xml > unit-test-results-gcc.xml" \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-GCC; env VERBOSE=1 make unit-test-check 2>&1 | tee make_check_gcc.log ; sed -E -e 's/classname\=\"[a-zA-Z0-9_]+/&1_gcc/' unit-test-results.xml > unit-test-results-gcc.xml" \
"build_ngraph_cpp:${DBUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}"
check_clang: build_clang
docker run --rm --tty \
......@@ -146,7 +163,17 @@ check_clang: build_clang
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-CLANG; env VERBOSE=1 make check 2>&1 | tee make_check_clang.log ; sed -E -e 's/classname\=\"[a-zA-Z0-9_]+/&1_clang/' unit-test-results.xml > unit-test-results-clang.xml" \
"build_ngraph_cpp:${DBUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}"
style_clang: build_clang
docker run --rm --tty \
${VOLUME} \
${DOCKER_RUN_ENV} \
--env GTEST_OUTPUT="xml:${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-CLANG/unit-test-results.xml" \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-CLANG; env VERBOSE=1 make style-check 2>&1 | tee make_style_check_clang.log" \
"build_ngraph_cpp:${DBUILD_VERSION}" \
sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}"
# Install
......@@ -160,7 +187,7 @@ install_gcc: check_gcc
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-GCC ; test -d ngraph_dist && rm -fr ngraph_dist && echo 'Removed old ngraph_dist directory' ; make install 2>&1 | tee make_install_gcc.log ; tar czf ngraph_dist_gcc.tgz ngraph_dist 2>&1 | tee make_tarball_gcc.log" \
"build_ngraph_cpp:${DBUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}"
install_clang: check_clang
# Puts ngraph_dist in BUILD-CLANG directory. This is used by Jenkins ngraph-tensorflow batch job.
......@@ -170,7 +197,7 @@ install_clang: check_clang
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD-CLANG ; test -d ngraph_dist && rm -fr ngraph_dist && echo 'Removed old ngraph_dist directory' ; make install 2>&1 | tee make_install_clang.log ; tar czf ngraph_dist_clang.tgz ngraph_dist 2>&1 | tee make_tarball_clang.log" \
"build_ngraph_cpp:${DBUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}"
# Interactive shell
......@@ -181,7 +208,7 @@ shell: build_docker_image
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
"build_ngraph_cpp:${DBUILD_VERSION}" \
sh -c "cd ${DOCKUSER_HOME} ; ${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
sh -c "cd ${DOCKUSER_HOME}; ${RUN_AS_USER_SCRIPT}"
# Clean
......
#! /bin/bash

# This script is designed to simulate running as a user with a particular UID
# within a docker container.
#
# Normally a docker container runs as root, which can cause problems with file
# ownership when a host directory tree is mounted into the docker container.
# There are other problems with building and running software as root as
# well.  Good practice when validating software builds in a docker container
# is to run as a normal user, since many (most?) end users will not be building
# and installing software as root.
#
# This script should be run using "docker run", with RUN_UID (set to the user
# you want to run as) passed into the docker container as an environment
# variable.  The script will then add the UID as user "dockuser" to
# /etc/passwd (important for some software, like bazel), add the new dockuser
# to the wheel group (the CentOS administrators group), and su to a new shell
# as the dockuser (passing in the existing environment, which is important).
#
# If the environment variable RUN_CMD is passed into the docker container, then
# this script will use RUN_CMD as a command to run when su'ing.  If RUN_CMD is
# not defined, then /bin/bash will run, which effectively provides an
# interactive shell in the docker container, for debugging.

set -e  # Make sure we exit on any command that returns non-zero
set -u  # No unset variables

# BUGFIX: use ${RUN_UID:-} (default-to-empty) — with "set -u" in effect, a
# bare "$RUN_UID" would abort the script before this diagnostic could print
# when RUN_UID was never passed into the container.
if [ -z "${RUN_UID:-}" ] ; then

    # >&2 redirects echo output to stderr.
    # See: https://stackoverflow.com/questions/2990414/echo-that-outputs-to-stderr
    ( >&2 echo 'ERROR: Environment variable RUN_UID was not set when run-as-user.sh was run' )
    ( >&2 echo '       Running as default user (root, in docker)' )
    ( >&2 echo ' ' )

    exit 1

else

    # The username used in the docker container to map the caller UID to
    #
    # Note 'dockuser' is used in other scripts, notably Makefile.  If you
    # choose to change it here, then you need to change it in all other
    # scripts, or else the builds will break.
    #
    DOCK_USER='dockuser'

    # We will be su'ing using a non-login shell or command, and preserving
    # the environment.  This is done so that env. variables passed in with
    # "docker run --env ..." are honored.
    # Therefore, we need to reset at least HOME=/root ...
    #
    # Note also that /home/dockuser is used in other scripts, notably
    # Makefile.  If you choose to change it here, then you need to change it
    # in all other scripts, or else the builds will break.
    #
    export HOME="/home/${DOCK_USER}"

    # Make sure the home directory is owned by the new user
    if [ -d "${HOME}" ] ; then
        chown "${RUN_UID}" "${HOME}"
    fi

    # Add a user with UID of person running docker (in ${RUN_UID})
    # If $HOME does not yet exist, then it will be created
    adduser -c 'Docker-User' -u "${RUN_UID}" "${DOCK_USER}"
    passwd -d "${DOCK_USER}"

    # Add dockuser to the administrators group.  Sudo *is* used for installing
    # packages, so make sure dockuser can run sudo without requesting a
    # password.
    usermod -aG wheel "${DOCK_USER}"
    # BUGFIX: on CentOS the administrators group is 'wheel' (the group dockuser
    # was just added to above), not 'sudo', so the NOPASSWD rule must target
    # %wheel for passwordless sudo to actually take effect.
    echo '%wheel ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers

    if [ -z "${RUN_CMD+x}" ] ; then  # Launch a shell as dockuser
        echo 'Running interactive shell (/bin/bash) as dockuser'
        su -m "${DOCK_USER}" -c "/bin/bash"
    else  # Run command as dockuser
        echo "Running command [${RUN_CMD}] as dockuser"
        su -m "${DOCK_USER}" -c "${RUN_CMD}"
    fi

fi
#! /bin/bash

# This script is designed to simulate running as a user with a particular UID
# within a docker container.
#
# Normally a docker container runs as root, which can cause problems with file
# ownership when a host directory tree is mounted into the docker container.
# There are other problems with building and running software as root as
# well.  Good practice when validating software builds in a docker container
# is to run as a normal user, since many (most?) end users will not be building
# and installing software as root.
#
# This script should be run using "docker run", with RUN_UID (set to the user
# you want to run as) passed into the docker container as an environment
# variable.  The script will then add the UID as user "dockuser" to
# /etc/passwd (important for some software, like bazel), add the new dockuser
# to the sudo group (whether or not sudo is installed), and su to a new shell
# as the dockuser (passing in the existing environment, which is important).
#
# If the environment variable RUN_CMD is passed into the docker container, then
# this script will use RUN_CMD as a command to run when su'ing.  If RUN_CMD is
# not defined, then /bin/bash will run, which effectively provides an
# interactive shell in the docker container, for debugging.

set -e  # Make sure we exit on any command that returns non-zero
set -u  # No unset variables

# BUGFIX: use ${RUN_UID:-} (default-to-empty) — with "set -u" in effect, a
# bare "$RUN_UID" would abort the script before this diagnostic could print
# when RUN_UID was never passed into the container.
if [ -z "${RUN_UID:-}" ] ; then

    # >&2 redirects echo output to stderr.
    # See: https://stackoverflow.com/questions/2990414/echo-that-outputs-to-stderr
    ( >&2 echo 'ERROR: Environment variable RUN_UID was not set when run-as-user.sh was run' )
    ( >&2 echo '       Running as default user (root, in docker)' )
    ( >&2 echo ' ' )

    exit 1

else

    # The username used in the docker container to map the caller UID to
    #
    # Note 'dockuser' is used in other scripts, notably Makefile.  If you
    # choose to change it here, then you need to change it in all other
    # scripts, or else the builds will break.
    #
    DOCK_USER='dockuser'

    # We will be su'ing using a non-login shell or command, and preserving
    # the environment.  This is done so that env. variables passed in with
    # "docker run --env ..." are honored.
    # Therefore, we need to reset at least HOME=/root ...
    #
    # Note also that /home/dockuser is used in other scripts, notably
    # Makefile.  If you choose to change it here, then you need to change it
    # in all other scripts, or else the builds will break.
    #
    export HOME="/home/${DOCK_USER}"

    # Make sure the home directory is owned by the new user
    if [ -d "${HOME}" ] ; then
        chown "${RUN_UID}" "${HOME}"
    fi

    # Add a user with UID of person running docker (in ${RUN_UID})
    # If $HOME does not yet exist, then it will be created
    adduser --disabled-password --gecos 'Docker-User' -u "${RUN_UID}" "${DOCK_USER}"

    # Add dockuser to the sudo group
    adduser "${DOCK_USER}" sudo

    # If root access is needed in the docker image while running as a normal
    # user, uncomment this and add 'sudo' as a package installed in Dockerfile
    # echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers

    if [ -z "${RUN_CMD+x}" ] ; then  # Launch a shell as dockuser
        su -m "${DOCK_USER}" -c /bin/bash
    else  # Run command as dockuser
        su -m "${DOCK_USER}" -c "${RUN_CMD}"
    fi

fi
......@@ -349,7 +349,7 @@ TYPEDEF_HIDES_STRUCT = NO
# the optimal cache size from a speed point of view.
# Minimum value: 0, maximum value: 9, default value: 0.
LOOKUP_CACHE_SIZE = 0
LOOKUP_CACHE_SIZE = 3
#---------------------------------------------------------------------------
# Build related configuration options
......@@ -761,7 +761,7 @@ FILE_PATTERNS = *.c \
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = NO
RECURSIVE = YES
# The EXCLUDE tag can be used to specify files and/or directories that should be
# excluded from the INPUT source files. This way you can easily exclude a
......@@ -1975,7 +1975,7 @@ UML_LIMIT_NUM_FIELDS = 10
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
TEMPLATE_RELATIONS = YES
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
......@@ -2130,7 +2130,7 @@ DOT_TRANSPARENT = NO
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
DOT_MULTI_TARGETS = NO
DOT_MULTI_TARGETS = YES
# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
# explaining the meaning of the various boxes and arrows in the dot generated
......
This diff is collapsed.
......@@ -34,7 +34,6 @@ needs_sphinx = '1.6.5'
extensions = ['sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.autodoc',
'breathe'
]
......@@ -191,7 +190,7 @@ texinfo_documents = [
html_add_permalinks = ""
breathe_projects = {
"": "../../../doxygen/xml",
"ngraph": "../../doxygen/xml",
}
rst_epilog = u"""
......
......@@ -4,11 +4,14 @@
Abs
###
.. code-block:: cpp
Abs // Elementwise absolute value operation
Description
===========
Elementwise absolute value operation.
Produces a single output tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the absolute value of the
value at each ``arg`` coordinate.
......@@ -28,7 +31,7 @@ Outputs
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg``. |
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
......@@ -52,4 +55,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Abs
:project: ngraph
:members:
......@@ -4,14 +4,17 @@
Acos
####
.. code-block:: cpp
Acos // Elementwise acos operation
Description
===========
Elementwise acos operation.
Produces a tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the inverse cosine of the
value at the corresponding coordinate of ``arg`` .
Produces a tensor of the same element type and shape as ``arg``, where the
value at each coordinate of ``output`` is the inverse cosine of the value
at the corresponding coordinate of ``arg``.
Inputs
------
......@@ -28,7 +31,7 @@ Outputs
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg``. |
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
......@@ -51,4 +54,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Acos
:project: ngraph
:members:
......@@ -4,6 +4,11 @@
Add
###
.. code-block:: cpp
Add // Elementwise add operation
Description
===========
......@@ -11,7 +16,7 @@ Elementwise add operation.
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is the sum of the
value at the corresponding input coordinates.
values at the corresponding input coordinates.
Inputs
------
......@@ -54,4 +59,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Add
:project: ngraph
:members:
......@@ -4,14 +4,17 @@
Asin
####
.. code-block:: cpp
Asin // Elementwise asin operation
Description
===========
Elementwise asin operation.
Produces a tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the inverse sine of the
value at the corresponding coordinate of ``arg`` .
value at the corresponding coordinate of ``arg``.
Inputs
------
......@@ -50,4 +53,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Asin
:project: ngraph
:members:
......@@ -4,14 +4,17 @@
Atan
####
.. code-block:: cpp
Atan // Elementwise atan operation
Description
===========
Elementwise atan operation.
Produces a tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the inverse tangent of the
value at the corresponding coordinate of ``arg`` .
value at the corresponding coordinate of ``arg``.
Inputs
------
......@@ -28,7 +31,7 @@ Outputs
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg``. |
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
......@@ -52,4 +55,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Atan
:project: ngraph
:members:
......@@ -4,11 +4,14 @@
AvgPool
#######
.. code-block:: cpp
AvgPool // Average Pooling operation
Description
===========
Average Pooling operation.
Average pooling windows its input and produces an average for each window.
Inputs
......@@ -48,10 +51,10 @@ Outputs
+-----------------+-------------------------+--------------------------------+
Average pooling takes as its input a batch tensor `data` of shape
:math:`(N,C,d_1,\ldots,d_n)` where where :math:`N` is the batch
Average pooling takes as its input, a batch tensor `data` of shape
:math:`(N,C,d_1,\ldots,d_n)`, where :math:`N` is the batch
size, and :math:`C > 0` is the
number of channels (sometimes called features). The dimensions
number of channels (sometimes called features). The dimensions
:math:`(d_1,\ldots,d_n)` correspond to the shape of an
:math:`n`-dimensional data item in a batch. For example, where
:math:`n=2`, the data may represent a two-dimensional image. It also
......@@ -193,5 +196,6 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::AvgPool
:project: ngraph
:members:
......@@ -4,16 +4,23 @@
AvgPoolBackprop
###############
Average Pooling backprop operation.
.. code-block:: cpp
AvgPoolBackprop // Average Pooling backprop operation.
Description
===========
C++ Interface
=============
.. doxygenclass:: ngraph::op::AvgPoolBackprop
:project: ngraph
:members:
Python Interface
================
is not merged yet, but could go here!
......@@ -4,6 +4,11 @@
Broadcast
#########
.. code-block:: cpp
Broadcast // Operation that produces a tensor based on arg's axes
Description
===========
......@@ -38,7 +43,7 @@ Outputs
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``shape``. |
| ``output`` | Same as ``arg`` | Same as ``shape`` |
+-----------------+-------------------------+--------------------------------+
The shape of ``arg`` must match ``shape`` with elements in ``broadcast_axes`` removed.
......@@ -87,4 +92,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Broadcast
:project: ngraph
:members:
......@@ -4,11 +4,14 @@
Ceiling
#######
.. code-block:: cpp
Ceiling // Elementwise ceiling operation
Description
===========
Elementwise ceiling operation.
Produces a single output tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the ceiling of the
value at each ``arg`` coordinate.
......@@ -28,7 +31,7 @@ Outputs
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg``. |
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
......@@ -54,4 +57,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Ceiling
:project: ngraph
:members:
.. concatenate.rst:
.. concat.rst:
###########
Concatenate
###########
######
Concat
######
Description
===========
.. code-block:: cpp
Concat // Concatenation operation
Description
===========
Produces a single output tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the absolute value of the
......@@ -29,18 +32,18 @@ Attributes
+-------------------------+----------------------------------+
| Name | Notes |
+=========================+==================================+
| ``concatenation_axis`` | Less than the rank of the shape. |
| ``concatenation_axis`` | Less than the rank of the shape |
+-------------------------+----------------------------------+
Outputs
-------
+-----------------+-------------------------+---------------------------------------------------+
| Name | Element Type | Shape |
+=================+=========================+===================================================+
| ``output`` | Same as ``args` | Same as ``arg`` on non-``concatenation_axis`` |
| | | Sum of ``concatenation_axis`` lengths of ``args`` |
+-----------------+-------------------------+---------------------------------------------------+
+-----------------+-------------------------+----------------------------------------------------+
| Name | Element Type | Shape |
+=================+=========================+====================================================+
| ``output`` | Same as ``args`` | Same as ``arg`` on non-``concatenation_axis`` |
| | | Sum of ``concatenation_axis`` lengths of ``args`` |
+-----------------+-------------------------+----------------------------------------------------+
Mathematical Definition
......@@ -72,5 +75,6 @@ We slice the backprop value into the backprops associated with the inputs.
C++ Interface
=============
.. doxygenclass:: ngraph::op::Concatenate
.. doxygenclass:: ngraph::op::Concat
:project: ngraph
:members:
......@@ -4,11 +4,14 @@
Constant
########
.. code-block:: cpp
Constant // Literal constant tensor
Description
===========
Literal constant tensor.
The output is a tensor initialized from the ``values`` attribute.
Attributes
......@@ -18,13 +21,13 @@ Attributes
| Name | Type | Notes |
+=================+==============================+=======================================+
| ``type`` | ``ngraph::element::type`` | The element type of the value |
| | | in the computation. |
| | | in the computation |
+-----------------+------------------------------+---------------------------------------+
| ``shape`` | ``ngraph::Shape`` | The shape of the constant. |
| ``shape`` | ``ngraph::Shape`` | The shape of the constant |
+-----------------+------------------------------+---------------------------------------+
| ``values`` | ``const std::vector<T>&`` | Constant elements in row-major order. |
| | | T must be compatible with the element |
| | | type. |
| | | type |
+-----------------+------------------------------+---------------------------------------+
Outputs
......@@ -41,4 +44,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Constant
:project: ngraph
:members:
......@@ -4,11 +4,17 @@
Convert
#######
.. code-block:: cpp
Convert // Convert a tensor from one element type to another
Description
===========
Convert a tensor from one element type to another.
.. TODO
Long description
Inputs
------
......@@ -25,7 +31,7 @@ Attributes
+------------------+---------------------------+---------------------------------+
| Name | Type | Notes |
+==================+===========================+=================================+
| ``element_type`` | ``ngraph::element::type`` | The element type of the result. |
| ``element_type`` | ``ngraph::element::type`` | The element type of the result |
+------------------+---------------------------+---------------------------------+
Outputs
......@@ -34,7 +40,7 @@ Outputs
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | ``element_type`` | Same as ``arg``. |
| ``output`` | ``element_type`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
......@@ -50,4 +56,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Convert
:project: ngraph
:members:
......@@ -4,10 +4,19 @@
Convolution
###########
.. code-block:: cpp
Convolution // A batched convolution operation
Description
===========
A batched convolution operation.
.. TODO
Long description
Inputs
------
......@@ -23,24 +32,24 @@ Inputs
Attributes
----------
+-----------------------------+-----------------------------+---------------------------------------+
| Name | Type | Notes |
+=============================+=============================+=======================================+
| ``window_movement_strides`` | ``Strides[n]`` | How far to slide the window along |
| | | each axis at each step. |
+-----------------------------+-----------------------------+---------------------------------------+
| ``window_dilation_strides`` | ``Strides[n]`` | Per-axis dilation to apply to the |
| | | filters. |
+-----------------------------+-----------------------------+---------------------------------------+
| ``padding_below`` | ``Shape[n]`` | How many padding elements to add |
| | | below the 0-coordinate on each axis. |
+-----------------------------+-----------------------------+---------------------------------------+
| ``padding_above`` | ``Shape[n]`` | How manny padding elements to add |
| | | above the max-coordinate on each axis.|
+-----------------------------+-----------------------------+---------------------------------------+
| ``image_dilation_strides`` | ``Strides[n]`` | Per-axis dilation to apply to the |
| | | image batch. |
+-----------------------------+-----------------------------+---------------------------------------+
+-----------------------------+-----------------------------+----------------------------------------+
| Name | Type | Notes |
+=============================+=============================+========================================+
| ``window_movement_strides`` | ``Strides[n]`` | How far to slide the |
| | | window along each axis at each step |
+-----------------------------+-----------------------------+----------------------------------------+
| ``window_dilation_strides`` | ``Strides[n]`` | Per-axis dilation to apply to the |
| | | filters |
+-----------------------------+-----------------------------+----------------------------------------+
| ``padding_below`` | ``Shape[n]`` | How many padding elements to add |
| | | below the 0-coordinate on each axis |
+-----------------------------+-----------------------------+----------------------------------------+
| ``padding_above`` | ``Shape[n]`` | How many padding elements to add above |
| | | the max-coordinate on each axis |
+-----------------------------+-----------------------------+----------------------------------------+
| ``image_dilation_strides`` | ``Strides[n]`` | Per-axis dilation to apply to the |
| | | image batch |
+-----------------------------+-----------------------------+----------------------------------------+
Outputs
......@@ -101,10 +110,18 @@ such that
\mathit{Stride}[s](T)_{i_1,\dots,i_n} \triangleq T_{s_1i_1,\dots,s_ni_n}
:math:`s` specifies how far the window moves at each step, not the unit of distance.
Convolution
-----------
.. TODO
.. image possibly imported soon; they are not big files but they are svg
..
figure:: ../graphics/classngraph_1_1op_1_1Convolution__coll__graph_org.svg
:height: 500px
Padded, Dilated, Strided Convolution
------------------------------------
......@@ -121,8 +138,8 @@ Batched, Padded, Dilated, Strided Convolution
C++ Interface
=============
.. WIP
.. doxygenclass:: ngraph::op::Convolution
:project: ngraph
:members:
\ No newline at end of file
......@@ -4,11 +4,14 @@
Cos
###
.. code-block:: cpp
Cos // Elementwise cosine operation
Description
===========
Elementwise cosine operation.
Produces a tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the cosine of the
value at the corresponding coordinate of ``arg``.
......@@ -28,7 +31,7 @@ Outputs
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg``. |
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
......@@ -52,4 +55,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Cos
:project: ngraph
:members:
......@@ -4,11 +4,14 @@
Cosh
####
.. code-block:: cpp
Cosh // Elementwise hyperbolic cosine operation
Description
===========
Elementwise hyperbolic cosine operation.
Produces a tensor of the same element type and shape as ``arg``, where
the value at each coordinate of ``output`` is the hyperbolic cosine of
the value at the corresponding coordinate of ``arg``.
......@@ -28,7 +31,7 @@ Outputs
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg``. |
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
......@@ -52,4 +55,5 @@ C++ Interface
=============
.. doxygenclass:: ngraph::op::Cosh
:project: ngraph
:members:
.. divide.rst:
######
Divide
######
.. code-block:: cpp
Divide // Elementwise divide operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is the quotient of the
values at the corresponding input coordinates.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \frac{\texttt{arg0}_{i_0, \ldots, i_{n-1}}}{\texttt{arg1}_{i_0, \ldots, i_{n-1}}}
Backprop
========
.. math::
\overline{\texttt{arg0}} &\leftarrow \frac{\Delta}{\texttt{arg1}}\\
\overline{\texttt{arg1}} &\leftarrow -\Delta \frac{\texttt{Output}}{\texttt{arg1}}
C++ Interface
=============
.. doxygenclass:: ngraph::op::Divide
:project: ngraph
:members:
.. dot.rst:
###
Dot
###
.. code-block:: cpp
Dot // Generalized dot product operation
Description
===========
Generalized dot product operation, including scalar-tensor product,
matrix-vector product, and matrix multiplication.
A few common cases are as follows:
* If :math:`m = 0` and :math:`n = 1` or :math:`p = 1`, the operation
is a scalar-tensor product.
* If :math:`m = 1`, :math:`n = 2`, and :math:`p = 1`, the operation is
a matrix-vector product.
* If :math:`m = 1` and :math:`n = p = 2`, the operation is a matrix
multiplication.
Inputs
------
+-----------------+-------------------------+-----------------------------------------+
| Name | Element Type | Shape |
+=================+=========================+=========================================+
| ``arg0`` | any | :math:`(i_1,\dots,i_n,j_1,\dots,j_m)` |
+-----------------+-------------------------+-----------------------------------------+
| ``arg1`` | same as ``arg0`` | :math:`(j_1,\ldots,j_m,k_1,\dots,k_p)` |
+-----------------+-------------------------+-----------------------------------------+
Attributes
----------
+------------------------+---------------+--------------------------------------------------+
| Name                   | Type          | Notes                                            |
+========================+===============+==================================================+
| reduction_axes_count | ``size_t`` | The number of axes to reduce through dot-product |
| | | (corresponds to :math:`m` in the formulas above) |
+------------------------+---------------+--------------------------------------------------+
Outputs
-------
+-----------------+-------------------------+----------------------------------------+
| Name | Element Type | Shape |
+=================+=========================+========================================+
| ``output`` | same as ``arg0`` | :math:`(i_1,\ldots,i_n,k_1,\dots,k_p)` |
+-----------------+-------------------------+----------------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_1,\dots,i_n,k_1,\ldots,k_p} =
\begin{cases}
\texttt{arg0}_{i_1,\dots,i_n} \cdot
\texttt{arg1}_{k_1,\dots,k_p}&\text{if }m=0,\\
\sum_{j_1, \ldots, j_m}
\texttt{arg0}_{i_1,\dots,i_n,j_1,\dots,j_m}
\cdot
\texttt{arg1}_{j_1,\ldots,j_m,k_1,\ldots,k_p}
&\text{otherwise}.
\end{cases}
Backprop
========
To be documented.
C++ Interface
=============
.. doxygenclass:: ngraph::op::Dot
:project: ngraph
:members:
.. equal.rst:
#####
Equal
#####
.. code-block:: cpp
Equal // Elementwise equal operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is ``1`` (true) if
``arg0`` is equal to ``arg1``, ``0`` otherwise.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+------------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+==============================+================================+
| ``output`` | ``ngraph::element::boolean`` | same as ``arg0`` |
+-----------------+------------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \texttt{arg0}_{i_0, \ldots, i_{n-1}} == \texttt{arg1}_{i_0, \ldots, i_{n-1}}
C++ Interface
=============
.. doxygenclass:: ngraph::op::Equal
:project: ngraph
:members:
.. exp.rst:
###
Exp
###
.. code-block:: cpp
Exp // Elementwise exponential operation
Description
===========
Produces a tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the exponential of the
value at the corresponding coordinate of ``arg``.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg`` | Any | Any |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \exp(\texttt{arg}_{i_0, \ldots, i_{n-1}})
Backprop
========
.. math::
\overline{\texttt{arg}} \leftarrow \Delta\ \texttt{output}
C++ Interface
=============
.. doxygenclass:: ngraph::op::Exp
:project: ngraph
:members:
.. floor.rst:
#####
Floor
#####
.. code-block:: cpp
Floor // Elementwise floor operation
Description
===========
Produces a single output tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the floor of the
value at each ``arg`` coordinate.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg`` | Any | Any |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\mathtt{output}_{i_0, \ldots, i_{n-1}} = \lfloor \mathtt{arg}_{i_0,
\ldots, i_{n-1}}\rfloor
Backprop
========
Not defined by nGraph.
The backprop would be zero for non-integer
input and undefined for integer input; a zero backprop would have
no effect on the backprop to ``arg``, so there is no need for ``Floor``
to define a backprop.
C++ Interface
=============
.. doxygenclass:: ngraph::op::Floor
:project: ngraph
:members:
.. function_call.rst:
############
FunctionCall
############
.. code-block:: cpp
FunctionCall // Function call operation
Description
===========
Calls the specified function on ``args``. The results of the function are the outputs
of the op.
Inputs
------
+------------+--------------------+----------------------------------------------+
| Name | Type | |
+============+====================+==============================================+
| ``args`` | ``ngraph::Nodes`` | Element types and shapes must correspond to |
| | | the parameters of ``function`` |
+------------+--------------------+----------------------------------------------+
Attributes
----------
+----------------+---------------------------------------+
| Name | Type |
+================+=======================================+
| ``function`` | ``std::shared_ptr<ngraph::Function>`` |
+----------------+---------------------------------------+
Outputs
-------
One output for each function result.
C++ Interface
=============
.. doxygenclass:: ngraph::op::FunctionCall
:project: ngraph
:members:
.. greater.rst:
#######
Greater
#######
.. code-block:: cpp
Greater // Elementwise greater operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is true (1) if
``arg0`` is greater than ``arg1``, 0 otherwise.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+------------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+==============================+================================+
| ``output`` | ``ngraph::element::boolean`` | same as ``arg0`` |
+-----------------+------------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \texttt{arg0}_{i_0, \ldots, i_{n-1}} > \texttt{arg1}_{i_0, \ldots, i_{n-1}}
C++ Interface
=============
.. doxygenclass:: ngraph::op::Greater
:project: ngraph
:members:
.. greater_eq.rst:
#########
GreaterEq
#########
.. code-block:: cpp
GreaterEq // Elementwise greater or equal operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is true (1) if
``arg0`` is greater than or equal to ``arg1``, 0 otherwise.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+------------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+==============================+================================+
| ``output`` | ``ngraph::element::boolean`` | same as ``arg0`` |
+-----------------+------------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \texttt{arg0}_{i_0, \ldots, i_{n-1}} \ge \texttt{arg1}_{i_0, \ldots, i_{n-1}}
C++ Interface
=============
.. doxygenclass:: ngraph::op::GreaterEq
:project: ngraph
:members:
......@@ -57,10 +57,26 @@ Not currently a comprehensive list.
avg_pool_backprop.rst
broadcast.rst
ceiling.rst
concatenate.rst
concat.rst
constant.rst
convert.rst
convolution.rst
cos.rst
cosh.rst
divide.rst
dot.rst
equal.rst
exp.rst
floor.rst
function_call.rst
greater_eq.rst
greater.rst
less_eq.rst
less.rst
log.rst
maximum.rst
minimum.rst
multiply.rst
negative.rst
not_equal.rst
not.rst
.. less.rst:
####
Less
####
.. code-block:: cpp
Less // Elementwise less operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is true (1) if
``arg0`` is less than ``arg1``, 0 otherwise.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+------------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+==============================+================================+
| ``output`` | ``ngraph::element::boolean`` | same as ``arg0`` |
+-----------------+------------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \texttt{arg0}_{i_0, \ldots, i_{n-1}} < \texttt{arg1}_{i_0, \ldots, i_{n-1}}
C++ Interface
=============
.. doxygenclass:: ngraph::op::Less
:project: ngraph
:members:
.. less_eq.rst:
######
LessEq
######
.. code-block:: cpp
LessEq // Elementwise less or equal operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is true (1) if
``arg0`` is less than or equal to ``arg1``, 0 otherwise.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+------------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+==============================+================================+
| ``output`` | ``ngraph::element::boolean`` | same as ``arg0`` |
+-----------------+------------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \texttt{arg0}_{i_0, \ldots, i_{n-1}} \le \texttt{arg1}_{i_0, \ldots, i_{n-1}}
C++ Interface
=============
.. doxygenclass:: ngraph::op::LessEq
:project: ngraph
:members:
.. log.rst:
###
Log
###
.. code-block:: cpp
Log // Elementwise natural log operation
Description
===========
Produces a tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the natural logarithm of the
value at the corresponding coordinate of ``arg``.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg`` | Any | Any |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \log(\texttt{arg}_{i_0, \ldots, i_{n-1}})
Backprop
========
.. math::
\overline{\texttt{arg}} \leftarrow \frac{\Delta}{\texttt{arg}}
C++ Interface
=============
.. doxygenclass:: ngraph::op::Log
:project: ngraph
:members:
.. maximum.rst:
#######
Maximum
#######
.. code-block:: cpp
Maximum // Elementwise maximum operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is the maximum of the
values at the corresponding input coordinates.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \max(\texttt{arg0}_{i_0, \ldots, i_{n-1}}, \texttt{arg1}_{i_0, \ldots, i_{n-1}})
Backprop
========
.. math::
\overline{\texttt{arg0}} &\leftarrow \texttt{Greater}(\texttt{arg0}, \texttt{arg1})\ \Delta \\
\overline{\texttt{arg1}} &\leftarrow \texttt{Greater}(\texttt{arg1}, \texttt{arg0})\ \Delta
C++ Interface
=============
.. doxygenclass:: ngraph::op::Maximum
:project: ngraph
:members:
.. minimum.rst:
#######
Minimum
#######
.. code-block:: cpp
Minimum // Elementwise minimum operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is the minimum of the
values at the corresponding input coordinates.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \min(\texttt{arg0}_{i_0, \ldots, i_{n-1}}, \texttt{arg1}_{i_0, \ldots, i_{n-1}})
Backprop
========
.. math::
\overline{\texttt{arg0}} &\leftarrow \texttt{Less}(\texttt{arg0}, \texttt{arg1})\ \Delta \\
\overline{\texttt{arg1}} &\leftarrow \texttt{Less}(\texttt{arg1}, \texttt{arg0})\ \Delta
C++ Interface
=============
.. doxygenclass:: ngraph::op::Minimum
:project: ngraph
:members:
.. multiply.rst:
########
Multiply
########
.. code-block:: cpp
Multiply // Elementwise multiply operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is the product of the
values at the corresponding input coordinates.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \texttt{arg0}_{i_0, \ldots, i_{n-1}} \texttt{arg1}_{i_0, \ldots, i_{n-1}}
Backprop
========
.. math::
\overline{\texttt{arg0}} &\leftarrow \Delta\ \texttt{arg1}\\
\overline{\texttt{arg1}} &\leftarrow \Delta\ \texttt{arg0}
C++ Interface
=============
.. doxygenclass:: ngraph::op::Multiply
:project: ngraph
:members:
.. negative.rst:
########
Negative
########
.. code-block:: cpp
Negative // Elementwise negative operation
Description
===========
Produces a single output tensor of the same element type and shape as ``arg``,
where the value at each coordinate of ``output`` is the negative of the
value at each ``arg`` coordinate.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg`` | Any | Any |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | Same as ``arg`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\mathtt{output}_{i_0, \ldots, i_{n-1}} = -\mathtt{arg}_{i_0,
\ldots, i_{n-1}}
Backprop
========
.. math::
\overline{\texttt{arg}} \leftarrow -\Delta
C++ Interface
=============
.. doxygenclass:: ngraph::op::Negative
:project: ngraph
:members:
.. not.rst:
###
Not
###
.. code-block:: cpp
Not // Elementwise negation operation
Description
===========
Produces a single output tensor of boolean type and the same shape as ``arg``,
where the value at each coordinate of ``output`` is the negation of the
value at each ``arg`` coordinate.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg`` | ``element::boolean`` | Any |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``output`` | ``element::boolean`` | Same as ``arg`` |
+-----------------+-------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\mathtt{output}_{i_0, \ldots, i_{n-1}} = \neg\mathtt{arg}_{i_0, \ldots, i_{n-1}}
C++ Interface
=============
.. doxygenclass:: ngraph::op::Minimum
:project: ngraph
:members:
.. not_equal.rst:
########
NotEqual
########
.. code-block:: cpp
NotEqual // Elementwise "not equal" operation
Description
===========
Produces tensor of the same element type and shape as the two inputs,
where the value at each coordinate of ``output`` is ``1`` (true) if
``arg0`` is not equal to ``arg1``, ``0`` otherwise.
Inputs
------
+-----------------+-------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+=========================+================================+
| ``arg0`` | any | any |
+-----------------+-------------------------+--------------------------------+
| ``arg1`` | same as ``arg0`` | same as ``arg0`` |
+-----------------+-------------------------+--------------------------------+
Outputs
-------
+-----------------+------------------------------+--------------------------------+
| Name | Element Type | Shape |
+=================+==============================+================================+
| ``output`` | ``ngraph::element::boolean`` | same as ``arg0`` |
+-----------------+------------------------------+--------------------------------+
Mathematical Definition
=======================
.. math::
\texttt{output}_{i_0, \ldots, i_{n-1}} = \texttt{arg0}_{i_0, \ldots, i_{n-1}} \neq \texttt{arg1}_{i_0, \ldots, i_{n-1}}
C++ Interface
=============
.. doxygenclass:: ngraph::op::NotEqual
:project: ngraph
:members:
......@@ -44,16 +44,16 @@ standards:
- Assign template parameters with ``UPPER_SNAKE_CASE``.
- Case variable and function names with ``snake_case``.
Method names for basic acceesors are prefixed by ``get_`` or ``set_`` and
should have simple *O(1)* implementations:
Method names for basic accessors are prefixed by ``get_`` or ``set_`` and
should have simple :math:`\mathcal{O}(1)` implementations:
- A ``get_`` method should be externally idempotent. It may perform some simple
initialization and cache the result for later use.
- ``is_`` may be used instead of ``get_`` for boolean accessors. Trivial ``get_``
- An ``is_`` may be used instead of ``get_`` for boolean accessors. Trivial ``get_``
methods can be defined in a header file.
- A ``set_`` method should change the value returned by the corresponding``get_``
- A ``set_`` method should change the value returned by the corresponding ``get_``
method.
* Use ``set_is_`` if using ``is_`` to get a value.
......
......@@ -28,31 +28,30 @@ with respect to additions or feature requests.
If you prefer to use a containerized application, like Jupyter\* notebooks,
Google Docs\*, or MS Word\* to write and share documentation contributions,
you can convert the ``doc/sphinx/source/*.rst`` files to another format with a tool
like ``pypandoc`` and share a link to your docs on our `wiki`_.
like ``pypandoc`` and share a link to your docs on our `wiki`_.
Another option is to fork the `ngraph repo`_, essentially snapshotting it at
that point in time, and to build a Jupyter\* notebook or other set of docs around
it for a specific use case, and to share that contribution with us directly on
our wiki.
it for a specific use case; then share contribution with us directly on our wiki.
.. note:: Please do not submit Jupyter* notebook code to the Intel nGraph library
repos; best practice is to maintain any project-specific examples, tests, or
walk-throughs separately. Alternatively, you may wish to upstream documentation
contributions directly to whatever frontend framework supports your example.
or core repos; best practice is to maintain any project-specific examples,
tests, or walk-throughs separately. Alternatively, you may wish to upstream
documentation contributions directly to whatever frontend framework supports
your model or example.
Documenting source code examples
--------------------------------
When **verbosely** documenting functionality of specific sections of code -- whether
they're entire code blocks within a file, or code strings that are **outside** the
they are entire code blocks within a file, or code strings that are **outside** the
Intel nGraph `documentation repo`_, here is an example of best practice:
Say the file named `` `` has some interesting functionality that could
benefit from more explanation about one or more of the pieces in context. To keep
the "in context" format, write something like the following in your documentation
source file (``.rst``):
Say a file has some interesting functionality that could benefit from more
explanation about one or more of the pieces in context. To keep the "in context"
navigable, write something like the following in your ``.rst`` documentation
source file:
::
......@@ -63,17 +62,15 @@ source file (``.rst``):
And the raw code will render as follows
.. literalinclude:: ../../../../src/ngraph/descriptor/primary_tensor_view.cpp
.. literalinclude:: ../../../../src/ngraph/descriptor/primary_tensor_view.cpp
:language: cpp
:lines: 20-31
You can now verbosely explain the code block without worrying about breaking
the code.
The trick here is to add the file you want to reference relative to the folder
where the ``Makefile`` is that generates the documentation you're writing. See the
**note** at the bottom of this page for more detail about how this works in Intel
Nervana Graph project documentation.
the code. The trick here is to add the file you want to reference relative to
the folder where the ``Makefile`` is that generates the documentation you're
writing. See the **note** at the bottom of this page for more detail about how
this works in the alpha version of Intel nGraph library documentation.
Adding captions to code blocks
......@@ -82,14 +79,14 @@ Adding captions to code blocks
One more trick to helping users understand exactly what you mean with a section
of code is to add a caption with content that describes your parsing logic. To
build on the previous example, let's take a bigger chunk of code, add some
line numbers, and add a caption "One way to define neon axes within the dqn_atari.py file":
line numbers, and add a caption:
::
.. literalinclude:: ../../../../src/ngraph/descriptor/primary_tensor_view.cpp
:language: cpp
:lines: 20-31
:caption:
:language: cpp
:lines: 20-31
:caption: "primary_tensor_view.cpp"
and the generated output will show readers of your helpful documentation
......@@ -97,37 +94,31 @@ and the generated output will show readers of your helpful documentation
.. literalinclude:: ../../../../src/ngraph/descriptor/primary_tensor_view.cpp
:language: cpp
:lines: 20-31
:caption:
Take note that the ``linenos`` line will add a new context for line numbers
within your file; it will not bring the original line numbering with it. This
usually is not a problem because users will not see the back-end code rendering
the raw source code file, just the output defined by your reference.
:caption: "primary_tensor_view.cpp"
Our documentation practices are designed around "write once, reuse" that we can
use to prevent code bloat. A ``literalinclude`` with the ``caption`` option
also generates a permalink (see above) that makes finding "verbose" documentation
easier.
use to prevent code bloat. See the :doc:`code-contributor-README` for our code
style guide.
.. build-docs:
Build the Documentation
========================
Build the documentation
=======================
.. note:: Stuck on how to generate the html? Run these commands; they assume
.. note:: Stuck on how to generate the html? Run these commands; they assume
you start at a command line running within a clone (or a cloned fork) of the
``ngraph`` repo. You do **not** need to run a virtual environment to create
documentation if you don't want; running ``$ make clean`` in the ``doc/`` folder
removes any generated files.
documentation if you don't want; running ``$ make clean`` in the
``doc/sphinx`` folder removes any generated files.
Right now the minimal version of Sphinx needed to build the documentation is
Sphinx v. 1.6.5. This can be installed with `pip3`, either to a virtual
environment, or to your base system if you plan to contribute much to docs.
`Breathe`_ can also be installed to build C++ API documentation (currently WIP).
Sphinx v. 1.6.5. This can be installed with :command:`pip3`, either to a virtual
environment, or to your base system if you plan to contribute much core code or
documentation. For C++ API docs that contain inheritance diagrams and collaboration
diagrams, which are helpful for framework integrations or for building bridge code,
be sure you have a system capable of running `doxygen`_.
To build documentation locally, run:
......@@ -137,14 +128,31 @@ To build documentation locally, run:
$ pip3 install [-I] breathe [--user]
$ cd doc/sphinx/
$ make html
$ cd build/html
$ python3 -m http.server 8000
Then point your browser at ``localhost:8000``.
For tips similar to this, see the `sphinx`_ stable reST documentation.
To build documentation in a python3 virtualenv, run:
.. code-block:: console
$ python3 -m venv py3doc
$ . py3doc/bin/activate
(py3doc)$ pip install sphinx breathe
(py3doc)$ cd doc/sphinx
(py3doc)$ make html
(py3doc)$ cd build/html
(py3doc)$ python -m http.server 8000
Then point your browser at ``localhost:8000``.
For tips on writing reStructuredText-formatted documentation, see the `sphinx`_
stable reST documentation.
.. _ngraph repo: https://github.com/NervanaSystems/ngraph-cpp/
.. _documentation repo: https://github.com/NervanaSystems/ngraph/tree/master/doc
.. _documentation repo: https://github.com/NervanaSystems/private-ngraph/tree/master/doc
.. _sphinx: http://www.sphinx-doc.org/en/stable/rest.html
.. _wiki: https://github.com/NervanaSystems/ngraph/wiki/
.. _Breathe: https://breathe.readthedocs.io/en/latest/
.. _breathe: https://breathe.readthedocs.io/en/latest/
.. _doxygen: http://www.doxygen.org/index.html
......@@ -259,13 +259,15 @@ op::AvgPoolBackprop::AvgPoolBackprop(const Shape& forward_arg_shape,
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above)
const Shape& padding_above,
bool include_padding_in_avg_computation)
: RequiresTensorViewArgs("AvgPoolBackprop", {delta})
, m_forward_arg_shape(forward_arg_shape)
, m_window_shape(window_shape)
, m_window_movement_strides(window_movement_strides)
, m_padding_below(padding_below)
, m_padding_above(padding_above)
, m_include_padding_in_avg_computation(include_padding_in_avg_computation)
{
// --
// TODO: de-duplicate this code from AvgPool::AvgPool.
......@@ -386,6 +388,47 @@ op::AvgPoolBackprop::AvgPoolBackprop(const Shape& forward_arg_shape,
window_movement_strides[i]));
}
//
// Make sure we're not going to have to compute average over an empty set of tensor elements.
// That will happen if the sliding window ever resides entirely over the padding area AND
// we're planning to disregard padding when computing the window's average.
//
if (!include_padding_in_avg_computation)
{
for (size_t i = 0; i < spatial_dimension_count; i++)
{
const size_t dim_virtual_size = input_item_virtual_shape[i];
const size_t dim_window_size = window_shape[i];
const size_t dim_stride = window_movement_strides[i];
const size_t dim_padding_below = padding_below[i];
const size_t dim_padding_above = padding_above[i];
// Checking the lower edge of each dimension is easy, because there's no mystery
// regarding the window's lower-edge placement...
if ((dim_padding_below > 0) && (dim_window_size <= dim_padding_below))
{
throw ngraph_error(
"AvgPoolBackprop window will sometimes reside entirely within the "
"padding-below region, but the op disregards padding elements.");
}
// Now check the upper-bound...
{
const size_t dim_num_strides = (dim_virtual_size - dim_window_size) / dim_stride;
const size_t dim_window_max_lower_offset = dim_num_strides * dim_stride;
const size_t dim_padding_above_start_offset = dim_virtual_size - dim_padding_above;
if ((dim_padding_above > 0) &&
(dim_window_max_lower_offset >= dim_padding_above_start_offset))
{
throw ngraph_error(
"AvgPoolBackprop window will sometimes reside entirely within the "
"padding-above region, but the op disregards padding elements.");
}
}
}
}
//
// Construct result shape: NCDo.
//
......@@ -413,6 +456,7 @@ void op::AvgPool::generate_adjoints(autodiff::Adjoints& adjoints,
m_window_shape,
m_window_movement_strides,
m_padding_below,
m_padding_above);
m_padding_above,
m_include_padding_in_avg_computation);
adjoints.add_delta(operand, backprop);
}
......@@ -117,7 +117,8 @@ namespace ngraph
const Shape& window_shape,
const Strides& window_movement_strides,
const Shape& padding_below,
const Shape& padding_above);
const Shape& padding_above,
bool include_padding_in_avg_computation);
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override
......@@ -132,7 +133,8 @@ namespace ngraph
m_window_shape,
m_window_movement_strides,
m_padding_below,
m_padding_above);
m_padding_above,
m_include_padding_in_avg_computation);
return std::shared_ptr<op::AvgPoolBackprop>(avpn);
}
......@@ -141,12 +143,18 @@ namespace ngraph
const Strides& get_window_movement_strides() const { return m_window_movement_strides; }
const Shape& get_padding_below() const { return m_padding_below; }
const Shape& get_padding_above() const { return m_padding_above; }
bool get_include_padding_in_avg_computation() const
{
return m_include_padding_in_avg_computation;
}
protected:
Shape m_forward_arg_shape;
Shape m_window_shape;
Strides m_window_movement_strides;
Shape m_padding_below;
Shape m_padding_above;
bool m_include_padding_in_avg_computation;
};
}
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <cassert>
#include <memory>
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/slice.hpp"
using namespace std;
using namespace ngraph;
op::Concat::Concat(const Nodes& args, size_t concatenation_axis)
    : RequiresTensorViewArgs("Concat", args)
    , m_concatenation_axis(concatenation_axis)
{
    // Validates that every input agrees with input 0 in rank, element type,
    // and each non-concatenation dimension, then sets the output type to the
    // concatenated shape (input 0's shape with the concatenation axis widened
    // by the sum of all inputs' extents along that axis).
    if (m_inputs.empty())
    {
        throw ngraph_error("At least one argument required");
    }

    auto& input_0 = get_inputs().at(0);
    auto input_0_shape = input_0.get_shape();
    if (m_concatenation_axis >= input_0_shape.size())
    {
        throw ngraph_error("Concatenation axis is out of bounds");
    }

    // Running total of the output extent along the concatenation axis,
    // seeded with input 0's extent.
    size_t concatenation_axis_length = input_0_shape.at(m_concatenation_axis);
    auto& input_0_element_type = input_0.get_element_type();

    // Compare every remaining input against input 0. Use size_t indices:
    // `auto i = 1` deduces a signed int, producing signed/unsigned
    // comparisons against the unsigned size() results.
    for (size_t i = 1; i < get_inputs().size(); i++)
    {
        auto& input_i = get_inputs().at(i);
        auto input_i_shape = input_i.get_shape();
        if (input_i_shape.size() != input_0_shape.size())
        {
            throw ngraph_error("Arguments to concat do not have same rank");
        }

        if (input_i.get_element_type() != input_0_element_type)
        {
            throw ngraph_error("Argument element types do not match");
        }

        for (size_t j = 0; j < input_i_shape.size(); j++)
        {
            if (j != m_concatenation_axis && input_0_shape.at(j) != input_i_shape.at(j))
            {
                throw ngraph_error(
                    "Arguments to concat do not have same dimension on a non-concatenation axis");
            }
            else if (j == m_concatenation_axis)
            {
                concatenation_axis_length += input_i_shape.at(j);
            }
        }
    }

    vector<size_t> concatenated_shape = input_0_shape;
    concatenated_shape.at(m_concatenation_axis) = concatenation_axis_length;

    set_value_type_checked(make_shared<TensorViewType>(input_0_element_type, concatenated_shape));
}
void op::Concat::generate_adjoints(autodiff::Adjoints& adjoints, const std::shared_ptr<Node>& delta)
{
    // The adjoint of each argument is the slice of `delta` that lines up with
    // that argument's extent along the concatenation axis; all other axes are
    // taken in full with unit stride.
    auto result_shape = get_outputs().at(0).get_shape();
    const size_t rank = result_shape.size();

    Coordinate slice_lower(rank, 0);
    Coordinate slice_upper = result_shape;
    Coordinate slice_strides(rank, 1);

    // Running start offset along the concatenation axis.
    size_t offset = 0;
    for (auto arg : get_input_ops())
    {
        const size_t width = arg->get_shape()[m_concatenation_axis];
        slice_lower[m_concatenation_axis] = offset;
        slice_upper[m_concatenation_axis] = offset + width;
        adjoints.add_delta(arg,
                           make_shared<op::Slice>(delta, slice_lower, slice_upper, slice_strides));
        offset += width;
    }
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <memory>
#include "ngraph/ops/util/requires_tensor_view_args.hpp"
namespace ngraph
{
    namespace op
    {
        /// \brief Concatenation operation.
        ///
        /// Joins the input tensors along a single axis. The constructor checks
        /// that all inputs share rank, element type, and every dimension other
        /// than the concatenation axis, and widens that axis in the output by
        /// the sum of the inputs' extents along it.
        class Concat : public util::RequiresTensorViewArgs
        {
        public:
            /// \brief Constructs a concatenation operation.
            ///
            /// \param args The nodes producing the input tensors.
            /// \param concatenation_axis The axis along which to concatenate the input tensors.
            Concat(const Nodes& args, size_t concatenation_axis);

            /// \brief Creates a copy of this node bound to `new_args`, keeping
            ///        the same concatenation axis.
            virtual std::shared_ptr<Node> copy_with_new_args(
                const std::vector<std::shared_ptr<Node>>& new_args) const override
            {
                return std::make_shared<Concat>(new_args, m_concatenation_axis);
            }

            /// \return The concatenation axis.
            size_t get_concatenation_axis() const { return m_concatenation_axis; }
        protected:
            /// Computes each argument's delta by slicing `delta` along the
            /// concatenation axis.
            virtual void generate_adjoints(autodiff::Adjoints& adjoints,
                                           const std::shared_ptr<Node>& delta) override;

            // Axis along which inputs are joined; fixed at construction.
            const size_t m_concatenation_axis;
        };
    }
}
......@@ -23,19 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise division operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \mathbin{/} \texttt{arg1}[i_1,\dots,i_n]\f$ |
class Divide : public util::BinaryElementwiseArithmetic
{
public:
......
......@@ -25,35 +25,6 @@ namespace ngraph
namespace op
{
/// \brief Generalized dot product operation, including scalar-tensor product, matrix-vector product, and matrix multiplication.
///
/// Takes two arguments `arg0` and `arg1`, with shapes \f$(i_1,\dots,i_n,j_1,\dots,j_m)\f$ and \f$(j_1,\dots,j_m,k_1,\dots,k_p)\f$ respectively,
/// and produces an output tensor with shape \f$(i_1,\dots,i_n,k_1,\dots,k_p)\f$ by summing products along the \f$j\f$ dimensions.
///
/// A few common cases are as follows:
///
/// * If \f$m = 0\f$ and \f$n = 1\f$ or \f$p = 1\f$, the operation is a scalar-tensor product.
/// * If \f$m = 1\f$, \f$n = 2\f$, and \f$p = 1\f$, the operation is a matrix-vector product.
/// * If \f$m = 1\f$ and \f$n = p = 2\f$, the operation is a matrix multiplication.
///
/// ## Parameters
///
/// | | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------ |
/// | `reduction_axes_count` | The number of axes to reduce through dot-product (corresponds to \f$m\f$ in the formulas above). |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ----------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | `arg0` | \f$E[d_1,\dots,d_n,d'_1,\dots,d'_m]~(n,m \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d'_1,\dots,d'_m,d''_1,\dots,d''_p]~(p \geq 0)\f$ | A tensor of any shape with the same element type as `arg0` and rank at least \f$m\f$, whose first \f$m\f$ dimensions match the last \f$m\f$ dimensions of `arg0`, in order. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n,d''_1,\dots,d''_p]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n,k_1,\dots,k_p] = \Sigma_{0 \le j_1 < d'_1, \dots, 0 \le j_m < d'_m}(\mathtt{arg0}[i_1,\dots,i_n,j_1,\dots,j_m] \cdot \mathtt{arg1}[j_1,\dots,j_m,k_1,\dots,k_p])\f$ or, if \f$m = 0\f$, \f$T[i_1,\dots,i_n,k_1,\dots,k_p] = \mathtt{arg0}[i_1,\dots,i_n] \cdot \mathtt{arg1}[k_1,\dots,k_p]\f$. |
///
class Dot : public util::RequiresTensorViewArgs
{
public:
......
......@@ -23,18 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise natural exponential (exp) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \exp(\texttt{arg}[i_1,\dots,i_n])\f$ |
class Exp : public util::UnaryElementwiseArithmetic
{
public:
......
......@@ -23,18 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise floor operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ---------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \lfloor \texttt{arg}[i_1,\dots,i_n] \rfloor\f$ |
class Floor : public util::UnaryElementwiseArithmetic
{
public:
......
......@@ -23,24 +23,6 @@ namespace ngraph
namespace op
{
/// \brief %Function call operation.
///
/// ## Parameters
///
/// | | Description |
/// | ---------- | -------------------------- |
/// | `function` | The function to be called. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
/// | `args` | \f$T_1,\dots,T_n\f$ where \f$n\f$ matches the number of arguments expected by `function` and \f$T_i\f$ matches the type expected for the \f$i\f$th argument of `function`. | The arguments for the function call. |
///
/// ## Output
///
/// | Type | Description |
/// | --------- | -------------------------------------------------------- |
/// | \f$T_R\f$ | The tensor returned by `function` when called on `args`. |
class FunctionCall : public Node
{
public:
......
......@@ -23,19 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise greater-than operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \gt \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
class Greater : public util::BinaryElementwiseComparison
{
public:
......
......@@ -23,19 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise greater-than-or-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \geq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
class GreaterEq : public util::BinaryElementwiseComparison
{
public:
......
......@@ -23,19 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise less-than operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \lt \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
class Less : public util::BinaryElementwiseComparison
{
public:
......
......@@ -23,19 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise less-than-or-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \leq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
class LessEq : public util::BinaryElementwiseComparison
{
public:
......
......@@ -23,18 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise natural log operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \ln(\texttt{arg}[i_1,\dots,i_n])\f$ |
class Log : public util::UnaryElementwiseArithmetic
{
public:
......
......@@ -79,8 +79,11 @@ namespace ngraph
{
throw ngraph_error("Incorrect number of new arguments");
}
return std::make_shared<MaxPool>(
new_args.at(0), m_window_shape, m_window_movement_strides);
return std::make_shared<MaxPool>(new_args.at(0),
m_window_shape,
m_window_movement_strides,
m_padding_below,
m_padding_above);
}
/// \return The window shape.
......
......@@ -23,19 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise maximum operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \max(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$ |
class Maximum : public util::BinaryElementwiseArithmetic
{
public:
......
......@@ -23,19 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise minimum operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \min(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$ |
class Minimum : public util::BinaryElementwiseArithmetic
{
public:
......
......@@ -23,19 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise multiplication operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \cdot \texttt{arg1}[i_1,\dots,i_n]\f$ |
class Multiply : public util::BinaryElementwiseArithmetic
{
public:
......
......@@ -22,23 +22,11 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise negation operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = -(\texttt{arg}[i_1,\dots,i_n])\f$ |
/// \brief Elementwise negative operation.
class Negative : public util::UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a negation operation.
/// \brief Constructs a negative operation.
///
/// \param arg Node that produces the input tensor.
Negative(const std::shared_ptr<Node>& arg)
......
......@@ -23,18 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise logical negation operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------------------- | ------------------------------------------------- |
/// | `arg` | \f$\texttt{bool}[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape, with boolean element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg}[i_1,\dots,i_n] = 0\text{, else } 0\f$ |
class Not : public util::UnaryElementwise
{
public:
......
......@@ -23,19 +23,6 @@ namespace ngraph
namespace op
{
/// \brief Elementwise not-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
class NotEqual : public util::BinaryElementwiseComparison
{
public:
......
......@@ -2625,9 +2625,14 @@ namespace ngraph
writer << "memory result = memory({result_desc, cpu_engine}, "
<< out[0].get_name() << ");\n";
// Dummy forward primitive descriptor to keep MKLDNN happy
const char* algorithm_enumerator =
apb->get_include_padding_in_avg_computation()
? "algorithm::pooling_avg_include_padding"
: "algorithm::pooling_avg_exclude_padding";
writer << "pooling_forward::primitive_desc fwd_pd = "
"pooling_forward::primitive_desc("
<< "{prop_kind::forward, algorithm::pooling_avg_exclude_padding, "
<< "{prop_kind::forward, " << algorithm_enumerator << ", "
<< "result_desc, input_data_desc, {"
<< join(apb->get_window_movement_strides()) << "}, {"
<< join(apb->get_window_shape()) << "}, "
......@@ -2636,7 +2641,7 @@ namespace ngraph
<< "padding_kind::zero}, cpu_engine);\n";
writer
<< "auto avg_pooling = pooling_backward(pooling_backward::primitive_desc("
<< "pooling_backward::desc(algorithm::pooling_avg_exclude_padding, "
<< "pooling_backward::desc(" << algorithm_enumerator << ", "
<< "result_desc, input_data_desc, {"
<< join(apb->get_window_movement_strides()) << "}, {"
<< join(apb->get_window_shape()) << "}, "
......@@ -2660,7 +2665,11 @@ namespace ngraph
writer << " {" << join(apb->get_window_movement_strides())
<< "},\n";
writer << " {" << join(apb->get_padding_below()) << "},\n";
writer << " {" << join(apb->get_padding_above()) << "}\n";
writer << " {" << join(apb->get_padding_above()) << "},\n";
writer << " "
<< ngraph::to_cplusplus_sourcecode_literal(
apb->get_include_padding_in_avg_computation())
<< "\n";
writer << " );\n";
}
}
......
......@@ -291,7 +291,8 @@ private:
apb->get_window_shape(),
apb->get_window_movement_strides(),
apb->get_padding_below(),
apb->get_padding_above());
apb->get_padding_above(),
apb->get_include_padding_in_avg_computation());
}
else if (node_op == "Broadcast")
{
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment