Commit 5af90dcd authored by Scott Cyphers's avatar Scott Cyphers

Merge branch 'master' into cyphers/autodiff

parents 37feb402 89cef471
......@@ -11,7 +11,4 @@ RUN apt-get update && apt-get install -y \
RUN apt-get clean autoclean && \
apt-get autoremove -y
# Add chown_files script to avoid file-ownership problems with host directories mounted into the container
ADD contrib/docker/chown_files.sh /tmp/chown_files.sh
WORKDIR /root/ngraph-cpp-test
WORKDIR /home
# Basic Makefile for contrib/docker. This can be expanded later as more targets
# are added.
# Default is to build with -j for parallel builds. Turn off with
# make PARALLEL=
PARALLEL=-j
# DIR is an internal variable that serves as an anchor to this cloned git
# repository. DIR is mounted into the docker container, so that builds
# can occur within the container on this cloned git repository. DIR should
# not be modified - if it is, then the build system will not work.
DIR = $(realpath ../..)
# Use /tmp/ngraph-cpp-test, because we run as the user (and not root)
# DOCKUSER_HOME is the location of the home directory of the fabricated
# "dockuser" user, used only within the docker containers. "dockuser" is
# created (from the passed-in RUN_UID) to map the docker-caller user's UID to a
# first-class user (/etc/passwd entry, member of sudo group, proper home dir)
# /home/dockuser is also used in other scripts, notably run_as_user.sh, so if
# changed it must be done in other areas for the builds to work.
DOCKUSER_HOME=/home/dockuser
# Use /home/dockuser/ngraph-cpp-test, because we run as the user (and not root)
# /root/ngraph-cpp-test is not used, because /root is not accessible to user
VOLUME = -v ${DIR}:/tmp/ngraph-cpp-test
VOLUME = -v "${DIR}:${DOCKUSER_HOME}/ngraph-cpp-test"
GIT_COMMIT = $(shell git rev-parse HEAD)
BUILD_VERSION = ${GIT_COMMIT}_${PYTHON_VERSION}
BUILD_DIR = ${DIR}/contrib/docker/.build-${BUILD_VERSION}
......@@ -16,56 +33,71 @@ CALLER_GID := $(shell id -g)
# line
PYTHON_VERSION = 2
.PHONY: clean build_ngraph_cpp_cpu check_cpu shell build_all
.PHONY: clean build_ngraph_cpp_cpu check_cpu install shell build_all
DOCKER_BUILD=docker build --rm=true
ifdef http_proxy
DOCKER_BUILD+=--build-arg http_proxy=$(http_proxy)
DOCKER_RUN_ENV+=--env http_proxy=$(http_proxy)
DOCKER_RUN_ENV+=--env "http_proxy=$(http_proxy)"
endif
ifdef https_proxy
DOCKER_BUILD+=--build-arg https_proxy=$(https_proxy)
DOCKER_RUN_ENV+=--env https_proxy=$(https_proxy)
DOCKER_RUN_ENV+=--env "https_proxy=$(https_proxy)"
endif
expand_dockerfile_templates:
cd ${DIR}/contrib/docker
mkdir ${BUILD_DIR} || true
sed -e 's/\(FROM ngraph.*\)/\1:${BUILD_VERSION}/' Dockerfile.ngraph_cpp_cpu > ${BUILD_DIR}/Dockerfile.ngraph_cpp_cpu
cd "${DIR}"/contrib/docker
mkdir "${BUILD_DIR}" || true
sed -e 's/\(FROM ngraph.*\)/\1:${BUILD_VERSION}/' Dockerfile.ngraph_cpp_cpu > "${BUILD_DIR}"/Dockerfile.ngraph_cpp_cpu
clean:
rm -f ${DIR}/contrib/docker/.build-*/Dockerfile.* || echo "keep going if files are not present"
rmdir ${DIR}/contrib/docker/.build-* || echo "keep going if directory is not present"
rm -f "${DIR}"/contrib/docker/.build-*/Dockerfile.* || echo "keep going if files are not present"
rmdir "${DIR}"/contrib/docker/.build-* || echo "keep going if directory is not present"
rm -fr "${DIR}"/BUILD
build_ngraph_cpp_cpu: expand_dockerfile_templates
$(DOCKER_BUILD) -f=${BUILD_DIR}/Dockerfile.ngraph_cpp_cpu --build-arg python_version=${PYTHON_VERSION} -t=ngraph_cpp_cpu:${BUILD_VERSION} ${DIR}
$(DOCKER_BUILD) -f="${BUILD_DIR}"/Dockerfile.ngraph_cpp_cpu --build-arg python_version="${PYTHON_VERSION}" -t=ngraph_cpp_cpu:"${BUILD_VERSION}" "${DIR}"
# remove the tag for the previous latest image
docker rmi ngraph_cpp_cpu:latest || echo "keep going if docker rmi command fails"
docker tag `docker images -q ngraph_cpp_cpu:${BUILD_VERSION}` ngraph_cpp_cpu:latest
docker tag `docker images -q "ngraph_cpp_cpu:${BUILD_VERSION}"` ngraph_cpp_cpu:latest
build_all: build_ngraph_cpp_cpu
check_cpu: build_ngraph_cpp_cpu
# Remove old distribution directory if present
( test -d "${DIR}"/BUILD/ngraph_dist && rm -fr "${DIR}"/BUILD/ngraph_dist && echo "Removed old ${DIR}/BUILD/ngraph_dist directory" ) || echo "Previous ngraph_dist directory not found"
# Make BUILD directory as user
mkdir -p ${DIR}/BUILD
chmod ug+rwx ${DIR}/BUILD
# Need to use /tmp/ngraph-cpp-test/BUILD, because running as user
# Can't use /root/ngraph-cpp-test/BUILD, because /root not accessible to user
docker run \
--rm --user ${CALLER_UID}:${CALLER_GID} \
${DOCKER_RUN_ENV} ${VOLUME} -w /tmp/ngraph-cpp-test/BUILD -t ngraph_cpp_cpu:${BUILD_VERSION} \
sh -c "cmake -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 .. ; env VERBOSE=1 make check"
# update the files to be owned by the calling user instead of root, to avoid docker mount problems with file ownership
docker run --rm ${VOLUME} \
--env MY_UID=${CALLER_UID} \
--env MY_GID=${CALLER_GID} \
--env MY_ROOT_DIR=/root/ngraph-cpp-test \
-t ngraph_cpp_cpu \
/tmp/chown_files.sh
mkdir -p "${DIR}"/BUILD
chmod ug+rwx "${DIR}"/BUILD
docker run --rm --tty \
${VOLUME} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD; cmake -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 .. 2>&1 | tee cmake.log ; env VERBOSE=1 make ${PARALLEL} 2>&1 | tee make.log ; env VERBOSE=1 make check 2>&1 | tee make_check.log" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
shell: build_ngraph_cpp_cpu
docker run --rm ${VOLUME} -it ngraph_cpp_cpu:${BUILD_VERSION} /bin/bash
# "make shell" runs an interactive shell in the docker image, for debugging
docker run --rm --tty --interactive \
${VOLUME} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "cd ${DOCKUSER_HOME} ; ${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
install:
# Puts ngraph_dist in BUILD directory. This is used by Jenkins ngraph-tensorflow batch job.
# Note: We currently have a bug where cmake only installs in $HOME. Jira NGTF-205 is opened
# for this. For now, here we install to $HOME, then move the directory.
docker run --rm --tty \
${VOLUME} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD ; test -d ngraph_dist && rm -fr ngraph_dist && echo 'Removed old ngraph_dist directory' ; make install 2>&1 | tee make_install.log ; mv -v ${DOCKUSER_HOME}/ngraph_dist ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
all: build_ngraph_cpp_cpu
#!/bin/bash

# The docker run commands leave output files with root ownership.
# Re-own those files with the UID/GID of the calling user so they remain
# writable on the host after the container exits.
#
# Environment (all optional):
#   MY_UID      - UID to chown files to     (default: UID of the caller)
#   MY_GID      - GID to chgrp files to     (default: GID of the caller)
#   MY_ROOT_DIR - directory tree to fix up  (default: /root/ngraph-test)
#
# Positional:
#   $1          - optional extra flag passed through to chown/chgrp (e.g. -v)

if [ -z "${MY_UID:-}" ]; then
    MY_UID=$(id -u)
fi

if [ -z "${MY_GID:-}" ]; then
    MY_GID=$(id -g)
fi

if [ -z "${MY_ROOT_DIR:-}" ]; then
    MY_ROOT_DIR=/root/ngraph-test
fi

# Abort rather than run a recursive chown in the wrong place if the
# directory is missing (the original unquoted, unchecked cd would fall
# through and operate on the current directory).
cd "${MY_ROOT_DIR}" || exit 1

# Record the root-owned files once, up front: after the chown below they
# are no longer owned by root, so a second find would come up empty.
# -print0 / -0 keep filenames containing whitespace intact; -r skips the
# command entirely when nothing matched.  ${1} is deliberately unquoted so
# an absent argument disappears instead of becoming an empty argument.
find . -user root -print0 > files_to_chown.txt
xargs -0 -r chown "${MY_UID}" ${1} < files_to_chown.txt
xargs -0 -r chgrp "${MY_GID}" ${1} < files_to_chown.txt
rm -f files_to_chown.txt
......@@ -7,7 +7,7 @@ echo
# clean up old docker containers
echo "Removing Exited docker containers..."
docker ps -a | grep Exited | cut -f 1 -d ' ' | xargs docker rm -f ${1}
docker ps -a | grep Exited | cut -f 1 -d ' ' | xargs docker rm -f "${1}"
echo
#list docker images for ngraph
......@@ -17,4 +17,4 @@ echo
# clean up docker images no longer in use
echo "Removing docker images for ngraph..."
docker images -qa ngraph_* | xargs docker rmi -f ${1}
docker images -qa ngraph_* | xargs docker rmi -f "${1}"
#! /bin/bash

# This script is designed to simulate running as a user with a particular UID
# within a docker container.
#
# Normally a docker container runs as root, which can cause problems with file
# ownership when a host directory tree is mounted into the docker container.
# There are other problems with building and running software as root as
# well. Good practice when validating software builds in a docker container
# is to run as a normal user, since many (most?) end users will not be building
# and installing software as root.
#
# This script should be run using "docker run", with RUN_UID (set to the user
# you want to run as) passed into the docker container as an environment
# variable. The script will then add the UID as user "dockuser" to
# /etc/passwd (important for some software, like bazel), add the new dockuser
# to the sudo group (whether or not sudo is installed), and su to a new shell
# as the dockuser (passing in the existing environment, which is important).
#
# If the environment variable RUN_CMD is passed into the docker container, then
# this script will use RUN_CMD as a command to run when su'ing. If RUN_CMD is
# not defined, then /bin/bash will run, which effectively provides an
# interactive shell in the docker container, for debugging.
set -e # Make sure we exit on any command that returns non-zero
set -u # No unset variables
# Use ${RUN_UID:-} here: with "set -u" in effect, a bare $RUN_UID would abort
# with an "unbound variable" error before the friendly diagnostic below could
# ever print.  The :- default makes the guard reachable when RUN_UID is unset.
if [ -z "${RUN_UID:-}" ] ; then
# >&2 redirects echo output to stderr.
# See: https://stackoverflow.com/questions/2990414/echo-that-outputs-to-stderr
( >&2 echo 'ERROR: Environment variable RUN_UID was not set when run-as-user.sh was run' )
( >&2 echo '       Running as default user (root, in docker)' )
( >&2 echo ' ' )
# NOTE(review): the message above says we fall back to root, but the script
# actually exits here instead -- the message and behavior disagree; confirm
# which is intended.
exit 1
else
# The username used in the docker container to map the caller UID to
#
# Note 'dockuser' is used in other scripts, notably Makefile.  If you
# choose to change it here, then you need to change it in all other
# scripts, or else the builds will break.
#
DOCK_USER='dockuser'
# We will be su'ing using a non-login shell or command, and preserving
# the environment.  This is done so that env. variables passed in with
# "docker run --env ..." are honored.
# Therefore, we need to reset at least HOME=/root ...
#
# Note also that /home/dockuser is used in other scripts, notably
# Makefile.  If you choose to change it here, then you need to change it
# in all other scripts, or else the builds will break.
#
export HOME="/home/${DOCK_USER}"
# Make sure the home directory is owned by the new user
if [ -d "${HOME}" ] ; then
chown "${RUN_UID}" "${HOME}"
fi
# Add a user with UID of person running docker (in ${RUN_UID})
# If $HOME does not yet exist, then it will be created
adduser --disabled-password --gecos 'Docker-User' -u "${RUN_UID}" "${DOCK_USER}"
# Add dockuser to the sudo group
adduser "${DOCK_USER}" sudo
# If root access is needed in the docker image while running as a normal
# user, uncomment this and add 'sudo' as a package installed in Dockerfile
# echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
# ${RUN_CMD+x} is non-empty iff RUN_CMD is set (even to the empty string),
# which is safe under "set -u".  "su -m" preserves the environment so the
# --env variables from "docker run" survive into the dockuser shell.
if [ -z "${RUN_CMD+x}" ] ; then  # Launch a shell as dockuser
su -m "${DOCK_USER}" -c /bin/bash
else  # Run command as dockuser
su -m "${DOCK_USER}" -c "${RUN_CMD}"
fi
fi
......@@ -3,5 +3,8 @@ PROJECT_BRIEF = "Nervana graph compiler"
OUTPUT_DIRECTORY = @CMAKE_CURRENT_BINARY_DIR@
INPUT = @CMAKE_SOURCE_DIR@/src
RECURSIVE = YES
EXTRACT_STATIC = YES
USE_MATHJAX = YES
......@@ -84,6 +84,7 @@
#include "ngraph/ops/remainder.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/sign.hpp"
#include "ngraph/ops/sin.hpp"
#include "ngraph/ops/sinh.hpp"
#include "ngraph/ops/slice.hpp"
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise absolute value operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = |\texttt{arg}[i_1,\dots,i_n]|\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------------------------- |
/// | NGVM | Implemented for signed types only. |
class Abs : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an absolute value operation.
///
/// \param arg Node that produces the input tensor.
Abs(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise inverse cosine (arccos) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \arccos(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Acos : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an arccos operation.
///
/// \param arg Node that produces the input tensor.
Acos(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise addition operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] + \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Add : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs an addition operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Add(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise inverse sine (arcsin) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \arcsin(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Asin : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an arcsin operation.
///
/// \param arg Node that produces the input tensor.
Asin(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise inverse tangent (arctan) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \arctan(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Atan : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an arctan operation.
///
/// \param arg Node that produces the input tensor.
Atan(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,15 +20,51 @@ namespace ngraph
{
namespace op
{
/// \brief Operation which "adds" axes to an input tensor, replicating elements from the input as needed along the new axes.
///
/// Informally, a broadcast "adds" axes to the input tensor, replicating elements from the input tensor as needed to fill the new dimensions.
/// The parameter `m_broadcast_axes` indicates which of the output axes is being so added. For example, an output shape of `{2,5,6,2,8}` and
/// broadcast axes of `{1,3,4}` means that the input must have shape `{2,6}`.
///
/// Formally, given a shape or coordinate \f$S = [d_1,\dots,d_n]\f$ and a set of axis indices \f$A\f$, define \f$\textit{del}(S,A)\f$ to be
/// the shape or coordinate obtained by deleting the \f$(a + 1)\f$th dimension from \f$S\f$ for each \f$a \in A\f$. Then given an input
/// tensor \f$T\f$ of shape \f$\textit{del}(S,A)\f$ with element type \f$E\f$, broadcasting axes \f$A\f$ produces a tensor \f$T'\f$ of shape
/// \f$S\f$ with element type \f$E\f$, where \f$T'[i_1,\dots,i_n] = T[del([i_1,\dots,i_n],A)]\f$.
///
/// ## Parameters
///
/// | | Description |
/// | ---------------- | ------------------------------------------------------------------------ |
/// | `shape` | The shape \f$[d_1,\dots,d_n]\f$ of the broadcasted output. |
/// | `broadcast_axes` | The indices \f$A\f$ in the `shape` of each broadcasted (i.e., new) axis. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------------------------- | --------------------------------------- |
/// | `arg` | \f$E[\mathit{del}([d_1,\dots,d_n],A)]~(n \geq 0)\f$ | A tensor of any shape and element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T'\f$, where \f$T'[i_1,\dots,i_n] = T[del([i_1,\dots,i_n],A)]\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------- |
/// | NGVM | Implemented for scalars, matrices, and vectors. |
class Broadcast : public Builtin
{
public:
/// \brief Constructs a broadcast operation.
///
/// @param arg The tensor view to be broadcast.
/// @param shape The shape of the result
/// @param broadcast_axes The axis positions (0-based) in the result that are being broadcast.
/// the remaining axes in shape must be the same as the shape of arg.
///
/// \param arg Node that produces the input tensor to be broadcast.
/// \param shape The shape of the output tensor.
/// \param broadcast_axes The axis positions (0-based) in the result that are being broadcast. The
/// remaining axes in shape must be the same as the shape of arg.
Broadcast(const std::shared_ptr<Node>& arg,
const Shape& shape,
const AxisSet& broadcast_axes)
......@@ -41,6 +77,7 @@ namespace ngraph
virtual std::string description() const override { return "Broadcast"; }
virtual void propagate_types() override;
/// \return A set containing the indices of the broadcast axes (0-based).
const AxisSet& get_broadcast_axes() const { return m_broadcast_axes; }
protected:
Shape m_shape;
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise ceiling operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \lceil \texttt{arg}[i_1,\dots,i_n] \rceil\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------- |
/// | NGVM | Not implemented. |
class Ceiling : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a ceiling operation.
///
/// \param arg Node that produces the input tensor.
Ceiling(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,18 +20,54 @@ namespace ngraph
{
namespace op
{
/// \brief Concatenation operation.
///
/// Given an axis index \f$a\f$ and a rank \f$r \geq 1\f$ where \f$0 \leq a \lt r\f$, and one or more \f$r\f$-tensors
/// with the same element type whose shapes are the same except possibly at axis \f$a\f$, the tensors are
/// concatenated along axis \f$a\f$.
///
/// For example:
/// 1. Concatenating matrices on axis 0 (the row axis) stacks the matrices from top to bottom.
/// The number of rows in the resulting matrix is the sum of the number of rows for each
/// input matrix.
/// 2. Concatenating matrices on axis 1 (the column axis) concatenates them from left to right.
/// The number of columns in the resulting matrix is the sum of the number of columns for each
/// input matrix.
/// 3. Concatenating 3-tensors on axis 2 (the depth axis) stacks them from front to back.
/// The depth of the resulting tensor is the sum of the total depth for each input tensor.
///
/// The resulting tensor will have the same rank as the input tensors.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | -------------------------------------------------------------- |
/// | `concatenation_axis` | The axis \f$a\f$ along which to concatenate the input tensors. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | --------------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ |
/// | `args`[\f$i\f$] | \f$E[d_1,\dots,d_{a-1},d^i_a,d_{a+1},\dots,d_n]~(n \geq 1)\f$ | One or more input tensors, all of which have the same element type, and the same shape, except possibly at axis \f$a\f$. |
///
/// ## Output
///
/// | Type | Description |
/// | ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_{a-1},\Sigma_i(d^i_a),d_{a+1},\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T\f$ is the concatenation of the input tensors along axis \f$a\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------------------------- |
/// | NGVM | Implemented for vectors and matrices. |
class Concat : public Builtin
{
public:
/// Concatenates one or more tensors.
/// \brief Constructs a concatenation operation.
///
/// All tensors must have the same rank, and the sizes of the axes must match
/// everywhere except at the concatenation axis. The size of the concatenation
/// axis on the output is the sum of its size on all inputs; the size of other
/// axes is unchanged from the input tensors.
///
/// Example: n0 has shape {2,4,2}, and n1 has shape {2,5,2}. Then the output of
/// Concat(Nodes{n0,n1},1) will have shape {2,9,2}.
/// \param args The nodes producing the input tensors.
/// \param concatenation_axis The axis along which to concatenate the input tensors.
Concat(const Nodes& args, size_t concatenation_axis)
: Builtin(args)
, m_concatenation_axis(concatenation_axis)
......@@ -41,6 +77,7 @@ namespace ngraph
virtual std::string description() const override { return "Concatenate"; }
virtual void propagate_types() override;
/// \return The concatenation axis.
size_t get_concatenation_axis() const { return m_concatenation_axis; }
protected:
const size_t m_concatenation_axis;
......
......@@ -24,10 +24,17 @@ namespace ngraph
{
namespace op
{
// Defines methods to all constants
/// \brief Abstract base class for constants.
///
/// There are two subclasses: ParameterizedConstant and Constant. ParameterizedConstant allows constant values to be supplied via vectors of the corresponding C++ type;
/// however, the ParameterizedConstant subclass can only be used when type information is available at C++ compile-time. In cases where types are not known until
/// C++ runtime, the Constant subclass must be used instead.
class ConstantBase : public Node
{
protected:
/// \brief Constructs a constant base-type node.
///
/// \param type The TensorViewType for the constant.
ConstantBase(const std::shared_ptr<TensorViewType>& type)
: Node({}, type)
{
......@@ -36,16 +43,43 @@ namespace ngraph
virtual void propagate_types() override;
};
// Implement a constant tensor for each element type.
/// \brief Class for constants whose element types are known at C++ compile-time.
///
/// \tparam T The ngraph::element::Type of the tensor's elements.
///
/// This class can be used when the type of the tensor constant is known at C++ compile-time. For other cases, Constant must be used.
///
/// ## Parameters
///
/// | | Description |
/// | ------- | ------------------------------------------------------------------------------------ |
/// | `shape` | The ngraph::Shape of the tensor constant. |
/// | `value` | The ngraph::runtime::ParameterizedTensorView containing data for the tensor constant. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | A constant tensor with the specified element type, shape, and values. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
template <typename T>
class ParameterizedConstant : public ConstantBase
{
public:
// The ngraph element type
/// \brief The ngraph element type
using element_type = T;
// The C++ type that holds the element type
/// \brief The C++ type that holds the element type
using type = typename T::type;
/// \brief Constructs a parameterized tensor constant.
///
/// \param shape The shape of the tensor constant.
/// \param value The value of the tensor constant.
ParameterizedConstant(
const Shape& shape,
typename std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>>& value)
......@@ -62,6 +96,7 @@ namespace ngraph
return ss.str();
}
/// \return The value of the tensor constant.
typename std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> get_value() const
{
return m_value;
......@@ -71,17 +106,52 @@ namespace ngraph
std::shared_ptr<ngraph::runtime::ParameterizedTensorView<T>> m_value;
};
/// \brief A 32-bit floating-point tensor constant.
using Float32Constant = ParameterizedConstant<element::Float32>;
/// \brief An 8-bit signed integer tensor constant.
using Int8Constant = ParameterizedConstant<element::Int8>;
/// \brief A 32-bit signed integer tensor constant.
using Int32Constant = ParameterizedConstant<element::Int32>;
/// \brief A 64-bit signed integer tensor constant.
using Int64Constant = ParameterizedConstant<element::Int64>;
/// \brief An 8-bit unsigned integer tensor constant.
using UInt8Constant = ParameterizedConstant<element::UInt8>;
/// \brief A 16-bit unsigned integer tensor constant.
using UInt32Constant = ParameterizedConstant<element::UInt32>;
/// \brief A 64-bit unsigned integer tensor constant.
using UInt64Constant = ParameterizedConstant<element::UInt64>;
/// \brief Class for constants whose element types may not be known until graph construction time.
///
/// This class must be used when the type of the tensor constant is unknown at C++ compile-time. For other cases, ParameterizedConstant should be used.
///
/// ## Parameters
///
/// | | Description |
/// | --------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
/// | `et` | The ngraph::element::Type of the tensor constant. |
/// | `shape` | The ngraph::Shape of the tensor constant. |
/// | `value_strings` | A list of strings containing literals for initialization of the tensor constant. These strings are parsed with the appropriate instance of ngraph::element::TraitedType::read. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | A constant tensor with the specified element type, shape, and values. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Constant : public ConstantBase
{
public:
/// \brief Constructs a tensor constant.
///
/// \param et The element type of the tensor constant.
/// \param shape The shape of the tensor constant.
/// \param value_strings A list of literals for initializing the tensor constant. There must be one literal for each element of the tensor; i.e., `value_strings.size()` must equal `ngraph::shape_size(shape)`.
Constant(const element::Type& et,
const Shape& shape,
const std::vector<std::string>& value_strings)
......@@ -90,6 +160,11 @@ namespace ngraph
{
}
/// \brief Constructs a tensor constant with the same initialization value copied across the tensor.
///
/// \param et The element type of the tensor constant.
/// \param shape The shape of the tensor constant.
/// \param value_string A literal for initializing each tensor constant.
Constant(const element::Type& et, const Shape& shape, const std::string& value_string)
: ConstantBase(std::make_shared<TensorViewType>(et, shape))
, m_value_strings(ngraph::shape_size(shape), value_string)
......@@ -104,6 +179,7 @@ namespace ngraph
return ss.str();
}
/// \return The initialization literals for the tensor constant.
const std::vector<std::string>& get_value_strings() const { return m_value_strings; }
virtual void propagate_types() override;
......
......@@ -20,9 +20,41 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise type conversion operation.
///
/// Each scalar in the input tensor is converted to the specified output element type. Note that the conversion may
/// result in loss of precision. For example, conversion from `float32` to `int32` is allowed.
///
/// ## Parameters
///
/// | | Description |
/// | -------------- | ---------------------------------------- |
/// | `element_type` | The element type \f$E'\f$ to convert to. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------- |
/// | `arg` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------- | --------------------------------------------------------------------------------------------------------- |
/// | \f$E'[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathit{convert}_{(E,E')}(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Convert : public UnaryElementwiseBuiltin
{
public:
/// \brief Constructs a conversion operation.
///
/// \param arg Node that produces the input tensor.
/// \param element_type Element type for the output tensor.
Convert(const std::shared_ptr<Node>& arg, const ngraph::element::Type& element_type)
: UnaryElementwiseBuiltin({arg})
, m_element_type(element_type)
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise cosine operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \cos(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Cos : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a cosine operation.
///
/// \param arg Node that produces the input tensor.
Cos(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise hyperbolic cosine (cosh) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \cosh(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Cosh : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a hyperbolic cosine operation.
///
/// \param arg Node that produces the input tensor.
Cosh(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise division operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \mathbin{/} \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Divide : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a division operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Divide(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,28 +20,95 @@ namespace ngraph
{
namespace op
{
/// \brief Inner product/dot product/matrix product/tensor contraction operation.
///
/// Takes two arguments `arg0` and `arg1`. There are three possible cases:
///
/// 1. `arg0` or `arg1` is 0-dimensional. Then, treats that 0-dimensional argument as a scalar and computes a scalar-tensor product.
/// (Example: `arg0` has shape `{1,2,3}` and arg1 has shape `{}`; then the result will have shape `{1,2,3}`.)
///
/// 2. `arg1` is a vector (1-dimensional tensor). Then, computes a dot product reducing on the innermost (rightmost) dimensions of `arg0` and `arg1`.
/// (Example: arg0 has shape `{1,2,3}` and arg1 has shape `{3}`; then the result will have shape `{1,2}`.)
///
/// 3. `arg1` is more than 1-dimensional. Then, computes a dot product reducing on the innermost (rightmost) dimension of arg0, and the next-to-innermost dimension of arg1.
/// (Example: arg0 has shape {3,4} and arg1 has shape {4,3}; then the result will have shape {3,3}.)
///
///
/// = Case 1: Scalar-tensor product =
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------------ |
/// | `arg0` | \f$E[]\f$ | A scalar of any element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape, with the same element type as `arg0`. |
///
/// <i>(Note: the order of inputs may be reversed in this case, i.e., `arg1` can be the scalar and `arg0` the tensor.)</i>
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ---------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathtt{arg0} \cdot \mathtt{arg1}[i_1,\dots,i_n]\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
///
/// = Case 2: Vector-tensor product =
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ----------------------------------- | ------------------------------------------------------------------------------------------------------------ |
/// | `arg0` | \f$E[d]\f$ | A vector of any element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n,d]~(n \geq 0)\f$ | A tensor of any shape whose innermost dimension matches `arg0`'s size, with the same element type as `arg0`. |
///
/// <i>(Note: in the particular case where \f$n = 0\f$, this is a vector dot product; when \f$n = 1\f$, this is a vector-matrix product.)</i>
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ---------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathtt{arg0} \cdot \mathtt{arg1}[i_1,\dots,i_n]\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------------------------------- |
/// | NGVM | Implemented for `arg1` with rank 2 or less. |
///
/// = Case 3: Tensor-tensor product =
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 1)\f$ | A tensor of any shape with rank of at least 1, and any element type. |
/// | `arg1` | \f$E[d'_1,\dots,d'_m]~(m \geq 2\text{ and }d'_{m-1}=d_n)\f$ | A tensor with the same element type as `arg0`, and any shape with rank of at least 2 whose next-to-innermost dimension matches `arg0`'s innermost dimension. |
///
/// <i>(Note: in the particular case where \f$n = m = 2\f$, this is a matrix product.)</i>
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------|
/// | \f$E[d_1,\dots,d_{n-1},d'_1,\dots,d'_{m-2},d'_{m}]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_{n-1},j_1,\dots,j_{m-2},j_m] = \sum_{k=1}^{d_n} \texttt{arg0}[i_1,\dots,i_{n-1},k] \cdot \texttt{arg1}[j_1,\dots,j_{m-2},k,j_m]\f$, i.e., the contraction of `arg0`'s innermost dimension with `arg1`'s next-to-innermost dimension (as in numpy's `dot`). |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------------------------------------- |
/// | NGVM | Implemented for `arg1` with rank of exactly 2. |
class Dot : public Builtin
{
public:
/// Computes the dot product of two tensors.
/// \brief Constructs a dot product operation.
///
/// There are three possible cases:
/// (1) arg0 or arg1 is 0-dimensional. Then, we treat the 0-dimensional
/// argument(s) as scalars and compute a scalar-tensor or
/// scalar-scalar product.
/// (Example: arg0 has shape {1,2,3} and arg1 has shape {}; then
/// the result will have shape {1,2,3}.)
///
/// (2) arg1 is 1-dimensional. Then, we compute a dot product reducing
/// on the innermost (rightmost) dimensions of arg0 and arg1.
/// (Example: arg0 has shape {1,2,3} and arg1 has shape {3}; then
/// the result will have shape {1,2}.)
///
/// (3) arg1 is more than 1-dimensional. Then, we compute a dot product
/// reducing on the innermost (rightmost) dimension of arg0, and the
/// next-to-innermost dimension of arg1.
/// (Example: arg0 has shape {3,4} and arg1 has shape {4,3}; then
/// the result will have shape {3,3}.)
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
Dot(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: Builtin({arg0, arg1})
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise is-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Equal : public BinaryElementwiseComparison
{
public:
/// \brief Constructs an is-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Equal(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise natural exponential (exp) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \exp(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Exp : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an exponential operation.
///
/// \param arg Node that produces the input tensor.
Exp(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise floor operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ---------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \lfloor \texttt{arg}[i_1,\dots,i_n] \rfloor\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------- |
/// | NGVM | Not implemented. |
class Floor : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a floor operation.
///
/// \param arg Node that produces the input tensor.
Floor(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -21,13 +21,38 @@ namespace ngraph
{
namespace op
{
/// \brief %Function call operation.
///
/// ## Parameters
///
/// | | Description |
/// | ---------- | -------------------------- |
/// | `function` | The function to be called. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
/// | `args` | \f$T_1,\dots,T_n\f$ where \f$n\f$ matches the number of arguments expected by `function` and \f$T_i\f$ matches the type expected for the \f$i\f$th argument of `function`. | The arguments for the function call. |
///
/// ## Output
///
/// | Type | Description |
/// | --------- | -------------------------------------------------------- |
/// | \f$T_R\f$ | The tensor returned by `function` when called on `args`. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class FunctionCall : public Builtin
{
public:
/// \brief Constructs a function call operation.
///
/// @param function The function to be called
/// @param args The function arguments
///
/// \param function The function to be called.
/// \param args The arguments for the function call.
FunctionCall(std::shared_ptr<Function> function,
const std::vector<std::shared_ptr<Node>>& args)
: Builtin(args)
......@@ -38,6 +63,7 @@ namespace ngraph
virtual std::string description() const override { return "FunctionCall"; }
virtual void propagate_types() override;
/// \return The function to be called.
std::shared_ptr<Function> get_function() const { return m_function; }
protected:
std::shared_ptr<Function> m_function;
......
......@@ -22,9 +22,38 @@ namespace ngraph
{
class Node;
/// \brief Operation to get an element from a tuple.
///
/// ## Parameters
///
/// | | Description |
/// | --- | ------------------------------------------------------------------ |
/// | `n` | The position of the element (0-based) to get from the input tuple. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ----------------------------------------------------------- | ------------------------------------------ |
/// | `arg`  | \f$(T_1,\dots,T_{n-1},T_n,T_{n+1},\dots,T_m)~(m \geq 1)\f$  | An input tuple with at least `n + 1` elements (so that the 0-based position `n` is valid). |
///
/// ## Output
///
/// | Type | Description |
/// | --------- | ------------------------------------- |
/// | \f$T_n\f$ | The `n`th element of the input tuple. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class GetTupleElement : public Builtin
{
public:
/// \brief Constructs a get-tuple-element operation.
///
/// \param arg The input tuple.
/// \param n The index of the tuple element to get.
GetTupleElement(const std::shared_ptr<Node>& arg, size_t n)
: Builtin({arg})
, m_n{n}
......@@ -33,6 +62,7 @@ namespace ngraph
virtual void propagate_types() override;
virtual std::string description() const override { return "GetTupleElement"; }
/// \return The index of the tuple element to get.
size_t get_n() const { return m_n; }
protected:
size_t m_n;
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise greater-than operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \gt \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Greater : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a greater-than operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Greater(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise greater-than-or-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \geq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class GreaterEq : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a greater-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
GreaterEq(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise less-than operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \lt \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Less : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a less-than operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Less(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise less-than-or-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \leq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class LessEq : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a less-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
LessEq(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise natural log operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \ln(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Log : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a natural log operation.
///
/// \param arg Node that produces the input tensor.
Log(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise maximum operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \max(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Maximum : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a maximum operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Maximum(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise minimum operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \min(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Minimum : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a minimum operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Minimum(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise multiplication operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \cdot \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Multiply : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a multiplication operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Multiply(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise negation operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = -(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Negative : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a negation operation.
///
/// \param arg Node that produces the input tensor.
Negative(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise not-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class NotEqual : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a not-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
NotEqual(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
This diff is collapsed.
......@@ -22,11 +22,29 @@ namespace ngraph
class Function;
namespace op
{
/// \brief A function parameter.
///
/// Parameters are nodes that represent the arguments that will be passed to user-defined functions.
/// Function creation requires a sequence of parameters.
/// Basic graph operations do not need parameters attached to a function.
///
/// ## Parameters
///
/// | | Description |
/// | -------------| ---------------------------------- |
/// | `value_type` | The type \f$T\f$ of the parameter. |
///
/// ## Output
///
/// | Type | Description |
/// | ------- | --------------------------------------------------------------------------------------------------------------------------- |
/// | \f$T\f$ | The value of the parameter, supplied by the `FunctionCall` to this function or in the initial `ngraph::runtime::CallFrame`. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Parameter : public Node
{
protected:
......@@ -36,7 +54,14 @@ namespace ngraph
}
public:
/// \brief Constructs a parameter node.
///
/// \param value_type The type of the parameter.
Parameter(const std::shared_ptr<const ValueType>& value_type = nullptr);
/// \brief Constructs a tensor view-typed parameter node.
///
/// \param element_type The element type of the parameter.
/// \param shape The shape of the parameter.
Parameter(const ngraph::element::Type& element_type, const Shape& shape);
std::string description() const override { return "Parameter"; }
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise exponentiation operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n]^{\texttt{arg1}[i_1,\dots,i_n]}\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------- |
/// | NGVM | Not implemented. |
class Power : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs an exponentiation operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Power(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,15 +20,82 @@ namespace ngraph
{
namespace op
{
/// \brief Tensor reduction operation.
///
/// Element-wise reduces the input tensor, eliminating the specified reduction axes, given a reduction function that maps two scalars to a scalar.
/// For example, if the reduction function \f$f(x,y) = x+y\f$:
///
/// \f[
/// \mathit{reduce}\left(f,\{0\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
/// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{reduce}\left(f,\{1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
/// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{reduce}\left(f,\{0,1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// (1 + 2) + (3 + 4) + (5 + 6) =
/// 21~~~\text{(both dimensions (rows and columns) are eliminated)}
/// \f]
///
/// It is assumed that \f$f\f$ is associative. In other words, the order of operations is undefined. In the case where a collapsed dimension is 0,
/// the value of `arg_init` will be substituted.
///
/// Note that the parameter `reduction_axes` specifies which axes are to be <i>eliminated</i>, which can be a bit counterintuitive. For example,
/// as seen above, eliminating the column dimension results in the <i>rows</i> being summed, not the columns.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | ------------------------------------------------------------------------------------------------------------------------- |
/// | `reduction_function` | The scalar function used to reduce the input tensor. Must take two arguments of type \f$E[]\f$ and return type \f$E[]\f$. |
/// | `reduction_axes` | The axes to eliminate through reduction. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | -------------- | --------------------------------- | ----------------------------------------------------------------------------------------------------- |
/// | `arg_reductee` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape, with the element type matching that expected by the reduction function. |
/// | `arg_init`     | \f$E[]\f$                         | A scalar to be used as a substitute output value on zero-sized axes.                                  |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------------- |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. |
class Reduce : public Builtin
{
public:
/// \brief Constructs a reduction operation.
///
/// @param arg_reductee The tensor view to be reduced.
/// @param arg_init The initial value for reduction.
/// @param reduction_function The reduction function to use.
/// @param reduction_axes The axis positions (0-based) to be reduced.
///
/// \param arg_reductee The tensor view to be reduced.
/// \param arg_init The initial value for reduction.
/// \param reduction_function The reduction function to use.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Reduce(const std::shared_ptr<Node>& arg_reductee,
const std::shared_ptr<Node>& arg_init,
const std::shared_ptr<Function>& reduction_function,
......@@ -42,10 +109,12 @@ namespace ngraph
virtual std::string description() const override { return "Reduce"; }
virtual void propagate_types() override;
/// \return The function to use for reduction.
std::shared_ptr<Function> get_reduction_function() const
{
return m_reduction_function;
}
/// \return The axis positions (0-based) to be eliminated through reduction.
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
std::shared_ptr<Function> m_reduction_function;
......
......@@ -20,9 +20,35 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise remainder operation.
///
/// (TODO: Get a bit more clarity on this: is it just "mod"? What about negative numbers and floats?)
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \mod \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------- |
/// | NGVM | Not implemented. |
class Remainder : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a remainder operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Remainder(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,16 +20,55 @@ namespace ngraph
{
namespace op
{
/// \brief Tensor reshape operation.
///
/// "Converts" an input tensor into a new shape with the same number of elements.
///
/// Given that the input tensor has shape \f$[d_1,\dots,d_n]\f$, the output may have any shape \f$[d'_1,\dots,d'_m]\f$ such that
/// \f$\Pi_{0 \leq i \lt n}(d_i) = \Pi_{0 \leq i \lt m}(d'_i)\f$. For example, a \f$3\times{}4\f$ matrix can be reshaped into a
/// 3-tensor of shape \f$3\times{}2\times{}2\f$, a matrix of shape \f$6\times{}2\f$, or a vector of size \f$12\f$, but not, for
/// example, a matrix of size \f$4\times{}4\f$.
///
/// The parameter `input_order` indicates the order in which to "walk" over the input axes. Given a tensor of shape \f$(d_1,\dots,d_n)\f$,
/// an input order of \f$(a_0, a_1, \dots, a_{n-1})\f$ results in the coordinate for axis \f$a_{n-1}\f$ being varied most frequently,
/// followed by axis \f$a_{n-2}\f$, and so on down to \f$a_0\f$.
///
/// (TODO: example.)
///
/// ## Parameters
///
/// | | Description |
/// | -------------- | ---------------------------------------------------------- |
/// | `input_order` | The order in which to walk over the input axes. |
/// | `output_shape` | The shape \f$[d'_1,\dots,d'_m]\f$ for the reshaped tensor. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------------------------------------------------------------ |
/// | `arg` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any type and shape, as long as the product of \f$d_i\f$ equals the product of \f$d'_i\f$. |
///
/// ## Output
///
/// | Type | Description |
/// | ------------------------ | ------------------------------------------------------------------------------------------------------ |
/// | \f$E[d'_1,\dots,d'_m]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with its elements rearranged as described above. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. Implemented for other shapes only when there is no reordering of the input axes, i.e. `input_order` is \f$(0,\dots,n-1)\f$. |
class Reshape : public Builtin
{
public:
/// \brief Constructs a reshape operation.
///
/// @param arg The tensor view to be reshaped.
/// @param input_order The order in which to iterate over input axes. (TODO: that needs more explanation)
/// This must be a permutation of the sequence (0,...,n-1) where n is the rank of the input tensor.
/// @param output_shape The output shape. If the input shape is (a0,...,ak-1) then the output shape must
/// be of the form (b0,...,bj-1) where product(ai) == product(bi).
///
/// \param arg The tensor view to be reshaped.
/// \param input_order The order in which to iterate over input axes. This must be a permutation of the
/// sequence \f$(0,\dots,n-1)\f$ where \f$n\f$ is the rank of the input tensor.
/// \param output_shape The output shape. If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape must
/// be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$.
Reshape(const std::shared_ptr<Node>& arg,
const AxisVector& input_order,
const Shape& output_shape)
......@@ -42,7 +81,9 @@ namespace ngraph
virtual std::string description() const override { return "Reshape"; }
virtual void propagate_types() override;
/// \return The order in which to iterate over input axes.
const AxisVector& get_input_order() const { return m_input_order; }
/// \return The shape of the output tensor.
const Shape& get_output_shape() const { return m_output_shape; }
protected:
const AxisVector m_input_order;
......
......@@ -20,9 +20,35 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise selection operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------------------- | ------------------------------------------------------------ |
/// | `arg0` | \f$\texttt{bool}[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape, with element `bool`. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape as `arg0`, with any element type. |
/// | `arg2` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg1`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq 0\text{, else }\texttt{arg2}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Select : public Builtin
{
public:
/// \brief Constructs a selection operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param arg2 Node that produces the third input tensor.
Select(const std::shared_ptr<Node>& arg0,
const std::shared_ptr<Node>& arg1,
const std::shared_ptr<Node>& arg2)
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise sign operation.
///
/// Maps each element of the input tensor to -1 (if it is negative), 0 (if it is zero), or 1 (if it is positive).
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \text{sgn}(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sign : public UnaryElementwiseArithmetic
{
public:
    /// \brief Constructs an elementwise sign operation.
    ///
    /// \param arg Node that produces the input tensor.
    Sign(const std::shared_ptr<Node>& arg)
        : UnaryElementwiseArithmetic(arg)
    {
    }

    /// \return The name of this op, used for debugging/printing.
    virtual std::string description() const override
    {
        return "Sign";
    }
};
}
}
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise sine operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sin(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sin : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a sine operation.
///
/// \param arg Node that produces the input tensor.
Sin(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise hyperbolic sine (sinh) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sinh(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sinh : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a hyperbolic sine operation.
///
/// \param arg Node that produces the input tensor.
Sinh(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,16 +20,48 @@ namespace ngraph
{
namespace op
{
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a bounding box, optionally with stride.
///
/// Given an input tensor \f$T\f$ of shape \f$[d_1,\dots,d_n]\f$, lower bounds \f$[l_1,\dots,l_n]\f$, and upper bounds \f$[u_1,\dots,u_n]\f$,
/// where \f$l_i \leq u_i \leq d_i\f$, and a stride \f$[s_1,\dots,s_n]\f$, returns a new tensor \f$T'\f$ of the same element type and shape
/// \f$[d'_1,\dots,d'_n]\f$ where \f$d'_i = \lceil(u_i - l_i)\, /\, s_i\rceil\f$, where \f$T'[i_1,\dots,i_n] = T[i'_1,\dots,i'_n]\f$
/// where \f$i'_j = i_j s_j + l_j\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | `lower_bounds` | The (inclusive) lower-bound coordinates \f$l_i\f$ for the tensor slice. For example, a lower-bound of \f$(1,2)\f$ means to start the slice at row 1 and column 2. |
/// | `upper_bounds` | The (non-inclusive) upper-bound coordinates \f$u_i\f$ for the tensor slice. For example, an upper-bound of \f$(5,4)\f$ means to end the slice before row 5 and column 4.    |
/// | `step` | The "step" or "stride" \f$s_i\f$ for the tensor slice. For example, a stride of \f$(1,3)\f$ means to take every row, and every third column (starting at the lower bound). |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------------------------- | --------------------------------------- |
/// | `arg` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$                   | A tensor of any shape and element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ------------------------------------------------------------------------------ | --------------------------------- |
/// | \f$E[d'_1,\dots,d'_n]\f$ where \f$d'_i = \lceil(u_i - l_i)\, /\, s_i\rceil\f$. | The tensor sliced from the input. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------- |
/// | NGVM | Implemented for scalars, matrices, and vectors. |
class Slice : public Builtin
{
public:
/// \brief Constructs a tensor slice operation.
///
/// @param arg The tensor view to be sliced.
/// @param lower_bounds The axiswise lower bounds of the slice.
/// @param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// @param step The slicing step; for example, step of {n,m} means to take
/// every nth row and every mth column of the input matrix.
///
/// \param arg The tensor view to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param step The slicing step; for example, step of `{n,m}` means to take
/// every nth row and every mth column of the input matrix.
Slice(const std::shared_ptr<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
......@@ -41,6 +73,11 @@ namespace ngraph
{
}
/// \brief Constructs a tensor slice operation with unit step; i.e., every element inside the bounding box will be copied to the output slice.
///
/// \param arg The tensor view to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
Slice(const std::shared_ptr<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds)
......@@ -54,8 +91,11 @@ namespace ngraph
virtual std::string description() const override { return "Slice"; }
virtual void propagate_types() override;
/// \return The inclusive lower-bound coordinates.
const Coordinate& get_lower_bounds() const { return m_lower_bounds; }
/// \return The exclusive upper-bound coordinates.
const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
/// \return The slicing step.
const Shape& get_step() const { return m_step; }
protected:
const Coordinate m_lower_bounds;
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise subtraction operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] - \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Subtract : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a subtraction operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Subtract(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -44,11 +44,11 @@ void Sum::propagate_types()
auto arg_shape = arg_tensor_view_type->get_shape();
for (auto axis : m_summed_axes)
for (auto axis : m_reduction_axes)
{
if (axis >= arg_shape.size())
{
throw ngraph_error("Summed axis is out of bounds");
throw ngraph_error("Reduction axis for sum is out of bounds");
}
}
......@@ -56,7 +56,7 @@ void Sum::propagate_types()
for (size_t i = 0; i < arg_shape.size(); i++)
{
if (m_summed_axes.count(i) == 0)
if (m_reduction_axes.count(i) == 0)
{
result_shape.push_back(arg_shape.at(i));
}
......
......@@ -20,25 +20,86 @@ namespace ngraph
{
namespace op
{
/// \brief Tensor sum operation.
///
/// Element-wise sums the input tensor, eliminating the specified reduction axes.
/// For example:
///
/// \f[
/// \mathit{sum}\left(\{0\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
/// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{sum}\left(\{1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
/// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{sum}\left(\{0,1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// (1 + 2) + (3 + 4) + (5 + 6) =
/// 21~~~\text{(both dimensions (rows and columns) are eliminated)}
/// \f]
///
/// This is equivalent to Reduce where `arg_init` = 0 and `reduction_function` is \f$f(x,y) = x+y\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | ---------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through summation. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------------- |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. |
class Sum : public Builtin
{
public:
/// \brief Constructs a summation operation.
///
/// @param arg The tensor view to be summed.
/// @param summed_axes The axis positions (0-based) to be summed.
///
Sum(const std::shared_ptr<Node>& arg, const AxisSet& summed_axes)
/// \param arg The tensor view to be summed.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Sum(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: Builtin({arg})
, m_summed_axes(summed_axes)
, m_reduction_axes(reduction_axes)
{
}
virtual std::string description() const override { return "Sum"; }
virtual void propagate_types() override;
const AxisSet& get_summed_axes() const { return m_summed_axes; }
/// \return The axis positions (0-based) to be eliminated through summation.
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
AxisSet m_summed_axes;
AxisSet m_reduction_axes;
};
}
}
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise tangent operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tan(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Tan : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a tangent operation.
///
/// \param arg Node that produces the input tensor.
Tan(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise hyperbolic tangent operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tanh(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Tanh : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a hyperbolic tangent operation.
///
/// \param arg Node that produces the input tensor.
Tanh(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Operation to construct a tuple.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ------------------------------ | -------------------------------------- |
/// | `args` | \f$T_1,\dots,T_n~(n \geq 0)\f$ | The elements of the constructed tuple. |
///
/// ## Output
///
/// | Type | Description |
/// | --------------------- | ---------------------------------------------------------- |
/// | \f$(T_1,\dots,T_n)\f$ | The tuple \f$(\texttt{args}[0],\dots,\texttt{args}[n])\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Tuple : public Builtin
{
public:
/// \brief Constructs a tuple construction operation.
///
/// \param args The nodes that produce the elements of the constructed tuple.
Tuple(const Nodes& args)
: Builtin(args)
{
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <algorithm>
#include "ngraph/ngraph.hpp"
using namespace std;
using namespace ngraph;
using namespace ngraph::runtime;
// Builds a call frame for the NGVM interpreter.
//
// The tensor-view slot layout is [inputs | outputs | temporaries]: the
// temporaries are installed here and persist for the frame's lifetime,
// while the input/output slots are (re)bound on each call in tensor_call().
CallFrame::CallFrame(size_t n_inputs,
                     size_t n_outputs,
                     const TensorViewPtrs& temps,
                     size_t initial_pc,
                     const shared_ptr<vector<shared_ptr<Instruction>>>& instructions)
    : m_n_inputs(n_inputs)
    , m_n_outputs(n_outputs)
    , m_tensor_views(n_inputs + n_outputs + temps.size())
    , m_initial_pc(initial_pc)
    , m_instructions(instructions)
{
    // Temporaries live after the input and output slots.
    size_t temp_offset = m_n_inputs + m_n_outputs;
    for (size_t i = 0; i < temps.size(); i++)
    {
        m_tensor_views[temp_offset + i] = temps[i];
    }
}
// Runs the instruction stream against concrete input/output tensor views.
void CallFrame::tensor_call(
    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& inputs,
    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& outputs)
{
    // Bind the caller's tensors into the frame: inputs occupy slots
    // [0, m_n_inputs) and outputs occupy [m_n_inputs, m_n_inputs + m_n_outputs).
    for (size_t i = 0; i < inputs.size(); i++)
    {
        m_tensor_views[i] = inputs[i];
    }
    for (size_t i = 0; i < outputs.size(); i++)
    {
        m_tensor_views[m_n_inputs + i] = outputs[i];
    }

    // Interpreter loop: execute instructions starting at the entry point
    // until one of them sets m_return. m_next_pc is advanced before
    // execute() runs — presumably so an instruction can redirect control by
    // overwriting it (TODO confirm against Instruction implementations).
    m_return = false;
    for (m_next_pc = m_initial_pc; !m_return;)
    {
        m_pc = m_next_pc++;
        m_instructions->at(m_pc)->execute(*this);
    }

    // Release the borrowed input/output views so this frame does not extend
    // their lifetime between calls.
    for (size_t i = 0; i < m_n_inputs + m_n_outputs; i++)
    {
        m_tensor_views[i] = nullptr;
    }
}
// Invokes the frame on structured Value arguments/results by flattening
// them into the flat tensor-view lists that tensor_call() expects.
void CallFrame::operator()(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& arguments,
                           const std::vector<std::shared_ptr<ngraph::runtime::Value>>& results)
{
    // TODO: Check types of args and result
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> inputs;
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> outputs;

    for (const auto& argument : arguments)
    {
        argument->collect_tensor_views(inputs, argument);
    }

    for (const auto& result : results)
    {
        result->collect_tensor_views(outputs, result);
    }

    tensor_call(inputs, outputs);
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
/// \brief NGVM instruction implementing the elementwise op::Sign.
///
/// Writes, element by element, the sign of each input value into the
/// output tensor view using Eigen's coefficient-wise sign().
template <typename ET>
class SignInstruction : public Instruction
{
public:
    /// \param arg Tensor view holding the input values.
    /// \param out Tensor view that receives the elementwise signs.
    SignInstruction(TensorViewInfo arg, TensorViewInfo out)
        : m_arg(arg)
        , m_out(out)
    {
    }

    /// Executes out = sign(arg), computed over the flattened (1-d)
    /// Eigen view of both tensors.
    virtual void execute(CallFrame& call_frame) const override
    {
        EigenArray1d<ET, fmt::V>(call_frame, m_out) =
            EigenArray1d<ET, fmt::V>(call_frame, m_arg).sign();
    }

protected:
    TensorViewInfo m_arg; // input tensor view
    TensorViewInfo m_out; // output tensor view
};
}
}
}
}
......@@ -52,6 +52,7 @@
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/sign.hpp"
#include "ngraph/ops/sin.hpp"
#include "ngraph/ops/sinh.hpp"
#include "ngraph/ops/slice.hpp"
......@@ -103,6 +104,7 @@
#include "ngraph/runtime/ngvm/eigen/return.hpp"
#include "ngraph/runtime/ngvm/eigen/scalar_tensor_product.hpp"
#include "ngraph/runtime/ngvm/eigen/select.hpp"
#include "ngraph/runtime/ngvm/eigen/sign.hpp"
#include "ngraph/runtime/ngvm/eigen/sin.hpp"
#include "ngraph/runtime/ngvm/eigen/sinh.hpp"
#include "ngraph/runtime/ngvm/eigen/subtract.hpp"
......@@ -353,6 +355,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
REGISTER_NUMERIC_UNOP(op::Exp, eigen::ExpInstruction);
REGISTER_NUMERIC_UNOP(op::Log, eigen::LogInstruction);
REGISTER_NUMERIC_UNOP(op::Negative, eigen::NegateInstruction);
REGISTER_NUMERIC_UNOP(op::Sign, eigen::SignInstruction);
REGISTER_NUMERIC_UNOP(op::Sin, eigen::SinInstruction);
REGISTER_NUMERIC_UNOP(op::Sinh, eigen::SinhInstruction);
REGISTER_NUMERIC_UNOP(op::Tan, eigen::TanInstruction);
......@@ -844,10 +847,10 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
auto arg_shape = arg_tensor_view_type->get_shape();
auto arg_rank = arg_shape.size();
auto& summed_axes = s->get_summed_axes();
auto& reduction_axes = s->get_reduction_axes();
// Trivial case: no summed axes.
if (summed_axes.size() == 0)
// Trivial case: no reduction axes.
if (reduction_axes.size() == 0)
{
PUSH_POLYMORPHIC_INSTRUCTION(s_element_type,
"Sum has unhandled element type",
......@@ -856,8 +859,8 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
out.at(0).get_index());
}
// Full reduction? Then sum to scalar.
else if ((arg_rank == 1 && summed_axes == AxisSet{0}) ||
(arg_rank == 2 && summed_axes == AxisSet{0, 1}))
else if ((arg_rank == 1 && reduction_axes == AxisSet{0}) ||
(arg_rank == 2 && reduction_axes == AxisSet{0, 1}))
{
PUSH_POLYMORPHIC_INSTRUCTION(s_element_type,
"Sum has unhandled element type",
......@@ -865,7 +868,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
in[0],
out[0]);
}
else if (arg_rank == 2 && summed_axes == AxisSet{1})
else if (arg_rank == 2 && reduction_axes == AxisSet{1})
{
PUSH_POLYMORPHIC_INSTRUCTION(s_element_type,
"Sum has unhandled element type",
......@@ -873,7 +876,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
in[0],
out[0]);
}
else if (arg_rank == 2 && summed_axes == AxisSet{0})
else if (arg_rank == 2 && reduction_axes == AxisSet{0})
{
PUSH_POLYMORPHIC_INSTRUCTION(s_element_type,
"Sum has unhandled element type",
......
......@@ -117,6 +117,7 @@ namespace ngraph
return std::make_shared<runtime::ParameterizedTensorView<TraitedType<T>>>(shape);
}
/// Parses a string containing a literal of the underlying type.
static T read(const std::string& s)
{
T result;
......@@ -134,6 +135,7 @@ namespace ngraph
return result;
}
/// Parses a list of strings containing literals of the underlying type.
static std::vector<T> read(const std::vector<std::string>& ss)
{
std::vector<T> result;
......
......@@ -2478,3 +2478,24 @@ TEST(execute, sum_matrix_to_scalar_zero_by_zero)
// input tensors, so let's do this too.
ASSERT_EQ((vector<float>{}), a->get_vector());
}
// End-to-end test of op::Sign on the NGVM backend: builds a one-op
// function, compiles it, runs it, and checks the elementwise result.
TEST(execute, sign)
{
    auto shape = Shape{2, 3};

    // Graph: a single Sign node applied to one Float32 parameter.
    auto param = make_shared<op::Parameter>(element::Float32::element_type(), shape);
    auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
    auto f = make_shared<Function>(make_shared<op::Sign>(param), rt, op::Parameters{param});

    // Compile for the NGVM backend and obtain a callable frame.
    auto manager = runtime::Manager::get("NGVM");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Input exercises positive, negative, zero, fractional, and
    // negative-zero values; sign() should collapse each to -1, 0, or +1.
    auto a = backend->make_parameterized_tensor_view<element::Float32>(shape);
    *a = vector<float>{1, -2, 0, -4.8f, 4.8f, -0.0};
    auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);

    (*cf)({a}, {result});
    ASSERT_EQ((vector<float>{1, -1, 0, -1, 1, 0}), result->get_vector());
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment