Commit 92864fb2 authored by Jai Menon's avatar Jai Menon Committed by GitHub

Merge branch 'master' into jmenon/codegen

parents 0c5fea18 89cef471
......@@ -69,3 +69,7 @@ config_args.txt
.nfs*
venv/
.vscode/
# VisualGDB files
VisualGDB/
toolchain.cmake
......@@ -12,6 +12,8 @@ TODO
## Steps
_If you are developing ngraph on macOS (officially unsupported), please see the section "macOS Development Prerequisites" below._
`libngraph` is built in the customary manner for a CMake-based project:
1. Create a build directory outside of the source directory tree.
......@@ -23,6 +25,17 @@ TODO
6. _(Optional, requires `doxygen`)_ Run `make doc`.
* This will build API documentation in the directory `doc` inside the build directory.
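For reference, the full sequence for an out-of-source build typically looks like the following (a sketch; adjust paths and options to your environment):

```
$ mkdir build && cd build    # out-of-source build directory
$ cmake ..                   # point CMake at the source tree
$ make -j                    # parallel build
$ make check                 # run the unit tests (see the testing section below)
```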
## macOS Development Prerequisites
The repository includes two scripts (`maint/check-code-format.sh` and `maint/apply-code-format.sh`) that are used, respectively, to check adherence to the `libngraph` code formatting conventions and to reformat code automatically to match those conventions. These scripts require `clang-format-3.9` to be in your `PATH`. Run the following commands (adjusting them as needed if you are not using `bash`).
```
$ brew install llvm@3.9
$ mkdir -p $HOME/bin
$ ln -s /usr/local/opt/llvm@3.9/bin/clang-format $HOME/bin/clang-format-3.9
$ echo 'export PATH=$HOME/bin:$PATH' >> $HOME/.bash_profile
```
# Testing `libngraph`
`libngraph` uses the GTest framework for unit tests. CMake automatically downloads a
......
......@@ -16,35 +16,16 @@ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror=inconsistent-missing-override")
# whitelist errors here
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Weverything")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-c++98-compat")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-c++98-compat-pedantic")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-weak-vtables")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-global-constructors")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-switch-enum")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-gnu-zero-variadic-macro-arguments")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-undef")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-exit-time-destructors")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-missing-prototypes")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-disabled-macro-expansion")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-pedantic")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-documentation")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-covered-switch-default")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unknown-warning-option")
# # should remove these
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-old-style-cast")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-float-conversion")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-sign-conversion")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-padded")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-potentially-evaluated-expression") # Triggers false alarms on typeid
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-sign-compare")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-weak-vtables") # Not ready for this yet
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-conversion")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-float-equal")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-duplicate-enum") # from numpy
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-used-but-marked-unused") # from sox
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-c++11-compat-deprecated-writable-strings")
# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-deprecated")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-double-promotion")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-undefined-func-template")
......@@ -15,6 +15,9 @@
include(ExternalProject)
set(EIGEN_INSTALL_DIR ${EXTERNAL_INSTALL_DIR}/eigen)
set(EIGEN_PROJECT eigen)
set(EIGEN_SHA1_HASH dd238ca6c6b5d2ce2e7e2e9ded4c59bad77ce6d0)
set(EIGEN_URL http://bitbucket.org/eigen/eigen/get/3.3.3.zip)
#----------------------------------------------------------------------------------------------------------
# Download and install GoogleTest ...
......@@ -23,17 +26,17 @@ set(EIGEN_INSTALL_DIR ${EXTERNAL_INSTALL_DIR}/eigen)
# The 'BUILD_BYPRODUCTS' argument was introduced in CMake 3.2.
if (${CMAKE_VERSION} VERSION_LESS 3.2)
ExternalProject_Add(
eigen
URL http://bitbucket.org/eigen/eigen/get/3.3.3.zip
# PREFIX ${CMAKE_CURRENT_BINARY_DIR}/eigen
${EIGEN_PROJECT}
URL ${EIGEN_URL}
URL_HASH SHA1=${EIGEN_SHA1_HASH}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${EIGEN_INSTALL_DIR}
)
else()
ExternalProject_Add(
eigen
URL http://bitbucket.org/eigen/eigen/get/3.3.3.zip
# PREFIX ${CMAKE_CURRENT_BINARY_DIR}/eigen
${EIGEN_PROJECT}
URL ${EIGEN_URL}
URL_HASH SHA1=${EIGEN_SHA1_HASH}
UPDATE_COMMAND ""
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${EIGEN_INSTALL_DIR}
BUILD_BYPRODUCTS "${EIGEN_INSTALL_DIR}/include/eigen3"
......
......@@ -11,7 +11,4 @@ RUN apt-get update && apt-get install -y \
RUN apt-get clean autoclean && \
apt-get autoremove -y
# Add chown_files script, to avoid leaving root-owned files in the mounted source tree
ADD contrib/docker/chown_files.sh /tmp/chown_files.sh
WORKDIR /root/ngraph-cpp-test
WORKDIR /home
# Basic Makefile for contrib/docker. This can be expanded later as more targets
# are added.
# Default is to build with -j for parallel builds. Turn off with
# make PARALLEL=
PARALLEL=-j
# DIR is an internal variable that serves as an anchor to this cloned git
# repository. DIR is mounted into the docker container, so that builds
# can occur within the container on this cloned git repository. DIR should
# not be modified - if it is, then the build system will not work.
DIR = $(realpath ../..)
# Use /tmp/ngraph-cpp-test, because we run as the user (and not root)
# DOCKUSER_HOME is the location of the home directory of the fabricated
# "dockuser" user, used only within the docker containers. "dockuser" is
# created (from the passed-in RUN_UID) to map the docker-caller user's UID to a
# first-class user (/etc/passwd entry, member of sudo group, proper home dir)
# /home/dockuser is also used in other scripts, notably run_as_user.sh, so if
# changed it must be done in other areas for the builds to work.
DOCKUSER_HOME=/home/dockuser
# Use /home/dockuser/ngraph-cpp-test, because we run as the user (and not root)
# /root/ngraph-cpp-test is not used, because /root is not accessible to user
VOLUME = -v ${DIR}:/tmp/ngraph-cpp-test
VOLUME = -v "${DIR}:${DOCKUSER_HOME}/ngraph-cpp-test"
GIT_COMMIT = $(shell git rev-parse HEAD)
BUILD_VERSION = ${GIT_COMMIT}_${PYTHON_VERSION}
BUILD_DIR = ${DIR}/contrib/docker/.build-${BUILD_VERSION}
......@@ -16,56 +33,71 @@ CALLER_GID := $(shell id -g)
# line
PYTHON_VERSION = 2
.PHONY: clean build_ngraph_cpp_cpu check_cpu shell build_all
.PHONY: clean build_ngraph_cpp_cpu check_cpu install shell build_all
DOCKER_BUILD=docker build --rm=true
ifdef http_proxy
DOCKER_BUILD+=--build-arg http_proxy=$(http_proxy)
DOCKER_RUN_ENV+=--env http_proxy=$(http_proxy)
DOCKER_RUN_ENV+=--env "http_proxy=$(http_proxy)"
endif
ifdef https_proxy
DOCKER_BUILD+=--build-arg https_proxy=$(https_proxy)
DOCKER_RUN_ENV+=--env https_proxy=$(https_proxy)
DOCKER_RUN_ENV+=--env "https_proxy=$(https_proxy)"
endif
expand_dockerfile_templates:
cd ${DIR}/contrib/docker
mkdir ${BUILD_DIR} || true
sed -e 's/\(FROM ngraph.*\)/\1:${BUILD_VERSION}/' Dockerfile.ngraph_cpp_cpu > ${BUILD_DIR}/Dockerfile.ngraph_cpp_cpu
cd "${DIR}"/contrib/docker
mkdir "${BUILD_DIR}" || true
sed -e 's/\(FROM ngraph.*\)/\1:${BUILD_VERSION}/' Dockerfile.ngraph_cpp_cpu > "${BUILD_DIR}"/Dockerfile.ngraph_cpp_cpu
clean:
rm -f ${DIR}/contrib/docker/.build-*/Dockerfile.* || echo "keep going if files are not present"
rmdir ${DIR}/contrib/docker/.build-* || echo "keep going if directory is not present"
rm -f "${DIR}"/contrib/docker/.build-*/Dockerfile.* || echo "keep going if files are not present"
rmdir "${DIR}"/contrib/docker/.build-* || echo "keep going if directory is not present"
rm -fr "${DIR}"/BUILD
build_ngraph_cpp_cpu: expand_dockerfile_templates
$(DOCKER_BUILD) -f=${BUILD_DIR}/Dockerfile.ngraph_cpp_cpu --build-arg python_version=${PYTHON_VERSION} -t=ngraph_cpp_cpu:${BUILD_VERSION} ${DIR}
$(DOCKER_BUILD) -f="${BUILD_DIR}"/Dockerfile.ngraph_cpp_cpu --build-arg python_version="${PYTHON_VERSION}" -t=ngraph_cpp_cpu:"${BUILD_VERSION}" "${DIR}"
# remove the tag for the previous latest image
docker rmi ngraph_cpp_cpu:latest || echo "keep going if docker rmi command fails"
docker tag `docker images -q ngraph_cpp_cpu:${BUILD_VERSION}` ngraph_cpp_cpu:latest
docker tag `docker images -q "ngraph_cpp_cpu:${BUILD_VERSION}"` ngraph_cpp_cpu:latest
build_all: build_ngraph_cpp_cpu
check_cpu: build_ngraph_cpp_cpu
# Remove old distribution directory if present
( test -d "${DIR}"/BUILD/ngraph_dist && rm -fr "${DIR}"/BUILD/ngraph_dist && echo "Removed old ${DIR}/BUILD/ngraph_dist directory" ) || echo "Previous ngraph_dist directory not found"
# Make BUILD directory as user
mkdir -p ${DIR}/BUILD
chmod ug+rwx ${DIR}/BUILD
# Need to use /tmp/ngraph-cpp-test/BUILD, because running as user
# Can't use /root/ngraph-cpp-test/BUILD, because /root not accessible to user
docker run \
--rm --user ${CALLER_UID}:${CALLER_GID} \
${DOCKER_RUN_ENV} ${VOLUME} -w /tmp/ngraph-cpp-test/BUILD -t ngraph_cpp_cpu:${BUILD_VERSION} \
sh -c "cmake -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 .. ; env VERBOSE=1 make check"
# update the files to be owned by the calling user instead of root, to avoid docker mount problems with file ownership
docker run --rm ${VOLUME} \
--env MY_UID=${CALLER_UID} \
--env MY_GID=${CALLER_GID} \
--env MY_ROOT_DIR=/root/ngraph-cpp-test \
-t ngraph_cpp_cpu \
/tmp/chown_files.sh
mkdir -p "${DIR}"/BUILD
chmod ug+rwx "${DIR}"/BUILD
docker run --rm --tty \
${VOLUME} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD; cmake -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 .. 2>&1 | tee cmake.log ; env VERBOSE=1 make ${PARALLEL} 2>&1 | tee make.log ; env VERBOSE=1 make check 2>&1 | tee make_check.log" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
shell: build_ngraph_cpp_cpu
docker run --rm ${VOLUME} -it ngraph_cpp_cpu:${BUILD_VERSION} /bin/bash
# "make shell" runs an interactive shell in the docker image, for debugging
docker run --rm --tty --interactive \
${VOLUME} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "cd ${DOCKUSER_HOME} ; ${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
install:
# Puts ngraph_dist in BUILD directory. This is used by Jenkins ngraph-tensorflow batch job.
# Note: We currently have a bug where cmake only installs in $HOME. Jira NGTF-205 has been
# opened for this. For now, we install to $HOME here, then move the directory.
docker run --rm --tty \
${VOLUME} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD ; test -d ngraph_dist && rm -fr ngraph_dist && echo 'Removed old ngraph_dist directory' ; make install 2>&1 | tee make_install.log ; mv -v ${DOCKUSER_HOME}/ngraph_dist ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
all: build_ngraph_cpp_cpu
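
# For reference, typical invocations of the targets above (illustrative):
#   make build_ngraph_cpp_cpu   # build the ngraph_cpp_cpu docker image
#   make check_cpu              # configure, build, and run "make check" inside the container
#   make shell                  # interactive shell inside the image, for debugging
#   make install                # stage ngraph_dist into the BUILD directory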
#!/bin/bash
# the docker run commands leave output files with root ownership
# modify the file ownership with the UID of the calling user
if [ -z "$MY_UID" ] ; then
    MY_UID=$(id -u)
fi
if [ -z "$MY_GID" ] ; then
    MY_GID=$(id -g)
fi
if [ -z "$MY_ROOT_DIR" ] ; then
    MY_ROOT_DIR=/root/ngraph-test
fi
cd "$MY_ROOT_DIR"
find . -user root > files_to_chown.txt
xargs chown "${MY_UID}" ${1} < files_to_chown.txt
xargs chgrp "${MY_GID}" ${1} < files_to_chown.txt
rm files_to_chown.txt
......@@ -7,7 +7,7 @@ echo
# clean up old docker containers
echo "Removing Exited docker containers..."
docker ps -a | grep Exited | cut -f 1 -d ' ' | xargs docker rm -f ${1}
docker ps -a | grep Exited | cut -f 1 -d ' ' | xargs docker rm -f "${1}"
echo
#list docker images for ngraph
......@@ -17,4 +17,4 @@ echo
# clean up docker images no longer in use
echo "Removing docker images for ngraph..."
docker images -qa ngraph_* | xargs docker rmi -f ${1}
docker images -qa ngraph_* | xargs docker rmi -f "${1}"
#! /bin/bash
# This script is designed to simulate running as a user with a particular UID
# within a docker container.
#
# Normally a docker container runs as root, which can cause problems with file
# ownership when a host directory tree is mounted into the docker container.
# There are other problems with building and running software as root as
# well. Good practice when validating software builds in a docker container
# is to run as a normal user, since many (most?) end users will not be building
# and installing software as root.
#
# This script should be run using "docker run", with RUN_UID (set to the user
# you want to run as) passed into the docker container as an environment
# variable. The script will then add the UID as user "dockuser" to
# /etc/passwd (important for some software, like bazel), add the new dockuser
# to the sudo group (whether or not sudo is installed), and su to a new shell
# as the dockuser (passing in the existing environment, which is important).
#
# If the environment variable RUN_CMD is passed into the docker container, then
# this script will use RUN_CMD as a command to run when su'ing. If RUN_CMD is
# not defined, then /bin/bash will run, which effectively provides an
# interactive shell in the docker container, for debugging.
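#
# For illustration only (the image name and mount path below are
# hypothetical), an invocation might look like:
#
#   docker run --rm --env RUN_UID="$(id -u)" \
#       --env RUN_CMD="make check" \
#       -v "$PWD:/home/dockuser/src" some_image \
#       /home/dockuser/src/contrib/docker/run_as_user.sh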
set -e # Make sure we exit on any command that returns non-zero
set -u # No unset variables
if [ -z "$RUN_UID" ] ; then
# >&2 redirects echo output to stderr.
# See: https://stackoverflow.com/questions/2990414/echo-that-outputs-to-stderr
( >&2 echo 'ERROR: Environment variable RUN_UID was not set when run_as_user.sh was run' )
( >&2 echo '       Refusing to continue as the default user (root, in docker)' )
( >&2 echo ' ' )
exit 1
else
# The username used in the docker container to map the caller UID to
#
# Note 'dockuser' is used in other scripts, notably Makefile. If you
# choose to change it here, then you need to change it in all other
# scripts, or else the builds will break.
#
DOCK_USER='dockuser'
# We will be su'ing using a non-login shell or command, and preserving
# the environment.  This is done so that environment variables passed
# in with "docker run --env ..." are honored.  Because the environment
# is preserved, HOME would otherwise remain /root, so we must reset it
# to the dockuser home directory here.
#
# Note also that /home/dockuser is used in other scripts, notably
# Makefile. If you choose to change it here, then you need to change it
# in all other scripts, or else the builds will break.
#
export HOME="/home/${DOCK_USER}"
# Make sure the home directory is owned by the new user
if [ -d "${HOME}" ] ; then
chown "${RUN_UID}" "${HOME}"
fi
# Add a user with UID of person running docker (in ${RUN_UID})
# If $HOME does not yet exist, then it will be created
adduser --disabled-password --gecos 'Docker-User' -u "${RUN_UID}" "${DOCK_USER}"
# Add dockuser to the sudo group
adduser "${DOCK_USER}" sudo
# If root access is needed in the docker image while running as a normal
# user, uncomment this and add 'sudo' as a package installed in Dockerfile
# echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers
if [ -z "${RUN_CMD+x}" ] ; then # Launch a shell as dockuser
su -m "${DOCK_USER}" -c /bin/bash
else # Run command as dockuser
su -m "${DOCK_USER}" -c "${RUN_CMD}"
fi
fi
......@@ -3,5 +3,8 @@ PROJECT_BRIEF = "Nervana graph compiler"
OUTPUT_DIRECTORY = @CMAKE_CURRENT_BINARY_DIR@
INPUT = @CMAKE_SOURCE_DIR@/src
RECURSIVE = YES
EXTRACT_STATIC = YES
USE_MATHJAX = YES
......@@ -36,7 +36,10 @@ set (SRC
ops/op.cpp
ops/parameter.cpp
ops/reduce.cpp
ops/reshape.cpp
ops/select.cpp
ops/slice.cpp
ops/sum.cpp
ops/tuple.cpp
ops/unary_elementwise_arithmetic.cpp
ops/unary_elementwise_builtin.cpp
......
......@@ -43,6 +43,9 @@ namespace ngraph
/// @brief A set of axes, for example, reduction axes
using AxisSet = std::set<size_t>;
/// @brief Coordinate in a tensor
using Coordinate = std::vector<size_t>;
/// @brief Shape for a tensor
using Shape = std::vector<size_t>;
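
// To make the aliases concrete (illustrative values only):
//
//     Shape shape{2, 4, 2};          // the shape of a rank-3 tensor
//     Coordinate coord{1, 3, 0};     // one position within that shape
//     AxisSet reduction_axes{0, 2};  // e.g., the axes summed over in a reduction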
......
......@@ -60,9 +60,9 @@ namespace ngraph
Output& m_output;
private:
// Input(const Input&) = default;
// Input(Input&&) = default;
// Input& operator=(const Input&) = delete;
Input(const Input&) = delete;
Input(Input&&) = delete;
Input& operator=(const Input&) = delete;
};
}
}
......@@ -17,21 +17,23 @@
#include <memory>
#include <set>
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/node.hpp"
namespace ngraph
{
// The forward declaration of Node is needed here because Node has a deque of
// Outputs, and Output is an incomplete type at this point. STL containers of
// incomplete type have undefined behavior according to the C++11 standard, and
// in practice including node.hpp here was causing compilation errors on some
// systems (namely macOS).
class Node;
namespace descriptor
{
// Describes an output tensor of an op
class Output
{
// For some odd reason, emplace_back requires a copy constructor here;
// it should not. See issue #111 for details.
// Output(const Output&) = delete;
// Output& operator=(const Output&) = delete;
public:
/// @param node Node that owns this output.
/// @param index Position of the output tensor in all output tensors
......@@ -53,6 +55,11 @@ namespace ngraph
size_t m_index;
std::shared_ptr<TensorView> m_tensor_view;
std::set<Input*> m_inputs;
private:
Output(const Output&) = delete;
Output(Output&&) = delete;
Output& operator=(const Output&) = delete;
};
}
}
......@@ -20,7 +20,7 @@
using namespace std;
using namespace ngraph;
size_t Function::m_next_instance_id = 0;
atomic<size_t> Function::m_next_instance_id(0);
Function::Function(const std::shared_ptr<Node>& result,
const std::shared_ptr<ValueType>& result_type,
......@@ -31,7 +31,7 @@ Function::Function(const std::shared_ptr<Node>& result,
, m_name(name)
, m_result_type(result_type)
, m_ordered_ops_valid(false)
, m_instance_id(m_next_instance_id++)
, m_instance_id(m_next_instance_id.fetch_add(1))
{
size_t i = 0;
for (auto parameter : parameters)
......@@ -39,26 +39,26 @@ Function::Function(const std::shared_ptr<Node>& result,
parameter->assign_function(this, i++);
}
traverse_nodes(result, [&](Node* node) { m_ops.push_back(node); });
traverse_nodes(result, [&](shared_ptr<Node> node) { m_ops.push_back(node); });
}
void Function::set_ordered_ops(const std::list<Node*>& ordered_ops)
void Function::set_ordered_ops(const std::list<shared_ptr<Node>>& ordered_ops)
{
m_ordered_ops = ordered_ops;
m_ordered_ops_valid = true;
}
std::list<Node*>& Function::get_ops()
std::list<shared_ptr<Node>>& Function::get_ops()
{
return m_ops;
}
const std::list<Node*>& Function::get_ops() const
const std::list<shared_ptr<Node>>& Function::get_ops() const
{
return m_ops;
}
std::list<Node*>& Function::get_ordered_ops()
std::list<shared_ptr<Node>>& Function::get_ordered_ops()
{
if (!m_ordered_ops_valid)
{
......@@ -67,7 +67,7 @@ std::list<Node*>& Function::get_ordered_ops()
return m_ordered_ops;
}
const std::list<Node*>& Function::get_ordered_ops() const
const std::list<shared_ptr<Node>>& Function::get_ordered_ops() const
{
if (!m_ordered_ops_valid)
{
......
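The counter change above (a plain `static size_t` replaced by `std::atomic<size_t>` with `fetch_add`) makes instance-ID assignment safe if functions are ever constructed from multiple threads. A minimal sketch of the pattern (illustrative, not from this codebase):

```
#include <atomic>
#include <cstddef>

static std::atomic<std::size_t> next_id(0);

// fetch_add atomically returns the previous value and increments, so no two
// concurrent callers can ever observe the same ID.
std::size_t new_id()
{
    return next_id.fetch_add(1);
}
```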
......@@ -14,6 +14,7 @@
#pragma once
#include <atomic>
#include <initializer_list>
#include <list>
#include <memory>
......@@ -46,11 +47,11 @@ namespace ngraph
const std::shared_ptr<ValueType> get_result_type() const { return m_result_type; }
std::string get_name() const;
void set_name(const std::string& name);
std::list<Node*>& get_ops();
const std::list<Node*>& get_ops() const;
std::list<Node*>& get_ordered_ops();
const std::list<Node*>& get_ordered_ops() const;
void set_ordered_ops(const std::list<Node*>&);
std::list<std::shared_ptr<Node>>& get_ops();
const std::list<std::shared_ptr<Node>>& get_ops() const;
std::list<std::shared_ptr<Node>>& get_ordered_ops();
const std::list<std::shared_ptr<Node>>& get_ordered_ops() const;
void set_ordered_ops(const std::list<std::shared_ptr<Node>>&);
void set_ordered_ops_valid() { m_ordered_ops_valid = true; }
void clear_ordered_ops_valid() { m_ordered_ops_valid = false; }
friend std::ostream& operator<<(std::ostream&, const Function&);
......@@ -61,14 +62,14 @@ namespace ngraph
std::string m_name;
std::shared_ptr<ValueType> m_result_type;
bool m_ordered_ops_valid;
std::list<Node*> m_ordered_ops;
std::list<Node*> m_ops;
std::list<std::shared_ptr<Node>> m_ordered_ops;
std::list<std::shared_ptr<Node>> m_ops;
private:
Function(const Function&) = delete;
Function(Function&&) = delete;
static size_t m_next_instance_id;
static std::atomic<size_t> m_next_instance_id;
size_t m_instance_id;
};
}
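Replacing `std::list<Node*>` with `std::list<std::shared_ptr<Node>>` gives the op lists shared ownership of the nodes instead of raw observer pointers, so the lists cannot dangle if the nodes' original owner releases them. A small illustration of the difference (not from this codebase):

```
#include <list>
#include <memory>

struct Node
{
    int id;
};

int main()
{
    std::list<std::shared_ptr<Node>> ops;
    {
        auto n = std::make_shared<Node>();
        n->id = 42;
        ops.push_back(n); // the list now co-owns the node
    } // n goes out of scope, but the node stays alive through ops
    // With std::list<Node*>, ops.front() would dangle here if the node's
    // original owner had released it.
    return ops.front()->id == 42 ? 0 : 1;
}
```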
......@@ -49,12 +49,17 @@
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
#include "ngraph/ops/abs.hpp"
#include "ngraph/ops/acos.hpp"
#include "ngraph/ops/add.hpp"
#include "ngraph/ops/asin.hpp"
#include "ngraph/ops/atan.hpp"
#include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/ceiling.hpp"
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp"
#include "ngraph/ops/cos.hpp"
#include "ngraph/ops/cosh.hpp"
#include "ngraph/ops/divide.hpp"
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/equal.hpp"
......@@ -77,8 +82,16 @@
#include "ngraph/ops/power.hpp"
#include "ngraph/ops/reduce.hpp"
#include "ngraph/ops/remainder.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/select.hpp"
#include "ngraph/ops/sign.hpp"
#include "ngraph/ops/sin.hpp"
#include "ngraph/ops/sinh.hpp"
#include "ngraph/ops/slice.hpp"
#include "ngraph/ops/subtract.hpp"
#include "ngraph/ops/sum.hpp"
#include "ngraph/ops/tan.hpp"
#include "ngraph/ops/tanh.hpp"
#include "ngraph/ops/tuple.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/call_frame.hpp"
......
......@@ -17,12 +17,12 @@
using namespace std;
using namespace ngraph;
size_t Node::m_next_instance_id = 0;
atomic<size_t> Node::m_next_instance_id(0);
Node::Node(const std::vector<shared_ptr<Node>>& arguments, shared_ptr<ValueType> value_type)
: m_arguments(arguments)
, m_value_type(value_type)
, m_instance_id(m_next_instance_id++)
, m_instance_id(m_next_instance_id.fetch_add(1))
, m_is_output(false)
{
// Add this node as a user of each argument.
......
......@@ -14,6 +14,7 @@
#pragma once
#include <atomic>
#include <set>
#include <string>
#include <unordered_set>
......@@ -71,7 +72,8 @@ namespace ngraph
/// graph against the graph.
bool is_same_op_type(const std::shared_ptr<Node>& node) const
{
return typeid(*this) == typeid(*node.get());
Node* n = node.get();
return typeid(*this) == typeid(*n);
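// (Taking the raw pointer first presumably sidesteps clang's
// -Wpotentially-evaluated-expression warning, which fires when typeid is
// applied to the result of dereferencing a smart pointer; the comparison
// itself is unchanged.)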
}
std::shared_ptr<const ValueType> get_value_type() { return m_value_type; }
......@@ -99,10 +101,10 @@ namespace ngraph
size_t get_instance_id() const { return m_instance_id; }
friend std::ostream& operator<<(std::ostream&, const Node&);
std::vector<descriptor::Input>& get_inputs() { return m_inputs; }
const std::vector<descriptor::Input>& get_inputs() const { return m_inputs; }
std::vector<descriptor::Output>& get_outputs() { return m_outputs; }
const std::vector<descriptor::Output>& get_outputs() const { return m_outputs; }
std::deque<descriptor::Input>& get_inputs() { return m_inputs; }
const std::deque<descriptor::Input>& get_inputs() const { return m_inputs; }
std::deque<descriptor::Output>& get_outputs() { return m_outputs; }
const std::deque<descriptor::Output>& get_outputs() const { return m_outputs; }
std::unordered_set<descriptor::Tensor*> liveness_live_list;
std::unordered_set<descriptor::Tensor*> liveness_new_list;
std::unordered_set<descriptor::Tensor*> liveness_free_list;
......@@ -113,9 +115,9 @@ namespace ngraph
std::multiset<Node*> m_users;
std::string m_name;
size_t m_instance_id;
static size_t m_next_instance_id;
std::vector<descriptor::Input> m_inputs;
std::vector<descriptor::Output> m_outputs;
static std::atomic<size_t> m_next_instance_id;
std::deque<descriptor::Input> m_inputs;
std::deque<descriptor::Output> m_outputs;
bool m_is_output;
};
}
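The switch from `std::vector` to `std::deque` for `m_inputs`/`m_outputs` matters because `descriptor::Input` and `descriptor::Output` are non-copyable and are referred to by address elsewhere (`Output` keeps a `std::set<Input*>`, and `Input` holds an `Output&`): `deque` leaves existing elements in place when appending at the ends, while `vector` may reallocate and move them. A sketch (illustrative):

```
#include <deque>

struct Pinned
{
    explicit Pinned(int v) : value(v) {}
    Pinned(const Pinned&) = delete;            // non-copyable (and hence
    Pinned& operator=(const Pinned&) = delete; // non-movable, too)
    int value;
};

int main()
{
    std::deque<Pinned> items;
    items.emplace_back(1);
    Pinned* first = &items.front();
    items.emplace_back(2); // deque growth leaves existing elements in place,
                           // so `first` stays valid; a vector could have
                           // reallocated here (and vector::emplace_back would
                           // not even compile for a non-movable element type).
    return first->value;   // safe: returns 1
}
```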
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise absolute value operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = |\texttt{arg}[i_1,\dots,i_n]|\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------------------------- |
/// | NGVM | Implemented for signed types only. |
class Abs : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an absolute value operation.
///
/// \param arg Node that produces the input tensor.
Abs(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise inverse cosine (arccos) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \arccos(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Acos : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an arccos operation.
///
/// \param arg Node that produces the input tensor.
Acos(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Acos"; }
};
}
}
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise addition operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] + \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Add : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs an addition operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Add(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise inverse sine (arcsin) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \arcsin(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Asin : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an arcsin operation.
///
/// \param arg Node that produces the input tensor.
Asin(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Asin"; }
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise inverse tangent (arctan) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \arctan(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Atan : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an arctan operation.
///
/// \param arg Node that produces the input tensor.
Atan(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Atan"; }
};
}
}
......@@ -20,19 +20,55 @@ namespace ngraph
{
namespace op
{
class Broadcast : public IndexBuiltin
/// \brief Operation which "adds" axes to an input tensor, replicating elements from the input as needed along the new axes.
///
/// Informally, a broadcast "adds" axes to the input tensor, replicating elements from the input tensor as needed to fill the new dimensions.
/// The parameter `broadcast_axes` indicates which of the output axes are added in this way. For example, an output shape of `{2,5,6,2,8}` and
/// broadcast axes of `{1,3,4}` means that the input must have shape `{2,6}`.
///
/// Formally, given a shape or coordinate \f$S = [d_1,\dots,d_n]\f$ and a set of axis indices \f$A\f$, define \f$\textit{del}(S,A)\f$ to be
/// the shape or coordinate obtained by deleting the \f$(a + 1)\f$th dimension from \f$S\f$ for each \f$a \in A\f$. Then given an input
/// tensor \f$T\f$ of shape \f$\textit{del}(S,A)\f$ with element type \f$E\f$, broadcasting axes \f$A\f$ produces a tensor \f$T'\f$ of shape
/// \f$S\f$ with element type \f$E\f$, where \f$T'[i_1,\dots,i_n] = T[del([i_1,\dots,i_n],A)]\f$.
///
/// ## Parameters
///
/// | | Description |
/// | ---------------- | ------------------------------------------------------------------------ |
/// | `shape` | The shape \f$[d_1,\dots,d_n]\f$ of the broadcasted output. |
/// | `broadcast_axes` | The indices \f$A\f$ in the `shape` of each broadcasted (i.e., new) axis. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------------------------- | --------------------------------------- |
/// | `arg` | \f$E[\mathit{del}([d_1,\dots,d_n],A)]~(n \geq 0)\f$ | A tensor of any shape and element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T'\f$, where \f$T'[i_1,\dots,i_n] = T[del([i_1,\dots,i_n],A)]\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------- |
/// | NGVM | Implemented for scalars, matrices, and vectors. |
class Broadcast : public Builtin
{
public:
/// \brief Constructs a broadcast operation.
///
/// @param arg The tensor view to be broadcast.
/// @param shape The shape of the result
/// @param broadcast_axes The axis positions (0-based) in the result that are being broadcast.
/// the remaining axes in shape must be the same as the shape of arg.
///
/// \param arg Node that produces the input tensor to be broadcast.
/// \param shape The shape of the output tensor.
/// \param broadcast_axes The axis positions (0-based) in the result that are being broadcast. The
/// remaining axes in `shape` must be the same as the shape of `arg`.
Broadcast(const std::shared_ptr<Node>& arg,
const Shape& shape,
const AxisSet& broadcast_axes)
: IndexBuiltin(arg)
: Builtin({arg})
, m_shape(shape)
, m_broadcast_axes(broadcast_axes)
{
......@@ -41,6 +77,7 @@ namespace ngraph
virtual std::string description() const override { return "Broadcast"; }
virtual void propagate_types() override;
/// \return A set containing the indices of the broadcast axes (0-based).
const AxisSet& get_broadcast_axes() const { return m_broadcast_axes; }
protected:
Shape m_shape;
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise ceiling operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \lceil \texttt{arg}[i_1,\dots,i_n] \rceil\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------- |
/// | NGVM | Not implemented. |
class Ceiling : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a ceiling operation.
///
/// \param arg Node that produces the input tensor.
Ceiling(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,18 +20,54 @@ namespace ngraph
{
namespace op
{
/// \brief Concatenation operation.
///
/// Given an axis index \f$a\f$ and a rank \f$r \geq 1\f$ where \f$0 \leq a \lt r\f$, and one or more \f$r\f$-tensors
/// with the same element type whose shapes are the same except possibly at axis \f$a\f$, the tensors are
/// concatenated along axis \f$a\f$.
///
/// For example:
/// 1. Concatenating matrices on axis 0 (the row axis) stacks the matrices from top to bottom.
/// The number of rows in the resulting matrix is the sum of the number of rows for each
/// input matrix.
/// 2. Concatenating matrices on axis 1 (the column axis) concatenates them from left to right.
/// The number of columns in the resulting matrix is the sum of the number of columns for each
/// input matrix.
/// 3. Concatenating 3-tensors on axis 2 (the depth axis) stacks them from front to back.
/// The depth of the resulting tensor is the sum of the depths of the input tensors.
///
/// The resulting tensor will have the same rank as the input tensors.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | -------------------------------------------------------------- |
/// | `concatenation_axis` | The axis \f$a\f$ along which to concatenate the input tensors. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | --------------- | ------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ |
/// | `args`[\f$i\f$] | \f$E[d_1,\dots,d_{a-1},d^i_a,d_{a+1},\dots,d_n]~(n \geq 1)\f$ | One or more input tensors, all of which have the same element type, and the same shape, except possibly at axis \f$a\f$. |
///
/// ## Output
///
/// | Type | Description |
/// | ------------------------------------------------------------ | ----------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_{a-1},\Sigma_i(d^i_a),d_{a+1},\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T\f$ is the concatenation of the input tensors along axis \f$a\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------------------------- |
/// | NGVM | Implemented for vectors and matrices. |
class Concat : public Builtin
{
public:
/// Concatenates one or more tensors.
///
/// All tensors must have the same rank, and the sizes of the axes must match
/// everywhere except at the concatenation axis. The size of the concatenation
/// axis on the output is the sum of its size on all inputs; the size of other
/// axes is unchanged from the input tensors.
/// \brief Constructs a concatenation operation.
///
/// Example: n0 has shape {2,4,2}, and n1 has shape {2,5,2}. Then the output of
/// Concat(Nodes{n0,n1},1) will have shape {2,9,2}.
/// \param args The nodes producing the input tensors.
/// \param concatenation_axis The axis along which to concatenate the input tensors.
Concat(const Nodes& args, size_t concatenation_axis)
: Builtin(args)
, m_concatenation_axis(concatenation_axis)
......@@ -41,6 +77,7 @@ namespace ngraph
virtual std::string description() const override { return "Concatenate"; }
virtual void propagate_types() override;
/// \return The concatenation axis.
size_t get_concatenation_axis() const { return m_concatenation_axis; }
protected:
const size_t m_concatenation_axis;
......
......@@ -16,10 +16,35 @@
using namespace ngraph::op;
void ScalarConstantBase::propagate_types()
void ConstantBase::propagate_types()
{
}
void TensorConstantBase::propagate_types()
template <typename ET>
void check_value_strings(const std::vector<std::string>& value_strings)
{
auto result = ET::read(value_strings);
}
void Constant::propagate_types()
{
// No actual type propagation is done here; however, we check the number of value strings and
// also call check_value_strings just to make sure the result will be parseable at compile
// time. (It will throw an exception if not.)
auto tvt = std::dynamic_pointer_cast<const TensorViewType>(get_value_type());
if (nullptr == tvt)
{
throw ngraph_error("Constant does not have tensor view type");
}
auto shape = tvt->get_shape();
if (ngraph::shape_size(shape) != m_value_strings.size())
{
throw ngraph_error("Constant does not have the expected number of literals");
}
auto& et = tvt->get_element_type();
FUNCTION_ON_ELEMENT_TYPE(
et, "Constant has unhandled element type", check_value_strings, m_value_strings);
}
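The `FUNCTION_ON_ELEMENT_TYPE` macro (defined elsewhere in the codebase) dispatches a function template on the runtime element type. A hedged sketch of that general pattern, with hypothetical element-type tags standing in for the real ones:

```
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical stand-ins for the real element-type classes.
struct Float32 { static float read(const std::string& s) { return std::stof(s); } };
struct Int32   { static int   read(const std::string& s) { return std::stoi(s); } };

template <typename ET>
void check_value_strings(const std::vector<std::string>& value_strings)
{
    for (const auto& s : value_strings)
        ET::read(s); // throws if a literal cannot be parsed as ET
}

// Runtime tag -> template dispatch, in the spirit of FUNCTION_ON_ELEMENT_TYPE:
void check_for(const std::string& type_name, const std::vector<std::string>& vs)
{
    if (type_name == "float32")     check_value_strings<Float32>(vs);
    else if (type_name == "int32")  check_value_strings<Int32>(vs);
    else throw std::runtime_error("unhandled element type");
}
```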
......@@ -20,9 +20,41 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise type conversion operation.
///
/// Each scalar in the input tensor is converted to the specified output element type. Note that the conversion may
/// result in loss of precision; for example, conversion from `float32` to `int32` is allowed even though it cannot preserve all values.
///
/// ## Parameters
///
/// | | Description |
/// | -------------- | ---------------------------------------- |
/// | `element_type` | The element type \f$E'\f$ to convert to. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------- |
/// | `arg` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------- | --------------------------------------------------------------------------------------------------------- |
/// | \f$E'[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathit{convert}_{(E,E')}(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Convert : public UnaryElementwiseBuiltin
{
public:
/// \brief Constructs a conversion operation.
///
/// \param arg Node that produces the input tensor.
/// \param element_type Element type for the output tensor.
Convert(const std::shared_ptr<Node>& arg, const ngraph::element::Type& element_type)
: UnaryElementwiseBuiltin({arg})
, m_element_type(element_type)
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise cosine operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \cos(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Cos : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a cosine operation.
///
/// \param arg Node that produces the input tensor.
Cos(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Cos"; }
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise hyperbolic cosine (cosh) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \cosh(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Cosh : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a hyperbolic cosine operation.
///
/// \param arg Node that produces the input tensor.
Cosh(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Cosh"; }
};
}
}
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise division operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \mathbin{/} \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Divide : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a division operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Divide(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,28 +20,95 @@ namespace ngraph
{
namespace op
{
/// \brief Inner product/dot product/matrix product/tensor contraction operation.
///
/// Takes two arguments `arg0` and `arg1`. There are three possible cases:
///
/// 1. `arg0` or `arg1` is 0-dimensional. Then, treats that 0-dimensional argument as a scalar and computes a scalar-tensor product.
/// (Example: `arg0` has shape `{1,2,3}` and `arg1` has shape `{}`; then the result will have shape `{1,2,3}`.)
///
/// 2. `arg1` is a vector (1-dimensional tensor). Then, computes a dot product reducing on the innermost (rightmost) dimensions of `arg0` and `arg1`.
/// (Example: `arg0` has shape `{1,2,3}` and `arg1` has shape `{3}`; then the result will have shape `{1,2}`.)
///
/// 3. `arg1` is more than 1-dimensional. Then, computes a dot product reducing on the innermost (rightmost) dimension of `arg0`, and the next-to-innermost dimension of `arg1`.
/// (Example: `arg0` has shape `{3,4}` and `arg1` has shape `{4,3}`; then the result will have shape `{3,3}`.)
///
///
/// = Case 1: Scalar-tensor product =
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------------ |
/// | `arg0` | \f$E[]\f$ | A scalar of any element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape, with the same element type as `arg0`. |
///
/// <i>(Note: the order of inputs may be reversed in this case, i.e., `arg1` can be the scalar and `arg0` the tensor.)</i>
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ---------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathtt{arg0} \cdot \mathtt{arg1}[i_1,\dots,i_n]\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
///
/// = Case 2: Vector-tensor product =
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ----------------------------------- | ------------------------------------------------------------------------------------------------------------ |
/// | `arg0` | \f$E[d]\f$ | A vector of any element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n,d]~(n \geq 0)\f$ | A tensor of any shape whose innermost dimension matches `arg0`'s size, with the same element type as `arg0`. |
///
/// <i>(Note: in the particular case where \f$n = 0\f$, this is a vector dot product; when \f$n = 1\f$, this is a vector-matrix product.)</i>
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ---------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \mathtt{arg0} \cdot \mathtt{arg1}[i_1,\dots,i_n]\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------------------------------- |
/// | NGVM | Implemented for `arg1` with rank 2 or less. |
///
/// = Case 3: Tensor-tensor product =
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ----------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 1)\f$ | A tensor of any shape with rank of at least 1, and any element type. |
/// | `arg1` | \f$E[d'_1,\dots,d'_m]~(m \geq 2\text{ and }d'_{m-1}=d_n)\f$ | A tensor with the same element type as `arg0`, and any shape with rank of at least 2 whose next-to-innermost dimension matches `arg0`'s innermost dimension. |
///
/// <i>(Note: in the particular case where \f$n = m = 2\f$, this is a matrix product.)</i>
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------|
/// | \f$E[d_1,\dots,d_{n-1},d'_1,\dots,d'_{m-2},d'_m]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_{n-1},j_1,\dots,j_{m-2},j_m] = \sum_{k} \texttt{arg0}[i_1,\dots,i_{n-1},k] \cdot \texttt{arg1}[j_1,\dots,j_{m-2},k,j_m]\f$ (the same convention as numpy's `dot`). |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------------------------------------- |
/// | NGVM | Implemented for `arg1` with rank of exactly 2. |
class Dot : public Builtin
{
public:
/// Computes the dot product of two tensors.
///
/// There are three possible cases:
/// (1) arg0 or arg1 is 0-dimensional. Then, we treat the 0-dimensional
/// argument(s) as scalars and compute a scalar-tensor or
/// scalar-scalar product.
/// (Example: arg0 has shape {1,2,3} and arg1 has shape {}; then
/// the result will have shape {1,2,3}.)
///
/// (2) arg1 is 1-dimensional. Then, we compute a dot product reducing
/// on the innermost (rightmost) dimensions of arg0 and arg1.
/// (Example: arg0 has shape {1,2,3} and arg1 has shape {3}; then
/// the result will have shape {1,2}.)
///
/// (3) arg1 is more than 1-dimensional. Then, we compute a dot product
/// reducing on the innermost (rightmost) dimension of arg0, and the
/// next-to-innermost dimension of arg1.
/// (Example: arg0 has shape {3,4} and arg1 has shape {4,3}; then
/// the result will have shape {3,3}.)
/// \brief Constructs a dot product operation.
///
/// \param arg0 The node producing the first argument.
/// \param arg1 The node producing the second argument.
Dot(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: Builtin({arg0, arg1})
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise is-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------ |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Equal : public BinaryElementwiseComparison
{
public:
/// \brief Constructs an is-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Equal(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise natural exponential (exp) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \exp(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Exp : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an exponential operation.
///
/// \param arg Node that produces the input tensor.
Exp(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise floor operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ---------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \lfloor \texttt{arg}[i_1,\dots,i_n] \rfloor\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------- |
/// | NGVM | Not implemented. |
class Floor : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a floor operation.
///
/// \param arg Node that produces the input tensor.
Floor(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -21,14 +21,39 @@ namespace ngraph
{
namespace op
{
/// \brief %Function call operation.
///
/// ## Parameters
///
/// | | Description |
/// | ---------- | -------------------------- |
/// | `function` | The function to be called. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ |
/// | `args` | \f$T_1,\dots,T_n\f$ where \f$n\f$ matches the number of arguments expected by `function` and \f$T_i\f$ matches the type expected for the \f$i\f$th argument of `function`. | The arguments for the function call. |
///
/// ## Output
///
/// | Type | Description |
/// | --------- | -------------------------------------------------------- |
/// | \f$T_R\f$ | The tensor returned by `function` when called on `args`. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class FunctionCall : public Builtin
{
public:
/// \brief Constructs a function call operation.
///
/// \param function The function to be called.
/// \param args The arguments for the function call.
FunctionCall(std::shared_ptr<Function> function,
const std::vector<std::shared_ptr<Node>>& args)
: Builtin(args)
, m_function(function)
......@@ -38,6 +63,7 @@ namespace ngraph
virtual std::string description() const override { return "FunctionCall"; }
virtual void propagate_types() override;
/// \return The function to be called.
std::shared_ptr<Function> get_function() const { return m_function; }
protected:
std::shared_ptr<Function> m_function;
......
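A sketch of a call site. The `Function` constructor arguments shown here (result node, result type, parameter list) are an assumption about the surrounding API, not part of this diff; element types are assumed as above.

```
// f(p) = p * p on a length-2 float vector.
auto p = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{2});
auto result_type = std::make_shared<TensorViewType>(element::Float32::element_type(), Shape{2});
auto f = std::make_shared<Function>(std::make_shared<op::Multiply>(p, p),
                                    result_type,
                                    std::vector<std::shared_ptr<op::Parameter>>{p});

// Apply f to another node; the call's value type is f's return type.
auto arg = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{2});
auto call = std::make_shared<op::FunctionCall>(f, std::vector<std::shared_ptr<Node>>{arg});
```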
......@@ -22,9 +22,38 @@ namespace ngraph
{
class Node;
/// \brief Operation to get an element from a tuple.
///
/// ## Parameters
///
/// | | Description |
/// | --- | ------------------------------------------------------------------ |
/// | `n` | The position of the element (0-based) to get from the input tuple. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ----------------------------------------------------------- | ------------------------------------------ |
/// | `arg`  | \f$(T_0,\dots,T_n,\dots,T_{m-1})~(m > n)\f$                 | An input tuple with at least \f$n+1\f$ elements.  |
///
/// ## Output
///
/// | Type | Description |
/// | --------- | ------------------------------------- |
/// | \f$T_n\f$ | The element of the input tuple at position `n` (0-based). |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class GetTupleElement : public Builtin
{
public:
/// \brief Constructs a get-tuple-element operation.
///
/// \param arg The input tuple.
/// \param n The index of the tuple element to get.
GetTupleElement(const std::shared_ptr<Node>& arg, size_t n)
: Builtin({arg})
, m_n{n}
......@@ -33,6 +62,7 @@ namespace ngraph
virtual void propagate_types() override;
virtual std::string description() const override { return "GetTupleElement"; }
/// \return The index of the tuple element to get.
size_t get_n() const { return m_n; }
protected:
size_t m_n;
......
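A sketch pairing `Tuple` with `GetTupleElement` (element types assumed; `Nodes` is the node-vector alias used by the `Tuple` constructor documented later in this commit):

```
auto a = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{2});
auto b = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{3});
auto t = std::make_shared<op::Tuple>(Nodes{a, b});
// Selects position 1 (0-based), i.e. b; the result type is Float32 of shape {3}.
auto second = std::make_shared<op::GetTupleElement>(t, 1);
```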
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise greater-than operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] > \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Greater : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a greater-than operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Greater(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise greater-than-or-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \geq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class GreaterEq : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a greater-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
GreaterEq(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise less-than operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] < \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Less : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a less-than operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Less(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise less-than-or-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \leq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class LessEq : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a less-than-or-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
LessEq(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise natural log operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \ln(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Log : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a natural log operation.
///
/// \param arg Node that produces the input tensor.
Log(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise maximum operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \max(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Maximum : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a maximum operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Maximum(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise minimum operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \min(\texttt{arg0}[i_1,\dots,i_n],\texttt{arg1}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Minimum : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a minimum operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Minimum(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise multiplication operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \cdot \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Multiply : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a multiplication operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Multiply(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise negation operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | --------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = -(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Negative : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a negation operation.
///
/// \param arg Node that produces the input tensor.
Negative(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
......
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise not-equal operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$\texttt{bool}[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = 1\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq \texttt{arg1}[i_1,\dots,i_n]\text{, else } 0\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class NotEqual : public BinaryElementwiseComparison
{
public:
/// \brief Constructs a not-equal operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
NotEqual(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseComparison(arg0, arg1)
{
......
......@@ -22,11 +22,29 @@ namespace ngraph
class Function;
namespace op
{
/// \brief A function parameter.
///
/// Parameters are nodes that represent the arguments that will be passed to user-defined functions.
/// Function creation requires a sequence of parameters.
/// Basic graph operations do not need parameters attached to a function.
///
/// ## Parameters
///
/// | | Description |
/// | -------------| ---------------------------------- |
/// | `value_type` | The type \f$T\f$ of the parameter. |
///
/// ## Output
///
/// | Type | Description |
/// | ------- | --------------------------------------------------------------------------------------------------------------------------- |
/// | \f$T\f$ | The value of the parameter, supplied by the `FunctionCall` to this function or in the initial `ngraph::runtime::CallFrame`. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Parameter : public Node
{
friend class ngraph::Function;
......@@ -37,7 +55,14 @@ namespace ngraph
void assign_function(Function* function, size_t index);
public:
/// \brief Constructs a parameter node.
///
/// \param value_type The type of the parameter.
Parameter(const std::shared_ptr<ValueType>& value_type = nullptr);
/// \brief Constructs a tensor view-typed parameter node.
///
/// \param element_type The element type of the parameter.
/// \param shape The shape of the parameter.
Parameter(const ngraph::element::Type& element_type, const Shape& shape);
std::string description() const override { return "Parameter"; }
......
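Both constructors in a sketch. The element type is assumed as above, and the second form assumes `TensorViewType` is a `ValueType`, following the pattern used elsewhere in this commit:

```
// From an element type and shape...
auto p0 = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{2, 2});
// ...or from a pre-built value type.
auto vt = std::make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 2});
auto p1 = std::make_shared<op::Parameter>(vt);
```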
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise exponentiation operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n]^{\texttt{arg1}[i_1,\dots,i_n]}\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------- |
/// | NGVM | Not implemented. |
class Power : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs an exponentiation operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Power(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -20,15 +20,82 @@ namespace ngraph
{
namespace op
{
/// \brief Tensor reduction operation.
///
/// Reduces the input tensor, eliminating the specified reduction axes, given a reduction function that maps two scalars to a scalar.
/// For example, if the reduction function \f$f(x,y) = x+y\f$:
///
/// \f[
/// \mathit{reduce}\left(f,\{0\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
/// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{reduce}\left(f,\{1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
/// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{reduce}\left(f,\{0,1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// (1 + 2) + (3 + 4) + (5 + 6) =
/// 21~~~\text{(both dimensions (rows and columns) are eliminated)}
/// \f]
///
/// It is assumed that \f$f\f$ is associative; the order in which the elements are combined is unspecified. In the case where a reduced axis has length 0,
/// the value of `arg_init` is substituted.
///
/// Note that the parameter `reduction_axes` specifies which axes are to be <i>eliminated</i>, which can be a bit counterintuitive. For example,
/// as seen above, eliminating the column dimension results in the <i>rows</i> being summed, not the columns.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | ------------------------------------------------------------------------------------------------------------------------- |
/// | `reduction_function` | The scalar function used to reduce the input tensor. Must take two arguments of type \f$E[]\f$ and return type \f$E[]\f$. |
/// | `reduction_axes` | The axes to eliminate through reduction. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | -------------- | --------------------------------- | ----------------------------------------------------------------------------------------------------- |
/// | `arg_reductee` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape, with the element type matching that expected by the reduction function. |
/// | `arg_init`     | \f$E[]\f$                         | A scalar to be used as a substitute output value on zero-sized axes.                                   |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------------- |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. |
class Reduce : public Builtin
{
public:
/// \brief Constructs a reduction operation.
///
/// \param arg_reductee The tensor view to be reduced.
/// \param arg_init The initial value for reduction.
/// \param reduction_function The reduction function to use.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Reduce(const std::shared_ptr<Node>& arg_reductee,
const std::shared_ptr<Node>& arg_init,
const std::shared_ptr<Function>& reduction_function,
......@@ -42,10 +109,12 @@ namespace ngraph
virtual std::string description() const override { return "Reduce"; }
virtual void propagate_types() override;
/// \return The function to use for reduction.
std::shared_ptr<Function> get_reduction_function() const
{
return m_reduction_function;
}
/// \return The axis positions (0-based) to be eliminated through reduction.
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
std::shared_ptr<Function> m_reduction_function;
......
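A sketch of a max-reduction over axis 0. As before, the `Function` construction is an assumed form and element types are assumptions; the `op::Reduce` constructor is the one documented above.

```
// Scalar reduction function f(x, y) = max(x, y).
auto fx = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto fy = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto scalar_type = std::make_shared<TensorViewType>(element::Float32::element_type(), Shape{});
auto max_fn = std::make_shared<Function>(std::make_shared<op::Maximum>(fx, fy),
                                         scalar_type,
                                         std::vector<std::shared_ptr<op::Parameter>>{fx, fy});

// Eliminate axis 0 of a 3x2 matrix: the result is the vector of 2 columnwise maxima.
auto arg = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{3, 2});
auto init = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{});
auto maxes = std::make_shared<op::Reduce>(arg, init, max_fn, AxisSet{0});
```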
......@@ -20,9 +20,35 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise remainder operation.
///
/// (TODO: Get a bit more clarity on this: is it just "mod"? What about negative numbers and floats?)
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] \mod \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ---------------- |
/// | NGVM | Not implemented. |
class Remainder : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a remainder operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Remainder(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
......@@ -12,62 +12,69 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <algorithm>

#include "ngraph/ngraph.hpp"

using namespace std;
using namespace ngraph;
using namespace ngraph::runtime;

CallFrame::CallFrame(size_t n_inputs,
                     size_t n_outputs,
                     const TensorViewPtrs& temps,
                     size_t initial_pc,
                     const shared_ptr<vector<shared_ptr<Instruction>>>& instructions)
    : m_n_inputs(n_inputs)
    , m_n_outputs(n_outputs)
    , m_tensor_views(n_inputs + n_outputs + temps.size())
    , m_initial_pc(initial_pc)
    , m_instructions(instructions)
{
    copy(temps.begin(), temps.end(), m_tensor_views.begin() + m_n_inputs + m_n_outputs);
}

void CallFrame::tensor_call(
    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& inputs,
    const std::vector<std::shared_ptr<ngraph::runtime::TensorView>>& outputs)
{
    copy(inputs.begin(), inputs.end(), m_tensor_views.begin());
    copy(outputs.begin(), outputs.end(), m_tensor_views.begin() + m_n_inputs);
    m_next_pc = m_initial_pc;
    m_return = false;
    while (!m_return)
    {
        m_pc = m_next_pc;
        m_next_pc = m_pc + 1;
        m_instructions->at(m_pc)->execute(*this);
    }
    // Don't hold onto inputs/outputs
    fill_n(m_tensor_views.begin(), m_n_inputs + m_n_outputs, nullptr);
}

void CallFrame::operator()(const std::vector<std::shared_ptr<ngraph::runtime::Value>>& arguments,
                           const std::vector<std::shared_ptr<ngraph::runtime::Value>>& results)
{
    // TODO: Check types of args and result
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> inputs;
    for (auto argument : arguments)
    {
        argument->collect_tensor_views(inputs, argument);
    }
    std::vector<std::shared_ptr<ngraph::runtime::TensorView>> outputs;
    for (auto result : results)
    {
        result->collect_tensor_views(outputs, result);
    }
    tensor_call(inputs, outputs);
}

#include <algorithm>

#include "ngraph/ops/reshape.hpp"
#include "ngraph/function.hpp"

using namespace std;
using namespace ngraph;
using namespace ngraph::op;

void Reshape::propagate_types()
{
    if (m_arguments.size() != 1)
    {
        throw ngraph_error("Wrong number of arguments.");
    }

    auto arg_type = m_arguments.at(0)->get_value_type();
    if (nullptr == arg_type)
    {
        throw ngraph_error("Argument to reshape is missing type.");
    }
    auto arg_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_type);
    if (nullptr == arg_tensor_view_type)
    {
        throw ngraph_error("Argument to reshape is not a tensor view");
    }

    auto arg_shape = arg_tensor_view_type->get_shape();
    auto arg_rank = arg_shape.size();
    if (m_input_order.size() != arg_rank)
    {
        throw ngraph_error("Input axis order for reshape is not a permutation of argument's axes");
    }
    for (size_t i = 0; i < arg_rank; i++)
    {
        auto it = std::find(std::begin(m_input_order), std::end(m_input_order), i);
        if (std::end(m_input_order) == it)
        {
            throw ngraph_error(
                "Input axis order for reshape is not a permutation of argument's axes");
        }
    }

    size_t arg_shape_product = 1;
    for (auto i : arg_shape)
    {
        arg_shape_product *= i;
    }
    size_t output_shape_product = 1;
    for (auto i : m_output_shape)
    {
        output_shape_product *= i;
    }
    if (arg_shape_product != output_shape_product)
    {
        throw ngraph_error(
            "Product of output shape dimensions does not match product of argument shape "
            "dimensions for reshape");
    }

    set_value_type_checked(
        make_shared<TensorViewType>(arg_tensor_view_type->get_element_type(), m_output_shape));
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Tensor reshape operation.
///
/// "Converts" an input tensor into a new shape with the same number of elements.
///
/// Given that the input tensor has shape \f$[d_1,\dots,d_n]\f$, the output may have any shape \f$[d'_1,\dots,d'_m]\f$ such that
/// \f$\Pi_{0 \leq i < n}(d_i) = \Pi_{0 \leq i < m}(d'_i)\f$. For example, a \f$3\times{}4\f$ matrix can be reshaped into a
/// 3-tensor of shape \f$3\times{}2\times{}2\f$, a matrix of shape \f$6\times{}2\f$, or a vector of size \f$12\f$, but not, for
/// example, a matrix of size \f$4\times{}4\f$.
///
/// The parameter `input_order` indicates the order in which to "walk" over the input axes. Given a tensor of shape \f$(d_1,\dots,d_n)\f$,
/// an input order of \f$(a_0, a_1, \dots, a_{n-1})\f$ results in the coordinate for axis \f$a_{n-1}\f$ being varied most frequently,
/// followed by axis \f$a_{n-2}\f$, and so on down to \f$a_0\f$.
///
/// For example, reshaping the \f$2\times{}3\f$ matrix \f$\left[ \begin{array}{ccc} 1 & 2 & 3 \\ 4 & 5 & 6 \end{array} \right]\f$ with `input_order` \f$(1,0)\f$
/// and `output_shape` \f$(6)\f$ varies the row coordinate fastest (a column-major walk), producing the vector \f$(1,4,2,5,3,6)\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------- | ---------------------------------------------------------- |
/// | `input_order` | The order in which to walk over the input axes. |
/// | `output_shape` | The shape \f$[d'_1,\dots,d'_m]\f$ for the reshaped tensor. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------------------------------------------------------------ |
/// | `arg` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any type and shape, as long as the product of \f$d_i\f$ equals the product of \f$d'_i\f$. |
///
/// ## Output
///
/// | Type | Description |
/// | ------------------------ | ------------------------------------------------------------------------------------------------------ |
/// | \f$E[d'_1,\dots,d'_m]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with its elements rearranged as described above. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. Implemented for other shapes only when there is no reordering of the input axes, i.e. `input_order` is \f$(0,\dots,n-1)\f$. |
class Reshape : public Builtin
{
public:
/// \brief Constructs a reshape operation.
///
/// \param arg The tensor view to be reshaped.
/// \param input_order The order in which to iterate over input axes. This must be a permutation of the
/// sequence \f$(0,\dots,n-1)\f$ where \f$n\f$ is the rank of the input tensor.
/// \param output_shape The output shape. If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape must
/// be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$.
Reshape(const std::shared_ptr<Node>& arg,
const AxisVector& input_order,
const Shape& output_shape)
: Builtin({arg})
, m_input_order(input_order)
, m_output_shape(output_shape)
{
}
virtual std::string description() const override { return "Reshape"; }
virtual void propagate_types() override;
/// \return The order in which to iterate over input axes.
const AxisVector& get_input_order() const { return m_input_order; }
/// \return The shape of the output tensor.
const Shape& get_output_shape() const { return m_output_shape; }
protected:
const AxisVector m_input_order;
const Shape m_output_shape;
};
}
}
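A sketch matching the 3x4 to 3x2x2 example above, with no axis reordering (element type assumed as in the earlier sketches):

```
auto m = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{3, 4});
// input_order {0, 1} is the identity permutation, so elements keep their order.
auto t = std::make_shared<op::Reshape>(m, AxisVector{0, 1}, Shape{3, 2, 2});
```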
......@@ -20,9 +20,35 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise selection operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------------------- | ------------------------------------------------------------ |
/// | `arg0` | \f$\texttt{bool}[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape, with element type `bool`.              |
/// | `arg1` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape as `arg0`, with any element type. |
/// | `arg2` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg1`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | \f$E[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg1}[i_1,\dots,i_n]\text{ if }\texttt{arg0}[i_1,\dots,i_n] \neq 0\text{, else }\texttt{arg2}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Select : public Builtin
{
public:
/// \brief Constructs a selection operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
/// \param arg2 Node that produces the third input tensor.
Select(const std::shared_ptr<Node>& arg0,
const std::shared_ptr<Node>& arg1,
const std::shared_ptr<Node>& arg2)
......
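A sketch (element types assumed as above):

```
auto cond = std::make_shared<op::Parameter>(element::Bool::element_type(), Shape{4});
auto if_true = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{4});
auto if_false = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{4});
// sel[i] = if_true[i] where cond[i] is nonzero, else if_false[i].
auto sel = std::make_shared<op::Select>(cond, if_true, if_false);
```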
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise sign operation.
///
/// Maps each element of the input tensor to -1 (if it is negative), 0 (if it is zero), or 1 (if it is positive).
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \text{sgn}(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sign : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs an elementwise sign operation.
///
/// \param arg Node that produces the input tensor.
Sign(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Sign"; }
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise sine operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sin(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sin : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a sine operation.
///
/// \param arg Node that produces the input tensor.
Sin(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Sin"; }
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise hyperbolic sine (sinh) operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \sinh(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Sinh : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a hyperbolic sine operation.
///
/// \param arg Node that produces the input tensor.
Sinh(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Sinh"; }
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/ops/slice.hpp"
using namespace std;
using namespace ngraph::op;
void Slice::propagate_types()
{
if (m_arguments.size() != 1)
{
throw ngraph_error("Wrong number of arguments.");
}
auto arg_type = m_arguments.at(0)->get_value_type();
if (nullptr == arg_type)
{
throw ngraph_error("Argument to slice is missing type.");
}
auto arg_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_type);
if (nullptr == arg_tensor_view_type)
{
throw ngraph_error("Argument to slice is not a tensor view");
}
auto& arg_shape = arg_tensor_view_type->get_shape();
if (m_lower_bounds.size() != arg_shape.size())
{
throw ngraph_error(
"Number of lower bounds provided for slice does not match number of input axes");
}
if (m_upper_bounds.size() != arg_shape.size())
{
throw ngraph_error(
"Number of upper bounds provided for slice does not match number of input axes");
}
if (m_step.size() != arg_shape.size())
{
throw ngraph_error(
"Number of step axes provided for slice does not match number of input axes");
}
Shape result_shape;
for (size_t i = 0; i < arg_shape.size(); i++)
{
if (m_upper_bounds[i] > arg_shape[i])
{
throw ngraph_error("Upper bound for slice is out of range");
}
if (m_lower_bounds[i] > m_upper_bounds[i])
{
throw ngraph_error("Lower bound for slice is greater than upper bound");
}
if (0 == m_step[i])
{
throw ngraph_error("Step distance for slice is zero");
}
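// The output axis length is ceil((upper bound - lower bound) / step), computed in integer arithmetic.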
size_t result_axis_size = m_upper_bounds[i] - m_lower_bounds[i];
result_axis_size =
result_axis_size / m_step[i] + ((result_axis_size % m_step[i] == 0) ? 0 : 1);
result_shape.push_back(result_axis_size);
}
set_value_type_checked(
make_shared<TensorViewType>(arg_tensor_view_type->get_element_type(), result_shape));
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Takes a slice of an input tensor, i.e., the sub-tensor that resides within a bounding box, optionally with stride.
///
/// Given an input tensor \f$T\f$ of shape \f$[d_1,\dots,d_n]\f$, lower bounds \f$[l_1,\dots,l_n]\f$, and upper bounds \f$[u_1,\dots,u_n]\f$,
/// where \f$0 \leq l_i \leq u_i \leq d_i\f$, and a stride \f$[s_1,\dots,s_n]\f$, returns a new tensor \f$T'\f$ of the same element type and shape
/// \f$[d'_1,\dots,d'_n]\f$ where \f$d'_i = \lceil(u_i - l_i)\, /\, s_i\rceil\f$ and \f$T'[i_1,\dots,i_n] = T[i'_1,\dots,i'_n]\f$
/// with \f$i'_j = i_j s_j + l_j\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
/// | `lower_bounds` | The (inclusive) lower-bound coordinates \f$l_i\f$ for the tensor slice. For example, a lower-bound of \f$(1,2)\f$ means to start the slice at row 1 and column 2. |
/// | `upper_bounds` | The (non-inclusive) upper-bound coordinates \f$u_i\f$ for the tensor slice. For example, an upper-bound of \f$(5,4)\f$ means to end the slice before row 5 and column 4.      |
/// | `step` | The "step" or "stride" \f$s_i\f$ for the tensor slice. For example, a stride of \f$(1,3)\f$ means to take every row, and every third column (starting at the lower bound). |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------------------------- | --------------------------------------- |
/// | `arg` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$                   | A tensor of any shape and element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ------------------------------------------------------------------------------ | --------------------------------- |
/// | \f$E[d'_1,\dots,d'_n]\f$ where \f$d'_i = \lceil(u_i - l_i)\, /\, s_i\rceil\f$. | The tensor sliced from the input. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------- |
/// | NGVM | Implemented for scalars, matrices, and vectors. |
class Slice : public Builtin
{
public:
/// \brief Constructs a tensor slice operation.
///
/// \param arg The tensor view to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
/// \param step The slicing step; for example, step of `{n,m}` means to take
/// every nth row and every mth column of the input matrix.
Slice(const std::shared_ptr<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds,
const Shape& step)
: Builtin({arg})
, m_lower_bounds(lower_bounds)
, m_upper_bounds(upper_bounds)
, m_step(step)
{
}
/// \brief Constructs a tensor slice operation with unit step; i.e., every element inside the bounding box will be copied to the output slice.
///
/// \param arg The tensor view to be sliced.
/// \param lower_bounds The axiswise lower bounds of the slice (inclusive).
/// \param upper_bounds The axiswise upper bounds of the slice (exclusive).
Slice(const std::shared_ptr<Node>& arg,
const Coordinate& lower_bounds,
const Coordinate& upper_bounds)
: Builtin({arg})
, m_lower_bounds(lower_bounds)
, m_upper_bounds(upper_bounds)
, m_step(Shape(lower_bounds.size(), 1))
{
}
virtual std::string description() const override { return "Slice"; }
virtual void propagate_types() override;
/// \return The inclusive lower-bound coordinates.
const Coordinate& get_lower_bounds() const { return m_lower_bounds; }
/// \return The exclusive upper-bound coordinates.
const Coordinate& get_upper_bounds() const { return m_upper_bounds; }
/// \return The slicing step.
const Shape& get_step() const { return m_step; }
protected:
const Coordinate m_lower_bounds;
const Coordinate m_upper_bounds;
const Shape m_step;
};
}
}
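A sketch: from a 4x4 matrix, take rows 1 and 2 (the upper bound is exclusive) and every other column (element type assumed as above):

```
auto m = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{4, 4});
// lower {1, 0}, upper {3, 4}, step {1, 2}: the result shape is {2, 2}.
auto sub = std::make_shared<op::Slice>(m, Coordinate{1, 0}, Coordinate{3, 4}, Shape{1, 2});
```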
......@@ -20,9 +20,33 @@ namespace ngraph
{
namespace op
{
/// \brief Elementwise subtraction operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | --------------------------------- | ------------------------------------------------------ |
/// | `arg0` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
/// | `arg1` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of the same shape and element type as `arg0`. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | -------------------------------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \texttt{arg0}[i_1,\dots,i_n] - \texttt{arg1}[i_1,\dots,i_n]\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Subtract : public BinaryElementwiseArithmetic
{
public:
/// \brief Constructs a subtraction operation.
///
/// \param arg0 Node that produces the first input tensor.
/// \param arg1 Node that produces the second input tensor.
Subtract(const std::shared_ptr<Node>& arg0, const std::shared_ptr<Node>& arg1)
: BinaryElementwiseArithmetic(arg0, arg1)
{
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include "ngraph/ops/sum.hpp"
#include "ngraph/function.hpp"
using namespace std;
using namespace ngraph::op;
void Sum::propagate_types()
{
if (m_arguments.size() != 1)
{
throw ngraph_error("Wrong number of arguments.");
}
auto arg_type = m_arguments.at(0)->get_value_type();
if (nullptr == arg_type)
{
throw ngraph_error("Argument to sum is missing type.");
}
auto arg_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(arg_type);
if (nullptr == arg_tensor_view_type)
{
throw ngraph_error("Argument to sum is not a tensor view");
}
auto& arg_element_type = arg_tensor_view_type->get_element_type();
if (arg_element_type == element::Bool::element_type())
{
throw ngraph_error("Argument for sum must have numeric element type");
}
auto arg_shape = arg_tensor_view_type->get_shape();
for (auto axis : m_reduction_axes)
{
if (axis >= arg_shape.size())
{
throw ngraph_error("Reduction axis for sum is out of bounds");
}
}
Shape result_shape;
for (size_t i = 0; i < arg_shape.size(); i++)
{
if (m_reduction_axes.count(i) == 0)
{
result_shape.push_back(arg_shape.at(i));
}
}
set_value_type_checked(
make_shared<TensorViewType>(arg_tensor_view_type->get_element_type(), result_shape));
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Tensor sum operation.
///
/// Sums the input tensor, eliminating the specified reduction axes.
/// For example:
///
/// \f[
/// \mathit{sum}\left(\{0\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 3 + 5), (2 + 4 + 6) \right] =
/// \left[ 9, 12 \right]~~~\text{(dimension 0 (rows) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{sum}\left(\{1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// \left[ (1 + 2), (3 + 4), (5 + 6) \right] =
/// \left[ 3, 7, 11 \right]~~~\text{(dimension 1 (columns) is eliminated)}
/// \f]
///
/// \f[
/// \mathit{sum}\left(\{0,1\},
/// \left[ \begin{array}{ccc}
/// 1 & 2 \\
/// 3 & 4 \\
/// 5 & 6 \end{array} \right]\right) =
/// (1 + 2) + (3 + 4) + (5 + 6) =
/// 21~~~\text{(both dimensions (rows and columns) are eliminated)}
/// \f]
///
/// This is equivalent to Reduce where `arg_init` = 0 and `reduction_function` is \f$f(x,y) = x+y\f$.
///
/// ## Parameters
///
/// | | Description |
/// | -------------------- | ---------------------------------------- |
/// | `reduction_axes` | The axes to eliminate through summation. |
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ------------------------------------------------------ |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ----------------------------------------- | ---------------------------------------------------------------------------------------------------------------- |
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ----------------------------------------------------- |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. |
class Sum : public Builtin
{
public:
/// \brief Constructs a summation operation.
///
/// \param arg The tensor view to be summed.
/// \param reduction_axes The axis positions (0-based) to be eliminated.
Sum(const std::shared_ptr<Node>& arg, const AxisSet& reduction_axes)
: Builtin({arg})
, m_reduction_axes(reduction_axes)
{
}
virtual std::string description() const override { return "Sum"; }
virtual void propagate_types() override;
/// \return The axis positions (0-based) to be eliminated through summation.
const AxisSet& get_reduction_axes() const { return m_reduction_axes; }
protected:
AxisSet m_reduction_axes;
};
}
}
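A sketch of the column-sum example above (element type assumed as in the earlier sketches):

```
// Eliminating axis 0 of a 3x2 matrix leaves the vector of 2 column sums;
// for the matrix in the example above, the result is {9, 12}.
auto m = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{3, 2});
auto col_sums = std::make_shared<op::Sum>(m, AxisSet{0});
```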
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise tangent operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------ |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tan(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Tan : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a tangent operation.
///
/// \param arg Node that produces the input tensor.
Tan(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Tan"; }
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/ops/op.hpp"
namespace ngraph
{
namespace op
{
/// \brief Elementwise hyperbolic tangent operation.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ----- | --------------------------------- | ----------------------------------------------- |
/// | `arg` | \f$N[d_1,\dots,d_n]~(n \geq 0)\f$ | A tensor of any shape and numeric element type. |
///
/// ## Output
///
/// | Type | Description |
/// | ---------------------- | ------------------------------------------------------------------------------------- |
/// | \f$N[d_1,\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_n] = \tanh(\texttt{arg}[i_1,\dots,i_n])\f$ |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Tanh : public UnaryElementwiseArithmetic
{
public:
/// \brief Constructs a hyperbolic tangent operation.
///
/// \param arg Node that produces the input tensor.
Tanh(const std::shared_ptr<Node>& arg)
: UnaryElementwiseArithmetic(arg)
{
}
virtual std::string description() const override { return "Tanh"; }
};
}
}
......@@ -20,9 +20,31 @@ namespace ngraph
{
namespace op
{
/// \brief Operation to construct a tuple.
///
/// ## Inputs
///
/// | | Type | Description |
/// | ------ | ------------------------------ | -------------------------------------- |
/// | `args` | \f$T_1,\dots,T_n~(n \geq 0)\f$ | The elements of the constructed tuple. |
///
/// ## Output
///
/// | Type | Description |
/// | --------------------- | ---------------------------------------------------------- |
            /// | \f$(T_1,\dots,T_n)\f$ | The tuple \f$(\texttt{args}[0],\dots,\texttt{args}[n-1])\f$. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Tuple : public Builtin
{
public:
/// \brief Constructs a tuple construction operation.
///
/// \param args The nodes that produce the elements of the constructed tuple.
Tuple(const Nodes& args)
: Builtin(args)
{
......
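A hedged usage sketch for the constructor above, assuming `Nodes` is a vector of `std::shared_ptr<Node>` (the `op::Parameter` calls are illustrative, not taken from this diff):

```
// Sketch only: packages two graph values into a tuple.
auto a = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{2});
auto b = std::make_shared<op::Parameter>(element::Float32::element_type(), Shape{3});
auto pair = std::make_shared<op::Tuple>(Nodes{a, b}); // the tuple (a, b)
```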
......@@ -25,15 +25,15 @@
using namespace std;
using namespace ngraph;
bool pass::AssignTensors::run_on_call_graph(list<Node*>& nodes)
bool pass::AssignTensors::run_on_call_graph(list<std::shared_ptr<Node>>& nodes)
{
for (Node* node : nodes)
for (shared_ptr<Node> node : nodes)
{
try
{
            // We need to set the node's is_output state prior to calling assign_tensors
            // so that the output state can be passed to the constructed tensors.
if (node == get_state().get_functions().at(0)->get_result().get())
if (node == get_state().get_functions().at(0)->get_result())
{
node->set_is_output();
}
......
......@@ -27,7 +27,7 @@ namespace ngraph
class ngraph::pass::AssignTensors : public CallGraphPass
{
public:
virtual bool run_on_call_graph(std::list<Node*>& nodes) override;
virtual bool run_on_call_graph(std::list<std::shared_ptr<Node>>& nodes) override;
private:
};
......@@ -24,22 +24,22 @@ using namespace std;
using namespace ngraph;
using namespace ngraph::pass;
bool CollectFunctions::run_on_function(ngraph::Function* func)
bool CollectFunctions::run_on_function(shared_ptr<ngraph::Function> func)
{
set<Function*> functions;
deque<Function*> stack;
set<shared_ptr<ngraph::Function>> functions;
deque<shared_ptr<ngraph::Function>> stack;
stack.push_back(func);
while (stack.empty() == false)
{
Function* f = stack.front();
shared_ptr<ngraph::Function> f = stack.front();
stack.pop_front();
functions.insert(f);
traverse_nodes(f->get_result(), [&](Node* node) {
op::FunctionCall* fc = dynamic_cast<op::FunctionCall*>(node);
traverse_nodes(f->get_result(), [&](shared_ptr<Node> node) {
shared_ptr<op::FunctionCall> fc = dynamic_pointer_cast<op::FunctionCall>(node);
if (fc)
{
stack.push_back(fc->get_function().get());
stack.push_back(fc->get_function());
}
});
}
......
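The recurring change in this commit is replacing raw-pointer downcasts (`dynamic_cast`) with `std::dynamic_pointer_cast`, whose result shares ownership with the source pointer and is empty on failure. A minimal self-contained illustration of the idiom:

```
#include <iostream>
#include <memory>

struct Node { virtual ~Node() = default; };
struct FunctionCall : Node {};

int main()
{
    std::shared_ptr<Node> node = std::make_shared<FunctionCall>();
    // On success the returned shared_ptr shares ownership with `node`;
    // on failure it is empty, so it can be tested like a raw pointer.
    if (auto fc = std::dynamic_pointer_cast<FunctionCall>(node))
    {
        std::cout << "node is a FunctionCall\n";
    }
    return 0;
}
```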
......@@ -27,7 +27,7 @@ namespace ngraph
class ngraph::pass::CollectFunctions : public FunctionPass
{
public:
bool run_on_function(ngraph::Function*) override;
bool run_on_function(std::shared_ptr<ngraph::Function>) override;
private:
};
......@@ -28,14 +28,14 @@ pass::DumpSorted::DumpSorted(const string& output_file)
{
}
bool pass::DumpSorted::run_on_module(vector<Function*>& functions)
bool pass::DumpSorted::run_on_module(vector<shared_ptr<ngraph::Function>>& functions)
{
ofstream out{m_output_file};
if (out)
{
for (Function* f : functions)
for (shared_ptr<Function> f : functions)
{
for (const Node* node : f->get_ordered_ops())
for (const shared_ptr<Node>& node : f->get_ordered_ops())
{
out << node->get_name() << "(";
vector<string> inputs;
......
......@@ -31,7 +31,7 @@ class ngraph::pass::DumpSorted : public ModulePass
public:
DumpSorted(const std::string& output_file);
virtual bool run_on_module(std::vector<Function*>&) override;
virtual bool run_on_module(std::vector<std::shared_ptr<ngraph::Function>>&) override;
private:
const std::string m_output_file;
......
......@@ -28,18 +28,18 @@ using namespace std;
using namespace ngraph;
using namespace ngraph::descriptor;
bool pass::Liveness::run_on_call_graph(list<Node*>& ops)
bool pass::Liveness::run_on_call_graph(list<shared_ptr<Node>>& ops)
{
unordered_set<Tensor*> currently_live;
for (auto it = ops.rbegin(); it != ops.rend(); it++)
{
Node* node = *it;
shared_ptr<Node> node = *it;
node->liveness_live_list.clear();
node->liveness_new_list.clear();
node->liveness_free_list.clear();
unordered_set<Tensor*> input_tensor_decls;
for (auto input_decl : node->get_inputs())
for (Input& input_decl : node->get_inputs())
{
Tensor& tensor = input_decl.get_tensor();
if (is_temporary(tensor))
......@@ -49,7 +49,7 @@ bool pass::Liveness::run_on_call_graph(list<Node*>& ops)
}
unordered_set<Tensor*> output_tensor_decls;
for (auto output_decl : node->get_outputs())
for (Output& output_decl : node->get_outputs())
{
Tensor& tensor = output_decl.get_tensor();
if (is_temporary(tensor))
......@@ -91,7 +91,7 @@ bool pass::Liveness::run_on_call_graph(list<Node*>& ops)
// Add outputs to live_list and remove from free_list
unordered_set<Tensor*> outputs;
unordered_set<Tensor*> seen;
for (Node* node : ops)
for (shared_ptr<Node> node : ops)
{
for (Tensor* tensor : node->liveness_live_list)
{
......
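For context, the pass computes per-op liveness by walking the schedule backwards, so each tensor's final use is the first mention encountered. A generic sketch of that backward dataflow, with string names standing in for this file's Tensor/Input/Output bookkeeping:

```
#include <list>
#include <string>
#include <unordered_set>
#include <vector>

struct Op
{
    std::vector<std::string> inputs;          // tensor names this op reads
    std::vector<std::string> outputs;         // tensor names this op writes
    std::unordered_set<std::string> live_in;  // tensors live entering this op
};

// Walking from the last op to the first, a tensor enters the live set at its
// last use and leaves it at its definition, covering exactly its lifetime.
void compute_liveness(std::list<Op>& ops)
{
    std::unordered_set<std::string> currently_live;
    for (auto it = ops.rbegin(); it != ops.rend(); ++it)
    {
        for (const std::string& t : it->outputs)
        {
            currently_live.erase(t); // defined here, so dead above this op
        }
        for (const std::string& t : it->inputs)
        {
            currently_live.insert(t); // used here, so live above this op
        }
        it->live_in = currently_live;
    }
}
```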
......@@ -28,7 +28,7 @@ namespace ngraph
class ngraph::pass::Liveness : public CallGraphPass
{
public:
virtual bool run_on_call_graph(std::list<Node*>&) override;
virtual bool run_on_call_graph(std::list<std::shared_ptr<Node>>&) override;
private:
bool is_temporary(const descriptor::Tensor&);
......
......@@ -38,12 +38,7 @@ void ngraph::pass::Manager::initialize_default_passes()
void ngraph::pass::Manager::run_passes(shared_ptr<Function> func)
{
run_passes(func.get());
}
void ngraph::pass::Manager::run_passes(Function* func)
{
vector<Function*> fs = {func};
vector<shared_ptr<Function>> fs = {func};
get_state().set_functions(fs);
for (shared_ptr<PassBase> pass : m_pass_list)
......@@ -59,16 +54,16 @@ void ngraph::pass::Manager::run_passes(Function* func)
}
else if (function_pass)
{
for (Function* f : fs)
for (shared_ptr<Function> f : fs)
{
function_pass->run_on_function(f);
}
}
else if (node_pass)
{
for (Function* f : fs)
for (shared_ptr<Function> f : fs)
{
for (Node* n : f->get_ops())
for (shared_ptr<Node> n : f->get_ops())
{
node_pass->run_on_node(n);
}
......@@ -76,7 +71,7 @@ void ngraph::pass::Manager::run_passes(Function* func)
}
else if (call_graph_pass)
{
for (Function* f : fs)
for (shared_ptr<Function> f : fs)
{
call_graph_pass->run_on_call_graph(f->get_ordered_ops());
}
......
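The manager probes each pass's concrete kind with `dynamic_pointer_cast` and runs it at the matching granularity (module, function, node, or call graph). A self-contained sketch of that dispatch idiom, with illustrative stand-in types in place of ngraph's:

```
#include <iostream>
#include <memory>
#include <vector>

struct PassBase { virtual ~PassBase() = default; };
struct ModulePass : PassBase { virtual void run_on_module(std::vector<int>&) = 0; };
struct FunctionPass : PassBase { virtual void run_on_function(int) = 0; };

struct PrintPass : FunctionPass
{
    void run_on_function(int f) override { std::cout << "function " << f << "\n"; }
};

int main()
{
    std::vector<std::shared_ptr<PassBase>> passes{std::make_shared<PrintPass>()};
    std::vector<int> functions{1, 2};
    for (const std::shared_ptr<PassBase>& p : passes)
    {
        // Probe the concrete pass kind, then run at the right granularity.
        if (auto mp = std::dynamic_pointer_cast<ModulePass>(p))
        {
            mp->run_on_module(functions);
        }
        else if (auto fp = std::dynamic_pointer_cast<FunctionPass>(p))
        {
            for (int f : functions)
            {
                fp->run_on_function(f);
            }
        }
    }
    return 0;
}
```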
......@@ -47,7 +47,6 @@ public:
m_pass_list.push_back(pass_base);
}
void run_passes(Function*);
void run_passes(std::shared_ptr<Function>);
ManagerState& get_state();
......
......@@ -23,7 +23,7 @@
using namespace std;
using namespace ngraph;
vector<Function*>& ngraph::pass::ManagerState::get_functions()
vector<shared_ptr<Function>>& ngraph::pass::ManagerState::get_functions()
{
return m_function_list;
}
......
......@@ -30,7 +30,7 @@ namespace ngraph
class ngraph::pass::ManagerState
{
public:
std::vector<Function*>& get_functions();
std::vector<std::shared_ptr<Function>>& get_functions();
template <typename T>
void set_functions(const T& collection)
......@@ -44,5 +44,5 @@ public:
private:
size_t m_temporary_pool_size = 0;
std::vector<Function*> m_function_list;
std::vector<std::shared_ptr<Function>> m_function_list;
};
......@@ -26,10 +26,10 @@ using namespace std;
using namespace ngraph;
using namespace ngraph::descriptor;
bool pass::MemoryLayout::run_on_call_graph(std::list<Node*>& node_list)
bool pass::MemoryLayout::run_on_call_graph(std::list<std::shared_ptr<Node>>& node_list)
{
MemoryManager mm;
for (const Node* node : node_list)
for (shared_ptr<Node> node : node_list)
{
for (Tensor* tensor : node->liveness_new_list)
{
......
......@@ -33,7 +33,7 @@ namespace ngraph
class ngraph::pass::MemoryLayout : public CallGraphPass
{
public:
virtual bool run_on_call_graph(std::list<Node*>&) override;
virtual bool run_on_call_graph(std::list<std::shared_ptr<Node>>&) override;
private:
};
......
......@@ -32,13 +32,13 @@ pass::MemoryVisualize::MemoryVisualize(const string& filename)
{
}
bool pass::MemoryVisualize::run_on_module(vector<Function*>& functions)
bool pass::MemoryVisualize::run_on_module(vector<shared_ptr<ngraph::Function>>& functions)
{
ofstream file(m_filename);
{
for (const Function* f : functions)
for (shared_ptr<Function> f : functions)
{
const list<Node*> nodes = f->get_ordered_ops();
list<shared_ptr<Node>> nodes = f->get_ordered_ops();
file << "<!DOCTYPE html>\n<html>\n";
file << "<head>\n";
file << " <style>\n";
......@@ -62,7 +62,7 @@ bool pass::MemoryVisualize::run_on_module(vector<Function*>& functions)
file << "<body>\n";
unordered_set<descriptor::Tensor*> tensors;
size_t temp_max_size = 0;
for (Node* node : nodes)
for (shared_ptr<Node> node : nodes)
{
tensors.insert(node->liveness_live_list.begin(), node->liveness_live_list.end());
}
......@@ -96,11 +96,11 @@ bool pass::MemoryVisualize::run_on_module(vector<Function*>& functions)
return false;
}
const Node* pass::MemoryVisualize::find_largest_op(const list<Node*>& nodes)
shared_ptr<Node> pass::MemoryVisualize::find_largest_op(const list<shared_ptr<Node>>& nodes)
{
const Node* largest_op = nullptr;
shared_ptr<Node> largest_op = nullptr;
size_t largest_size = 0;
for (const Node* exop : nodes)
for (shared_ptr<Node> exop : nodes)
{
size_t size = 0;
for (const Tensor* tensor : exop->liveness_live_list)
......@@ -116,9 +116,9 @@ const Node* pass::MemoryVisualize::find_largest_op(const list<Node*>& nodes)
return largest_op;
}
void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<Node*>& nodes)
void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<shared_ptr<Node>>& nodes)
{
const Node* largest_op = find_largest_op(nodes);
shared_ptr<Node> largest_op = find_largest_op(nodes);
if (largest_op)
{
......@@ -130,7 +130,7 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<Node*>&
unordered_map<const Tensor*, size_t> age_list;
vector<const Tensor*> tensor_set;
unordered_map<const Tensor*, const Node*> generator_op;
unordered_map<const Tensor*, shared_ptr<Node>> generator_op;
file << "<table>\n";
file << " <tr>";
file << "<th align=\"left\">tensor</th>";
......@@ -139,7 +139,7 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<Node*>&
file << "<th align=\"right\">generator weight</th>";
file << "</tr>\n";
size_t i = 0;
for (const Node* exop : nodes)
for (shared_ptr<Node> exop : nodes)
{
for (const Tensor* tensor : exop->liveness_new_list)
{
......@@ -179,7 +179,7 @@ void pass::MemoryVisualize::draw_tensor_weight(ostream& file, const list<Node*>&
}
}
void pass::MemoryVisualize::draw_histogram(ostream& file, const list<Node*>& nodes)
void pass::MemoryVisualize::draw_histogram(ostream& file, const list<shared_ptr<Node>>& nodes)
{
size_t stroke_width = 14;
size_t text_offset = 4;
......@@ -188,7 +188,7 @@ void pass::MemoryVisualize::draw_histogram(ostream& file, const list<Node*>& nod
size_t scale = width - offset;
size_t line_spacing = stroke_width * 1.5;
size_t line_count = 0;
for (const Node* node : nodes)
for (shared_ptr<Node> node : nodes)
{
(void)node;
line_count += 1;
......@@ -198,7 +198,7 @@ void pass::MemoryVisualize::draw_histogram(ostream& file, const list<Node*>& nod
file << "<svg viewBox=\"0 0 " << width << " " << height << "\">\n";
size_t y = 0;
for (const Node* node : nodes)
for (shared_ptr<Node> node : nodes)
{
float usage = float(MemoryVisualize::memory_usage(node));
float footprint = float(MemoryVisualize::memory_footprint(node));
......@@ -220,14 +220,14 @@ void pass::MemoryVisualize::draw_histogram(ostream& file, const list<Node*>& nod
file << "</svg>\n";
}
void pass::MemoryVisualize::draw_op_influence(ostream& file, const list<Node*>& nodes)
void pass::MemoryVisualize::draw_op_influence(ostream& file, const list<shared_ptr<Node>>& nodes)
{
file << "<table>\n";
file << " <tr>";
file << "<th align=\"left\">op</th>";
file << "<th align=\"right\">influence</th>";
file << "</tr>\n";
for (const Node* exop : nodes)
for (shared_ptr<Node> exop : nodes)
{
int weight = compute_op_weight(exop);
file << " <tr>";
......@@ -237,7 +237,7 @@ void pass::MemoryVisualize::draw_op_influence(ostream& file, const list<Node*>&
}
}
int pass::MemoryVisualize::compute_op_weight(const Node* exop)
int pass::MemoryVisualize::compute_op_weight(const shared_ptr<Node> exop)
{
int mass = 0;
// for input_decl in exop.input_decls:
......@@ -265,17 +265,17 @@ int pass::MemoryVisualize::compute_op_weight(const Node* exop)
return mass;
}
size_t pass::MemoryVisualize::memory_usage(const Node* node)
size_t pass::MemoryVisualize::memory_usage(shared_ptr<Node> node)
{
return 0;
}
size_t pass::MemoryVisualize::memory_footprint(const Node* node)
size_t pass::MemoryVisualize::memory_footprint(shared_ptr<Node> node)
{
return 0;
}
size_t pass::MemoryVisualize::memory_footprint(const std::list<Node*>& nodes)
size_t pass::MemoryVisualize::memory_footprint(const std::list<shared_ptr<Node>>& nodes)
{
return 0;
}
......@@ -32,18 +32,18 @@ class ngraph::pass::MemoryVisualize : public ModulePass
{
public:
MemoryVisualize(const std::string& filename);
virtual bool run_on_module(std::vector<Function*>&) override;
virtual bool run_on_module(std::vector<std::shared_ptr<ngraph::Function>>&) override;
private:
const Node* find_largest_op(const std::list<Node*>& nodes);
void draw_tensor_weight(std::ostream& file, const std::list<Node*>& nodes);
void draw_histogram(std::ostream& file, const std::list<Node*>& nodes);
void draw_op_influence(std::ostream& file, const std::list<Node*>& nodes);
int compute_op_weight(const Node* exop);
static size_t memory_usage(const Node*);
static size_t memory_footprint(const Node*);
static size_t memory_footprint(const std::list<Node*>&);
std::shared_ptr<Node> find_largest_op(const std::list<std::shared_ptr<Node>>& nodes);
void draw_tensor_weight(std::ostream& file, const std::list<std::shared_ptr<Node>>& nodes);
void draw_histogram(std::ostream& file, const std::list<std::shared_ptr<Node>>& nodes);
void draw_op_influence(std::ostream& file, const std::list<std::shared_ptr<Node>>& nodes);
int compute_op_weight(std::shared_ptr<Node> exop);
static size_t memory_usage(std::shared_ptr<Node>);
static size_t memory_footprint(std::shared_ptr<Node>);
static size_t memory_footprint(const std::list<std::shared_ptr<Node>>&);
const std::string m_filename;
};
......@@ -53,26 +53,26 @@ class ngraph::pass::ModulePass : public PassBase
{
public:
virtual ~ModulePass() {}
virtual bool run_on_module(std::vector<ngraph::Function*>&) = 0;
virtual bool run_on_module(std::vector<std::shared_ptr<ngraph::Function>>&) = 0;
};
class ngraph::pass::FunctionPass : public PassBase
{
public:
virtual ~FunctionPass() {}
virtual bool run_on_function(ngraph::Function*) = 0;
virtual bool run_on_function(std::shared_ptr<ngraph::Function>) = 0;
};
class ngraph::pass::NodePass : public PassBase
{
public:
virtual ~NodePass() {}
virtual bool run_on_node(ngraph::Node*) = 0;
virtual bool run_on_node(std::shared_ptr<ngraph::Node>) = 0;
};
class ngraph::pass::CallGraphPass : public PassBase
{
public:
virtual ~CallGraphPass() {}
virtual bool run_on_call_graph(std::list<ngraph::Node*>&) = 0;
virtual bool run_on_call_graph(std::list<std::shared_ptr<ngraph::Node>>&) = 0;
};
......@@ -20,9 +20,9 @@
using namespace std;
using namespace ngraph;
bool pass::PropagateTypes::run_on_call_graph(list<Node*>& nodes)
bool pass::PropagateTypes::run_on_call_graph(list<shared_ptr<Node>>& nodes)
{
for (Node* node : nodes)
for (shared_ptr<Node> node : nodes)
{
try
{
......
......@@ -27,7 +27,7 @@ namespace ngraph
class ngraph::pass::PropagateTypes : public CallGraphPass
{
public:
virtual bool run_on_call_graph(std::list<Node*>&) override;
virtual bool run_on_call_graph(std::list<std::shared_ptr<Node>>&) override;
private:
};
......@@ -25,24 +25,26 @@
using namespace ngraph;
using namespace std;
bool ngraph::pass::TopologicalSort::run_on_function(ngraph::Function* func)
bool ngraph::pass::TopologicalSort::run_on_function(shared_ptr<ngraph::Function> func)
{
list<Node*> result_list;
list<shared_ptr<Node>> result_list;
deque<Node*> independent_nodes;
unordered_map<Node*, size_t> node_depencency_count;
unordered_map<const Node*, size_t> node_depencency_count;
unordered_map<Node*, shared_ptr<Node>> node_map;
traverse_nodes(func->get_result(), [&](Node* node) {
node_depencency_count[node] = node->get_arguments().size();
traverse_nodes(func->get_result(), [&](shared_ptr<Node> node) {
node_map[node.get()] = node;
node_depencency_count[node.get()] = node->get_arguments().size();
if (node->get_arguments().size() == 0)
{
independent_nodes.push_back(node);
independent_nodes.push_back(node.get());
}
});
while (independent_nodes.size() > 0)
{
auto independent_node = independent_nodes.front();
result_list.push_back(independent_node);
result_list.push_back(node_map[independent_node]);
independent_nodes.pop_front();
for (auto user : independent_node->users())
......
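The sort is Kahn's algorithm: nodes whose arguments are all placed move to the ready queue, and emitting a node decrements each user's dependency count. A self-contained sketch over plain indices, with this codebase's Node/users() types elided:

```
#include <cstddef>
#include <deque>
#include <list>
#include <vector>

// users[i] lists the consumers of node i; dep_count[i] starts at node i's
// argument count. Returns the nodes in dependency order.
std::list<std::size_t> topological_sort(
    const std::vector<std::vector<std::size_t>>& users,
    std::vector<std::size_t> dep_count)
{
    std::deque<std::size_t> independent;
    for (std::size_t i = 0; i < dep_count.size(); i++)
    {
        if (dep_count[i] == 0)
        {
            independent.push_back(i);
        }
    }
    std::list<std::size_t> result;
    while (!independent.empty())
    {
        std::size_t n = independent.front();
        independent.pop_front();
        result.push_back(n);
        for (std::size_t user : users[n])
        {
            if (--dep_count[user] == 0)
            {
                independent.push_back(user); // all of user's inputs are placed
            }
        }
    }
    return result;
}
```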
......@@ -31,5 +31,5 @@ class ngraph::pass::TopologicalSort : public FunctionPass
{
public:
TopologicalSort() {}
bool run_on_function(ngraph::Function*) override;
bool run_on_function(std::shared_ptr<ngraph::Function>) override;
};
......@@ -23,15 +23,15 @@
using namespace ngraph;
using namespace std;
bool pass::VisualizeTree::run_on_module(vector<ngraph::Function*>& functions)
bool pass::VisualizeTree::run_on_module(vector<shared_ptr<ngraph::Function>>& functions)
{
for (Function* f : functions)
for (shared_ptr<Function> f : functions)
{
// map<size_t, list<node_ptr>> dependent_nodes;
traverse_nodes(f->get_result(), [&](Node* node) {
traverse_nodes(f->get_result(), [&](shared_ptr<Node> node) {
for (auto arg : node->get_arguments())
{
m_ss << add_attributes(arg.get());
m_ss << add_attributes(arg);
m_ss << add_attributes(node);
m_ss << " " << arg->get_name() << " -> " << node->get_name();
m_ss << ";\n";
......@@ -49,7 +49,7 @@ pass::VisualizeTree::VisualizeTree(const string& file_name)
{
}
std::string pass::VisualizeTree::add_attributes(const Node* node)
std::string pass::VisualizeTree::add_attributes(shared_ptr<Node> node)
{
string rc;
if (!contains(m_nodes_with_attributes, node))
......@@ -60,7 +60,7 @@ std::string pass::VisualizeTree::add_attributes(const Node* node)
return rc;
}
std::string pass::VisualizeTree::get_attributes(const Node* node)
std::string pass::VisualizeTree::get_attributes(shared_ptr<Node> node)
{
stringstream ss;
if (node->is_parameter())
......
......@@ -32,14 +32,14 @@ class ngraph::pass::VisualizeTree : public ModulePass
{
public:
VisualizeTree(const std::string& file_name);
bool run_on_module(std::vector<ngraph::Function*>&) override;
bool run_on_module(std::vector<std::shared_ptr<ngraph::Function>>&) override;
private:
std::string add_attributes(const Node* node);
std::string get_attributes(const Node* node);
std::string add_attributes(std::shared_ptr<Node> node);
std::string get_attributes(std::shared_ptr<Node> node);
void render() const;
std::stringstream m_ss;
std::string m_name;
std::set<const Node*> m_nodes_with_attributes;
std::set<std::shared_ptr<Node>> m_nodes_with_attributes;
};
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class AcosInstruction : public Instruction
{
public:
AcosInstruction(TensorViewInfo arg, TensorViewInfo out)
: m_arg(arg)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET, fmt::V>(call_frame, m_out) =
EigenArray1d<ET, fmt::V>(call_frame, m_arg).acos();
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
}
}
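Each of these NGVM instructions wraps one Eigen array expression; stripped of the `TensorViewInfo` plumbing, the body above reduces to the following plain-Eigen sketch (not this codebase's wrappers):

```
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::ArrayXf in(4);
    in << -1.0f, -0.5f, 0.5f, 1.0f;
    Eigen::ArrayXf out = in.acos(); // elementwise arccosine
    std::cout << out.transpose() << "\n"; // approx: 3.14159 2.0944 1.0472 0
    return 0;
}
```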
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class AsinInstruction : public Instruction
{
public:
AsinInstruction(TensorViewInfo arg, TensorViewInfo out)
: m_arg(arg)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET, fmt::V>(call_frame, m_out) =
EigenArray1d<ET, fmt::V>(call_frame, m_arg).asin();
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class AtanInstruction : public Instruction
{
public:
AtanInstruction(TensorViewInfo arg, TensorViewInfo out)
: m_arg(arg)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET, fmt::V>(call_frame, m_out) =
EigenArray1d<ET, fmt::V>(call_frame, m_arg).atan();
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class CosInstruction : public Instruction
{
public:
CosInstruction(TensorViewInfo arg, TensorViewInfo out)
: m_arg(arg)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET, fmt::V>(call_frame, m_out) =
EigenArray1d<ET, fmt::V>(call_frame, m_arg).cos();
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class CoshInstruction : public Instruction
{
public:
CoshInstruction(TensorViewInfo arg, TensorViewInfo out)
: m_arg(arg)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET, fmt::V>(call_frame, m_out) =
EigenArray1d<ET, fmt::V>(call_frame, m_arg).cosh();
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class ExpInstruction : public Instruction
{
public:
ExpInstruction(TensorViewInfo arg, TensorViewInfo out)
: m_arg(arg)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET, fmt::V>(call_frame, m_out) =
EigenArray1d<ET, fmt::V>(call_frame, m_arg).exp();
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class MatrixSliceInstruction : public Instruction
{
public:
MatrixSliceInstruction(const TensorViewInfo& arg,
const TensorViewInfo& out,
size_t lower_row,
size_t lower_col,
size_t upper_row,
size_t upper_col)
: m_arg(arg)
, m_out(out)
, m_lower_row(lower_row)
, m_lower_col(lower_col)
, m_upper_row(upper_row)
, m_upper_col(upper_col)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenMatrix<ET>(call_frame, m_out) = EigenMatrix<ET>(call_frame, m_arg)
.block(m_lower_row,
m_lower_col,
m_upper_row - m_lower_row,
m_upper_col - m_lower_col);
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
size_t m_lower_row;
size_t m_lower_col;
size_t m_upper_row;
size_t m_upper_col;
};
}
}
}
}
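Note that Eigen's `.block(i, j, p, q)` takes a start position plus row and column counts, which is why the body passes `m_upper_row - m_lower_row`: the upper bounds are exclusive. A plain-Eigen illustration:

```
#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::MatrixXf m(4, 4);
    m << 1, 2, 3, 4,
         5, 6, 7, 8,
         9, 10, 11, 12,
         13, 14, 15, 16;
    // Rows [1, 3) and columns [0, 2): a 2x2 slice, mirroring
    // block(lower_row, lower_col, upper_row - lower_row, upper_col - lower_col).
    Eigen::MatrixXf s = m.block(1, 0, 2, 2);
    std::cout << s << "\n"; // prints: 5 6 / 9 10
    return 0;
}
```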
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class MatrixTransposeInstruction : public Instruction
{
public:
MatrixTransposeInstruction(const TensorViewInfo& arg, const TensorViewInfo& out)
: m_arg(arg)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenMatrix<ET>(call_frame, m_out) =
EigenMatrix<ET>(call_frame, m_arg).transpose();
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class SignInstruction : public Instruction
{
public:
SignInstruction(TensorViewInfo arg, TensorViewInfo out)
: m_arg(arg)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET, fmt::V>(call_frame, m_out) =
EigenArray1d<ET, fmt::V>(call_frame, m_arg).sign();
}
protected:
TensorViewInfo m_arg;
TensorViewInfo m_out;
};
}
}
}
}