docker rmi ngraph_cpp_cpu:latest || echo "keep going if docker rmi command fails"
docker tag `docker images -q "ngraph_cpp_cpu:${BUILD_VERSION}"` ngraph_cpp_cpu:latest
build_all: build_ngraph_cpp_cpu
check_cpu: build_ngraph_cpp_cpu
# Remove old distribution directory if present
(test-d"${DIR}"/BUILD/ngraph_dist &&rm-fr"${DIR}"/BUILD/ngraph_dist &&echo"Removed old ${DIR}/BUILD/ngraph_dist directory")||echo"Previous ngraph_dist directory not found"
# Make BUILD directory as user
mkdir -p "${DIR}"/BUILD
chmod ug+rwx "${DIR}"/BUILD
# Need to use /tmp/ngraph-cpp-test/BUILD, because running as user
# Can't use /root/ngraph-cpp-test/BUILD, because /root not accessible to user
docker run --rm --tty \
--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD ; cmake -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 .. 2>&1 | tee cmake.log ; env VERBOSE=1 make ${PARALLEL} 2>&1 | tee make.log ; env VERBOSE=1 make check 2>&1 | tee make_check.log" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
# update the files to be owned by the calling user instead of root, to avoid docker mount problems with file ownership
docker run --rm ${VOLUME} \
--env MY_UID=${CALLER_UID} \
--env MY_GID=${CALLER_GID} \
--env MY_ROOT_DIR=/root/ngraph-cpp-test \
-t ngraph_cpp_cpu \
/tmp/chown_files.sh
shell: build_ngraph_cpp_cpu
# "make shell" runs an interactive shell in the docker image, for debugging
docker run --rm --tty --interactive \
${VOLUME} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "cd ${DOCKUSER_HOME} ; ${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
install:
# Puts ngraph_dist in BUILD directory. This is used by Jenkins ngraph-tensorflow batch job.
# Note: We currently have a bug where cmake only installs in $HOME. Jira NGTF-205 has been
# opened for this. For now, we install to $HOME here, then move the directory.
docker run --rm --tty \
${VOLUME} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
--env RUN_CMD="set -e ; set -o pipefail; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD ; test -d ngraph_dist && rm -fr ngraph_dist && echo 'Removed old ngraph_dist directory' ; make install 2>&1 | tee make_install.log ; mv -v ${DOCKUSER_HOME}/ngraph_dist ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
/// | `args`[\f$i\f$] | \f$E[d_1,\dots,d_{a-1},d^i_a,d_{a+1},\dots,d_n]~(n \geq 1)\f$ | One or more input tensors, all of which have the same element type, and the same shape, except possibly at axis \f$a\f$. |
/// | \f$E[d_1,\dots,d_{a-1},\Sigma_i(d^i_a),d_{a+1},\dots,d_n]\f$ | The tensor \f$T\f$, where \f$T\f$ is the concatenation of the input tensors along axis \f$a\f$. |
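As a worked instance of the output row above (added here for illustration, not part of the original documentation): concatenating inputs of shape \f$E[2,3]\f$ and \f$E[2,4]\f$ along axis \f$a=1\f$ yields \f$E[2,\Sigma_i(d^i_1)] = E[2,3+4] = E[2,7]\f$; every dimension other than axis 1 must match exactly.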
/// There are two subclasses: ParameterizedConstant and Constant. ParameterizedConstant allows constant values to be supplied via vectors of the corresponding C++ type;
/// however, the ParameterizedConstant subclass can only be used when type information is available at C++ compile-time. In cases where types are not known until
/// C++ runtime, the Constant subclass must be used instead.
class ConstantBase : public Node
{
protected:
/// \brief Constructs a constant base-type node.
///
/// \param type The TensorViewType for the constant.
/// \brief Class for constants whose element types may not be known until graph construction time.
///
/// This class must be used when the type of the tensor constant is unknown at C++ compile-time. For other cases, ParameterizedConstant should be used.
/// | `et` | The ngraph::element::Type of the tensor constant. |
/// | `shape` | The ngraph::Shape of the tensor constant. |
/// | `value_strings` | A list of strings containing literals for initialization of the tensor constant. These strings are parsed with the appropriate instance of ngraph::element::TraitedType::read. |
/// | \f$E[d_1,\dots,d_n]\f$ | A constant tensor with the specified element type, shape, and values. |
///
/// ## Implementation Status
///
/// | Backend | Status |
/// | ------- | ------------------ |
/// | NGVM | Fully implemented. |
class Constant : public ConstantBase
{
public:
/// \brief Constructs a tensor constant.
///
/// \param et The element type of the tensor constant.
/// \param shape The shape of the tensor constant.
/// \param value_strings A list of literals for initializing the tensor constant. There must be one literal for each element of the tensor; i.e., `value_strings.size()` must equal `ngraph::shape_size(shape)`.
Constant(const element::Type& et,
         const Shape& shape,
         const std::vector<std::string>& value_strings)
...
@@ -90,6 +160,11 @@ namespace ngraph
{
}
/// \brief Constructs a tensor constant with the same initialization value copied across the tensor.
///
/// \param et The element type of the tensor constant.
/// \param shape The shape of the tensor constant.
/// \param value_string A literal used to initialize each element of the tensor constant.
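For reference, a minimal usage sketch of the two Constant constructors documented above. Only the constructor parameters come from this header; the ngraph/ngraph.hpp include path, element::Float32::element_type(), and creation via std::make_shared are assumptions about this revision of the API.

// Illustrative sketch only; see the assumptions noted above.
#include <memory>
#include <string>
#include <vector>
#include "ngraph/ngraph.hpp" // assumed umbrella header

std::shared_ptr<ngraph::Node> constant_examples()
{
    using namespace ngraph;
    // 2x2 float constant with one literal per element (value_strings constructor).
    auto c0 = std::make_shared<op::Constant>(element::Float32::element_type(),
                                             Shape{2, 2},
                                             std::vector<std::string>{"1", "2", "3", "4"});
    // 2x2 float constant with a single literal copied across the tensor
    // (value_string constructor documented just above).
    auto c1 = std::make_shared<op::Constant>(element::Float32::element_type(),
                                             Shape{2, 2},
                                             std::string{"0"});
    (void)c1;
    return c0;
}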
/// Takes two arguments `arg0` and `arg1`. There are three possible cases:
///
/// 1. `arg0` or `arg1` is 0-dimensional. Then, treats that 0-dimensional argument as a scalar and computes a scalar-tensor product.
/// (Example: `arg0` has shape `{1,2,3}` and `arg1` has shape `{}`; then the result will have shape `{1,2,3}`.)
///
/// 2. `arg1` is a vector (1-dimensional tensor). Then, computes a dot product reducing on the innermost (rightmost) dimensions of `arg0` and `arg1`.
/// (Example: `arg0` has shape `{1,2,3}` and `arg1` has shape `{3}`; then the result will have shape `{1,2}`.)
///
/// 3. `arg1` is more than 1-dimensional. Then, computes a dot product reducing on the innermost (rightmost) dimension of `arg0`, and the next-to-innermost dimension of `arg1`.
/// (Example: `arg0` has shape `{3,4}` and `arg1` has shape `{4,3}`; then the result will have shape `{3,3}`.)
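A hedged C++ sketch of case 3, using the Constant constructor documented earlier in this change to build the inputs. Only the Constant signature comes from this source; op::Dot taking two node pointers, element::Float32::element_type(), the ngraph/ngraph.hpp include path, and creation via std::make_shared are assumptions about this revision of the API.

// Illustrative sketch only; see the assumptions noted above.
#include <memory>
#include <string>
#include <vector>
#include "ngraph/ngraph.hpp" // assumed umbrella header

std::shared_ptr<ngraph::Node> dot_case3_example()
{
    using namespace ngraph;
    // arg0 has shape {3,4}, arg1 has shape {4,3}; every element is the literal "1".
    auto arg0 = std::make_shared<op::Constant>(element::Float32::element_type(),
                                               Shape{3, 4},
                                               std::vector<std::string>(12, "1"));
    auto arg1 = std::make_shared<op::Constant>(element::Float32::element_type(),
                                               Shape{4, 3},
                                               std::vector<std::string>(12, "1"));
    // Case 3: reduce on the innermost axis of arg0 and the next-to-innermost
    // axis of arg1, giving a result of shape {3,3}.
    return std::make_shared<op::Dot>(arg0, arg1);
}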
/// | `arg0` | \f$E[d]\f$ | A vector of any element type. |
/// | `arg1` | \f$E[d_1,\dots,d_n,d]~(n \geq 0)\f$ | A tensor of any shape whose innermost dimension matches `arg0`'s size, with the same element type as `arg0`. |
///
/// <i>(Note: in the particular case where \f$n = 0\f$, this is a vector dot product; when \f$n = 1\f$, this is a vector-matrix product.)</i>
/// | `arg0` | \f$E[d_1,\dots,d_n]~(n \geq 1)\f$ | A tensor of any shape with rank of at least 1, and any element type. |
/// | `arg1` | \f$E[d'_1,\dots,d'_m]~(m \geq 2\text{ and }d'_{m-1}=d_n)\f$ | A tensor with the same element type as `arg0`, and any shape with rank of at least 2 whose next-to-innermost dimension matches `arg0`'s innermost dimension. |
///
/// <i>(Note: in the particular case where \f$n = m = 2\f$, this is a matrix product.)</i>
/// | \f$E[d_1,\dots,d_{n-1},d'_1,\dots,d'_{m-2},d'_{m}]\f$ | The tensor \f$T\f$, where \f$T[i_1,\dots,i_{n-1},j_1,\dots,j_{m-2},j_m] = \dots\f$ TODO: FIXME: finish this; but it's like numpy. |
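The result formula above is marked TODO in the source. Based solely on the reduction rule stated in case 3 (reduce on the innermost axis of `arg0` and the next-to-innermost axis of `arg1`), one plausible reading, offered here only as an illustration and not as the authoritative definition, is \f$T[i_1,\dots,i_{n-1},j_1,\dots,j_{m-2},j_m] = \sum_{k=1}^{d_n} \texttt{arg0}[i_1,\dots,i_{n-1},k] \cdot \texttt{arg1}[j_1,\dots,j_{m-2},k,j_m]\f$.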
/// | `args` | \f$T_1,\dots,T_n\f$ where \f$n\f$ matches the number of arguments expected by `function` and \f$T_i\f$ matches the type expected for the \f$i\f$th argument of `function`. | The arguments for the function call. |
/// | `reduction_function` | The scalar function used to reduce the input tensor. Must take two arguments of type \f$E[]\f$ and return type \f$E[]\f$. |
/// | `reduction_axes` | The axes to eliminate through reduction. |
/// | `arg_reductee` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any shape, with the element type matching that expected by the reduction function. |
/// | `arg_init` | \f$E[]\f$ | A scalar to be used as a substitute output value on zero-sized axes. |
/// | \f$E[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by reduction. |
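A worked instance of the rows above (added for illustration): with `arg_reductee` of shape \f$E[2,3]\f$, `reduction_axes` \f$A=\{1\}\f$, and a `reduction_function` computing \f$f(x,y)=x+y\f$, the output has shape \f$E[\textit{delete}(\{1\},2,3)] = E[2]\f$ and each output element is the sum of the corresponding row of the input; if axis 1 had size zero, each output element would instead be `arg_init`.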
/// | `arg` | \f$E[d_1,\dots,d_n]~(n \geq 0)\f$ | An input tensor of any type and shape, as long as the product of \f$d_i\f$ equals the product of \f$d'_i\f$. |
/// | NGVM | Fully implemented for scalars, vectors, and matrices. Implemented for other shapes only when there is no reordering of the input axes, i.e. `input_order` is \f$(0,\dots,n-1)\f$. |
class Reshape : public Builtin
{
public:
/// \brief Constructs a reshape operation.
///
/// \param arg The tensor view to be reshaped.
/// \param input_order The order in which to iterate over input axes. This must be a permutation of the
/// sequence \f$(0,\dots,n-1)\f$ where \f$n\f$ is the rank of the input tensor.
/// \param output_shape The output shape. If the input shape is \f$(a_0,\dots,a_{k-1})\f$ then the output shape must
/// be of the form \f$(b_0,\dots,b_{j-1})\f$ where \f$\Pi(a_i) = \Pi(b_i)\f$.
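A hedged C++ sketch of the constructor documented above: a transpose of a 2x3 constant, expressed as a reshape with a permuted input_order. The parameter order (arg, input_order, output_shape) follows the documentation above; the AxisVector type, element::Float32::element_type(), the ngraph/ngraph.hpp include path, and creation via std::make_shared are assumptions about this revision of the API.

// Illustrative sketch only; see the assumptions noted above.
#include <memory>
#include <string>
#include <vector>
#include "ngraph/ngraph.hpp" // assumed umbrella header

std::shared_ptr<ngraph::Node> reshape_transpose_example()
{
    using namespace ngraph;
    // A 2x3 constant holding the literals "1".."6" in row-major order.
    auto arg = std::make_shared<op::Constant>(
        element::Float32::element_type(), Shape{2, 3},
        std::vector<std::string>{"1", "2", "3", "4", "5", "6"});
    // input_order {1,0} iterates the input axes in reverse, so with
    // output_shape {3,2} this reshape is a 2x3 -> 3x2 transpose
    // (product of input dims == product of output dims: 6 == 6).
    return std::make_shared<op::Reshape>(arg, AxisVector{1, 0}, Shape{3, 2});
}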
/// | `lower_bounds` | The (inclusive) lower-bound coordinates \f$l_i\f$ for the tensor slice. For example, a lower-bound of \f$(1,2)\f$ means to start the slice at row 1 and column 2. |
/// | `upper_bounds` | The (non-inclusive) upper-bound coordinates \f$u_i\f$ for the tensor slice. For example, an upper-bound of \f$(5,4)\f$ means to end the slice before row 5 and column 4. |
/// | `step` | The "step" or "stride" \f$s_i\f$ for the tensor slice. For example, a stride of \f$(1,3)\f$ means to take every row, and every third column (starting at the lower bound). |
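Combining the three example values above into one worked case (added for illustration): applied to an \f$E[6,8]\f$ tensor, `lower_bounds` \f$(1,2)\f$, `upper_bounds` \f$(5,4)\f$, and `step` \f$(1,3)\f$ select rows \f$1,2,3,4\f$ and only column \f$2\f$ (the next stepped column, \f$2+3=5\f$, is not below the upper bound \f$4\f$), so the result has shape \f$E[\lceil (5-1)/1 \rceil, \lceil (4-2)/3 \rceil] = E[4,1]\f$.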
/// | \f$N[\textit{delete}(A,d_1,\dots,d_n)]\f$ | The tensor \f$T\f$, where \f$T\f$ is the input tensor with the `reduction_axes` \f$A\f$ eliminated by summation. |