Unverified commit 4ab74ea4, authored by Jai Menon, committed by GitHub

Merge branch 'master' into jmenon/cpu

parents 8c47e8f1 e6a41237
......@@ -6,7 +6,8 @@ RUN apt-get update && apt-get install -y \
build-essential cmake \
clang-3.9 clang-format-3.9 \
git \
-wget patch diffutils zlib1g-dev libtinfo-dev
+wget patch diffutils zlib1g-dev libtinfo-dev \
+doxygen sphinx-doc
RUN apt-get clean autoclean && \
apt-get autoremove -y
......
......@@ -75,7 +75,7 @@ check_cpu: build_ngraph_cpp_cpu
${VOLUME} \
${DOCKER_RUN_ENV} \
--env RUN_UID="$(shell id -u)" \
---env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD; cmake -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 .. 2>&1 | tee cmake.log ; env VERBOSE=1 make ${PARALLEL} 2>&1 | tee make.log ; env VERBOSE=1 make check 2>&1 | tee make_check.log" \
+--env RUN_CMD="set -e ; set -o pipefail ; cd ${DOCKUSER_HOME}/ngraph-cpp-test/BUILD; cmake -DCMAKE_CXX_COMPILER=clang++-3.9 -DCMAKE_C_COMPILER=clang-3.9 -DNGRAPH_BUILD_DOXYGEN_DOCS=ON -DNGRAPH_BUILD_SPHINX_DOCS=ON .. 2>&1 | tee cmake.log ; env VERBOSE=1 make ${PARALLEL} 2>&1 | tee make.log ; env VERBOSE=1 make check 2>&1 | tee make_check.log" \
"ngraph_cpp_cpu:${BUILD_VERSION}" \
sh -c "${DOCKUSER_HOME}/ngraph-cpp-test/contrib/docker/run_as_user.sh"
......
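Taken together with the Dockerfile change above, this wires documentation builds into CI: the doxygen and sphinx-doc packages are installed into the image, and the check_cpu target now configures CMake with -DNGRAPH_BUILD_DOXYGEN_DOCS=ON and -DNGRAPH_BUILD_SPHINX_DOCS=ON, so the doc targets are exercised alongside the regular build and unit tests.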
......@@ -41,6 +41,8 @@ set (SRC
ops/function_call.cpp
ops/get_tuple_element.cpp
ops/log.cpp
ops/maximum.cpp
ops/minimum.cpp
ops/multiply.cpp
ops/negative.cpp
ops/op.cpp
......@@ -180,6 +182,7 @@ install(DIRECTORY
${EIGEN_INCLUDE_DIR}/
DESTINATION "${CMAKE_INSTALL_INCLUDE}"
)
if (NOT APPLE)
install(DIRECTORY
${MKLDNN_INCLUDE_DIR}/
DESTINATION "${CMAKE_INSTALL_INCLUDE}"
......@@ -188,6 +191,7 @@ install(DIRECTORY
${MKLDNN_LIB_DIR}/
DESTINATION "${CMAKE_INSTALL_LIB}"
)
endif()
add_dependencies(ngraph eigen)
......
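The new if (NOT APPLE) guard makes the MKL-DNN header and library install steps conditional, presumably because the MKL-DNN dependency is not built on macOS; the Eigen headers above are still installed unconditionally.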
......@@ -72,8 +72,7 @@ std::vector<std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>>
for (size_t i = 0; i < args.size(); ++i)
{
auto arg = args[i];
-auto df_darg = results[i];
-auto df_darg_it = df_darg->get_vector().begin();
+auto& res = results[i]->get_vector();
auto& vec = arg->get_vector();
for (size_t j = 0; j < vec.size(); j++)
{
......@@ -81,13 +80,14 @@ std::vector<std::shared_ptr<ngraph::runtime::ParameterizedTensorView<ET>>>
vec[j] += delta;
cf->tensor_call(args_tv, {inc_y});
vec[j] = old_val;
-df_darg_it = std::transform(inc_vec.begin(),
-                            inc_vec.end(),
-                            ref_vec.begin(),
-                            df_darg_it,
-                            [inv_delta](typename ET::type y1, typename ET::type y0) {
-                                return inv_delta * (y1 - y0);
-                            });
+size_t res_k = j;
+for (size_t k = 0; k < inc_vec.size(); k++)
+{
+    auto y1 = inc_vec[k];
+    auto y0 = ref_vec[k];
+    res[res_k] = inv_delta * (y1 - y0);
+    res_k += vec.size();
+}
}
}
return results;
......
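The change above is a layout fix as much as a rewrite: both versions store the forward-difference estimate (y_k(x + delta*e_j) - y_k(x)) / delta, but the old std::transform wrote each perturbation's results contiguously, while the new strided write (res_k starts at j and advances by vec.size()) stores entry (k, j) at res[k * vec.size() + j], i.e. row-major with outputs as rows and inputs as columns. A minimal standalone sketch of the same scheme, with a hypothetical helper name and plain std::vector<float> in place of the parameterized tensor views:

#include <cstddef>
#include <functional>
#include <vector>

// Hypothetical helper, not part of this commit: forward-difference Jacobian of f at x,
// stored row-major as J[k * n + j] = (f(x + delta*e_j)[k] - f(x)[k]) / delta.
std::vector<float> numeric_jacobian(
    const std::function<std::vector<float>(const std::vector<float>&)>& f,
    std::vector<float> x,
    float delta)
{
    const std::vector<float> y0 = f(x); // reference output, f(x)
    const size_t n = x.size();          // number of inputs
    const size_t m = y0.size();         // number of outputs
    std::vector<float> J(m * n);
    for (size_t j = 0; j < n; j++)
    {
        const float old_val = x[j];
        x[j] += delta; // perturb one input coordinate
        const std::vector<float> y1 = f(x);
        x[j] = old_val;
        for (size_t k = 0; k < m; k++)
        {
            // Same strided write as the diff: entry (k, j), stride n per output row.
            J[k * n + j] = (y1[k] - y0[k]) / delta;
        }
    }
    return J;
}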
......@@ -117,6 +117,8 @@ namespace ngraph
std::shared_ptr<Node> backprop_node(const std::shared_ptr<Node>& x,
const std::shared_ptr<Node>& c);
/// Returns the shape if this node has a tensor type; otherwise raises an error.
const Shape& get_shape() const { return m_value_type->get_shape(); }
protected:
Nodes m_arguments;
std::shared_ptr<const ValueType> m_value_type;
......
......@@ -13,6 +13,7 @@
// ----------------------------------------------------------------------------
#include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/sum.hpp"
using namespace std;
using namespace ngraph::op;
......@@ -46,3 +47,11 @@ void Broadcast::propagate_types()
set_value_type_checked(
make_shared<TensorViewType>(arg_tensor_view_type->get_element_type(), m_shape));
}
void ngraph::op::Broadcast::generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta)
{
auto x = m_arguments[0];
adjoints.add_delta(x, make_shared<op::Sum>(delta, m_broadcast_axes));
}
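The adjoint here follows from the chain rule: Broadcast copies each x[i] to every position along the broadcast axes, so the contributions to the adjoint of x[i] are the sum of delta over exactly those axes — hence Sum(delta, m_broadcast_axes), which also restores delta to the shape of x.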
......@@ -80,6 +80,10 @@ namespace ngraph
/// \return A set containing the indices of the broadcast axes (0-based).
const AxisSet& get_broadcast_axes() const { return m_broadcast_axes; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
Shape m_shape;
AxisSet m_broadcast_axes;
};
......
......@@ -12,9 +12,15 @@
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <functional>
#include <memory>
#include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/dot.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/reshape.hpp"
#include "ngraph/ops/sum.hpp"
#include "ngraph/shape.hpp"
using namespace std;
using namespace ngraph::op;
......@@ -76,3 +82,166 @@ void Dot::propagate_types()
make_shared<TensorViewType>(arg0_tensor_type->get_element_type(), result_shape);
set_value_type_checked(result_type);
}
template <typename T>
T range(size_t n);
template <>
ngraph::AxisSet range<ngraph::AxisSet>(size_t n)
{
ngraph::AxisSet result;
for (size_t i = 0; i < n; i++)
{
result.insert(i);
}
return result;
}
template <>
ngraph::AxisVector range<ngraph::AxisVector>(size_t n)
{
ngraph::AxisVector result;
for (size_t i = 0; i < n; i++)
{
result.push_back(i);
}
return result;
}
void ngraph::op::Dot::generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta)
{
auto x = m_arguments[0];
auto y = m_arguments[1];
auto x_shape = x->get_shape();
auto y_shape = y->get_shape();
auto delta_shape = delta->get_shape();
if (is_scalar(x_shape))
{
adjoints.add_delta(y, make_shared<Dot>(delta, x));
if (is_scalar(y_shape))
{
// Just multiplication
adjoints.add_delta(x, delta * y);
return;
}
// scalar dot tensor
adjoints.add_delta(x, make_shared<Sum>(delta * y, range<AxisSet>(y_shape.size())));
return;
}
if (is_scalar(y_shape))
{
// tensor dot scalar
adjoints.add_delta(x, make_shared<Dot>(delta, y));
adjoints.add_delta(y, make_shared<Sum>(delta * x, range<AxisSet>(x_shape.size())));
return;
}
if (is_vector(y_shape))
{
if (is_vector(x_shape))
{
adjoints.add_delta(x, make_shared<Dot>(delta, y));
}
else
{
// X has shape IJ, Y has shape J, delta has shape I
// delta -> (I, 1)
// Y -> (1, J)
// delta . Y is (I, J)
Shape shape_delta_1 = delta->get_shape();
shape_delta_1.push_back(1);
auto delta_1 =
make_shared<Broadcast>(delta, shape_delta_1, AxisSet{delta->get_shape().size()});
Shape shape_1_y{1};
shape_1_y.insert(shape_1_y.end(), y_shape.begin(), y_shape.end());
auto y_1 = make_shared<Broadcast>(y, shape_1_y, AxisSet{0});
adjoints.add_delta(x, make_shared<Dot>(delta_1, y_1));
}
// X has shape IJ
// Y has shape J
// delta has shape I
// Need to move J to front of X and multiply by Y
Shape shape_xt(x_shape.size());
AxisVector x_axes(x_shape.size());
shape_xt[0] = x_shape.at(x_shape.size() - 1);
x_axes[0] = x_shape.size() - 1;
for (size_t i = 1; i < x_shape.size(); ++i)
{
shape_xt[i] = x_shape[i - 1];
x_axes[i] = i - 1;
}
auto x_reshape = make_shared<Reshape>(x, x_axes, shape_xt);
adjoints.add_delta(y, make_shared<Dot>(x_reshape, delta));
return;
}
// Tensor-tensor case:
// X has shape Ij (I = leading axes, j = reduction axis)
// Y has shape Kjl (j = reduction axis, second to last)
// delta, the adjoint of X.Y, has shape IKl
//
// delta -> I(Kl)
// Y -> (Kl)j
// delta.Y -> Ij
Shape s_I;
s_I.insert(s_I.begin(), x_shape.begin(), x_shape.end() - 1);
size_t s_j = x_shape[x_shape.size() - 1];
Shape s_K;
s_K.insert(s_K.begin(), y_shape.begin(), y_shape.end() - 2);
size_t s_l = y_shape[y_shape.size() - 1];
size_t s_Kl = shape_size(s_K) * s_l;
Shape shape_delta_I_Kl;
shape_delta_I_Kl.insert(shape_delta_I_Kl.end(), s_I.begin(), s_I.end());
shape_delta_I_Kl.push_back(s_Kl);
AxisVector idx_delta_I_Kl = range<AxisVector>(delta_shape.size());
auto delta_I_Kl = make_shared<Reshape>(delta, idx_delta_I_Kl, shape_delta_I_Kl);
Shape shape_y_Kl_j{s_Kl, s_j};
AxisVector idx_y_Kl_j = range<AxisVector>(y_shape.size() - 2);
idx_y_Kl_j.push_back(y_shape.size() - 1);
idx_y_Kl_j.push_back(y_shape.size() - 2);
auto y_Kl_j = make_shared<Reshape>(y, idx_y_Kl_j, shape_y_Kl_j);
adjoints.add_delta(x, make_shared<Dot>(delta_I_Kl, y_Kl_j));
// delta -> K(I)l
// X -> j(I)
// X.delta -> jKl -> Kjl
Shape shape_delta_K_I_l;
shape_delta_K_I_l.insert(shape_delta_K_I_l.begin(), s_K.begin(), s_K.end());
shape_delta_K_I_l.push_back(shape_size(s_I));
shape_delta_K_I_l.push_back(s_l);
AxisVector idx_delta = range<AxisVector>(delta_shape.size());
AxisVector idx_delta_K_I_l;
idx_delta_K_I_l.insert(idx_delta_K_I_l.end(),
idx_delta.begin() + s_I.size(),
idx_delta.begin() + s_I.size() + s_K.size());
idx_delta_K_I_l.insert(
idx_delta_K_I_l.end(), idx_delta.begin(), idx_delta.begin() + s_I.size());
idx_delta_K_I_l.push_back(delta_shape.size() - 1);
auto delta_K_I_l = make_shared<Reshape>(delta, idx_delta_K_I_l, shape_delta_K_I_l);
Shape shape_x_j_I;
shape_x_j_I.push_back(s_j);
shape_x_j_I.push_back(shape_size(s_I));
AxisVector idx_x = range<AxisVector>(x_shape.size());
AxisVector idx_x_j_I;
idx_x_j_I.push_back(idx_x[idx_x.size() - 1]);
idx_x_j_I.insert(idx_x_j_I.end(), idx_x.begin(), idx_x.begin() + idx_x.size() - 1);
auto x_j_I = make_shared<Reshape>(x, idx_x_j_I, shape_x_j_I);
auto jKl = make_shared<Dot>(x_j_I, delta_K_I_l);
Shape shape_Kjl;
shape_Kjl.insert(shape_Kjl.end(), s_K.begin(), s_K.end());
shape_Kjl.push_back(s_j);
shape_Kjl.push_back(s_l);
AxisVector idx_Kjl;
for (size_t i = 1; i < s_K.size() + 1; ++i)
{
idx_Kjl.push_back(i);
}
idx_Kjl.push_back(0);
idx_Kjl.push_back(y_shape.size() - 1);
auto Kjl = make_shared<Reshape>(jKl, idx_Kjl, shape_Kjl);
adjoints.add_delta(y, Kjl);
}
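As a sanity check, specialize the tensor-tensor branch to plain matrices (K empty): X is I×j, Y is j×l, delta is I×l. Then delta_I_Kl is just delta (I×l), y_Kl_j is Y transposed (l×j), and the first add_delta yields dX = delta · Yᵀ with shape I×j; likewise x_j_I is X transposed (j×I), delta_K_I_l is delta (I×l), and the second yields dY = Xᵀ · delta with shape j×l. These are the familiar matrix-product gradients; the reshapes above generalize the transposes to arbitrary leading axes I and K.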
......@@ -116,6 +116,10 @@ namespace ngraph
virtual std::string description() const override { return "Dot"; }
virtual void propagate_types() override;
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include "ngraph/ops/convert.hpp"
#include "ngraph/ops/greater.hpp"
#include "ngraph/ops/maximum.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/types/element_type.hpp"
using namespace std;
using namespace ngraph;
void ngraph::op::Maximum::generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta)
{
auto x = m_arguments[0];
auto y = m_arguments[1];
adjoints.add_delta(x,
delta * make_shared<op::Convert>(make_shared<op::Greater>(x, y),
element::Float32::element_type()));
adjoints.add_delta(y,
delta * make_shared<op::Convert>(make_shared<op::Greater>(y, x),
element::Float32::element_type()));
}
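This encodes the usual subgradient choice d max(x, y)/dx = 1 where x > y and 0 elsewhere, with the 0/1 mask built as delta * Convert(Greater(x, y)); on ties, x > y and y > x are both false, so neither input receives gradient. Note the mask is converted to Float32 unconditionally, which assumes float inputs. Minimum below is the exact mirror image, using Less in place of Greater.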
......@@ -52,6 +52,9 @@ namespace ngraph
{
}
virtual std::string description() const override { return "Maximum"; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
};
}
}
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#include <memory>
#include "ngraph/ops/convert.hpp"
#include "ngraph/ops/less.hpp"
#include "ngraph/ops/minimum.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/types/element_type.hpp"
using namespace std;
using namespace ngraph;
void ngraph::op::Minimum::generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta)
{
auto x = m_arguments[0];
auto y = m_arguments[1];
adjoints.add_delta(x,
delta * make_shared<op::Convert>(make_shared<op::Less>(x, y),
element::Float32::element_type()));
adjoints.add_delta(y,
delta * make_shared<op::Convert>(make_shared<op::Less>(y, x),
element::Float32::element_type()));
}
......@@ -52,6 +52,9 @@ namespace ngraph
{
}
virtual std::string description() const override { return "Minimum"; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
};
}
}
......@@ -78,3 +78,42 @@ void Reshape::propagate_types()
set_value_type_checked(
make_shared<TensorViewType>(arg_tensor_view_type->get_element_type(), m_output_shape));
}
void ngraph::op::Reshape::generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta)
{
auto x = m_arguments[0];
auto x_type = x->get_value_type();
auto x_tensor_view_type = dynamic_pointer_cast<const TensorViewType>(x_type);
if (nullptr == x_tensor_view_type)
{
throw ngraph_error("Argument to reshape is not a tensor view");
}
auto x_shape = x_tensor_view_type->get_shape();
auto x_rank = x_shape.size();
Shape permuted_x_shape(x_rank);
AxisVector x_input_order(x_rank);
bool is_permuted = false;
for (size_t i = 0; i < x_rank; ++i)
{
size_t permuted_i = m_input_order[i];
if (i != permuted_i)
{
is_permuted = true;
}
permuted_x_shape[i] = x_shape[permuted_i];
x_input_order[permuted_i] = i;
}
AxisVector input_order(m_output_shape.size());
for (size_t i = 0; i < m_output_shape.size(); i++)
{
input_order[i] = i;
}
auto reshape = make_shared<op::Reshape>(delta, input_order, permuted_x_shape);
if (is_permuted)
{
reshape = make_shared<op::Reshape>(reshape, x_input_order, x_shape);
}
adjoints.add_delta(x, reshape);
}
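To see the inverse permutation at work, take the transpose case from the reshape test below: x has shape {3, 4}, m_input_order is {1, 0}, and m_output_shape is {4, 3}. The loop computes permuted_x_shape = {4, 3} and x_input_order = {1, 0}, and sets is_permuted. delta (shape {4, 3}) is first reshaped with the identity order into permuted_x_shape (a no-op here; in general this undoes the output reshape), then re-permuted with x_input_order back to x's shape {3, 4}, so the adjoint of x is delta transposed, as expected for a transpose.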
......@@ -86,6 +86,9 @@ namespace ngraph
/// \return The shape of the output tensor.
const Shape& get_output_shape() const { return m_output_shape; }
protected:
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
const AxisVector m_input_order;
const Shape m_output_shape;
};
......
// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include "ngraph/runtime/ngvm/call_frame.hpp"
#include "ngraph/runtime/ngvm/eigen/utils.hpp"
#include "ngraph/runtime/ngvm/instruction.hpp"
#include "ngraph/runtime/tensor_view.hpp"
namespace ngraph
{
namespace runtime
{
namespace ngvm
{
namespace eigen
{
template <typename ET>
class MinimumInstruction : public Instruction
{
public:
MinimumInstruction(TensorViewInfo arg0, TensorViewInfo arg1, TensorViewInfo out)
: m_arg0(arg0)
, m_arg1(arg1)
, m_out(out)
{
}
virtual void execute(CallFrame& call_frame) const override
{
EigenArray1d<ET>(call_frame, m_out) =
EigenArray1d<ET>(call_frame, m_arg0)
.min(EigenArray1d<ET>(call_frame, m_arg1));
}
protected:
TensorViewInfo m_arg0;
TensorViewInfo m_arg1;
TensorViewInfo m_out;
};
}
}
}
}
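For reference, the .min() call is Eigen's coefficient-wise minimum over the flattened 1-d views of the two inputs. A standalone illustration, independent of the NGVM call-frame machinery (assumes only Eigen itself is available):

#include <Eigen/Dense>
#include <iostream>

int main()
{
    Eigen::ArrayXf a(4), b(4);
    a << 1, 8, -8, 17;
    b << 1, 2, 4, 8;
    // Coefficient-wise minimum, as computed by MinimumInstruction::execute.
    Eigen::ArrayXf out = a.min(b);
    std::cout << out.transpose() << std::endl; // prints: 1 2 -8 8
    return 0;
}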
......@@ -46,6 +46,7 @@
#include "ngraph/ops/less_eq.hpp"
#include "ngraph/ops/log.hpp"
#include "ngraph/ops/maximum.hpp"
#include "ngraph/ops/minimum.hpp"
#include "ngraph/ops/multiply.hpp"
#include "ngraph/ops/negative.hpp"
#include "ngraph/ops/not_equal.hpp"
......@@ -95,6 +96,7 @@
#include "ngraph/runtime/ngvm/eigen/matrix_transpose.hpp"
#include "ngraph/runtime/ngvm/eigen/matrix_vector_product.hpp"
#include "ngraph/runtime/ngvm/eigen/maximum.hpp"
#include "ngraph/runtime/ngvm/eigen/minimum.hpp"
#include "ngraph/runtime/ngvm/eigen/multiply.hpp"
#include "ngraph/runtime/ngvm/eigen/negate.hpp"
#include "ngraph/runtime/ngvm/eigen/not_equal.hpp"
......@@ -370,6 +372,7 @@ ExternalFunction::OpMap& ExternalFunction::get_op_map()
REGISTER_NUMERIC_BINOP(op::Less, eigen::LessThanInstruction);
REGISTER_NUMERIC_BINOP(op::LessEq, eigen::LessEqInstruction);
REGISTER_NUMERIC_BINOP(op::Maximum, eigen::MaximumInstruction);
REGISTER_NUMERIC_BINOP(op::Minimum, eigen::MinimumInstruction);
REGISTER_NUMERIC_BINOP(op::Multiply, eigen::MultiplyInstruction);
REGISTER_NUMERIC_BINOP(op::Subtract, eigen::SubtractInstruction);
......
......@@ -27,4 +27,7 @@ namespace ngraph
/// Row-major strides for a shape
Strides row_major_strides(const Shape& shape);
inline bool is_scalar(const Shape& shape) { return 0 == shape.size(); }
inline bool is_vector(const Shape& shape) { return 1 == shape.size(); }
}
......@@ -14,8 +14,9 @@
#include <memory>
#include "ngraph/except.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/types/type.hpp"
#include "ngraph/util.hpp"
using namespace std;
......@@ -64,6 +65,11 @@ void TupleType::collect_tensor_views(
}
}
const Shape& TupleType::get_shape() const
{
throw ngraph_error("get_shape() called on Tuple");
}
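With get_shape() now virtual on ValueType (see the header change below), Node::get_shape() can forward to m_value_type->get_shape() uniformly: tensor-typed nodes report their shape, and tuple-typed nodes throw at the call site instead of failing to type-check. A small hypothetical illustration, not part of the commit:

#include <memory>
#include "ngraph/types/type.hpp"

void shape_contract_demo()
{
    using namespace ngraph;
    auto tv = std::make_shared<TensorViewType>(element::Float32::element_type(), Shape{2, 3});
    const Shape& s = tv->get_shape(); // ok: {2, 3}
    (void)s;
    // By contrast, calling get_shape() on a TupleType throws
    // ngraph_error("get_shape() called on Tuple").
}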
std::ostream& ngraph::operator<<(std::ostream& out, const ValueType& obj)
{
out << "ValueType()";
......
......@@ -42,6 +42,7 @@ namespace ngraph
/// Add tensor views in depth-first order.
virtual void collect_tensor_views(
std::vector<std::shared_ptr<const TensorViewType>>& views) const = 0;
virtual const Shape& get_shape() const = 0;
friend std::ostream& operator<<(std::ostream&, const ValueType&);
};
......@@ -59,7 +60,7 @@ namespace ngraph
}
const element::Type& get_element_type() const { return m_element_type; }
-const Shape& get_shape() const { return m_shape; }
+virtual const Shape& get_shape() const override { return m_shape; }
virtual bool operator==(const ValueType& that) const override;
virtual void collect_tensor_views(
std::vector<std::shared_ptr<const TensorViewType>>& views) const override;
......@@ -95,6 +96,7 @@ namespace ngraph
virtual bool operator==(const ValueType& that) const override;
virtual void collect_tensor_views(
std::vector<std::shared_ptr<const TensorViewType>>& views) const override;
virtual const Shape& get_shape() const override;
friend std::ostream& operator<<(std::ostream&, const TupleType&);
protected:
......
......@@ -65,6 +65,44 @@ TEST(backwards, add)
manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, broadcast0)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape = Shape{3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape));
auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
return make_shared<Function>(make_shared<op::Broadcast>(X0, Shape{2, 3}, AxisSet{0}),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0});
};
EXPECT_TRUE(
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, broadcast1)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape = Shape{3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape));
auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
return make_shared<Function>(make_shared<op::Broadcast>(X0, Shape{3, 2}, AxisSet{1}),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0});
};
EXPECT_TRUE(
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, divide)
{
auto manager = runtime::Manager::get("NGVM");
......@@ -90,6 +128,138 @@ TEST(backwards, divide)
manager, backend, make_graph, {x0, x2}, .01f, .01f));
}
TEST(backwards, dot_scalar_scalar)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape0 = Shape{};
auto shape1 = Shape{};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1));
auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape1);
return make_shared<Function>(make_shared<op::Dot>(X0, X1),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
};
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>(
manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_scalar_tensor)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape0 = Shape{};
auto shape1 = Shape{3, 4};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1));
auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape1);
return make_shared<Function>(make_shared<op::Dot>(X0, X1),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
};
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>(
manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_tensor_scalar)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape0 = Shape{3, 4};
auto shape1 = Shape{};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1));
auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape1);
return make_shared<Function>(make_shared<op::Dot>(X0, X1),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
};
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>(
manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_vector_vector)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape0 = Shape{3};
auto shape1 = Shape{3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1));
auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape1);
return make_shared<Function>(make_shared<op::Dot>(X0, X1),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
};
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>(
manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_tensor_vector)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape0 = Shape{4, 3};
auto shape1 = Shape{3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1));
auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape1);
return make_shared<Function>(make_shared<op::Dot>(X0, X1),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
};
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>(
manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, dot_tensor2_tensor2)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape0 = Shape{4, 3};
auto shape1 = Shape{3, 5};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape0));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape1));
auto make_graph = [shape0, shape1]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape0);
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape1);
return make_shared<Function>(make_shared<op::Dot>(X0, X1),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
};
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>(
manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, exp)
{
auto manager = runtime::Manager::get("NGVM");
......@@ -126,6 +296,48 @@ TEST(backwards, log)
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, maximum)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape));
auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
return make_shared<Function>(make_shared<op::Maximum>(X0, X1),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
};
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>(
manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, minimum)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape = Shape{2, 3};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape));
auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape));
auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
return make_shared<Function>(make_shared<op::Minimum>(X0, X1),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
};
EXPECT_TRUE(autodiff_numeric_compare<element::Float32>(
manager, backend, make_graph, {x0, x1}, .01f, .01f));
}
TEST(backwards, multiply)
{
auto manager = runtime::Manager::get("NGVM");
......@@ -179,6 +391,25 @@ TEST(backwards, parameter)
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, reshape)
{
auto manager = runtime::Manager::get("NGVM");
auto backend = manager->allocate_backend();
test::Uniform<element::Float32> rng(-1.0f, 1.0f);
auto shape = Shape{3, 4};
auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape));
auto make_graph = [shape]() {
auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
return make_shared<Function>(make_shared<op::Reshape>(X0, AxisVector{1, 0}, Shape{4, 3}),
nullptr,
std::vector<std::shared_ptr<op::Parameter>>{X0});
};
EXPECT_TRUE(
autodiff_numeric_compare<element::Float32>(manager, backend, make_graph, {x0}, .01f, .01f));
}
TEST(backwards, subtract)
{
auto manager = runtime::Manager::get("NGVM");
......
......@@ -898,6 +898,30 @@ TEST(execute, maximum)
ASSERT_EQ((vector<float>{1, 8, 4, 17, 0, 0.5, 2, 1.5}), result->get_vector());
}
TEST(execute, minimum)
{
auto shape = Shape{2, 2, 2};
auto A = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto B = make_shared<op::Parameter>(element::Float32::element_type(), shape);
auto rt = make_shared<TensorViewType>(element::Float32::element_type(), shape);
auto f = make_shared<Function>(make_shared<op::Minimum>(A, B), rt, op::Parameters{A, B});
auto manager = runtime::Manager::get("NGVM");
auto external = manager->compile(f);
auto backend = manager->allocate_backend();
auto cf = backend->make_call_frame(external);
// Create some tensors for input/output
auto a = backend->make_parameterized_tensor_view<element::Float32>(shape);
*a = vector<float>{1, 8, -8, 17, -0.5, 0.5, 2, 1};
auto b = backend->make_parameterized_tensor_view<element::Float32>(shape);
*b = vector<float>{1, 2, 4, 8, 0, 0, 1, 1.5};
auto result = backend->make_parameterized_tensor_view<element::Float32>(shape);
(*cf)({a, b}, {result});
ASSERT_EQ((vector<float>{1, 2, -8, 8, -.5, 0, 1, 1}), result->get_vector());
}
TEST(execute, negative)
{
auto shape = Shape{2, 3};
......