Unverified Commit 8351293a authored by Tomasz Dołbniak, committed by GitHub

Merge branch 'master' into etusien/clamp

parents 0df3202b f6f3a032
......@@ -306,12 +306,12 @@ set(NGRAPH_INSTALL_DOC "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_DOCDIR}")
set(NGRAPH_INSTALL_BIN "${CMAKE_INSTALL_PREFIX}/${CMAKE_INSTALL_BINDIR}")
if (LINUX)
    if (DEFINED NGRAPH_RPATH)
        set(CMAKE_BUILD_RPATH "$ORIGIN:${NGRAPH_RPATH}")
        set(CMAKE_INSTALL_RPATH "$ORIGIN:${NGRAPH_RPATH}")
    else()
        set(CMAKE_BUILD_RPATH "$ORIGIN")
        set(CMAKE_INSTALL_RPATH "$ORIGIN")
    endif()
    set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
    set(CMAKE_BUILD_WITH_INSTALL_RPATH TRUE)
endif()
#-----------------------------------------------------------------------------------------------
......
six
numpy==1.15.4; python_version == "3.4"
numpy; python_version != "3.4"
numpy==1.16.4; python_version == "2.7"
numpy; python_version != "2.7" and python_version != "3.4"
typing
......@@ -370,6 +370,9 @@ class BuildExt(build_ext):
with open(os.path.join(PYNGRAPH_ROOT_DIR, 'requirements.txt')) as req:
    requirements = req.read().splitlines()
+setup_requires = [
+    item for item in requirements if item.strip().startswith('numpy')
+]
setup(
    name='ngraph-core',
......@@ -386,7 +389,7 @@ setup(
    packages=packages,
    cmdclass={'build_ext': BuildExt},
    data_files=data_files,
-    setup_requires=['numpy==1.16.4'],
+    setup_requires=setup_requires,
    install_requires=requirements,
    zip_safe=False,
    extras_require={
......
......@@ -34,7 +34,7 @@ namespace ngraph
/// \param arg_pad_value The node producing the scalar value to be inserted for padding.
/// \param padding_below The padding-below widths.
/// \param padding_above The padding-above widths.
-/// \param pad_mode The padding mode: CONSTANT(default), EDGE or REFLECT.
+/// \param pad_mode The padding mode: CONSTANT(default), EDGE, REFLECT or SYMMETRIC.
Pad(const std::shared_ptr<Node>& arg,
const std::shared_ptr<Node>& arg_pad_value,
const CoordinateDiff& padding_below,
......
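For orientation, here is a minimal sketch of how the extended constructor is exercised with the new mode. It mirrors the pad_symmetric test further down in this diff (shapes and padding widths are borrowed from that test; the function name is invented here). Note that, as the test data shows, the scalar pad value is only consumed in CONSTANT mode; SYMMETRIC mirrors the input instead.

#include "ngraph/ngraph.hpp"

using namespace ngraph;

// Minimal sketch: a 2x3 tensor padded by {1, 2} below and above in
// SYMMETRIC mode, producing a 4x7 output.
std::shared_ptr<Function> make_symmetric_pad_graph()
{
    auto arg = std::make_shared<op::Parameter>(element::f32, Shape{2, 3});
    auto pad_value = std::make_shared<op::Parameter>(element::f32, Shape{});
    auto pad = std::make_shared<op::Pad>(arg,
                                         pad_value,
                                         CoordinateDiff{1, 2}, // padding_below
                                         CoordinateDiff{1, 2}, // padding_above
                                         op::PadMode::SYMMETRIC);
    return std::make_shared<Function>(pad, ParameterVector{arg, pad_value});
}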
......@@ -3027,7 +3027,9 @@ namespace ngraph
case ngraph::op::PadMode::REFLECT:
    pad_mode_string = "ngraph::op::PadMode::REFLECT";
    break;
-case ngraph::op::PadMode::SYMMETRIC: throw ngraph_error("Unsupported PadMode");
+case ngraph::op::PadMode::SYMMETRIC:
+    pad_mode_string = "ngraph::op::PadMode::SYMMETRIC";
+    break;
}
writer << "reference::pad<" << out[0].get_type() << ">(" << args[0].get_name()
       << ",\n";
......
......@@ -123,6 +123,7 @@ pad_reflect_1d_bottom_neg_bigger_than_tensor
pad_reflect_1d_multi_reflect
pad_reflect_2d
pad_reflect_2d_with_neg
+pad_symmetric
# Quantized operators are not supported on gpu backend
model_dequantize_linear
......
......@@ -40,6 +40,7 @@ pad_reflect_1d_bottom_neg_bigger_than_tensor
pad_reflect_1d_multi_reflect
pad_reflect_2d
pad_reflect_2d_with_neg
+pad_symmetric
# Not implemented
batch_mat_mul_forward
......
......@@ -74,3 +74,11 @@ target_include_directories(plaidml_backend SYSTEM PUBLIC ${PLAIDML_INCLUDE_DIRS}
target_link_libraries(plaidml_backend PUBLIC ngraph libplaidml)
install(TARGETS plaidml_backend LIBRARY DESTINATION ${NGRAPH_INSTALL_LIB})
+set(CMAKE_MACOSX_RPATH 1)
+if(APPLE)
+    set_property(TARGET plaidml_backend PROPERTY INSTALL_RPATH "@loader_path/;@loader_path/../../..")
+elseif(DEFINED NGRAPH_RPATH)
+    set_property(TARGET plaidml_backend PROPERTY INSTALL_RPATH "\$ORIGIN;\$ORIGIN/../../..;${NGRAPH_RPATH}")
+else()
+    set_property(TARGET plaidml_backend PROPERTY INSTALL_RPATH "\$ORIGIN;\$ORIGIN/../../..")
+endif()
......@@ -137,6 +137,7 @@ pad_reflect_2d_with_neg
pad_negative_exterior_2d
pad_negative_exterior_2d_all_negative
pad_negative_exterior_4d
+pad_symmetric
max_trivial_int8
max_trivial_5d_int32
max_3d_to_scalar_double
......@@ -315,16 +316,3 @@ avg_pool_3d_uneven_strided_padded
rnn_cell_activation_function
gru_cell_bias_clip
gru_cell_linear_before_reset
-# After https://github.com/NervanaSystems/ngraph/pull/3262, these tests began
-# failing with what appear to be precision issues. That PR simply split the
-# old "backend_test.in.cpp" into multiple files. The only relevant side effect
-# I can think of here is that the order of test execution changed as a result.
-softmax_all
-softmax_axis
-softmax_underflow
-softmax_overflow
-sigmoid_n1c1h2w2
-sigmoid_n1c1h4
-sigmoid_bprop_n1c1h4
-lrn
......@@ -164,8 +164,31 @@ namespace ngraph
}
case op::PadMode::SYMMETRIC:
{
-// TODO: Add support for Symmetric mode
-throw ngraph_error("Symmetric mode padding not supported");
+Coordinate c = in_coord; // have to copy because in_coord is const
+for (size_t i = 0; i < c.size(); i++)
+{
+    ptrdiff_t pos = padding_below[i] - (c[i] + 1);
+    if (pos >= 0)
+    {
+        c[i] = static_cast<size_t>(pos + padding_below[i]);
+    }
+    else
+    {
+        pos = -(pos + 1);
+        ptrdiff_t src_dim = static_cast<ptrdiff_t>(arg0_shape[i]);
+        if (pos < src_dim)
+        {
+            c[i] = static_cast<size_t>(pos + padding_below[i]);
+        }
+        else
+        {
+            c[i] = static_cast<size_t>(padding_below[i] + src_dim +
+                                       padding_above[i] - pos);
+        }
+    }
+}
+v = arg0[input_transform.index(c)];
+break;
}
}
......
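The coordinate arithmetic above is easier to follow collapsed to a single dimension. Below is a small self-contained sketch (all names are invented here, not taken from the codebase) of the same mapping: an output index is reflected across the nearer edge with the edge element included in the mirror, which is what distinguishes SYMMETRIC from REFLECT. Like the single-reflection case the reference code handles, it assumes the padding is no wider than the source dimension. It reproduces a row of the pad_symmetric test added later in this diff.

#include <cstddef>
#include <cstdio>
#include <vector>

// Map an output index to its source index for SYMMETRIC padding along
// one dimension (valid while pad <= dim, i.e. at most one reflection).
std::size_t symmetric_source_index(std::size_t out_idx,
                                   std::ptrdiff_t pad_below,
                                   std::size_t dim)
{
    std::ptrdiff_t s = static_cast<std::ptrdiff_t>(out_idx) - pad_below;
    if (s < 0)
    {
        s = -s - 1; // below the tensor: mirror, edge element included
    }
    else if (s >= static_cast<std::ptrdiff_t>(dim))
    {
        s = 2 * static_cast<std::ptrdiff_t>(dim) - s - 1; // above: mirror back
    }
    return static_cast<std::size_t>(s);
}

int main()
{
    // Row {1, 2, 3} padded by 2 on each side, as in the pad_symmetric test:
    std::vector<float> row{1, 2, 3};
    for (std::size_t o = 0; o < row.size() + 4; ++o)
    {
        std::printf("%g ", row[symmetric_source_index(o, 2, row.size())]);
    }
    std::printf("\n"); // prints: 2 1 1 2 3 3 2
}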
......@@ -14,6 +14,16 @@
// limitations under the License.
//*****************************************************************************
+// clang-format off
+#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#endif
+#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#endif
+// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
......
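These token-pasted guards read oddly in template form: ${BACKEND_NAME} is substituted when the .in.cpp file is instantiated for a concrete backend. A hedged, self-contained illustration of the mechanism (the backend name INTERPRETER and the value 20 are invented for the example, and where the per-backend macro actually gets defined is build configuration not shown in this diff):

#include <cstdio>

// Assumption for illustration only: the per-backend macro is defined
// before the guarded block is reached.
#define INTERPRETER_FLOAT_TOLERANCE_BITS 20

// With ${BACKEND_NAME} expanded to INTERPRETER, the guarded block becomes:
#ifdef INTERPRETER_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS INTERPRETER_FLOAT_TOLERANCE_BITS
#endif

int main()
{
    std::printf("DEFAULT_FLOAT_TOLERANCE_BITS = %d\n", DEFAULT_FLOAT_TOLERANCE_BITS);
}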
......@@ -939,3 +939,36 @@ NGRAPH_TEST(${BACKEND_NAME}, pad_2channel_2image_asym)
read_vector<float>(result),
MIN_FLOAT_TOLERANCE_BITS));
}
+NGRAPH_TEST(${BACKEND_NAME}, pad_symmetric)
+{
+    Shape shape_a{2, 3};
+    auto A = make_shared<op::Parameter>(element::f32, shape_a);
+    Shape shape_b{};
+    auto B = make_shared<op::Parameter>(element::f32, shape_b);
+    Shape shape_r{4, 7};
+    CoordinateDiff padding_below{1, 2};
+    CoordinateDiff padding_above{1, 2};
+    auto f = make_shared<Function>(
+        make_shared<op::Pad>(A, B, padding_below, padding_above, op::PadMode::SYMMETRIC),
+        ParameterVector{A, B});
+
+    auto backend = runtime::Backend::create("${BACKEND_NAME}");
+
+    // Create some tensors for input/output
+    auto a = backend->create_tensor(element::f32, shape_a);
+    copy_data(a, test::NDArray<float, 2>({{1, 2, 3}, {4, 5, 6}}).get_vector());
+    auto b = backend->create_tensor(element::f32, shape_b);
+    copy_data(b, vector<float>{2112});
+    auto result = backend->create_tensor(element::f32, shape_r);
+
+    auto handle = backend->compile(f);
+    handle->call_with_validate({result}, {a, b});
+    EXPECT_TRUE(test::all_close_f((test::NDArray<float, 2>({{2, 1, 1, 2, 3, 3, 2},
+                                                            {2, 1, 1, 2, 3, 3, 2},
+                                                            {5, 4, 4, 5, 6, 6, 5},
+                                                            {5, 4, 4, 5, 6, 6, 5}})
+                                       .get_vector()),
+                                  read_vector<float>(result),
+                                  MIN_FLOAT_TOLERANCE_BITS));
+}
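Worth noting in the expected tensor: SYMMETRIC includes the edge element in the mirror, so the row {1, 2, 3} padded by two on each side becomes {2, 1, 1, 2, 3, 3, 2}, whereas REFLECT, which excludes the edge, would give {3, 2, 1, 2, 3, 2, 1}. The scalar pad value (2112 here) never appears in the output, since it is only consumed in CONSTANT mode.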
......@@ -14,6 +14,16 @@
// limitations under the License.
//*****************************************************************************
+// clang-format off
+#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#endif
+#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#endif
+// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
......
......@@ -14,6 +14,16 @@
// limitations under the License.
//*****************************************************************************
+// clang-format off
+#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#endif
+#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#endif
+// clang-format on
#include "gtest/gtest.h"
#include "ngraph/ngraph.hpp"
#include "util/all_close.hpp"
......