Unverified Commit c36e4980 authored by Scott Cyphers, committed by GitHub

Merge pull request #3113 from NervanaSystems/rearhart/plaidml-tolerances

Adjust PlaidML test tolerances
parents f5384c9e 7f02a5bc
...@@ -4,6 +4,8 @@ computation_reuse
 dot_matrix_vector_int64
 generate_mask
 generate_mask2
+# Gives inaccurate value
+sigmoid_bprop_n1c1h4
 # custom_mem is not implemented on GPU
 tensorview_custom_mem
 # integer is not supported by cuDNN on backward pooling
......
...@@ -261,6 +261,8 @@ endif()
 if (NGRAPH_PLAIDML_ENABLE)
     target_link_libraries(unit-test PRIVATE plaidml_backend)
+    # Some PlaidML devices aren't so precise, so we increase the allowable tolerance.
+    target_compile_definitions(unit-test PRIVATE "PlaidML_FLOAT_TOLERANCE_BITS=12")
 endif()
 if (NGRAPH_TBB_ENABLE)
......
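For context, a floating-point tolerance given in bits is conventionally an ULP-style bound: expected and actual values may differ by up to that many low-order representable steps, rather than by a fixed epsilon. The sketch below only illustrates that idea under this assumption; it is not ngraph's all_close_f implementation, and the helper names ordered_bits and close_within_bits are hypothetical.

#include <cstdint>
#include <cstring>

// Map an IEEE-754 float onto a monotonically ordered integer so the gap between
// two values can be counted in representable steps (ULPs).
static int64_t ordered_bits(float f)
{
    int32_t i;
    std::memcpy(&i, &f, sizeof(i));
    return (i < 0) ? -static_cast<int64_t>(i & 0x7FFFFFFF) : static_cast<int64_t>(i);
}

// Hypothetical helper: accept the pair if it differs by at most 2^tolerance_bits ULPs.
static bool close_within_bits(float expected, float actual, int tolerance_bits)
{
    int64_t distance = ordered_bits(expected) - ordered_bits(actual);
    if (distance < 0) distance = -distance;
    return distance <= (int64_t{1} << tolerance_bits);
}

Under that reading, 12 tolerance bits lets single-precision results disagree in roughly the lower half of the 24-bit significand, which is the slack the less precise PlaidML devices need.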
...@@ -21,6 +21,16 @@
 #include <random>
 #include <string>
+// clang-format off
+#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#endif
+#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#endif
+// clang-format on
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "util/all_close.hpp"
......
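These test sources are templates that the build instantiates once per backend, substituting ${BACKEND_NAME} (assumption: via the project's existing per-backend test generation step). For the PlaidML instantiation the guard therefore expands roughly as sketched below, picking up the "PlaidML_FLOAT_TOLERANCE_BITS=12" definition added to the unit-test target above; backends that define no such macro leave the default tolerances untouched.

// Hypothetical expansion for BACKEND_NAME == PlaidML:
#ifdef PlaidML_FLOAT_TOLERANCE_BITS
#define DEFAULT_FLOAT_TOLERANCE_BITS PlaidML_FLOAT_TOLERANCE_BITS // resolves to 12
#endif
#ifdef PlaidML_DOUBLE_TOLERANCE_BITS // not defined by this change, so no double override
#define DEFAULT_DOUBLE_TOLERANCE_BITS PlaidML_DOUBLE_TOLERANCE_BITS
#endif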
...@@ -22,6 +22,16 @@
 #include <string>
 #include "gtest/gtest.h"
+// clang-format off
+#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#endif
+#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#endif
+// clang-format on
 #include "ngraph/autodiff/adjoints.hpp"
 #include "ngraph/graph_util.hpp"
 #include "ngraph/log.hpp"
...@@ -5184,12 +5194,17 @@ NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h2w2)
 shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, input->get_shape());
 shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, input->get_shape());
-vector<float> dataA{1.0f, 4.0f, 1.0f, 4.0f};
+float x1 = 1.0f;
+float x2 = 4.0f;
+float sigma1 = 1.0f / (1.0f + std::exp(-x1));
+float sigma2 = 1.0f / (1.0f + std::exp(-x2));
+vector<float> dataA{x1, x2, x1, x2};
 copy_data(a, dataA);
 auto handle = backend->compile(func);
 handle->call_with_validate({result}, {a});
-vector<float> expected{0.73105858f, 0.98201379f, 0.73105858f, 0.98201379f};
+vector<float> expected{sigma1, sigma2, sigma1, sigma2};
 EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected));
 }
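For reference, the computed inputs reproduce the literals they replace:

sigma1 = 1 / (1 + exp(-1.0)) ≈ 0.73105858
sigma2 = 1 / (1 + exp(-4.0)) ≈ 0.98201379

so expected{sigma1, sigma2, sigma1, sigma2} equals the previous hard-coded {0.73105858f, 0.98201379f, ...} to the printed precision.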
...@@ -5204,12 +5219,17 @@ NGRAPH_TEST(${BACKEND_NAME}, sigmoid_n1c1h4)
 shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, input->get_shape());
 shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, input->get_shape());
-vector<float> dataA{1.0f, 4.0f, 1.0f, 4.0f};
+float x1 = 1.0f;
+float x2 = 4.0f;
+float sigma1 = 1.0f / (1.0f + std::exp(-x1));
+float sigma2 = 1.0f / (1.0f + std::exp(-x2));
+vector<float> dataA{x1, x2, x1, x2};
 copy_data(a, dataA);
 auto handle = backend->compile(func);
 handle->call_with_validate({result}, {a});
-vector<float> expected{0.73105858f, 0.98201379f, 0.73105858f, 0.98201379f};
+vector<float> expected{sigma1, sigma2, sigma1, sigma2};
 EXPECT_TRUE(test::all_close_f(read_vector<float>(result), expected));
 }
...@@ -5225,16 +5245,24 @@ NGRAPH_TEST(${BACKEND_NAME}, sigmoid_bprop_n1c1h4)
 shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, delta->get_shape());
 shared_ptr<runtime::Tensor> result = backend->create_tensor(element::f32, input->get_shape());
-vector<float> dataA{1.0f, 4.0f, 1.0f, 4.0f};
-vector<float> dataB{1.0f, 1.0f, 1.0f, 1.0f};
+float x1 = 1.0f;
+float x2 = 4.0f;
+float dt = 1.0f;
+float sigma1 = 1.0f / (1.0f + std::exp(-x1));
+float sigma2 = 1.0f / (1.0f + std::exp(-x2));
+float bprop1 = sigma1 * (1 - sigma1) * dt;
+float bprop2 = sigma2 * (1 - sigma2) * dt;
+vector<float> dataA{x1, x2, x1, x2};
+vector<float> dataB{dt, dt, dt, dt};
 copy_data(a, dataA);
 copy_data(b, dataB);
 auto handle = backend->compile(func);
 handle->call_with_validate({result}, {a, b});
-vector<float> expected{0.196612f, 0.0176627f, 0.196612f, 0.0176627f};
-EXPECT_TRUE(test::all_close(expected, read_vector<float>(result)));
+vector<float> expected{bprop1, bprop2, bprop1, bprop2};
+EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(result)));
 }
 NGRAPH_TEST(${BACKEND_NAME}, relu_2Dfprop)
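The backprop expectations follow from the sigmoid derivative, dσ/dx = σ(x) · (1 − σ(x)), applied to the delta of 1.0:

bprop1 = 0.73105858 · (1 − 0.73105858) · 1.0 ≈ 0.196612
bprop2 = 0.98201379 · (1 − 0.98201379) · 1.0 ≈ 0.0176627

which matches the removed literals {0.196612f, 0.0176627f, ...}. The switch from all_close to all_close_f puts this check on the same bit-tolerance comparison as the forward sigmoid tests, so the PlaidML tolerance override can apply to it as well.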
...@@ -5540,7 +5568,7 @@ NGRAPH_TEST(${BACKEND_NAME}, softmax_underflow)
 handle->call_with_validate({result}, {a});
 vector<float> expected{
     expf(low) / d0, expf(1) / d1, expf(2) / d2, expf(3) / d0, expf(4) / d1, expf(5) / d2};
-EXPECT_TRUE(test::all_close(expected, read_vector<float>(result)));
+EXPECT_TRUE(test::all_close_f(expected, read_vector<float>(result)));
 }
 NGRAPH_TEST(${BACKEND_NAME}, softmax_overflow)
......
...@@ -21,6 +21,16 @@
 #include <random>
 #include <string>
+// clang-format off
+#ifdef ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#define DEFAULT_FLOAT_TOLERANCE_BITS ${BACKEND_NAME}_FLOAT_TOLERANCE_BITS
+#endif
+#ifdef ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#define DEFAULT_DOUBLE_TOLERANCE_BITS ${BACKEND_NAME}_DOUBLE_TOLERANCE_BITS
+#endif
+// clang-format on
 #include "gtest/gtest.h"
 #include "ngraph/ngraph.hpp"
 #include "util/all_close.hpp"
......