Unverified commit 7042165a authored by Fenglei, committed by GitHub

Merge branch 'master' into tfl/gpu_broadcast

parents c82cb3f7 5d973a6e
@@ -263,7 +263,7 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND MKLDNN_INCLUDE_DIR)
         BYPRODUCTS
     )
     add_dependencies(ngraph header_resource ext_json)
-    include_directories(${CMAKE_BINARY_DIR})
+    include_directories(SYSTEM ${CMAKE_BINARY_DIR})
     include_directories(SYSTEM ${JSON_INCLUDE_DIR})
 endif()
@@ -150,9 +150,12 @@ string ngraph::file_util::make_temp_directory(const string& path)
     string tmp_template = file_util::path_join(fname, "ngraph_XXXXXX");
     char* tmpname = strdup(tmp_template.c_str());
-    mkdtemp(tmpname);
-    string rc = tmpname;
+    string rc;
+    if (mkdtemp(tmpname))
+    {
+        rc = tmpname;
+    }
     free(tmpname);
     return rc;
 }
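Note on the hunk above: mkdtemp() rewrites the "XXXXXX" template and returns the buffer only on success; on failure it returns NULL (with errno set), which the old code ignored before copying tmpname into rc. Below is a minimal, self-contained sketch of the same pattern outside nGraph; the function name and the added exception are illustrative only (the patched nGraph routine simply returns an empty string on failure).

// Hypothetical standalone example, not part of the nGraph sources.
#include <cstdlib>   // free, mkdtemp (POSIX, via <stdlib.h>)
#include <cstring>   // strdup
#include <stdexcept>
#include <string>

std::string make_temp_directory_checked(const std::string& parent)
{
    std::string tmp_template = parent + "/ngraph_XXXXXX";
    char* tmpname = strdup(tmp_template.c_str());           // mkdtemp edits its argument in place
    std::string rc;
    if (tmpname != nullptr && mkdtemp(tmpname) != nullptr)  // nullptr means the directory was not created
    {
        rc = tmpname;
    }
    free(tmpname);
    if (rc.empty())
    {
        // Illustrative addition: surface the failure instead of returning "".
        throw std::runtime_error("could not create a temporary directory under " + parent);
    }
    return rc;
}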
@@ -76,5 +76,4 @@ namespace ngraph
     inline bool is_scalar(const Shape& shape) { return 0 == shape.size(); }
     inline bool is_vector(const Shape& shape) { return 1 == shape.size(); }
-    Shape project_shape(const Shape& shape, const AxisSet& deleted_axes);
 }
@@ -28,62 +28,63 @@ static int tensor_volume(const mkldnn::memory::dims& t)
     return x;
 }
 
-TEST(mkldnn, engine)
+void test()
 {
     using namespace mkldnn;
-#pragma GCC diagnostic ignored "-Wgnu-statement-expression"
-    EXPECT_NO_THROW(({
-        auto cpu_engine = engine(engine::cpu, 0);
-
-        const int mb = 2;
-        const int groups = 2;
-        memory::dims input_tz = {mb, 256, 13, 13};
-        memory::dims weights_tz = {groups, 384 / groups, 256 / groups, 3, 3};
-        memory::dims bias_tz = {384};
-        memory::dims strides = {1, 1};
-        memory::dims padding = {0, 0};
-        memory::dims output_tz = {
-            mb,
-            384,
-            (input_tz[2] + 2 * padding[0] - weights_tz[3]) / strides[0] + 1,
-            (input_tz[3] + 2 * padding[1] - weights_tz[4]) / strides[1] + 1,
-        };
-
-        std::vector<float> input(tensor_volume(input_tz), .0f);
-        std::vector<float> weights(tensor_volume(weights_tz), .0f);
-        std::vector<float> bias(tensor_volume(bias_tz), .0f);
-        std::vector<float> output(tensor_volume(output_tz), .0f);
-
-        auto c3_src_desc = memory::desc({input_tz}, memory::data_type::f32, memory::format::nchw);
-        auto c3_weights_desc =
-            memory::desc({weights_tz}, memory::data_type::f32, memory::format::goihw);
-        auto c3_bias_desc = memory::desc({bias_tz}, memory::data_type::f32, memory::format::x);
-        auto c3_dst_desc = memory::desc({output_tz}, memory::data_type::f32, memory::format::nchw);
-
-        auto c3_src = memory({c3_src_desc, cpu_engine}, input.data());
-        auto c3_weights = memory({c3_weights_desc, cpu_engine}, weights.data());
-        auto c3_bias = memory({c3_bias_desc, cpu_engine}, bias.data());
-        auto c3_dst = memory({c3_dst_desc, cpu_engine}, output.data());
-
-        auto c3 = convolution_forward(convolution_forward::primitive_desc(
-                                          convolution_forward::desc(prop_kind::forward,
-                                                                    algorithm::convolution_direct,
-                                                                    c3_src_desc,
-                                                                    c3_weights_desc,
-                                                                    c3_bias_desc,
-                                                                    c3_dst_desc,
-                                                                    strides,
-                                                                    padding,
-                                                                    padding,
-                                                                    padding_kind::zero),
-                                          cpu_engine),
-                                      c3_src,
-                                      c3_weights,
-                                      c3_bias,
-                                      c3_dst);
-
-        stream(stream::kind::eager).submit({c3}).wait();
-    }));
+    auto cpu_engine = engine(engine::cpu, 0);
+
+    const int mb = 2;
+    const int groups = 2;
+    memory::dims input_tz = {mb, 256, 13, 13};
+    memory::dims weights_tz = {groups, 384 / groups, 256 / groups, 3, 3};
+    memory::dims bias_tz = {384};
+    memory::dims strides = {1, 1};
+    memory::dims padding = {0, 0};
+    memory::dims output_tz = {
+        mb,
+        384,
+        (input_tz[2] + 2 * padding[0] - weights_tz[3]) / strides[0] + 1,
+        (input_tz[3] + 2 * padding[1] - weights_tz[4]) / strides[1] + 1,
+    };
+
+    std::vector<float> input(tensor_volume(input_tz), .0f);
+    std::vector<float> weights(tensor_volume(weights_tz), .0f);
+    std::vector<float> bias(tensor_volume(bias_tz), .0f);
+    std::vector<float> output(tensor_volume(output_tz), .0f);
+
+    auto c3_src_desc = memory::desc({input_tz}, memory::data_type::f32, memory::format::nchw);
+    auto c3_weights_desc =
+        memory::desc({weights_tz}, memory::data_type::f32, memory::format::goihw);
+    auto c3_bias_desc = memory::desc({bias_tz}, memory::data_type::f32, memory::format::x);
+    auto c3_dst_desc = memory::desc({output_tz}, memory::data_type::f32, memory::format::nchw);
+
+    auto c3_src = memory({c3_src_desc, cpu_engine}, input.data());
+    auto c3_weights = memory({c3_weights_desc, cpu_engine}, weights.data());
+    auto c3_bias = memory({c3_bias_desc, cpu_engine}, bias.data());
+    auto c3_dst = memory({c3_dst_desc, cpu_engine}, output.data());
+
+    auto c3 = convolution_forward(
+        convolution_forward::primitive_desc(convolution_forward::desc(prop_kind::forward,
+                                                                      algorithm::convolution_direct,
+                                                                      c3_src_desc,
+                                                                      c3_weights_desc,
+                                                                      c3_bias_desc,
+                                                                      c3_dst_desc,
+                                                                      strides,
+                                                                      padding,
+                                                                      padding,
+                                                                      padding_kind::zero),
+                                            cpu_engine),
+        c3_src,
+        c3_weights,
+        c3_bias,
+        c3_dst);
+
+    stream(stream::kind::eager).submit({c3}).wait();
+}
+
+TEST(mkldnn, engine)
+{
+    EXPECT_NO_THROW(test());
 }
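Note on the hunk above: EXPECT_NO_THROW takes a single expression, so the old test wrapped its multi-statement body in the GNU statement-expression extension ({ ... }) and had to silence -Wgnu-statement-expression with a pragma. Moving the body into an ordinary function makes the macro argument a plain function call. A minimal sketch of that pattern, assuming GoogleTest; the helper and test names here are made up and the mkldnn setup is elided.

// Illustrative only; the real test builds and runs an mkldnn convolution in the helper.
#include <gtest/gtest.h>

namespace
{
    // All the work lives in an ordinary function, so no compiler extension is
    // needed to hand a multi-statement body to EXPECT_NO_THROW.
    void run_engine_smoke_test()
    {
        // ... set up descriptors, create the primitive, submit the stream ...
    }
}

TEST(example, wraps_helper_in_expect_no_throw)
{
    // The macro argument is now a single expression: a function call.
    EXPECT_NO_THROW(run_engine_smoke_test());
}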