Unverified Commit 95594d93 authored by Jayaram Bobba's avatar Jayaram Bobba Committed by GitHub

Merge branch 'master' into jmenon/dex2

parents c829a9c7 3d66cba4
......@@ -68,16 +68,16 @@ public:
std::string generate_temporary_name(std::string prefix = "tempvar");
void block_begin(std::string block_prefix = "")
void block_begin()
{
*this << "{" << block_prefix << "\n";
*this << "{\n";
indent++;
}
void block_end(std::string block_suffix = "")
void block_end()
{
indent--;
*this << "}" << block_suffix << "\n";
*this << "}\n";
}
private:
......
......@@ -265,7 +265,6 @@ void codegen::StaticCompiler::add_header_search_path(const string& p)
vector<string> paths = split(p, ';');
for (const string& path : paths)
{
NGRAPH_INFO << path;
if (!contains(m_extra_search_path_list, path))
{
m_extra_search_path_list.push_back(path);
......
......@@ -268,8 +268,8 @@ size_t runtime::gpu::CUDAEmitter::build_pad_dynamic(const runtime::gpu::GPURunti
compiled_kernel = ctx->compiled_kernel_pool->set(kernel_name.str(), writer.get_code());
}
unsigned int rank = static_cast<unsigned int>(input_shape.size());
unsigned int nthreads = static_cast<unsigned int>(shape_size(input_shape));
uint32_t rank = static_cast<uint32_t>(input_shape.size());
uint32_t nthreads = static_cast<uint32_t>(shape_size(input_shape));
GPUShape pad_below(input_shape.size(), 0);
GPUShape pad_interior(input_shape.size(), 1);
......@@ -286,14 +286,14 @@ size_t runtime::gpu::CUDAEmitter::build_pad_dynamic(const runtime::gpu::GPURunti
// get an allocator for transient per kernel gpu memory
GPUAllocator allocator = this->m_primitive_emitter->get_memory_allocator();
size_t idx_input_strides = allocator.reserve_argspace(
input_strides.data(), input_strides.size() * sizeof(unsigned int));
size_t idx_output_strides = allocator.reserve_argspace(
output_strides.data(), output_strides.size() * sizeof(unsigned int));
size_t idx_input_strides =
allocator.reserve_argspace(input_strides.data(), input_strides.size() * sizeof(uint32_t));
size_t idx_output_strides =
allocator.reserve_argspace(output_strides.data(), output_strides.size() * sizeof(uint32_t));
size_t idx_padding_below =
allocator.reserve_argspace(pad_below.data(), pad_below.size() * sizeof(unsigned int));
allocator.reserve_argspace(pad_below.data(), pad_below.size() * sizeof(uint32_t));
size_t idx_padding_interior =
allocator.reserve_argspace(pad_interior.data(), pad_interior.size() * sizeof(unsigned int));
allocator.reserve_argspace(pad_interior.data(), pad_interior.size() * sizeof(uint32_t));
// create the launch primitive
std::unique_ptr<gpu::primitive> pad_dynamic(new gpu::primitive{[=](void** inputs,
......@@ -1015,7 +1015,7 @@ size_t runtime::gpu::CUDAEmitter::build_reduce_window(const GPURuntimeContext* c
args_list[6] = &nthreads;
CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
static_cast<unsigned int>(nthreads),
static_cast<uint32_t>(nthreads),
1,
1, // grid dim
1,
......
......@@ -285,19 +285,19 @@ void runtime::gpu::CudaKernelBuilder::get_pad_dynamic_op(
const std::array<std::string, 2>& data_types)
{
writer << "extern \"C\" __global__ void cuda_" << name << "(" << data_types[0] << "* in, "
<< data_types[1] << "* out, unsigned int* input_strides, unsigned int* output_strides, "
"unsigned int* padding_below, unsigned int* "
"padding_interior, unsigned int rank, unsigned int n)\n";
<< data_types[1] << "* out, uint32_t* input_strides, uint32_t* output_strides, "
"uint32_t* padding_below, uint32_t* "
"padding_interior, uint32_t rank, uint32_t n)\n";
writer.block_begin();
{
writer << "unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;\n";
writer << "uint32_t tid = blockIdx.x * blockDim.x + threadIdx.x;\n";
writer << "if (tid < n)\n";
writer.block_begin();
{
writer << "unsigned int output_idx = 0;\n";
writer << "unsigned int input_idx = tid;\n";
writer << "uint32_t output_idx = 0;\n";
writer << "uint32_t input_idx = tid;\n";
writer << "for(unsigned int i = 0; i < rank; i++)\n";
writer << "for(uint32_t i = 0; i < rank; i++)\n";
writer.block_begin();
{
writer << "output_idx += (input_idx / input_strides[i] * padding_interior[i] + "
......
......@@ -47,7 +47,7 @@ void runtime::gpu::emit_onehot(const std::string& name,
void* args_list[] = {&in, &out, &repeat_size, &repeat_times, &count};
CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
static_cast<unsigned int>(count),
static_cast<uint32_t>(count),
1,
1, // grid dim
1,
......@@ -84,7 +84,7 @@ void runtime::gpu::emit_reshape(const std::string& name,
void* args_list[] = {&in, &out, &input_strides, &trans_strides, &rank, &count};
CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
static_cast<unsigned int>(count),
static_cast<uint32_t>(count),
1,
1, // grid dim
1,
......@@ -124,7 +124,7 @@ void runtime::gpu::emit_slice(const std::string& name,
void* args_list[] = {
&in, &out, &input_strides, &lower_bounds, &slice_strides, &output_strides, &rank, &count};
CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
static_cast<unsigned int>(count),
static_cast<uint32_t>(count),
1,
1, // grid dim
1,
......@@ -161,7 +161,7 @@ void runtime::gpu::emit_reverse(const std::string& name,
void* args_list[] = {&in, &out, &input_shapes, &reverse_axes, &rank, &count};
CUDA_SAFE_CALL(cuLaunchKernel(*compiled_kernel.get(),
static_cast<unsigned int>(count),
static_cast<uint32_t>(count),
1,
1, // grid dim
1,
......
This diff is collapsed.
......@@ -77,7 +77,7 @@ namespace ngraph
auto& cuda_emitter =
external_function->get_primitive_emitter()->get_cuda_emitter();
writer.block_begin(" // " + node->get_name());
writer.block_begin();
{
std::vector<std::string> dtypes;
for (auto& arg : args)
......
......@@ -83,6 +83,13 @@ namespace ngraph
const Node&,
const std::unordered_map<descriptor::TensorView*, std::vector<size_t>>&);
void release_function() { m_function = nullptr; }
std::string emit_op_as_function(const Node& node, const std::string& function_name);
std::string strip_comments(const std::string& s) const;
bool is_functionally_identical(
const Node& n1,
const Node& n2,
const std::unordered_map<const Node*, std::string>& node_cache) const;
std::unique_ptr<codegen::Compiler> m_compiler;
std::unique_ptr<codegen::ExecutionEngine> m_execution_engine;
bool m_emit_timing;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment