Unverified Commit 3cef466f authored by Adam Procter, committed by GitHub

Merge branch 'master' into aprocter/partial-shape

parents 01b94186 ee712ae8
......@@ -33,8 +33,8 @@ set(SRC
gpu_memory_manager.cpp
gpu_primitive_emitter.cpp
gpu_runtime_context.cpp
gpu_tensor_view_wrapper.cpp
gpu_tensor_view.cpp
gpu_tensor_wrapper.cpp
gpu_tensor.cpp
gpu_util.cpp
type_info.cpp
pass/gpu_layout.cpp
......
......@@ -23,7 +23,7 @@
#include "ngraph/runtime/gpu/gpu_backend.hpp"
#include "ngraph/runtime/gpu/gpu_external_function.hpp"
#include "ngraph/runtime/gpu/gpu_primitive_emitter.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_view.hpp"
#include "ngraph/runtime/gpu/gpu_tensor.hpp"
#include "ngraph/util.hpp"
using namespace ngraph;
......@@ -101,13 +101,13 @@ runtime::gpu::GPU_Backend::BackendContext::~BackendContext()
shared_ptr<runtime::Tensor>
runtime::gpu::GPU_Backend::create_tensor(const element::Type& element_type, const Shape& shape)
{
return make_shared<runtime::gpu::GPU_TensorView>(element_type, shape);
return make_shared<runtime::gpu::GPUTensor>(element_type, shape);
}
shared_ptr<runtime::Tensor> runtime::gpu::GPU_Backend::create_tensor(
const element::Type& element_type, const Shape& shape, void* memory_pointer)
{
return make_shared<runtime::gpu::GPU_TensorView>(element_type, shape, memory_pointer);
return make_shared<runtime::gpu::GPUTensor>(element_type, shape, memory_pointer);
}
bool runtime::gpu::GPU_Backend::compile(shared_ptr<Function> func)
......@@ -130,8 +130,8 @@ void runtime::gpu::GPU_Backend::initialize_io(void** target,
{
for (size_t i = 0; i < source.size(); i++)
{
shared_ptr<runtime::gpu::GPU_TensorView> tv =
dynamic_pointer_cast<runtime::gpu::GPU_TensorView>(source[i]);
shared_ptr<runtime::gpu::GPUTensor> tv =
dynamic_pointer_cast<runtime::gpu::GPUTensor>(source[i]);
if (tv)
{
target[i] = tv->m_allocated_buffer_pool;
......
......@@ -1579,7 +1579,7 @@ void runtime::gpu::GPU_Emitter::emit_TopK(EMIT_ARGS)
throw unsupported_op("Unsupported op '" + node->description() + "'");
}
string runtime::gpu::GPU_Emitter::node_names(const vector<GPU_TensorViewWrapper>& args,
string runtime::gpu::GPU_Emitter::node_names(const vector<GPUTensorWrapper>& args,
initializer_list<int> arg_indexes)
{
vector<string> names;
......
......@@ -22,7 +22,7 @@
#include "ngraph/codegen/code_writer.hpp"
#include "ngraph/node.hpp"
#include "ngraph/runtime/gpu/gpu_external_function.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_view_wrapper.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_wrapper.hpp"
namespace ngraph
{
......@@ -81,7 +81,7 @@ namespace ngraph
/// \param arg_indexes a list of indexes into args for which args to include in
/// the output list, so {1, 2} will include args 1 and 2 and skip 0.
/// \return a string containing "arg0_name, arg1_name, etc."
static std::string node_names(const std::vector<GPU_TensorViewWrapper>& args,
static std::string node_names(const std::vector<GPUTensorWrapper>& args,
std::initializer_list<int> arg_indexes = {});
};
......
......@@ -110,7 +110,7 @@
#include "ngraph/runtime/gpu/gpu_external_function.hpp"
#include "ngraph/runtime/gpu/gpu_kernel_emitters.hpp"
#include "ngraph/runtime/gpu/gpu_runtime_context.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_view_wrapper.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_wrapper.hpp"
#include "ngraph/runtime/gpu/pass/gpu_layout.hpp"
#include "ngraph/runtime/gpu/pass/tensor_memory_reservation.hpp"
......@@ -167,8 +167,8 @@ static GPUStaticInitializers s_static_initializers;
void runtime::gpu::GPU_ExternalFunction::emit_op(GPU_ExternalFunction* external_function,
codegen::CodeWriter& writer,
const ngraph::Node* node,
const std::vector<GPU_TensorViewWrapper>& args,
const std::vector<GPU_TensorViewWrapper>& out)
const std::vector<GPUTensorWrapper>& args,
const std::vector<GPUTensorWrapper>& out)
{
auto emit_function = GPU_Emitter::get_emit_function(*node);
emit_function(external_function, writer, node, args, out);
......@@ -468,21 +468,21 @@ void runtime::gpu::GPU_ExternalFunction::emit_functions()
for (shared_ptr<Node> node : m_function_ordered_ops.at(current_function))
{
vector<GPU_TensorViewWrapper> in;
vector<GPUTensorWrapper> in;
vector<string> node_input_names;
vector<string> node_output_names;
for (const descriptor::Input& input : node->get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
in.push_back(GPU_TensorViewWrapper(tv, m_variable_name_map[tv->get_name()]));
in.push_back(GPUTensorWrapper(tv, m_variable_name_map[tv->get_name()]));
node_input_names.emplace_back(tv->get_name());
}
vector<GPU_TensorViewWrapper> out;
vector<GPUTensorWrapper> out;
for (const descriptor::Output& output : node->get_outputs())
{
shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
out.push_back(GPU_TensorViewWrapper(tv, m_variable_name_map[tv->get_name()]));
out.push_back(GPUTensorWrapper(tv, m_variable_name_map[tv->get_name()]));
node_output_names.emplace_back(tv->get_name());
}
......@@ -509,11 +509,11 @@ void runtime::gpu::GPU_ExternalFunction::emit_functions()
string func_name =
ngraph::pass::CommonFunctionCollection::create_function_name(*it->second);
vector<string> names;
for (const GPU_TensorViewWrapper& tv : in)
for (const GPUTensorWrapper& tv : in)
{
names.push_back(tv.get_name());
}
for (const GPU_TensorViewWrapper& tv : out)
for (const GPUTensorWrapper& tv : out)
{
names.push_back(tv.get_name());
}
......@@ -642,14 +642,14 @@ string runtime::gpu::GPU_ExternalFunction::emit_op_as_function(const Node& node,
codegen::CodeWriter writer;
writer << "static void " << function_name << "(";
writer.indent++;
vector<GPU_TensorViewWrapper> in;
vector<GPUTensorWrapper> in;
size_t arg_index = 0;
set<string> arg_names;
for (const descriptor::Input& input : node.get_inputs())
{
const descriptor::Output& output = input.get_output();
shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
GPU_TensorViewWrapper tvw{tv, "_arg" + to_string(arg_index)};
GPUTensorWrapper tvw{tv, "_arg" + to_string(arg_index)};
if (!contains(arg_names, tvw.get_name()))
{
arg_names.insert(tvw.get_name());
......@@ -662,11 +662,11 @@ string runtime::gpu::GPU_ExternalFunction::emit_op_as_function(const Node& node,
}
in.push_back(tvw);
}
vector<GPU_TensorViewWrapper> out;
vector<GPUTensorWrapper> out;
for (const descriptor::Output& output : node.get_outputs())
{
shared_ptr<descriptor::Tensor> tv = output.get_tensor_ptr();
GPU_TensorViewWrapper tvw{tv, "_out" + to_string(arg_index)};
GPUTensorWrapper tvw{tv, "_out" + to_string(arg_index)};
if (arg_index++ > 0)
{
writer << ",";
......
......@@ -33,12 +33,12 @@
#include "ngraph/pass/memory_layout.hpp"
#include "ngraph/runtime/gpu/gpu_backend.hpp"
#include "ngraph/runtime/gpu/gpu_primitive_emitter.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_view_wrapper.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_wrapper.hpp"
#define EMIT_ARGS \
runtime::gpu::GPU_ExternalFunction *external_function, codegen::CodeWriter &writer, \
const Node *node, const std::vector<runtime::gpu::GPU_TensorViewWrapper> &args, \
const std::vector<runtime::gpu::GPU_TensorViewWrapper> &out
const Node *node, const std::vector<runtime::gpu::GPUTensorWrapper> &args, \
const std::vector<runtime::gpu::GPUTensorWrapper> &out
namespace ngraph
{
......
......@@ -24,7 +24,7 @@
using namespace ngraph;
void runtime::gpu::kernel::emit_memset(codegen::CodeWriter& writer,
const GPU_TensorViewWrapper& dst,
const GPUTensorWrapper& dst,
int value,
size_t buffer_size)
{
......@@ -37,8 +37,8 @@ void runtime::gpu::kernel::emit_memset(codegen::CodeWriter& writer,
}
void runtime::gpu::kernel::emit_memcpyDtD(codegen::CodeWriter& writer,
const GPU_TensorViewWrapper& dst,
const GPU_TensorViewWrapper& src,
const GPUTensorWrapper& dst,
const GPUTensorWrapper& src,
size_t buffer_size)
{
if (buffer_size == 0)
......@@ -192,8 +192,8 @@ void runtime::gpu::kernel::emit_cudnnTensorNdDescriptor(codegen::CodeWriter& wri
}
void runtime::gpu::kernel::emit_cudnnReduceTensor(codegen::CodeWriter& writer,
const GPU_TensorViewWrapper& in,
const GPU_TensorViewWrapper& out,
const GPUTensorWrapper& in,
const GPUTensorWrapper& out,
const std::string& reduce_op,
const std::string& data_type,
const std::string& nan_prop,
......
......@@ -19,7 +19,7 @@
#include "ngraph/codegen/code_writer.hpp"
#include "ngraph/coordinate_transform.hpp"
#include "ngraph/node.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_view_wrapper.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_wrapper.hpp"
namespace ngraph
{
......@@ -30,13 +30,13 @@ namespace ngraph
namespace kernel
{
void emit_memset(codegen::CodeWriter& writer,
const GPU_TensorViewWrapper& dst,
const GPUTensorWrapper& dst,
int value,
size_t buffer_size = 0);
void emit_memcpyDtD(codegen::CodeWriter& writer,
const GPU_TensorViewWrapper& dst,
const GPU_TensorViewWrapper& src,
const GPUTensorWrapper& dst,
const GPUTensorWrapper& src,
size_t buffer_size = 0);
void emit_cudnnConvolutionDescriptor(codegen::CodeWriter& writer,
......@@ -73,8 +73,8 @@ namespace ngraph
const std::vector<size_t>& strides);
void emit_cudnnReduceTensor(codegen::CodeWriter& writer,
const GPU_TensorViewWrapper& in,
const GPU_TensorViewWrapper& out,
const GPUTensorWrapper& in,
const GPUTensorWrapper& out,
const std::string& reduce_op,
const std::string& data_type,
const std::string& nan_prop,
......
......@@ -21,15 +21,15 @@
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/runtime/gpu/cuda_error_check.hpp"
#include "ngraph/runtime/gpu/gpu_backend.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_view.hpp"
#include "ngraph/runtime/gpu/gpu_tensor.hpp"
#include "ngraph/runtime/gpu/gpu_util.hpp"
using namespace ngraph;
using namespace std;
runtime::gpu::GPU_TensorView::GPU_TensorView(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
runtime::gpu::GPUTensor::GPUTensor(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer)
: runtime::Tensor(std::make_shared<ngraph::descriptor::Tensor>(element_type, shape, "external"))
, m_custom_memory(false)
{
......@@ -48,13 +48,12 @@ runtime::gpu::GPU_TensorView::GPU_TensorView(const ngraph::element::Type& elemen
}
}
runtime::gpu::GPU_TensorView::GPU_TensorView(const ngraph::element::Type& element_type,
const Shape& shape)
: GPU_TensorView(element_type, shape, nullptr)
runtime::gpu::GPUTensor::GPUTensor(const ngraph::element::Type& element_type, const Shape& shape)
: GPUTensor(element_type, shape, nullptr)
{
}
runtime::gpu::GPU_TensorView::~GPU_TensorView()
runtime::gpu::GPUTensor::~GPUTensor()
{
if (!m_custom_memory && (m_allocated_buffer_pool != nullptr))
{
......@@ -62,12 +61,12 @@ runtime::gpu::GPU_TensorView::~GPU_TensorView()
}
}
void runtime::gpu::GPU_TensorView::write(const void* source, size_t tensor_offset, size_t n)
void runtime::gpu::GPUTensor::write(const void* source, size_t tensor_offset, size_t n)
{
CUDA_RT_SAFE_CALL(cudaMemcpy(m_allocated_buffer_pool, source, n, cudaMemcpyHostToDevice));
}
void runtime::gpu::GPU_TensorView::read(void* target, size_t tensor_offset, size_t n) const
void runtime::gpu::GPUTensor::read(void* target, size_t tensor_offset, size_t n) const
{
CUDA_RT_SAFE_CALL(cudaMemcpy(target, m_allocated_buffer_pool, n, cudaMemcpyDeviceToHost));
}
......@@ -28,19 +28,17 @@ namespace ngraph
{
namespace gpu
{
class GPU_TensorView;
class GPUTensor;
}
}
}
class ngraph::runtime::gpu::GPU_TensorView : public ngraph::runtime::Tensor
class ngraph::runtime::gpu::GPUTensor : public ngraph::runtime::Tensor
{
public:
GPU_TensorView(const ngraph::element::Type& element_type, const Shape& shape);
GPU_TensorView(const ngraph::element::Type& element_type,
const Shape& shape,
void* memory_pointer);
virtual ~GPU_TensorView();
GPUTensor(const ngraph::element::Type& element_type, const Shape& shape);
GPUTensor(const ngraph::element::Type& element_type, const Shape& shape, void* memory_pointer);
virtual ~GPUTensor();
/// \brief Write bytes directly into the tensor
/// \param p Pointer to source of data
......
......@@ -14,41 +14,41 @@
// limitations under the License.
//*****************************************************************************
#include "ngraph/runtime/gpu/gpu_tensor_view_wrapper.hpp"
#include "ngraph/runtime/gpu/gpu_tensor_wrapper.hpp"
#include "ngraph/descriptor/layout/tensor_layout.hpp"
#include "ngraph/descriptor/tensor.hpp"
using namespace std;
using namespace ngraph;
runtime::gpu::GPU_TensorViewWrapper::GPU_TensorViewWrapper(const shared_ptr<descriptor::Tensor>& tv,
const string& alias)
runtime::gpu::GPUTensorWrapper::GPUTensorWrapper(const shared_ptr<descriptor::Tensor>& tv,
const string& alias)
: m_tensor(tv)
, m_alias(alias)
{
}
size_t runtime::gpu::GPU_TensorViewWrapper::get_size() const
size_t runtime::gpu::GPUTensorWrapper::get_size() const
{
return m_tensor->get_tensor_layout()->get_size();
}
const Shape& runtime::gpu::GPU_TensorViewWrapper::get_shape() const
const Shape& runtime::gpu::GPUTensorWrapper::get_shape() const
{
return m_tensor->get_tensor_layout()->get_shape();
}
Strides runtime::gpu::GPU_TensorViewWrapper::get_strides() const
Strides runtime::gpu::GPUTensorWrapper::get_strides() const
{
return m_tensor->get_tensor_layout()->get_strides();
}
const element::Type& runtime::gpu::GPU_TensorViewWrapper::get_element_type() const
const element::Type& runtime::gpu::GPUTensorWrapper::get_element_type() const
{
return m_tensor->get_tensor_layout()->get_element_type();
}
const std::string& runtime::gpu::GPU_TensorViewWrapper::get_name() const
const std::string& runtime::gpu::GPUTensorWrapper::get_name() const
{
if (m_alias.empty())
{
......@@ -60,7 +60,7 @@ const std::string& runtime::gpu::GPU_TensorViewWrapper::get_name() const
}
}
const std::string& runtime::gpu::GPU_TensorViewWrapper::get_type() const
const std::string& runtime::gpu::GPUTensorWrapper::get_type() const
{
return get_element_type().c_type_string();
}
......@@ -27,16 +27,15 @@ namespace ngraph
{
namespace gpu
{
class GPU_TensorViewWrapper;
class GPUTensorWrapper;
}
}
}
class ngraph::runtime::gpu::GPU_TensorViewWrapper
class ngraph::runtime::gpu::GPUTensorWrapper
{
public:
GPU_TensorViewWrapper(const std::shared_ptr<descriptor::Tensor>&,
const std::string& alias = "");
GPUTensorWrapper(const std::shared_ptr<descriptor::Tensor>&, const std::string& alias = "");
size_t get_size() const;
const Shape& get_shape() const;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment