Commit c3e6cfcf authored by Fenglei, committed by Robert Kimball

nvgpu backend: check that the pointer passed to create_tensor is a device pointer (#2584)

* add error msg when the pointer passed to GPUTensor is not a device pointer

* add check to create_tensor

* fix bug

* mv from cpu to gpu

* style

* fix if else and nullptr
parent 919d9ec2
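
Below is a minimal usage sketch, not part of the commit, showing what the new check expects from callers: a buffer obtained from cudaMalloc passes, while a host pointer now triggers the ngraph_error added in this change. The runtime::Backend::create("GPU") entry point, element::f32, and the backend.hpp include path are assumptions based on the usual ngraph API.

#include <cuda_runtime.h>
#include "ngraph/runtime/backend.hpp"

using namespace ngraph;

void create_tensor_example()
{
    auto backend = runtime::Backend::create("GPU");
    Shape shape{2, 3};

    // A device allocation from cudaMalloc satisfies the new is_device_pointer() check.
    void* device_ptr = nullptr;
    cudaMalloc(&device_ptr, shape_size(shape) * sizeof(float));
    auto tensor = backend->create_tensor(element::f32, shape, device_ptr);

    // A host buffer would now throw:
    //   "The pointer passed to create_tensor is not a device pointer."
    // std::vector<float> host_buf(shape_size(shape));
    // backend->create_tensor(element::f32, shape, host_buf.data());

    cudaFree(device_ptr);
}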
@@ -26,6 +26,7 @@
#include "ngraph/runtime/gpu/gpu_internal_function.hpp"
#include "ngraph/runtime/gpu/gpu_primitive_emitter.hpp"
#include "ngraph/runtime/gpu/gpu_tensor.hpp"
#include "ngraph/runtime/gpu/gpu_util.hpp"
#include "ngraph/runtime/hybrid/hybrid_backend.hpp"
#include "ngraph/util.hpp"
@@ -114,6 +115,10 @@ shared_ptr<runtime::Tensor>
shared_ptr<runtime::Tensor> runtime::gpu::GPU_Backend::create_tensor(
    const element::Type& element_type, const Shape& shape, void* memory_pointer)
{
    if (memory_pointer != nullptr && !is_device_pointer(memory_pointer))
    {
        throw ngraph_error("The pointer passed to create_tensor is not a device pointer.");
    }
    return make_shared<runtime::gpu::GPUTensor>(element_type, shape, memory_pointer, this);
}
@@ -40,10 +40,17 @@ runtime::gpu::GPUTensor::GPUTensor(const ngraph::element::Type& element_type,
    m_buffer_size = shape_size(shape) * element_type.size();
    if (memory_pointer != nullptr)
    {
        if (is_device_pointer(memory_pointer))
        {
            m_allocated_buffer_pool = memory_pointer;
            m_custom_memory = true;
        }
        else
        {
            throw ngraph_error("The pointer passed to GPUTensor is not a device pointer.");
        }
    }
    else if (m_buffer_size > 0)
    {
        m_allocated_buffer_pool = runtime::gpu::create_gpu_buffer(m_buffer_size);
@@ -62,6 +62,17 @@ void runtime::gpu::free_gpu_buffer(void* buffer)
    }
}

bool runtime::gpu::is_device_pointer(const void* ptr)
{
    cudaPointerAttributes attributes;
    CUDA_RT_SAFE_CALL_NO_THROW(cudaPointerGetAttributes(&attributes, ptr));
    if (attributes.devicePointer != nullptr)
    {
        return true;
    }
    return false;
}

void runtime::gpu::cuda_memcpyDtD(void* dst, const void* src, size_t buffer_size)
{
    CUDA_RT_SAFE_CALL(cudaMemcpy(dst, src, buffer_size, cudaMemcpyDeviceToDevice));
@@ -30,6 +30,7 @@ namespace ngraph
void check_cuda_errors(CUresult err);
void* create_gpu_buffer(size_t buffer_size, const void* data = nullptr);
void free_gpu_buffer(void* buffer);
bool is_device_pointer(const void* ptr);
void cuda_memcpyDtD(void* dst, const void* src, size_t buffer_size);
void cuda_memcpyHtD(void* dst, const void* src, size_t buffer_size);
void cuda_memcpyDtH(void* dst, const void* src, size_t buffer_size);
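
For reference, a standalone sketch (also not part of the commit) of the distinction the new helper relies on: cudaPointerGetAttributes fills in devicePointer for CUDA device allocations, while for a plain host pointer the call fails on pre-CUDA-11 runtimes and the field stays null, which is why the non-throwing CUDA_RT_SAFE_CALL_NO_THROW wrapper is used above. The demo below swaps that wrapper for a direct error check.

#include <cuda_runtime.h>
#include <cstdio>

// Simplified stand-in for runtime::gpu::is_device_pointer; instead of the ngraph
// wrapper macro, a failed query is treated as "not a device pointer" and the
// sticky CUDA error is cleared so later calls are unaffected.
static bool is_device_pointer_demo(const void* ptr)
{
    cudaPointerAttributes attributes{};
    if (cudaPointerGetAttributes(&attributes, ptr) != cudaSuccess)
    {
        cudaGetLastError();
        return false;
    }
    return attributes.devicePointer != nullptr;
}

int main()
{
    int host_value = 0;
    void* device_buf = nullptr;
    cudaMalloc(&device_buf, sizeof(int));

    std::printf("host pointer:   %d\n", is_device_pointer_demo(&host_value)); // prints 0
    std::printf("device pointer: %d\n", is_device_pointer_demo(device_buf));  // prints 1

    cudaFree(device_buf);
    return 0;
}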