Commit 555d5929 authored by Fenglei Tian's avatar Fenglei Tian

code style

parent 2ef72022
...@@ -34,7 +34,7 @@ runtime::gpu::GPU_CallFrame::GPU_CallFrame(std::shared_ptr<GPU_ExternalFunction> ...@@ -34,7 +34,7 @@ runtime::gpu::GPU_CallFrame::GPU_CallFrame(std::shared_ptr<GPU_ExternalFunction>
{ {
//Create context use driver API and make it current, the runtime call will pickup the context //Create context use driver API and make it current, the runtime call will pickup the context
//http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#interoperability-between-runtime-and-driver-apis //http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#interoperability-between-runtime-and-driver-apis
ngraph::runtime::gpu::CudaContextManager::Instance(); ngraph::runtime::gpu::CudaContextManager::instance();
cublasStatus_t cublasStatus = cublasCreate(&m_cublas_handle); cublasStatus_t cublasStatus = cublasCreate(&m_cublas_handle);
if (cublasStatus != CUBLAS_STATUS_SUCCESS) if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{ {
......
...@@ -30,7 +30,7 @@ namespace ngraph ...@@ -30,7 +30,7 @@ namespace ngraph
class CudaContextManager class CudaContextManager
{ {
public: public:
static CudaContextManager& Instance() static CudaContextManager& instance()
{ {
static CudaContextManager manager; static CudaContextManager manager;
return manager; return manager;
...@@ -41,7 +41,7 @@ namespace ngraph ...@@ -41,7 +41,7 @@ namespace ngraph
CudaContextManager& operator=(CudaContextManager const&) = delete; CudaContextManager& operator=(CudaContextManager const&) = delete;
CudaContextManager& operator=(CudaContextManager&&) = delete; CudaContextManager& operator=(CudaContextManager&&) = delete;
std::shared_ptr<CUcontext> GetContext() { return m_context_ptr; } std::shared_ptr<CUcontext> get_context() { return m_context_ptr; }
protected: protected:
CudaContextManager() CudaContextManager()
{ {
......
...@@ -30,7 +30,7 @@ namespace ngraph ...@@ -30,7 +30,7 @@ namespace ngraph
class CudaFunctionBuilder class CudaFunctionBuilder
{ {
public: public:
static std::shared_ptr<CUfunction> Get(const std::string& name, static std::shared_ptr<CUfunction> get(const std::string& name,
const std::string& kernel, const std::string& kernel,
int number_of_options, int number_of_options,
const char** options) const char** options)
......
...@@ -30,7 +30,7 @@ namespace ngraph ...@@ -30,7 +30,7 @@ namespace ngraph
class CudaFunctionPool class CudaFunctionPool
{ {
public: public:
static CudaFunctionPool& Instance() static CudaFunctionPool& instance()
{ {
static CudaFunctionPool pool; static CudaFunctionPool pool;
return pool; return pool;
...@@ -41,12 +41,12 @@ namespace ngraph ...@@ -41,12 +41,12 @@ namespace ngraph
CudaFunctionPool& operator=(CudaFunctionPool const&) = delete; CudaFunctionPool& operator=(CudaFunctionPool const&) = delete;
CudaFunctionPool& operator=(CudaFunctionPool&&) = delete; CudaFunctionPool& operator=(CudaFunctionPool&&) = delete;
void Set(std::string& name, std::shared_ptr<CUfunction> function) void set(std::string& name, std::shared_ptr<CUfunction> function)
{ {
m_function_map.insert({name, function}); m_function_map.insert({name, function});
} }
std::shared_ptr<CUfunction> Get(std::string& name) std::shared_ptr<CUfunction> get(std::string& name)
{ {
auto it = m_function_map.find(name); auto it = m_function_map.find(name);
if (it != m_function_map.end()) if (it != m_function_map.end())
......
...@@ -27,7 +27,7 @@ namespace ngraph ...@@ -27,7 +27,7 @@ namespace ngraph
class CudaKernelBuilder class CudaKernelBuilder
{ {
public: public:
static void Get_1_element_op(const std::string& name, static void get_1_element_op(const std::string& name,
const std::string& data_type, const std::string& data_type,
const std::string& op, const std::string& op,
std::string& kernel) std::string& kernel)
...@@ -45,7 +45,7 @@ out[tid] =)" + op + "(in[tid]);\n" + ...@@ -45,7 +45,7 @@ out[tid] =)" + op + "(in[tid]);\n" +
return; return;
} }
static void Get_2_element_op(const std::string& name, static void get_2_element_op(const std::string& name,
const std::string& data_type, const std::string& data_type,
const std::string& op, const std::string& op,
std::string& kernel) std::string& kernel)
...@@ -64,7 +64,7 @@ out[tid] = in1[tid] )" + op + "in2[tid]\n" + ...@@ -64,7 +64,7 @@ out[tid] = in1[tid] )" + op + "in2[tid]\n" +
return; return;
} }
static void Get_n_element_op(const std::string& name, static void get_n_element_op(const std::string& name,
const std::string& data_type, const std::string& data_type,
const std::vector<std::string>& ops, const std::vector<std::string>& ops,
std::string& kernel) std::string& kernel)
......
...@@ -36,14 +36,14 @@ namespace ngraph ...@@ -36,14 +36,14 @@ namespace ngraph
{ {
std::string name = "abs"; std::string name = "abs";
// Create an instance of nvrtcProgram with the code string. // Create an instance of nvrtcProgram with the code string.
                if (CudaFunctionPool::Instance().Get(name) == nullptr) if (CudaFunctionPool::instance().get(name) == nullptr)
{ {
const char* opts[] = {"--gpu-architecture=compute_35", const char* opts[] = {"--gpu-architecture=compute_35",
"--relocatable-device-code=true"}; "--relocatable-device-code=true"};
std::string kernel; std::string kernel;
CudaKernelBuilder::Get_1_element_op(name, "float", "fabsf", kernel); CudaKernelBuilder::get_1_element_op(name, "float", "fabsf", kernel);
                    CudaFunctionPool::Instance().Set( CudaFunctionPool::instance().set(
name, CudaFunctionBuilder::Get("cuda_" + name, kernel, 2, opts)); name, CudaFunctionBuilder::get("cuda_" + name, kernel, 2, opts));
} }
//convert runtime ptr to driver api ptr //convert runtime ptr to driver api ptr
...@@ -53,7 +53,7 @@ namespace ngraph ...@@ -53,7 +53,7 @@ namespace ngraph
void* args_list[] = {&d_ptr_in, &d_ptr_out, &count}; void* args_list[] = {&d_ptr_in, &d_ptr_out, &count};
CUDA_SAFE_CALL( CUDA_SAFE_CALL(
                    cuLaunchKernel(*CudaFunctionPool::Instance().Get(name).get(), cuLaunchKernel(*CudaFunctionPool::instance().get(name).get(),
count, count,
1, 1,
1, // grid dim 1, // grid dim
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment