Commit 24054d8e authored by fenglei.tian

clang-format-3.9

parent 938329f0
@@ -30,7 +30,7 @@ runtime::gpu::GPU_CallFrame::GPU_CallFrame(std::shared_ptr<GPU_ExternalFunction>
: m_external_function(external_function)
, m_compiled_function(compiled_function)
{
-cublasStatus_t cublasStatus = cublasCreate(&m_cublas_handle);
+cublasStatus_t cublasStatus = cublasCreate(&m_cublas_handle);
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
std::cout << "error : " << (int)cublasStatus << std::endl;
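The hunk above shows the GPU_CallFrame constructor creating the cuBLAS handle that generated code later receives by reference. Below is a minimal, self-contained sketch of the same pattern for both the cuBLAS and cuDNN handles; the wrapper class name and the throw-on-failure behaviour are illustrative assumptions, not the repository's exact code.

#include <stdexcept>

#include <cudnn.h>
#include "cublas_v2.h"

// Illustrative RAII wrapper around the handles a GPU call frame needs.
// The class name and error handling are assumptions, not ngraph's code.
class GpuHandles
{
public:
    GpuHandles()
    {
        // cublasCreate returns CUBLAS_STATUS_SUCCESS on success.
        if (cublasCreate(&m_cublas_handle) != CUBLAS_STATUS_SUCCESS)
        {
            throw std::runtime_error("cublasCreate failed");
        }
        // cudnnCreate returns CUDNN_STATUS_SUCCESS on success.
        if (cudnnCreate(&m_cudnn_handle) != CUDNN_STATUS_SUCCESS)
        {
            cublasDestroy(m_cublas_handle);
            throw std::runtime_error("cudnnCreate failed");
        }
    }

    ~GpuHandles()
    {
        cudnnDestroy(m_cudnn_handle);
        cublasDestroy(m_cublas_handle);
    }

    cublasHandle_t m_cublas_handle;
    cudnnHandle_t m_cudnn_handle;
};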
@@ -21,8 +21,8 @@
#include <vector>
#include <cuda_runtime.h>
-#include "cublas_v2.h"
#include <cudnn.h>
+#include "cublas_v2.h"
#include "ngraph/function.hpp"
#include "ngraph/runtime/call_frame.hpp"
@@ -23,10 +23,10 @@
#include <typeinfo>
#include <unordered_map>
-#include <cuda_runtime.h>
-#include <cudnn_v7.h>
-#include "cublas_v2.h"
-#include "cuda.h"
+#include <cuda_runtime.h>
+#include <cudnn_v7.h>
+#include "cublas_v2.h"
+#include "cuda.h"
#include "ngraph/codegen/code_writer.hpp"
#include "ngraph/codegen/compiler.hpp"
@@ -359,8 +359,9 @@ void runtime::gpu::GPU_ExternalFunction::compile()
writer << "// Declare all functions\n";
for (shared_ptr<Function> f : pass_manager.get_state().get_functions())
{
-writer << "extern \"C\" void " << f->get_name()
-<< "(void** inputs, void** outputs, cublasHandle_t& cublas_handle, cudnnHandle_t& cudnn_handle);\n";
+writer << "extern \"C\" void " << f->get_name() << "(void** inputs, void** outputs, "
+"cublasHandle_t& cublas_handle, "
+"cudnnHandle_t& cudnn_handle);\n";
}
writer << "\n";
@@ -478,7 +479,8 @@ void runtime::gpu::GPU_ExternalFunction::compile()
}
writer << "extern \"C\" void " << current_function->get_name();
-writer << "(void** inputs, void** outputs, cublasHandle_t& cublas_handle, cudnnHandle_t& cudnn_handle)\n";
+writer << "(void** inputs, void** outputs, cublasHandle_t& cublas_handle, cudnnHandle_t& "
+"cudnn_handle)\n";
writer << "{\n";
writer.indent++;
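The writer calls in the two hunks above emit one extern "C" entry point per compiled function, first as a declaration and then as a definition. The generated translation unit therefore contains functions of roughly the following shape; the function name "Function_0" and the body comment are hypothetical placeholders, only the signature is taken from the emitted strings.

#include <cudnn.h>
#include "cublas_v2.h"

// Shape of the code produced by GPU_ExternalFunction::compile();
// the name and body are illustrative only.
extern "C" void Function_0(void** inputs,
                           void** outputs,
                           cublasHandle_t& cublas_handle,
                           cudnnHandle_t& cudnn_handle)
{
    // per-op CUDA, cuBLAS, and cuDNN calls are emitted here
}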
@@ -41,7 +41,7 @@ runtime::gpu::GPU_TensorView::GPU_TensorView(const ngraph::element::Type& elemen
m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size();
if (m_buffer_size > 0)
{
-cudaMalloc((void**) &m_allocated_buffer_pool, m_buffer_size);
+cudaMalloc((void**)&m_allocated_buffer_pool, m_buffer_size);
}
}
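GPU_TensorView allocates its backing device buffer with cudaMalloc only when the tensor has a non-zero size. A hedged sketch of the corresponding allocate/copy/free lifecycle follows; the standalone helper names and the error handling are assumptions for illustration, while the real class keeps the pointer in m_allocated_buffer_pool and releases it when the tensor view is destroyed.

#include <cstddef>
#include <stdexcept>

#include <cuda_runtime.h>

// Illustrative lifecycle for a device-side tensor buffer.
// Helper names are assumptions; ngraph stores the pointer as a member.
void* allocate_device_buffer(size_t buffer_size)
{
    void* buffer = nullptr;
    if (buffer_size > 0 && cudaMalloc(&buffer, buffer_size) != cudaSuccess)
    {
        throw std::runtime_error("cudaMalloc failed");
    }
    return buffer;
}

void write_device_buffer(void* device_buffer, const void* host_src, size_t n)
{
    // Host-to-device copy of n bytes into the allocated buffer.
    cudaMemcpy(device_buffer, host_src, n, cudaMemcpyHostToDevice);
}

void free_device_buffer(void* device_buffer)
{
    cudaFree(device_buffer);
}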