Commit 24054d8e authored by fenglei.tian's avatar fenglei.tian

clang-format-3.9

parent 938329f0
...@@ -21,8 +21,8 @@ ...@@ -21,8 +21,8 @@
#include <vector> #include <vector>
#include <cuda_runtime.h> #include <cuda_runtime.h>
#include "cublas_v2.h"
#include <cudnn.h> #include <cudnn.h>
#include "cublas_v2.h"
#include "ngraph/function.hpp" #include "ngraph/function.hpp"
#include "ngraph/runtime/call_frame.hpp" #include "ngraph/runtime/call_frame.hpp"
......
...@@ -17,16 +17,16 @@ ...@@ -17,16 +17,16 @@
#include <algorithm> #include <algorithm>
#include <cassert> #include <cassert>
#include <cmath> #include <cmath>
#include <cublas_v2.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cudnn_v7.h>
#include <iostream> #include <iostream>
#include <nvrtc.h>
#include <string> #include <string>
#include <typeindex> #include <typeindex>
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include <nvrtc.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cudnn_v7.h>
#include "ngraph/node.hpp" #include "ngraph/node.hpp"
#include "ngraph/ops/broadcast.hpp" #include "ngraph/ops/broadcast.hpp"
...@@ -51,29 +51,30 @@ ...@@ -51,29 +51,30 @@
using namespace std; using namespace std;
using namespace ngraph; using namespace ngraph;
#define NVRTC_SAFE_CALL(x) \ #define NVRTC_SAFE_CALL(x) \
do { \ do \
{ \
nvrtcResult result = x; \ nvrtcResult result = x; \
if (result != NVRTC_SUCCESS) { \ if (result != NVRTC_SUCCESS) \
std::cerr << "\nerror: " #x " failed with error " \ { \
<< nvrtcGetErrorString(result) << '\n'; \ std::cerr << "\nerror: " #x " failed with error " << nvrtcGetErrorString(result) \
<< '\n'; \
exit(1); \ exit(1); \
} \ } \
} while(0) } while (0)
#define CUDA_SAFE_CALL(x) \ #define CUDA_SAFE_CALL(x) \
do { \ do \
{ \
CUresult result = x; \ CUresult result = x; \
if (result != CUDA_SUCCESS) { \ if (result != CUDA_SUCCESS) \
const char *msg; \ { \
const char* msg; \
cuGetErrorName(result, &msg); \ cuGetErrorName(result, &msg); \
std::cerr << "\nerror: " #x " failed with error " \ std::cerr << "\nerror: " #x " failed with error " << msg << '\n'; \
<< msg << '\n'; \
exit(1); \ exit(1); \
} \ } \
} while(0) } while (0)
void runtime::gpu::GPU_Emitter::EmitNop(codegen::CodeWriter& writer, void runtime::gpu::GPU_Emitter::EmitNop(codegen::CodeWriter& writer,
const ngraph::Node* n, const ngraph::Node* n,
...@@ -122,14 +123,11 @@ cudnnSetOpTensorDescriptor(opTensorDesc, ...@@ -122,14 +123,11 @@ cudnnSetOpTensorDescriptor(opTensorDesc,
writer << "cudnnOpTensor(cudnn_handle," writer << "cudnnOpTensor(cudnn_handle,"
<< "opTensorDesc," << "opTensorDesc,"
<< "&alpha1," << "&alpha1,"
<< "descriptor," << "descriptor," << args[0].get_name() << ","
<< args[0].get_name() << ","
<< "&alpha2," << "&alpha2,"
<< "descriptor," << "descriptor," << args[1].get_name() << ","
<< args[1].get_name() << ","
<< "&beta," << "&beta,"
<< "descriptor," << "descriptor," << out[0].get_name() << ");\n";
<< out[0].get_name() << ");\n";
writer.indent--; writer.indent--;
writer << "}\n"; writer << "}\n";
} }
...@@ -247,12 +245,11 @@ void runtime::gpu::GPU_Emitter::EmitDot(codegen::CodeWriter& writer, ...@@ -247,12 +245,11 @@ void runtime::gpu::GPU_Emitter::EmitDot(codegen::CodeWriter& writer,
// clang-format on // clang-format on
writer.indent--; writer.indent--;
writer << "}\n"; writer << "}\n";
} }
else else
{ {
// General ND Call? // General ND Call?
} }
} }
void runtime::gpu::GPU_Emitter::EmitDivide(codegen::CodeWriter& writer, void runtime::gpu::GPU_Emitter::EmitDivide(codegen::CodeWriter& writer,
...@@ -344,14 +341,11 @@ cudnnSetOpTensorDescriptor(opTensorDesc, ...@@ -344,14 +341,11 @@ cudnnSetOpTensorDescriptor(opTensorDesc,
writer << "cudnnOpTensor(cudnn_handle," writer << "cudnnOpTensor(cudnn_handle,"
<< "opTensorDesc," << "opTensorDesc,"
<< "&alpha1," << "&alpha1,"
<< "descriptor," << "descriptor," << args[0].get_name() << ","
<< args[0].get_name() << ","
<< "&alpha2," << "&alpha2,"
<< "descriptor," << "descriptor," << args[1].get_name() << ","
<< args[1].get_name() << ","
<< "&beta," << "&beta,"
<< "descriptor," << "descriptor," << out[0].get_name() << ");\n";
<< out[0].get_name() << ");\n";
writer.indent--; writer.indent--;
writer << "}\n"; writer << "}\n";
} }
...@@ -388,17 +382,13 @@ cudnnSetOpTensorDescriptor(opTensorDesc, ...@@ -388,17 +382,13 @@ cudnnSetOpTensorDescriptor(opTensorDesc,
writer << "cudnnOpTensor(cudnn_handle," writer << "cudnnOpTensor(cudnn_handle,"
<< "opTensorDesc," << "opTensorDesc,"
<< "&alpha1," << "&alpha1,"
<< "descriptor," << "descriptor," << args[0].get_name() << ","
<< args[0].get_name() << ","
<< "&alpha2," << "&alpha2,"
<< "descriptor," << "descriptor," << args[1].get_name() << ","
<< args[1].get_name() << ","
<< "&beta," << "&beta,"
<< "descriptor," << "descriptor," << out[0].get_name() << ");\n";
<< out[0].get_name() << ");\n";
writer.indent--; writer.indent--;
writer << "}\n"; writer << "}\n";
} }
void runtime::gpu::GPU_Emitter::EmitNegative( void runtime::gpu::GPU_Emitter::EmitNegative(
...@@ -434,14 +424,11 @@ cudnnSetOpTensorDescriptor(opTensorDesc, ...@@ -434,14 +424,11 @@ cudnnSetOpTensorDescriptor(opTensorDesc,
writer << "cudnnOpTensor(cudnn_handle," writer << "cudnnOpTensor(cudnn_handle,"
<< "opTensorDesc," << "opTensorDesc,"
<< "&alpha1," << "&alpha1,"
<< "descriptor," << "descriptor," << args[0].get_name() << ","
<< args[0].get_name() << ","
<< "&alpha2," << "&alpha2,"
<< "descriptor," << "descriptor," << args[0].get_name() << ","
<< args[0].get_name() << ","
<< "&beta," << "&beta,"
<< "descriptor," << "descriptor," << out[0].get_name() << ");\n";
<< out[0].get_name() << ");\n";
writer.indent--; writer.indent--;
writer << "}\n"; writer << "}\n";
} }
......
...@@ -23,10 +23,10 @@ ...@@ -23,10 +23,10 @@
#include <typeinfo> #include <typeinfo>
#include <unordered_map> #include <unordered_map>
#include <cuda_runtime.h> #include <cuda_runtime.h>
#include <cudnn_v7.h> #include <cudnn_v7.h>
#include "cublas_v2.h" #include "cublas_v2.h"
#include "cuda.h" #include "cuda.h"
#include "ngraph/codegen/code_writer.hpp" #include "ngraph/codegen/code_writer.hpp"
#include "ngraph/codegen/compiler.hpp" #include "ngraph/codegen/compiler.hpp"
...@@ -359,8 +359,9 @@ void runtime::gpu::GPU_ExternalFunction::compile() ...@@ -359,8 +359,9 @@ void runtime::gpu::GPU_ExternalFunction::compile()
writer << "// Declare all functions\n"; writer << "// Declare all functions\n";
for (shared_ptr<Function> f : pass_manager.get_state().get_functions()) for (shared_ptr<Function> f : pass_manager.get_state().get_functions())
{ {
writer << "extern \"C\" void " << f->get_name() writer << "extern \"C\" void " << f->get_name() << "(void** inputs, void** outputs, "
<< "(void** inputs, void** outputs, cublasHandle_t& cublas_handle, cudnnHandle_t& cudnn_handle);\n"; "cublasHandle_t& cublas_handle, "
"cudnnHandle_t& cudnn_handle);\n";
} }
writer << "\n"; writer << "\n";
...@@ -478,7 +479,8 @@ void runtime::gpu::GPU_ExternalFunction::compile() ...@@ -478,7 +479,8 @@ void runtime::gpu::GPU_ExternalFunction::compile()
} }
writer << "extern \"C\" void " << current_function->get_name(); writer << "extern \"C\" void " << current_function->get_name();
writer << "(void** inputs, void** outputs, cublasHandle_t& cublas_handle, cudnnHandle_t& cudnn_handle)\n"; writer << "(void** inputs, void** outputs, cublasHandle_t& cublas_handle, cudnnHandle_t& "
"cudnn_handle)\n";
writer << "{\n"; writer << "{\n";
writer.indent++; writer.indent++;
......
...@@ -41,7 +41,7 @@ runtime::gpu::GPU_TensorView::GPU_TensorView(const ngraph::element::Type& elemen ...@@ -41,7 +41,7 @@ runtime::gpu::GPU_TensorView::GPU_TensorView(const ngraph::element::Type& elemen
m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size(); m_buffer_size = m_descriptor->get_tensor_view_layout()->get_size() * element_type.size();
if (m_buffer_size > 0) if (m_buffer_size > 0)
{ {
cudaMalloc((void**) &m_allocated_buffer_pool, m_buffer_size); cudaMalloc((void**)&m_allocated_buffer_pool, m_buffer_size);
} }
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment