Unverified Commit c2974ac2 authored by Robert Kimball, committed by GitHub

Move CodeWriter out of codegen to ngraph root. (#2473)

* Move codewriter out of codegen to ngraph root. It is useful for more than writing code.

* remove codewriter.* from intel gpu backend and use ngraph version

* fix merge issues
parent cf33669b
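
For code that used CodeWriter, the change is mechanical: the include path drops the codegen/ directory and the type drops the codegen:: qualifier. A minimal before/after sketch of a hypothetical caller (illustrative, not taken from this diff):

    // Before this commit:
    //   #include "ngraph/codegen/code_writer.hpp"
    //   ngraph::codegen::CodeWriter writer;

    // After this commit:
    #include "ngraph/code_writer.hpp"

    ngraph::CodeWriter writer;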
@@ -21,13 +21,10 @@
 namespace ngraph
 {
-    namespace codegen
-    {
-        class CodeWriter;
-    }
+    class CodeWriter;
 }

-class ngraph::codegen::CodeWriter
+class ngraph::CodeWriter
 {
 public:
     CodeWriter()
...
@@ -18,7 +18,7 @@
 #include <unordered_map>

-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/pass/pass.hpp"

 namespace ngraph
...
@@ -246,7 +246,7 @@ namespace ngraph
                 }
 #endif

-                static void emitCblasSgemmBatch(codegen::CodeWriter& writer,
+                static void emitCblasSgemmBatch(CodeWriter& writer,
                                                 const Shape& shape_a,
                                                 const Shape& shape_b,
                                                 const Shape& shape_c,
@@ -334,7 +334,7 @@ namespace ngraph
                     const Shape& shape_c,
                     const std::vector<TensorViewWrapper>& args,
                     const std::vector<TensorViewWrapper>& out,
-                    codegen::CodeWriter& writer)
+                    CodeWriter& writer)
                 {
                     writer.block_begin();
@@ -588,7 +588,7 @@ namespace ngraph
             template <typename T>
             void CPU_Emitter::emitBatchNorm(CPU_ExternalFunction* external_function,
-                                            codegen::CodeWriter& writer,
+                                            CodeWriter& writer,
                                             const ngraph::Node* node,
                                             const std::vector<TensorViewWrapper>& args,
                                             const std::vector<TensorViewWrapper>& out,
@@ -1776,7 +1776,7 @@ namespace ngraph
                 const std::vector<TensorViewWrapper>& out,
                 size_t reduction_axis,
                 const char* kernel_name,
-                codegen::CodeWriter& writer)
+                CodeWriter& writer)
             {
                 if (out[0].get_element_type() != element::i64 &&
                     out[0].get_element_type() != element::i32)
...
@@ -19,14 +19,14 @@
 #include <string>
 #include <vector>

-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/node.hpp"
 #include "ngraph/runtime/cpu/cpu_external_function.hpp"
 #include "ngraph/runtime/cpu/cpu_tensor_view_wrapper.hpp"

 #define EMITTER_DECL(op_name)                                                \
     emit<op_name>(CPU_ExternalFunction * external_function,                  \
-                  codegen::CodeWriter & writer,                              \
+                  CodeWriter & writer,                                       \
                   const ngraph::Node* node,                                  \
                   const std::vector<TensorViewWrapper>& args,                \
                   const std::vector<TensorViewWrapper>& out)
@@ -42,7 +42,7 @@ namespace ngraph
             public:
                 template <typename OP>
                 static void emit(CPU_ExternalFunction* external_function,
-                                 codegen::CodeWriter& writer,
+                                 CodeWriter& writer,
                                  const ngraph::Node* node,
                                  const std::vector<TensorViewWrapper>& args,
                                  const std::vector<TensorViewWrapper>& out)
@@ -52,7 +52,7 @@ namespace ngraph
                 }

                 static void nop(CPU_ExternalFunction* external_function,
-                                codegen::CodeWriter& writer,
+                                CodeWriter& writer,
                                 const ngraph::Node* node,
                                 const std::vector<TensorViewWrapper>& args,
                                 const std::vector<TensorViewWrapper>& out)
@@ -61,7 +61,7 @@ namespace ngraph
                 template <typename T>
                 static void emitBatchNorm(CPU_ExternalFunction* external_function,
-                                          codegen::CodeWriter& writer,
+                                          CodeWriter& writer,
                                           const ngraph::Node* node,
                                           const std::vector<TensorViewWrapper>& args,
                                           const std::vector<TensorViewWrapper>& out,
...
@@ -28,7 +28,7 @@
 #include <tbb/flow_graph.h>

 #if !defined(NGRAPH_DEX_ONLY)
-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/codegen/compiler.hpp"
 #include "ngraph/codegen/execution_engine.hpp"
 #endif
@@ -414,7 +414,7 @@ static const runtime::cpu::OpMap dispatcher{
 };

 static void
-    generate_isnan_isinf_check(codegen::CodeWriter& writer,
+    generate_isnan_isinf_check(CodeWriter& writer,
                                std::shared_ptr<Node> node,
                                const std::vector<ngraph::runtime::cpu::TensorViewWrapper>& out,
                                const char* funcname)
@@ -428,13 +428,13 @@ static void
     writer << "}\n";
 }

-static void generate_class_declarations(codegen::CodeWriter& writer)
+static void generate_class_declarations(CodeWriter& writer)
 {
     writer << "// Declare all classes\n";
     writer << "struct CPURuntimeContextCG;\n";
 }

-static void generate_runtime_context_class(codegen::CodeWriter& writer)
+static void generate_runtime_context_class(CodeWriter& writer)
 {
     writer <<
 #include "ngraph/runtime/cpu/pregenerated_src/cpu_cg_runtime_context.hpp"
@@ -472,7 +472,7 @@ void runtime::cpu::CPU_ExternalFunction::compile()
         function_ordered_ops.insert({current_function, current_function->get_ordered_ops()});
     }

-    codegen::CodeWriter writer;
+    CodeWriter writer;

     writer << "// Generated by the nGraph CPU backend\n";
     if (m_use_tbb)
@@ -2199,7 +2199,7 @@ void runtime::cpu::CPU_ExternalFunction::write_to_file(const std::string& code,

 #if !defined(NGRAPH_DEX_ONLY)
 void runtime::cpu::CPU_ExternalFunction::emit_debug_function_entry(
-    codegen::CodeWriter& writer,
+    CodeWriter& writer,
     Node* node,
     const std::vector<TensorViewWrapper>& in,
     const std::vector<TensorViewWrapper>& out)
@@ -2211,7 +2211,7 @@ void runtime::cpu::CPU_ExternalFunction::emit_debug_function_entry(
 }

 void runtime::cpu::CPU_ExternalFunction::emit_debug_function_exit(
-    codegen::CodeWriter& writer,
+    CodeWriter& writer,
     Node* node,
     const std::vector<TensorViewWrapper>& in,
     const std::vector<TensorViewWrapper>& out)
@@ -2231,7 +2231,7 @@ bool runtime::cpu::CPU_ExternalFunction::is_functionally_identical(
 string runtime::cpu::CPU_ExternalFunction::emit_op_as_function(const Node& node,
                                                                const string& function_name)
 {
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     writer << "static void " << function_name << "(";
     writer.indent++;
     // Work around a compiler warning (*node inside typeid may have effects
...
@@ -33,7 +33,7 @@

 #if !defined(NGRAPH_DEX_ONLY)
-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/codegen/compiler.hpp"
 #include "ngraph/codegen/execution_engine.hpp"
@@ -64,7 +64,7 @@ namespace ngraph
 #if !defined(NGRAPH_DEX_ONLY)
             using OpFunction = std::function<void(CPU_ExternalFunction* external_function,
-                                                  codegen::CodeWriter&,
+                                                  CodeWriter&,
                                                   const ngraph::Node*,
                                                   const std::vector<TensorViewWrapper>& inputs,
                                                   const std::vector<TensorViewWrapper>& outputs)>;
@@ -218,16 +218,16 @@ namespace ngraph
             bool computes_result(Node* node);
             void release_function() { m_function = nullptr; }
 #if !defined(NGRAPH_DEX_ONLY)
-            void emit_debug_function_entry(codegen::CodeWriter& writer,
+            void emit_debug_function_entry(CodeWriter& writer,
                                            Node* node,
                                            const std::vector<TensorViewWrapper>& in,
                                            const std::vector<TensorViewWrapper>& out);
-            void emit_debug_function_exit(codegen::CodeWriter& writer,
+            void emit_debug_function_exit(CodeWriter& writer,
                                           Node* node,
                                           const std::vector<TensorViewWrapper>& in,
                                           const std::vector<TensorViewWrapper>& out);
             void handle_output_alias(
-                codegen::CodeWriter& writer,
+                CodeWriter& writer,
                 const Node&,
                 const std::unordered_map<descriptor::Tensor*, std::vector<size_t>>&);
...
@@ -16,7 +16,7 @@
 #include <algorithm>
 #include <map>

-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/runtime/cpu/cpu_kernel_emitters.hpp"
 #include "ngraph/runtime/cpu/cpu_kernel_utils.hpp"
 #include "ngraph/util.hpp"
@@ -40,7 +40,7 @@ string emit_bracketed_string(T data)
 }

 // Convert a buffer into a C-style multi-index array
-string recast_tmp_var(codegen::CodeWriter& writer,
+string recast_tmp_var(CodeWriter& writer,
                       const string& element_type,
                       const string& arg_name,
                       const Shape& arg_shape,
@@ -57,8 +57,7 @@ string recast_tmp_var(codegen::CodeWriter& writer,
 // write openings to for loops, for variables in the order of top,
 // where each loop ranges from bottom[i] to top[i]
 // creates index variables for each loop and returns them
-vector<string>
-    open_for_loops(codegen::CodeWriter& writer, const Shape& top, const Shape& bottom = {})
+vector<string> open_for_loops(CodeWriter& writer, const Shape& top, const Shape& bottom = {})
 {
     Shape new_bottom;
     if (bottom.size() == 0)
@@ -98,7 +97,7 @@ vector<string>
     return index_vars;
 }
 //close the for loops created by open_for_loops
-void close_for_loops(codegen::CodeWriter& writer, const vector<string>& index_vars)
+void close_for_loops(CodeWriter& writer, const vector<string>& index_vars)
 {
     for (size_t i = index_vars.size(); i-- > 0;)
     {
@@ -107,7 +106,7 @@ void close_for_loops(codegen::CodeWriter& writer, const vector<string>& index_va
     }
 }

-void ngraph::runtime::cpu::kernel::emit_broadcast(codegen::CodeWriter& writer,
+void ngraph::runtime::cpu::kernel::emit_broadcast(CodeWriter& writer,
                                                   const string& element_type,
                                                   const string& arg0, // replacement context
                                                   const string& out,
@@ -141,7 +140,7 @@ void ngraph::runtime::cpu::kernel::emit_broadcast(codegen::CodeWriter& writer,
 //
 // For the reference kernel this is based on, see ngraph/runtime/reference/concat.hpp.
 //
-void ngraph::runtime::cpu::kernel::emit_concat(codegen::CodeWriter& writer,
+void ngraph::runtime::cpu::kernel::emit_concat(CodeWriter& writer,
                                                const string& element_type,
                                                const vector<string>& args,
                                                const string& out,
@@ -174,7 +173,7 @@ void ngraph::runtime::cpu::kernel::emit_concat(codegen::CodeWriter& writer,
     }
 }

-void ngraph::runtime::cpu::kernel::emit_replace_slice(codegen::CodeWriter& writer,
+void ngraph::runtime::cpu::kernel::emit_replace_slice(CodeWriter& writer,
                                                       const string& element_type,
                                                       const string& arg0, // replacement context
                                                       const string& arg1, // replacement value
@@ -197,7 +196,7 @@ void ngraph::runtime::cpu::kernel::emit_replace_slice(codegen::CodeWriter& write
 }

 void ngraph::runtime::cpu::kernel::emit_replace_slice_inplace(
-    codegen::CodeWriter& writer,
+    CodeWriter& writer,
     const string& element_type,
     const string& arg0, // replacement context
     const string& arg1, // replacement value
@@ -214,7 +213,7 @@ void ngraph::runtime::cpu::kernel::emit_replace_slice_inplace(
     emit_pointwise_copy(writer, element_type, arg1, arg0, input_transform, output_transform);
 }

-void ngraph::runtime::cpu::kernel::emit_slice(codegen::CodeWriter& writer,
+void ngraph::runtime::cpu::kernel::emit_slice(CodeWriter& writer,
                                               const string& element_type,
                                               const string& arg0, // replacement context
                                               const string& out,
@@ -258,7 +257,7 @@ void ngraph::runtime::cpu::kernel::emit_slice(codegen::CodeWriter& writer,
     close_for_loops(writer, index_vars);
 }

-void ngraph::runtime::cpu::kernel::emit_reshape(codegen::CodeWriter& writer,
+void ngraph::runtime::cpu::kernel::emit_reshape(CodeWriter& writer,
                                                 const string& element_type,
                                                 const string& arg0, // replacement context
                                                 const string& out,
@@ -334,7 +333,7 @@ struct SumHeuristic
     }

     string get_thread_safe_dest() const { return m_thread_safe_dest; }
-    void emit_omp(codegen::CodeWriter& writer, const size_t loop_index) const
+    void emit_omp(CodeWriter& writer, const size_t loop_index) const
     {
         if (!m_skip_parallel_for && loop_index == m_parallel_for_index)
         {
@@ -355,7 +354,7 @@ struct SumHeuristic
             writer << "\n";
         }
     }
-    void emit_thread_local(codegen::CodeWriter& writer,
+    void emit_thread_local(CodeWriter& writer,
                            const size_t loop_index,
                            const vector<string>& out_indexes,
                            const Shape& out_shape,
@@ -375,7 +374,7 @@ struct SumHeuristic
             m_thread_safe_dest += emit_bracketed_string(out_indexes);
         }
     }
-    void emit_thread_local_finalize(codegen::CodeWriter& writer,
+    void emit_thread_local_finalize(CodeWriter& writer,
                                     const size_t loop_index,
                                     const vector<string>& index_vars,
                                     const vector<string>& out_indexes,
@@ -495,7 +494,7 @@ private:
     bool m_skip_parallel_for{false};
 };

-void ngraph::runtime::cpu::kernel::emit_sum(codegen::CodeWriter& writer,
+void ngraph::runtime::cpu::kernel::emit_sum(CodeWriter& writer,
                                             const string& element_type,
                                             const string& arg0, // replacement context
                                             const string& out,
@@ -574,7 +573,7 @@ void ngraph::runtime::cpu::kernel::emit_sum(codegen::CodeWriter& writer,
             }
         }
     }
-void ngraph::runtime::cpu::kernel::emit_reduce(codegen::CodeWriter& writer,
+void ngraph::runtime::cpu::kernel::emit_reduce(CodeWriter& writer,
                                                const string& element_type,
                                                const string& arg0, // replacement context
                                                const string& arg1,
...
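
The open_for_loops/close_for_loops pair above illustrates the emitter pattern CodeWriter exists for: the writer tracks indentation state while kernel emitters stream C source through operator<<. A simplified sketch of that pattern against the relocated class (the helper names and exact loop text are illustrative assumptions, not the real implementation):

    #include <string>
    #include <vector>

    #include "ngraph/code_writer.hpp"
    #include "ngraph/shape.hpp"

    // Open one for-loop per dimension of `top` and return the generated
    // index variable names, outermost first.
    std::vector<std::string> open_loops_sketch(ngraph::CodeWriter& writer,
                                               const ngraph::Shape& top)
    {
        std::vector<std::string> index_vars;
        for (size_t d = 0; d < top.size(); d++)
        {
            std::string idx = writer.generate_temporary_name("i"); // "i0", "i1", ...
            writer << "for (size_t " << idx << " = 0; " << idx << " < " << top[d] << "; "
                   << idx << "++)\n";
            writer.block_begin(); // emits "{\n" and bumps CodeWriter::indent
            index_vars.push_back(idx);
        }
        return index_vars;
    }

    // Close the loops opened above, innermost first.
    void close_loops_sketch(ngraph::CodeWriter& writer,
                            const std::vector<std::string>& index_vars)
    {
        for (size_t i = index_vars.size(); i-- > 0;)
        {
            writer.block_end(); // drops the indent and emits "}\n"
        }
    }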
@@ -17,7 +17,7 @@
 #pragma once

 #include "ngraph/axis_vector.hpp"
-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/coordinate.hpp"
 #include "ngraph/shape.hpp"
@@ -29,14 +29,14 @@ namespace ngraph
         {
             namespace kernel
             {
-                void emit_broadcast(codegen::CodeWriter& writer,
+                void emit_broadcast(CodeWriter& writer,
                                     const std::string& element_type,
                                     const std::string& arg0, // replacement context
                                     const std::string& out,
                                     const Shape& arg0_shape,
                                     const Shape& out_shape,
                                     const AxisSet& broadcast_axes);
-                void emit_concat(codegen::CodeWriter& writer,
+                void emit_concat(CodeWriter& writer,
                                  const std::string& element_type,
                                  const std::vector<std::string>& args,
                                  const std::string& out,
@@ -44,7 +44,7 @@ namespace ngraph
                                  const Shape& out_shape,
                                  const size_t concatenation_axis);
-                void emit_replace_slice(codegen::CodeWriter& writer,
+                void emit_replace_slice(CodeWriter& writer,
                                         const std::string& element_type,
                                         const std::string& arg0, // replacement context
                                         const std::string& arg1, // replacement value
@@ -54,7 +54,7 @@ namespace ngraph
                                         const Coordinate& lower_bounds,
                                         const Coordinate& upper_bounds,
                                         const Strides& strides);
-                void emit_replace_slice_inplace(codegen::CodeWriter& writer,
+                void emit_replace_slice_inplace(CodeWriter& writer,
                                                 const std::string& element_type,
                                                 const std::string& arg0, // replacement context
                                                 const std::string& arg1, // replacement value
@@ -63,7 +63,7 @@ namespace ngraph
                                                 const Coordinate& lower_bounds,
                                                 const Coordinate& upper_bounds,
                                                 const Strides& strides);
-                void emit_slice(codegen::CodeWriter& writer,
+                void emit_slice(CodeWriter& writer,
                                 const std::string& element_type,
                                 const std::string& arg0, // replacement context
                                 const std::string& out,
@@ -72,21 +72,21 @@ namespace ngraph
                                 const Coordinate& lower_bounds,
                                 const Coordinate& upper_bounds,
                                 const Strides& strides);
-                void emit_reshape(codegen::CodeWriter& writer,
+                void emit_reshape(CodeWriter& writer,
                                   const std::string& element_type,
                                   const std::string& arg0, // replacement context
                                   const std::string& out,
                                   const Shape& arg0_shape,
                                   const Shape& out_shape,
                                   const AxisVector& arg0_axis_order);
-                void emit_sum(codegen::CodeWriter& writer,
+                void emit_sum(CodeWriter& writer,
                               const std::string& element_type,
                               const std::string& arg0, // replacement context
                               const std::string& out,
                               const Shape& arg0_shape,
                               const Shape& out_shape,
                               const AxisSet& reduction_axes);
-                void emit_reduce(codegen::CodeWriter& writer,
+                void emit_reduce(CodeWriter& writer,
                                  const std::string& element_type,
                                  const std::string& arg0, // replacement context
                                  const std::string& arg1,
...
@@ -15,7 +15,7 @@
 //*****************************************************************************

 #include "ngraph/runtime/cpu/cpu_kernel_utils.hpp"
-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/coordinate_transform.hpp"
 #include "ngraph/util.hpp"
@@ -182,7 +182,7 @@ string ngraph::runtime::cpu::kernel::emit_nd_index(CoordinateTransform& trans,
 // Emits a pointwise copy from source_buffer mediated by in_trans, to
 // dest_buffer mediated by dest_trans.
 //
-void ngraph::runtime::cpu::kernel::emit_pointwise_copy(codegen::CodeWriter& writer,
+void ngraph::runtime::cpu::kernel::emit_pointwise_copy(CodeWriter& writer,
                                                        const string& element_type,
                                                        const string& source_buffer,
                                                        const string& dest_buffer,
...
@@ -16,7 +16,7 @@
 #pragma once

-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/coordinate_transform.hpp"

 namespace ngraph
@@ -40,7 +40,7 @@ namespace ngraph
                 std::string emit_nd_sizes(CoordinateTransform& trans);
                 std::string emit_nd_index(CoordinateTransform& trans,
                                           const std::vector<std::string>& index_vars);
-                void emit_pointwise_copy(codegen::CodeWriter& writer,
+                void emit_pointwise_copy(CodeWriter& writer,
                                          const std::string& element_type,
                                          const std::string& source_buffer,
                                          const std::string& dest_buffer,
...
@@ -17,7 +17,7 @@
 #pragma once

 #include <array>
-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/runtime/gpu/gpu_cuda_kernel_ops.hpp"
 #include "ngraph/runtime/gpu/gpu_host_parameters.hpp"
 #include "ngraph/runtime/gpu/nvdiff.hpp"
@@ -195,7 +195,7 @@ namespace ngraph
                                          GPURuntimeContext* ctx,
                                          std::shared_ptr<GPUHostParameters> params);
                 uint32_t align_to_block_size(uint32_t threads, uint32_t block_size);
-                void print_tensor_from_gpu(codegen::CodeWriter& writer,
+                void print_tensor_from_gpu(CodeWriter& writer,
                                            const std::string& tensor_name,
                                            NVShape shape);
                 std::string include_helpers();
...
@@ -24,7 +24,7 @@
 #include <string>
 #include <tuple>

-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/descriptor/input.hpp"
 #include "ngraph/descriptor/output.hpp"
 #include "ngraph/file_util.hpp"
@@ -168,7 +168,7 @@ std::string runtime::gpu::GPUExternalFunction::add_to_runtime(
     const std::vector<runtime::gpu::GPUTensorWrapper>& args,
     const std::vector<runtime::gpu::GPUTensorWrapper>& out)
 {
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     writer.block_begin();
     {
         writer << "void* input[] = {" << node_names(args) << "};\n";
@@ -185,7 +185,7 @@ std::string runtime::gpu::GPUExternalFunction::add_call_to_runtime(
     const std::vector<runtime::gpu::GPUTensorWrapper>& args,
     const std::vector<runtime::gpu::GPUTensorWrapper>& out)
 {
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     writer.block_begin();
     {
         writer << "void* input[] = {" << node_names(args) << "};\n";
@@ -618,7 +618,7 @@ void runtime::gpu::GPUExternalFunction::emit_debug_function_exit(Node* node)
 string runtime::gpu::GPUExternalFunction::emit_op_as_function(const Node& node,
                                                               const string& function_name)
 {
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     writer << "static void " << function_name << "(";
     writer.indent++;
     vector<GPUTensorWrapper> in;
...
@@ -24,7 +24,7 @@
 #include <typeinfo>
 #include <unordered_map>

-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/codegen/compiler.hpp"
 #include "ngraph/codegen/execution_engine.hpp"
 #include "ngraph/function.hpp"
@@ -106,7 +106,7 @@ namespace ngraph
                 // internal ops
                 virtual void propagate_in_place_output(ngraph::descriptor::Output* res_src_output,
                                                        const std::string& output_name) override;
-                codegen::CodeWriter m_writer;
+                CodeWriter m_writer;
                 std::string m_common_function_string;
                 std::unique_ptr<codegen::Compiler> m_compiler;
                 std::unique_ptr<codegen::ExecutionEngine> m_execution_engine;
...
@@ -24,7 +24,7 @@
 #include <string>
 #include <tuple>

-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/descriptor/input.hpp"
 #include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
 #include "ngraph/descriptor/output.hpp"
@@ -203,7 +203,7 @@ std::string runtime::gpu::GPUInternalFunction::add_call_to_runtime(
     const std::vector<runtime::gpu::GPUTensorWrapper>& out)
 {
     m_runtime_constructor->add_call(caller, callee, args, out);
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     writer.block_begin();
     {
         for (auto const& tensor : args)
@@ -225,7 +225,7 @@ std::string runtime::gpu::GPUInternalFunction::compose_manifest(
     const std::vector<runtime::gpu::GPUTensorWrapper>& args,
     const std::vector<runtime::gpu::GPUTensorWrapper>& out) const
 {
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     writer.block_begin();
     {
         for (auto const& tensor : args)
@@ -389,7 +389,7 @@ void runtime::gpu::GPUInternalFunction::emit()
     if (std::getenv("NGRAPH_GPU_TRACE"))
     {
-        m_trace = std::make_shared<codegen::CodeWriter>();
+        m_trace = std::make_shared<CodeWriter>();
     }

     // build and emit functions
...
@@ -92,8 +92,8 @@ namespace ngraph
                          std::tuple<runtime::gpu::GPUTensorWrapper::TensorType, size_t, std::string>>
                     m_variable_name_map;
                 std::unique_ptr<GPURuntimeConstructor> m_runtime_constructor;
-                std::shared_ptr<codegen::CodeWriter> m_trace;
-                codegen::CodeWriter m_manifest;
+                std::shared_ptr<CodeWriter> m_trace;
+                CodeWriter m_manifest;
             };
         }
     }
...
@@ -18,12 +18,12 @@
 #include <map>

 #include "gpu_kernel_emitters.hpp"
-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/util.hpp"

 using namespace ngraph;

-void runtime::gpu::kernel::emit_memset(codegen::CodeWriter& writer,
+void runtime::gpu::kernel::emit_memset(CodeWriter& writer,
                                        const GPUTensorWrapper& dst,
                                        int value,
                                        size_t buffer_size)
@@ -36,7 +36,7 @@ void runtime::gpu::kernel::emit_memset(codegen::CodeWriter& writer,
            << ");\n";
 }

-void runtime::gpu::kernel::emit_memcpyDtD(codegen::CodeWriter& writer,
+void runtime::gpu::kernel::emit_memcpyDtD(CodeWriter& writer,
                                           const GPUTensorWrapper& dst,
                                           const GPUTensorWrapper& src,
                                           size_t buffer_size)
@@ -52,7 +52,7 @@ void runtime::gpu::kernel::emit_memcpyDtD(codegen::CodeWriter& writer,
     return;
 }

-void runtime::gpu::kernel::emit_cudnnConvolutionDescriptor(codegen::CodeWriter& writer,
+void runtime::gpu::kernel::emit_cudnnConvolutionDescriptor(CodeWriter& writer,
                                                            const std::string& name,
                                                            const CoordinateDiff& padding,
                                                            const Strides& window_movement_strides,
@@ -84,7 +84,7 @@ void runtime::gpu::kernel::emit_cudnnConvolutionDescriptor(codegen::CodeWriter&
     }
 }

-void runtime::gpu::kernel::emit_cudnnFilterDescriptor(codegen::CodeWriter& writer,
+void runtime::gpu::kernel::emit_cudnnFilterDescriptor(CodeWriter& writer,
                                                       const std::string& name,
                                                       const std::string& format,
                                                       const std::string& data_type,
@@ -120,7 +120,7 @@ void runtime::gpu::kernel::emit_cudnnFilterDescriptor(codegen::CodeWriter& write
     }
 }

-void runtime::gpu::kernel::emit_cudnnTensorDescriptor(codegen::CodeWriter& writer,
+void runtime::gpu::kernel::emit_cudnnTensorDescriptor(CodeWriter& writer,
                                                       const std::string& name,
                                                       const std::string& format,
                                                       const std::string& data_type,
@@ -157,7 +157,7 @@ void runtime::gpu::kernel::emit_cudnnTensorDescriptor(codegen::CodeWriter& write
     }
 }

-void runtime::gpu::kernel::emit_cudnnTensor4dDescriptor(codegen::CodeWriter& writer,
+void runtime::gpu::kernel::emit_cudnnTensor4dDescriptor(CodeWriter& writer,
                                                         const std::string& name,
                                                         const std::string& format,
                                                         const std::string& data_type,
@@ -174,7 +174,7 @@ void runtime::gpu::kernel::emit_cudnnTensor4dDescriptor(codegen::CodeWriter& wri
     writer << "));\n";
 }

-void runtime::gpu::kernel::emit_cudnnTensorNdDescriptor(codegen::CodeWriter& writer,
+void runtime::gpu::kernel::emit_cudnnTensorNdDescriptor(CodeWriter& writer,
                                                         const std::string& name,
                                                         const std::string& data_type,
                                                         const size_t& num_axes,
@@ -191,7 +191,7 @@ void runtime::gpu::kernel::emit_cudnnTensorNdDescriptor(codegen::CodeWriter& wri
     writer << " /*strides*/" << name << "_strides));\n";
 }

-void runtime::gpu::kernel::emit_cudnnReduceTensor(codegen::CodeWriter& writer,
+void runtime::gpu::kernel::emit_cudnnReduceTensor(CodeWriter& writer,
                                                   const GPUTensorWrapper& in,
                                                   const GPUTensorWrapper& out,
                                                   const std::string& reduce_op,
...
@@ -16,7 +16,7 @@
 #pragma once

-#include "ngraph/codegen/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/coordinate_transform.hpp"
 #include "ngraph/node.hpp"
 #include "ngraph/runtime/gpu/gpu_tensor_wrapper.hpp"
@@ -29,17 +29,17 @@ namespace ngraph
         {
             namespace kernel
             {
-                void emit_memset(codegen::CodeWriter& writer,
+                void emit_memset(CodeWriter& writer,
                                  const GPUTensorWrapper& dst,
                                  int value,
                                  size_t buffer_size = 0);

-                void emit_memcpyDtD(codegen::CodeWriter& writer,
+                void emit_memcpyDtD(CodeWriter& writer,
                                     const GPUTensorWrapper& dst,
                                     const GPUTensorWrapper& src,
                                     size_t buffer_size = 0);

-                void emit_cudnnConvolutionDescriptor(codegen::CodeWriter& writer,
+                void emit_cudnnConvolutionDescriptor(CodeWriter& writer,
                                                      const std::string& name,
                                                      const CoordinateDiff& padding,
                                                      const Strides& window_movement_strides,
@@ -47,32 +47,32 @@ namespace ngraph
                                                      const std::string& mode,
                                                      const std::string& data_type);

-                void emit_cudnnFilterDescriptor(codegen::CodeWriter& writer,
+                void emit_cudnnFilterDescriptor(CodeWriter& writer,
                                                 const std::string& name,
                                                 const std::string& format,
                                                 const std::string& data_type,
                                                 const Shape& shape);

-                void emit_cudnnTensorDescriptor(codegen::CodeWriter& writer,
+                void emit_cudnnTensorDescriptor(CodeWriter& writer,
                                                 const std::string& name,
                                                 const std::string& format,
                                                 const std::string& data_type,
                                                 const Shape& shape);

-                void emit_cudnnTensor4dDescriptor(codegen::CodeWriter& writer,
+                void emit_cudnnTensor4dDescriptor(CodeWriter& writer,
                                                   const std::string& name,
                                                   const std::string& format,
                                                   const std::string& data_type,
                                                   const std::array<size_t, 4>& axes);

-                void emit_cudnnTensorNdDescriptor(codegen::CodeWriter& writer,
+                void emit_cudnnTensorNdDescriptor(CodeWriter& writer,
                                                   const std::string& name,
                                                   const std::string& data_type,
                                                   const size_t& num_axes,
                                                   const std::vector<size_t>& axes,
                                                   const std::vector<size_t>& strides);

-                void emit_cudnnReduceTensor(codegen::CodeWriter& writer,
+                void emit_cudnnReduceTensor(CodeWriter& writer,
                                             const GPUTensorWrapper& in,
                                             const GPUTensorWrapper& out,
                                             const std::string& reduce_op,
...
@@ -25,7 +25,6 @@ set(SRC
     intelgpu_op_convolution.cpp
     intelgpu_op_softmax.cpp
     intelgpu_op_custom_func_call.cpp
-    code_writer.cpp
     visualize_tree.cpp
     )
...
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "code_writer.hpp"
using namespace std;
using namespace ngraph;
codegen::CodeWriter::CodeWriter()
: indent(0)
, m_pending_indent(true)
, m_temporary_name_count(0)
{
}
string codegen::CodeWriter::get_code() const
{
return m_ss.str();
}
void codegen::CodeWriter::operator+=(const std::string& s)
{
*this << s;
}
std::string codegen::CodeWriter::generate_temporary_name(std::string prefix)
{
std::stringstream ss;
ss << prefix << m_temporary_name_count;
m_temporary_name_count++;
return ss.str();
}
//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once

#include <sstream>
#include <string>

namespace ngraph
{
    namespace codegen
    {
        class CodeWriter;
    }
}

class ngraph::codegen::CodeWriter
{
public:
    CodeWriter();
    std::string get_code() const;
    void operator+=(const std::string&);

    size_t indent;

    template <typename T>
    friend CodeWriter& operator<<(CodeWriter& out, const T& obj)
    {
        std::stringstream ss;
        ss << obj;

        for (char c : ss.str())
        {
            if (c == '\n')
            {
                out.m_pending_indent = true;
            }
            else
            {
                if (out.m_pending_indent)
                {
                    out.m_pending_indent = false;
                    for (size_t i = 0; i < out.indent; i++)
                    {
                        out.m_ss << "    ";
                    }
                }
            }
            out.m_ss << c;
        }

        return out;
    }

    std::string generate_temporary_name(std::string prefix = "tempvar");

    void block_begin()
    {
        *this << "{\n";
        indent++;
    }

    void block_end()
    {
        indent--;
        *this << "}\n";
    }

private:
    std::stringstream m_ss;
    bool m_pending_indent;
    size_t m_temporary_name_count;
};
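
Since the class is self-contained (a std::stringstream plus indent tracking), its behavior is easy to demonstrate. A small usage sketch against the header above, with the output it would produce (this uses the IntelGPU copy's codegen namespace; after this commit the same code would say ngraph::CodeWriter):

    #include <iostream>

    #include "code_writer.hpp" // the header shown above

    int main()
    {
        ngraph::codegen::CodeWriter writer;

        writer << "void " << writer.generate_temporary_name("func") << "()\n";
        writer.block_begin();     // emits "{\n" and increases the indent
        writer << "int x = 0;\n"; // printed with one level (four spaces) of indent
        writer.block_end();       // decreases the indent and emits "}\n"

        std::cout << writer.get_code();
        // Prints:
        // void func0()
        // {
        //     int x = 0;
        // }
        return 0;
    }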
@@ -20,7 +20,7 @@
 #include <CPP/scale.hpp>
 #include <CPP/split.hpp>

-#include "ngraph/runtime/intelgpu/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_batchnorm.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
@@ -65,7 +65,7 @@ void runtime::intelgpu::do_create_mean(cldnn::topology& topology,
     const string entry_point_name = "create_mean_" + output_name;
     const size_t output_counts = shape_size<Shape>(input_shape) / input_shape.at(channel_axis);
     const string kernel_data_type = get_opencl_type_name(output_type);
-    codegen::CodeWriter writer;
+    CodeWriter writer;

     writer << "__kernel void " << entry_point_name << "( const __global " << kernel_data_type
            << " input" << array_dims(input_shape) << ", __global " << kernel_data_type << " output"
@@ -141,7 +141,7 @@ void runtime::intelgpu::do_create_variance(cldnn::topology& topology,
     const string entry_point_name = "create_variance_" + output_name;
     const size_t output_counts = shape_size<Shape>(input_shape) / input_shape.at(channel_axis);
     const string kernel_data_type = get_opencl_type_name(output_type);
-    codegen::CodeWriter writer;
+    CodeWriter writer;

     writer << "__kernel void " << entry_point_name << "( const __global " << kernel_data_type
            << " input" << array_dims(input_shape) << ", const __global " << kernel_data_type
@@ -221,7 +221,7 @@ void runtime::intelgpu::do_batch_norm_operation(cldnn::topology& topology,
     const vector<size_t> gws(input_shape.begin(), input_shape.begin() + 2);
     const string entry_point_name = "batch_norm_" + output_name;
     const string kernel_data_type = get_opencl_type_name(output_type);
-    codegen::CodeWriter writer;
+    CodeWriter writer;

     // The kernel name and parameters
     writer << "__attribute__((reqd_work_group_size(1,1,1)))\n"
@@ -293,7 +293,7 @@ void runtime::intelgpu::do_create_variance_back(cldnn::topology& topology,
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, channel_shape);
     const string entry_point_name = "create_variance_back_" + output_name;
     const string kernel_data_type = get_opencl_type_name(output_type);
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     writer << "__kernel void " << entry_point_name << "(const __global " << kernel_data_type
@@ -374,7 +374,7 @@ void runtime::intelgpu::do_batch_norm_backprop_operation(cldnn::topology& topolo
     const string entry_point_name = "batch_norm_backprop_" + output_name;
     const size_t r_axes_size = shape_size(shape) / shape_size(channel_shape);
     const string kernel_data_type = get_opencl_type_name(type);
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     writer << "__kernel void " << entry_point_name << "(const __global " << kernel_data_type
...
@@ -18,7 +18,7 @@
 #include <CPP/custom_gpu_primitive.hpp>
 #include <CPP/reshape.hpp>

-#include "ngraph/runtime/intelgpu/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_broadcast.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
@@ -42,7 +42,7 @@ static void do_sum_to_scalar_operation(cldnn::topology& topology,
     const string output_type_str = runtime::intelgpu::get_opencl_type_name(output_type);
     const size_t main_loop_count = shape_size(input_shape);
     const size_t vect_channels = 32;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws = {32};
     vector<size_t> lws = {vect_channels};
@@ -113,7 +113,7 @@ void runtime::intelgpu::do_bcast_sum_operation(cldnn::topology& topology,
 {
     string function_name = is_bcast ? "broadcast_" : "sum_";
     function_name += output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     runtime::intelgpu::gen_func_def(writer,
@@ -196,7 +196,7 @@ void runtime::intelgpu::do_max_min_operation(cldnn::topology& topology,
     const size_t input_size = shape_size<Shape>(input_shape);
     const string& init_value = get_opencl_type_min_max_value(output_type, !is_min);
     const string& operation = is_min ? " < " : " > ";
-    codegen::CodeWriter writer;
+    CodeWriter writer;

     runtime::intelgpu::gen_func_def(writer,
                                     function_name,
@@ -277,7 +277,7 @@ void runtime::intelgpu::do_product_operation(cldnn::topology& topology,
 {
     const string function_name = "product_" + output_name;
     const size_t input_size = shape_size<Shape>(input_shape);
-    codegen::CodeWriter writer;
+    CodeWriter writer;

     runtime::intelgpu::gen_func_def(writer,
                                     function_name,
...
@@ -16,7 +16,7 @@
 #include <CPP/custom_gpu_primitive.hpp>

-#include "ngraph/runtime/intelgpu/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_convolution.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
@@ -114,7 +114,7 @@ void runtime::intelgpu::do_convolution_operation(cldnn::topology& topology,
     const Shape input_data(input_shape.cbegin() + 2, input_shape.cend());
     const Shape filter_data(filter_shape.cbegin() + 2, filter_shape.cend());
     const Shape output_data(output_shape.cbegin() + 2, output_shape.cend());
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     writer << "__kernel void " << entry_point_name << "(const __global " << kernel_type_name
...
@@ -16,7 +16,7 @@
 #include <CPP/custom_gpu_primitive.hpp>

-#include "ngraph/runtime/intelgpu/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_custom_func_call.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
@@ -37,7 +37,7 @@ void runtime::intelgpu::do_all_any_op(cldnn::topology& topology,
     const string entry_point_name = "custom_op_all_any_" + output_name;
     const string kernel_type_name = get_opencl_type_name(output_type);
     const size_t input_size = shape_size<Shape>(input0_shape);
-    codegen::CodeWriter writer;
+    CodeWriter writer;

     // The kernel name and parameters
     gen_func_def(writer,
...
...@@ -135,7 +135,7 @@ string runtime::intelgpu::access_dims(const Shape& dimentions, ...@@ -135,7 +135,7 @@ string runtime::intelgpu::access_dims(const Shape& dimentions,
return buffer.str(); return buffer.str();
} }
void runtime::intelgpu::gen_func_def(codegen::CodeWriter& writer, void runtime::intelgpu::gen_func_def(CodeWriter& writer,
const string& entry_point_name, const string& entry_point_name,
const vector<string>& input_types, const vector<string>& input_types,
const vector<Shape>& input_shapes, const vector<Shape>& input_shapes,
...@@ -157,9 +157,8 @@ void runtime::intelgpu::gen_func_def(codegen::CodeWriter& writer, ...@@ -157,9 +157,8 @@ void runtime::intelgpu::gen_func_def(codegen::CodeWriter& writer,
writer << ", __global " << output_type << " output" << array_dims(output_shape) << ")\n"; writer << ", __global " << output_type << " output" << array_dims(output_shape) << ")\n";
} }
vector<size_t> runtime::intelgpu::generate_loops(codegen::CodeWriter& writer, vector<size_t>
const Shape& shape, runtime::intelgpu::generate_loops(CodeWriter& writer, const Shape& shape, bool is_begin)
bool is_begin)
{ {
const size_t cldnn_gws_lim = 3; const size_t cldnn_gws_lim = 3;
vector<size_t> gws; vector<size_t> gws;
@@ -200,7 +199,7 @@ vector<size_t> runtime::intelgpu::generate_loops(codegen::CodeWriter& writer,
     return gws;
 }

-vector<size_t> runtime::intelgpu::generate_loops_w_axes(codegen::CodeWriter& writer,
+vector<size_t> runtime::intelgpu::generate_loops_w_axes(CodeWriter& writer,
                                                         const Shape& shape,
                                                         bool is_begin,
                                                         const AxisSet& axis,
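The reflowed generate_loops above is a formatting-only change; callers still invoke it twice, once with is_begin = true to open a loop nest (collecting the global work sizes that size the cldnn primitive) and once with is_begin = false to close it. A hedged sketch of that calling pattern follows; the kernel body and loop-variable name are placeholders, not taken from the diff.

// Sketch of the paired generate_loops calls used throughout this file.
#include "ngraph/code_writer.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
#include "ngraph/shape.hpp"

#include <vector>

using namespace ngraph;

std::vector<size_t> emit_loop_nest(CodeWriter& writer, const Shape& output_shape)
{
    // is_begin = true: opens one loop (or global-id dimension) per output
    // axis and returns the global work sizes for the custom primitive.
    std::vector<size_t> gws = runtime::intelgpu::generate_loops(writer, output_shape, true);

    writer << "output[i0] = 0.0f;\n"; // placeholder body; real kernels
                                      // index with the generated loop vars

    // is_begin = false: closes everything opened above.
    runtime::intelgpu::generate_loops(writer, output_shape, false);

    return gws;
}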
@@ -327,7 +326,7 @@ void runtime::intelgpu::do_pad_operation(cldnn::topology& topology,
 {
     const string entry_point_name = "op_pad_" + output_name;
     const size_t cldnn_gws_lim = 3;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     // The kernel name and parameters
@@ -413,7 +412,7 @@ void runtime::intelgpu::do_pad_operation(cldnn::topology& topology,
     topology.add(op_pad);
 }

-static void gen_window_loop(codegen::CodeWriter& writer,
+static void gen_window_loop(CodeWriter& writer,
                             const Shape& output_shape,
                             const Shape& win_shape,
                             const Shape& win_stride,
@@ -474,7 +473,7 @@ void runtime::intelgpu::do_max_pool_backprop_operation(cldnn::topology& topology
     const string type_name = get_opencl_type_name(output_type);
     const Shape delta_data(delta_shape.cbegin() + 2, delta_shape.cend());
     const Shape output_data(output_shape.cbegin() + 2, output_shape.cend());
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     // The kernel name and parameters
@@ -629,7 +628,7 @@ void runtime::intelgpu::do_max_avg_pool_operation(cldnn::topology& topology,
     const string entry_point_name = "op_pool_" + to_string(is_max_pool) + "_" + output_name;
     const string type_name = get_opencl_type_name(output_type);
     const string init_accumulator = is_max_pool ? "-FLT_MAX" : def_val;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     const Shape input_data(input_shape.cbegin() + 2, input_shape.cend());
@@ -789,7 +788,7 @@ void runtime::intelgpu::do_avg_pool_backprop_operation(cldnn::topology& topology
 {
     const string entry_point_name = "op_avg_pool_backprop_" + output_name;
     const string type_name = get_opencl_type_name(output_type);
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     const Shape delta_data(delta_shape.cbegin() + 2, delta_shape.cend());
@@ -928,7 +927,7 @@ void runtime::intelgpu::do_dot_operation(cldnn::topology& topology,
     const size_t input0_axes = input0_shape.size() - reduction_axes_count;
     size_t var_idx = reduction_axes_count;
     Shape reduction_shape;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     for (auto it = input1_shape.begin(); (it != input1_shape.end()) && (var_idx > 0); ++it)
@@ -1045,7 +1044,7 @@ void runtime::intelgpu::do_slice_operation(cldnn::topology& topology,
 {
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
     const string entry_point_name = "slice_" + output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer,
@@ -1092,7 +1091,7 @@ void runtime::intelgpu::do_select_operation(cldnn::topology& topology,
 {
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
     const string entry_point_name = "select_" + output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer,
@@ -1140,7 +1139,7 @@ void runtime::intelgpu::do_logic_kernel(cldnn::topology& topology,
 {
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
     const string entry_point_name = "logic_" + output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer,
@@ -1188,7 +1187,7 @@ void runtime::intelgpu::do_eltwise_kernel(cldnn::topology& topology,
 {
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
     const string entry_point_name = "eltwise_" + output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer,
@@ -1251,7 +1250,7 @@ void runtime::intelgpu::do_reverse_operation(cldnn::topology& topology,
 {
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
     const string entry_point_name = "reverse_" + output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer, entry_point_name, {"float"}, {input_shape}, "float", output_shape);
@@ -1287,7 +1286,7 @@ void runtime::intelgpu::do_not_operation(cldnn::topology& topology,
 {
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
     const string entry_point_name = "logic_" + output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer, entry_point_name, {"char"}, {input_shape}, "char", output_shape);
@@ -1325,7 +1324,7 @@ void runtime::intelgpu::do_one_hot_operation(cldnn::topology& topology,
 {
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
     const string entry_point_name = "one_hot_" + output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer,
@@ -1390,7 +1389,7 @@ void runtime::intelgpu::do_convert_operation(cldnn::topology& topology,
     const string entry_point_name = "convert_" + output_name;
     const string& input_type_name = get_opencl_type_name(input_type);
     const string& output_type_name = get_opencl_type_name(output_type);
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(
@@ -1428,7 +1427,7 @@ void runtime::intelgpu::do_sigmoid_backprop_operation(cldnn::topology& topology,
                                                       const element::Type& output_type)
 {
     const string entry_point_name = "op_sigmoid_backprop_" + output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(
@@ -1470,7 +1469,7 @@ void runtime::intelgpu::do_custom_unary_operation(cldnn::topology& topology,
 {
     const string entry_point_name = "op_custom_unary_" + output_name;
     const string intermidiate_type = input_type.size() < 8 ? "float" : "double";
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer,
@@ -1523,7 +1522,7 @@ void runtime::intelgpu::do_arg_max_min_operation(cldnn::topology& topology,
     const string operation_name = is_max ? "max" : "min";
     const string entry_point_name = "op_arg_" + operation_name + "_" + output_name;

-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     const string operation_sign = is_max ? " > " : " < ";
@@ -1605,7 +1604,7 @@ void runtime::intelgpu::do_reshape_operation(cldnn::topology& topology,
     const string& input_type_name = get_opencl_type_name(input_type);
     const string& output_type_name = get_opencl_type_name(output_type);
     const size_t dst_shape_size = shape_size(output_shape);
-    codegen::CodeWriter writer;
+    CodeWriter writer;

     gen_func_def(writer,
                  entry_point_name,
@@ -1669,7 +1668,7 @@ void runtime::intelgpu::do_quantize_operation(cldnn::topology& topology,
     const string entry_point_name = "quantize_" + output_name;
     const string real_type_str = get_opencl_type_name(input0_type);
     const string quant_type_str = get_opencl_type_name(output_type);
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer,
@@ -1797,7 +1796,7 @@ void runtime::intelgpu::do_dequantize_operation(cldnn::topology& topology,
 {
     const cldnn::layout layout = IntelGPULayout::create_cldnn_layout(output_type, output_shape);
     const string entry_point_name = "dequantize_" + output_name;
-    codegen::CodeWriter writer;
+    CodeWriter writer;
     vector<size_t> gws;

     gen_func_def(writer,
...
@@ -18,7 +18,7 @@

 #include <CPP/topology.hpp>

-#include "ngraph/runtime/intelgpu/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/axis_set.hpp"
 #include "ngraph/axis_vector.hpp"
@@ -245,14 +245,14 @@ namespace ngraph
                                     const AxisSet& axis = {},
                                     bool is_reversed = false);
             std::vector<size_t>
-                generate_loops(codegen::CodeWriter& writer, const Shape& shape, bool is_begin);
+                generate_loops(CodeWriter& writer, const Shape& shape, bool is_begin);
             std::vector<size_t>
-                generate_loops_w_axes(codegen::CodeWriter& writer,
+                generate_loops_w_axes(CodeWriter& writer,
                                       const Shape& shape,
                                       bool is_begin,
                                       const AxisSet& axis = {},
                                       const std::string& expression = std::string());
-            void gen_func_def(codegen::CodeWriter& writer,
+            void gen_func_def(CodeWriter& writer,
                               const std::string& entry_point_name,
                               const std::vector<std::string>& input_types,
                               const std::vector<Shape>& input_shapes,
...
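With the header moved to the ngraph root, callers of these helpers only need the new include path. A sketch of a gen_func_def call under the signature declared above, mirroring the do_reverse_operation hunk earlier in the diff; the entry-point name and shapes are hypothetical.

// Sketch only: emits a kernel signature via gen_func_def after the move.
#include "ngraph/code_writer.hpp"
#include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"

#include <string>

using namespace ngraph;

std::string emit_signature_example()
{
    CodeWriter writer;
    const Shape input_shape{4, 8};
    const Shape output_shape{4, 8};

    // One "float" input of shape {4, 8} and one "float" output of the
    // same shape; gen_func_def writes the __kernel signature into writer.
    runtime::intelgpu::gen_func_def(
        writer, "example_kernel", {"float"}, {input_shape}, "float", output_shape);

    return writer.get_code();
}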
@@ -16,7 +16,7 @@

 #include <CPP/custom_gpu_primitive.hpp>

-#include "ngraph/runtime/intelgpu/code_writer.hpp"
+#include "ngraph/code_writer.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_layout.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_custom_kernels.hpp"
 #include "ngraph/runtime/intelgpu/intelgpu_op_softmax.hpp"
@@ -61,8 +61,8 @@ void runtime::intelgpu::do_softmax_operation(cldnn::topology& topology,
     const string expression = "output" + access_dims(input_shape, "i", axes) + " = 0.0f;\n";
     const Shape new_shape = shape_dims(output_shape, axes);
     const cldnn::layout layout_middle = IntelGPULayout::create_cldnn_layout(output_type, new_shape);
-    codegen::CodeWriter writer0;
-    codegen::CodeWriter writer1;
+    CodeWriter writer0;
+    CodeWriter writer1;
     vector<size_t> gws;

     writer0 << "__kernel void " << entry_point_middle_name << "(const __global "
...
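Softmax is the one lowering here that keeps two writers: the first kernel reduces over the softmax axes into a middle buffer (layout_middle above) and the second normalizes against it, so each CodeWriter accumulates one kernel's source. A rough sketch of the shape of that pattern; the kernel bodies are placeholders.

// Sketch only: two CodeWriters, one per generated OpenCL kernel.
#include "ngraph/code_writer.hpp"

#include <string>
#include <utility>

std::pair<std::string, std::string> emit_softmax_pair()
{
    ngraph::CodeWriter writer0; // reduction over the softmax axes
    ngraph::CodeWriter writer1; // normalization against the reduction

    writer0 << "__kernel void softmax_middle_example(/* ... */) {}\n";
    writer1 << "__kernel void softmax_example(/* ... */) {}\n";

    // Each source string becomes its own cldnn custom primitive, with
    // the first kernel's output feeding the second.
    return {writer0.get_code(), writer1.get_code()};
}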