Commit 1c9a1996 authored by Diego Caballero's avatar Diego Caballero Committed by Scott Cyphers

[MLIR] Add `ngraph` prefix to MLIR flags (#3625)

* [MLIR] Add `ngraph` prefix to MLIR flags

Some flag names collide with existing MLIR flags.

* [MLIR] Add support for nGraph tensor type in parser

Initial commit that enables nGraph parsing. It's needed for testing.

* Rename ngraph print flag

* Rename ngraph dump mlir flags

* Clang format

* Revert "[MLIR] Add support for nGraph tensor type in parser"

This reverts commit ae371d6a5c8ea590322d5d3b9ba110159d4bf5fa.
parent c42ccb33
...@@ -85,7 +85,7 @@ using namespace ngraph::runtime::ngmlir; ...@@ -85,7 +85,7 @@ using namespace ngraph::runtime::ngmlir;
// *** Debug flags *** // *** Debug flags ***
static llvm::cl::opt<bool> clPrintIRAfterAll( static llvm::cl::opt<bool> clPrintIRAfterAll(
"print-ngraph-ir-after-all", "ngraph-print-ir-after-all",
llvm::cl::init(false), llvm::cl::init(false),
llvm::cl::desc( llvm::cl::desc(
"Print IR after transformation that are not implemented as passes in the MLIRCompiler. It " "Print IR after transformation that are not implemented as passes in the MLIRCompiler. It "
...@@ -99,37 +99,37 @@ static llvm::cl::opt<bool> clEnableNgInPlaceMemoryOpt( ...@@ -99,37 +99,37 @@ static llvm::cl::opt<bool> clEnableNgInPlaceMemoryOpt(
llvm::cl::desc("Enable ngraph dialect in-place memory optimization pass")); llvm::cl::desc("Enable ngraph dialect in-place memory optimization pass"));
static llvm::cl::opt<bool> static llvm::cl::opt<bool>
clEnableAffineLoopFusion("affine-loop-fusion", clEnableAffineLoopFusion("ngraph-affine-loop-fusion",
llvm::cl::init(false), llvm::cl::init(false),
llvm::cl::desc("Enable loop fusion optimization in Affine dialect")); llvm::cl::desc("Enable loop fusion optimization in Affine dialect"));
static llvm::cl::opt<bool> static llvm::cl::opt<bool>
clEnableAffineLoopTiling("affine-loop-tile", clEnableAffineLoopTiling("ngraph-affine-loop-tile",
llvm::cl::init(false), llvm::cl::init(false),
llvm::cl::desc("Enable loop tiling optimization in Affine dialect")); llvm::cl::desc("Enable loop tiling optimization in Affine dialect"));
static llvm::cl::opt<unsigned> static llvm::cl::opt<unsigned>
clLoopTilingCacheLevel("affine-loop-tile-cache-level", clLoopTilingCacheLevel("ngraph-affine-loop-tile-cache-level",
llvm::cl::init(2), llvm::cl::init(2),
llvm::cl::desc("Cache level to which to apply affine loop tiling.")); llvm::cl::desc("Cache level to which to apply affine loop tiling."));
static llvm::cl::opt<unsigned> clLoopTilingCacheSize( static llvm::cl::opt<unsigned> clLoopTilingCacheSize(
"affine-loop-tile-cache-size", "ngraph-affine-loop-tile-cache-size",
llvm::cl::init(0), llvm::cl::init(0),
llvm::cl::desc( llvm::cl::desc(
"Cache size to use in affine loop tiling. If not zero, it overrides the cache-size " "Cache size to use in affine loop tiling. If not zero, it overrides the cache-size "
"inferred from the host CPU using for the cache level specified by " "inferred from the host CPU using for the cache level specified by "
"-loop-tile-cache-level.")); "-ngraph-loop-tile-cache-level."));
// *** Debug flags *** // *** Debug flags ***
static llvm::cl::opt<bool> static llvm::cl::opt<bool>
clDumpObjectFile("dump-mlir-object-file", clDumpObjectFile("ngraph-dump-mlir-object-file",
llvm::cl::desc("Dump MLIR JITted-compiled object to file specified with " llvm::cl::desc("Dump MLIR JITted-compiled object to file specified with "
"-object-filename (<input file>.o by default).")); "-object-filename (<input file>.o by default)."));
static llvm::cl::opt<std::string> static llvm::cl::opt<std::string>
clObjectFilename("mlir-object-filename", clObjectFilename("ngraph-mlir-object-filename",
llvm::cl::desc("Dump MLIR JITted-compiled object to file jitted_mlir.o")); llvm::cl::desc("Dump MLIR JITted-compiled object to file jitted_mlir.o"));
#define COMPILE_OP_DECL(op_name) \ #define COMPILE_OP_DECL(op_name) \
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment