Unverified Commit 308af86d authored by Amy Zhuang, committed by GitHub

[MLIR] Use sane utilities to handle environment variables. (#4365)

* [MLIR] Use sane utilities to handle environment variables.

* Fix a bug.
Co-authored-by: Scott Cyphers <diyessi@users.noreply.github.com>
parent d2be2c76
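The change swaps raw std::getenv/std::atoi handling for the helpers declared in ngraph/env_util.hpp: getenv_string, getenv_int, and getenv_bool. That header is not part of this diff, so the following is only a minimal sketch of the behavior the call sites below rely on (an empty string, a -1 sentinel, and false, respectively, when a variable is unset); the real implementation may differ in details such as which spellings count as true.

#include <cstdint>
#include <cstdlib>
#include <string>

namespace env_sketch
{
    // Value of the variable, or "" when it is not set.
    std::string getenv_string(const char* name)
    {
        const char* value = std::getenv(name);
        return value ? std::string(value) : std::string();
    }

    // Variable parsed as an integer; the default (-1, the sentinel the MLIR
    // opt-level check below tests) when it is unset or not a number.
    int32_t getenv_int(const char* name, int32_t default_value = -1)
    {
        const char* value = std::getenv(name);
        if (value == nullptr)
        {
            return default_value;
        }
        char* end = nullptr;
        const long parsed = std::strtol(value, &end, 10);
        return (end == value) ? default_value : static_cast<int32_t>(parsed);
    }

    // Any non-empty value other than "0"/"false" counts as true; unset means false.
    bool getenv_bool(const char* name)
    {
        const std::string value = getenv_string(name);
        return !value.empty() && value != "0" && value != "false" && value != "FALSE";
    }
}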
......@@ -21,6 +21,7 @@
#include "contrib/mlir/backend/pass/affine_lowerer.hpp"
#include "contrib/mlir/utils.hpp"
#include "ngraph/check.hpp"
#include "ngraph/env_util.hpp"
#include <llvm/ADT/STLExtras.h>
#include <llvm/Analysis/TargetTransformInfo.h>
......@@ -144,9 +145,10 @@ void MLIRCPUBackend::init()
if (!initialized)
{
// Override default optimization level with macro value.
- if (char* optLevelStr = std::getenv("NGRAPH_MLIR_OPT_LEVEL"))
+ int32_t clOptLevel = getenv_int("NGRAPH_MLIR_OPT_LEVEL");
+ // -1 is the value returned if the env variable is not set
+ if (clOptLevel != -1)
{
- unsigned clOptLevel = std::stoi(optLevelStr);
NGRAPH_CHECK(clOptLevel >= 0 && clOptLevel <= 3, "Invalid optimization level");
mlirOptLevel = (llvm::CodeGenOpt::Level)clOptLevel;
}
......
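With the -1 sentinel, leaving NGRAPH_MLIR_OPT_LEVEL unset keeps the backend's default mlirOptLevel, NGRAPH_MLIR_OPT_LEVEL=2 maps to llvm::CodeGenOpt::Default (level 2), and an out-of-range setting such as 7 still fails the NGRAPH_CHECK on the optimization level.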
......@@ -19,6 +19,7 @@
#include "mlir_subgraph_extraction.hpp"
#include "ngraph/assertion.hpp"
#include "ngraph/env_util.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/node.hpp"
......@@ -477,7 +478,7 @@ bool MLIRSubgraphExtractionPass::is_supported_mlir_op(std::shared_ptr<Node> node
if (auto softmax = as_type_ptr<ngraph::op::Softmax>(node))
{
// Softmax is only supported through callback
if (std::getenv("NGRAPH_MLIR_CALLBACK") == nullptr)
if (!getenv_bool("NGRAPH_MLIR_CALLBACK"))
{
return false;
}
......@@ -491,7 +492,7 @@ bool MLIRSubgraphExtractionPass::is_supported_mlir_op(std::shared_ptr<Node> node
if (auto avg_pool = as_type_ptr<ngraph::op::AvgPool>(node))
{
// AvgPool is only supported through callback
if (std::getenv("NGRAPH_MLIR_CALLBACK") == nullptr)
if (!getenv_bool("NGRAPH_MLIR_CALLBACK"))
{
return false;
}
......@@ -506,7 +507,7 @@ bool MLIRSubgraphExtractionPass::is_supported_mlir_op(std::shared_ptr<Node> node
if (auto avg_pool_backprop = as_type_ptr<ngraph::op::AvgPoolBackprop>(node))
{
// AvgPoolBackprop is only supported through callback
if (std::getenv("NGRAPH_MLIR_CALLBACK") == nullptr)
if (!getenv_bool("NGRAPH_MLIR_CALLBACK"))
{
return false;
}
......@@ -521,7 +522,7 @@ bool MLIRSubgraphExtractionPass::is_supported_mlir_op(std::shared_ptr<Node> node
if (auto max_pool_backprop = as_type_ptr<ngraph::op::MaxPoolBackprop>(node))
{
// MaxPoolBackprop is only supported through callback
if (std::getenv("NGRAPH_MLIR_CALLBACK") == nullptr)
if (!getenv_bool("NGRAPH_MLIR_CALLBACK"))
{
return false;
}
......@@ -536,7 +537,7 @@ bool MLIRSubgraphExtractionPass::is_supported_mlir_op(std::shared_ptr<Node> node
if (auto max_pool = as_type_ptr<ngraph::op::MaxPool>(node))
{
// MaxPool is only supported through callback
if (std::getenv("NGRAPH_MLIR_CALLBACK") == nullptr)
if (!getenv_bool("NGRAPH_MLIR_CALLBACK"))
{
return false;
}
......@@ -551,7 +552,7 @@ bool MLIRSubgraphExtractionPass::is_supported_mlir_op(std::shared_ptr<Node> node
if (is_type<ngraph::op::MatMul>(node))
{
// MatMul is only supported through callback
if (std::getenv("NGRAPH_MLIR_CALLBACK") == nullptr)
if (!getenv_bool("NGRAPH_MLIR_CALLBACK"))
{
return false;
}
......@@ -560,7 +561,7 @@ bool MLIRSubgraphExtractionPass::is_supported_mlir_op(std::shared_ptr<Node> node
if (is_type<ngraph::op::Gemm>(node))
{
// Gemm is only supported through callback
if (std::getenv("NGRAPH_MLIR_CALLBACK") == nullptr)
if (!getenv_bool("NGRAPH_MLIR_CALLBACK"))
{
return false;
}
......
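Taken together, Softmax, AvgPool, AvgPoolBackprop, MaxPoolBackprop, MaxPool, MatMul, and Gemm are only admitted into MLIR subgraphs when NGRAPH_MLIR_CALLBACK is set. Assuming getenv_bool treats values such as 0 as false, explicitly exporting NGRAPH_MLIR_CALLBACK=0 now behaves like leaving the variable unset, whereas the old nullptr check enabled the callbacks for any set value.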
......@@ -15,6 +15,7 @@
//*****************************************************************************
#include "ngraph/runtime/cache.hpp"
#include "ngraph/env_util.hpp"
using namespace ngraph;
using namespace std;
......@@ -22,14 +23,14 @@ using namespace std;
// Constructor
runtime::LRUCache::LRUCache()
{
- char* cache_size = getenv("NGRAPH_CACHE_SIZE");
- if (cache_size == nullptr)
+ int32_t cache_size = getenv_int("NGRAPH_CACHE_SIZE");
+ if (cache_size <= 0)
{
m_cache_size = 1024; // TODO(nbpatel): Figure out a default size for the cache
}
else
{
- m_cache_size = atoi(cache_size);
+ m_cache_size = cache_size;
}
m_map = {};
......
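When NGRAPH_CACHE_SIZE is unset, getenv_int's -1 sentinel falls into the <= 0 branch and the 1024-entry default applies; a positive setting such as NGRAPH_CACHE_SIZE=2048 is used directly, and non-positive values are now rejected instead of being passed straight through atoi.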
......@@ -21,6 +21,7 @@
#include "cpu_backend_visibility.h"
#include "ngraph/component_manager.hpp"
#include "ngraph/env_util.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/runtime/backend_manager.hpp"
......@@ -94,7 +95,7 @@ shared_ptr<runtime::Executable>
bool performance_counters_enabled)
{
#ifdef NGRAPH_MLIR_ENABLE
if (std::getenv("NGRAPH_MLIR") != nullptr)
if (getenv_bool("NGRAPH_MLIR"))
{
// Initialize MLIR compiler
ngmlir::MLIRCompiler::init();
......
......@@ -17,6 +17,7 @@
#include <algorithm>
#include <thread>
#include "ngraph/env_util.hpp"
#include "ngraph/runtime/aligned_buffer.hpp"
#include "ngraph/runtime/cpu/cpu_call_frame.hpp"
#include "ngraph/runtime/cpu/cpu_external_function.hpp"
......@@ -37,14 +38,14 @@ runtime::cpu::CPU_CallFrame::CPU_CallFrame(std::shared_ptr<CPU_ExternalFunction>
, m_compiled_destroy_ctx_func(compiled_destroy_ctx_func)
, m_compiled_function(compiled_function)
{
- const auto envConcurrency = std::getenv("NGRAPH_CPU_CONCURRENCY");
- m_num_ctx = envConcurrency == nullptr ? 1 : std::atoi(envConcurrency);
+ const auto envConcurrency = getenv_int("NGRAPH_CPU_CONCURRENCY");
+ m_num_ctx = envConcurrency <= 0 ? 1 : envConcurrency;
if (m_num_ctx > std::thread::hardware_concurrency())
{
throw ngraph_error(
"Unexpected value specified for NGRAPH_CPU_CONCURRENCY "
"(" +
- std::string(envConcurrency) + "). Please specify a value in range [1-" +
+ std::to_string(envConcurrency) + "). Please specify a value in range [1-" +
std::to_string(std::thread::hardware_concurrency()) + "]");
}
......@@ -234,14 +235,13 @@ void runtime::cpu::CPU_CallFrame::setup_runtime_context(Allocator* allocator)
ctx->states = m_external_function->m_states.data();
#if defined(NGRAPH_TBB_ENABLE)
- if (m_external_function->is_direct_execution() &&
-     std::getenv("NGRAPH_CPU_USE_TBB") != nullptr)
+ if (m_external_function->is_direct_execution() && getenv_bool("NGRAPH_CPU_USE_TBB"))
{
// For codegen mode, graph and global control are now part of the code generated
// CPURuntimeContextCG class.
ctx->G = new tbb::flow::graph;
- const auto envParallelism = std::getenv("NGRAPH_INTER_OP_PARALLELISM");
- const auto parallelism = envParallelism == nullptr ? 1 : std::atoi(envParallelism);
+ const auto envParallelism = getenv_int("NGRAPH_INTER_OP_PARALLELISM");
+ const auto parallelism = envParallelism <= 0 ? 1 : envParallelism;
ctx->c =
new tbb::global_control(tbb::global_control::max_allowed_parallelism, parallelism);
}
......@@ -281,8 +281,7 @@ void runtime::cpu::CPU_CallFrame::cleanup_runtime_context()
}
#if defined(NGRAPH_TBB_ENABLE)
- if (m_external_function->is_direct_execution() &&
-     std::getenv("NGRAPH_CPU_USE_TBB") != nullptr)
+ if (m_external_function->is_direct_execution() && getenv_bool("NGRAPH_CPU_USE_TBB"))
{
// For codegen mode, graph and global control are now part of a code generated
// CPURuntimeContext class.
......
......@@ -15,6 +15,7 @@
//*****************************************************************************
#include "ngraph/runtime/cpu/cpu_debug_tracer.hpp"
#include "ngraph/env_util.hpp"
using namespace std;
using namespace ngraph;
......@@ -22,8 +23,8 @@ using namespace ngraph;
runtime::cpu::CPU_DebugTracer::CPU_DebugTracer()
: m_serial_number(0)
{
- static const auto debug_t = std::getenv("NGRAPH_CPU_DEBUG_TRACER");
- if (debug_t != nullptr)
+ static const auto debug_t = getenv_bool("NGRAPH_CPU_DEBUG_TRACER");
+ if (debug_t)
{
m_enable_tracing = true;
......@@ -38,15 +39,15 @@ void runtime::cpu::CPU_DebugTracer::init_streams()
return;
}
- static auto trace_file_path = std::getenv("NGRAPH_CPU_TRACER_LOG");
- static auto trace_bin_file_path = std::getenv("NGRAPH_CPU_BIN_TRACER_LOG");
- if (trace_file_path == nullptr)
+ static auto trace_file_path = getenv_string("NGRAPH_CPU_TRACER_LOG");
+ static auto trace_bin_file_path = getenv_string("NGRAPH_CPU_BIN_TRACER_LOG");
+ if (trace_file_path.empty())
{
- trace_file_path = const_cast<char*>("trace_meta.log");
+ trace_file_path = "trace_meta.log";
}
- if (trace_bin_file_path == nullptr)
+ if (trace_bin_file_path.empty())
{
- trace_bin_file_path = const_cast<char*>("trace_bin_data.log");
+ trace_bin_file_path = "trace_bin_data.log";
}
m_tracer_stream.open(trace_file_path, ios_base::out | ios_base::ate);
......
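Because getenv_string returns a std::string, the fallback log names can be plain string literals rather than const_cast'ed char pointers, and the subsequent open calls on the trace streams accept a std::string directly (a C++11 overload).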
......@@ -22,6 +22,7 @@
#include <typeindex>
#include <unordered_map>
#include <vector>
#include "ngraph/env_util.hpp"
#include "ngraph/node.hpp"
#include "ngraph/op/abs.hpp"
#include "ngraph/op/acos.hpp"
......@@ -138,7 +139,7 @@
using namespace std;
using namespace ngraph;
- static bool s_use_ref_kernels = (std::getenv("NGRAPH_CPU_USE_REF_KERNELS") != nullptr);
+ static bool s_use_ref_kernels = getenv_bool("NGRAPH_CPU_USE_REF_KERNELS");
static string eigen_vector_format(const runtime::cpu::TensorViewWrapper& tvi)
{
......
......@@ -18,23 +18,24 @@
#include "cpu_executor.hpp"
#include "ngraph/env_util.hpp"
#include "ngraph/except.hpp"
#define MAX_PARALLELISM_THRESHOLD 2
static int GetNumCores()
{
- const auto omp_num_threads = std::getenv("OMP_NUM_THREADS");
- const auto ngraph_intra_op_parallelism = std::getenv("NGRAPH_INTRA_OP_PARALLELISM");
+ const auto omp_num_threads = ngraph::getenv_int("OMP_NUM_THREADS");
+ const auto ngraph_intra_op_parallelism = ngraph::getenv_int("NGRAPH_INTRA_OP_PARALLELISM");
int count = 0;
- if (omp_num_threads)
+ if (omp_num_threads > 0)
{
- count = std::atoi(omp_num_threads);
+ count = omp_num_threads;
}
- else if (ngraph_intra_op_parallelism)
+ else if (ngraph_intra_op_parallelism > 0)
{
- count = std::atoi(ngraph_intra_op_parallelism);
+ count = ngraph_intra_op_parallelism;
}
else
{
......@@ -56,12 +57,12 @@ static int GetNumCores()
static int GetNumThreadPools()
{
- const auto ngraph_inter_op_parallelism = std::getenv("NGRAPH_INTER_OP_PARALLELISM");
+ const auto ngraph_inter_op_parallelism = ngraph::getenv_int("NGRAPH_INTER_OP_PARALLELISM");
int count = 0;
- if (ngraph_inter_op_parallelism)
+ if (ngraph_inter_op_parallelism > 0)
{
- count = std::atoi(ngraph_inter_op_parallelism);
+ count = ngraph_inter_op_parallelism;
}
return count < 1 ? 1 : count;
......@@ -88,16 +89,17 @@ namespace ngraph
num_threads_per_pool = GetNumCores();
// User override
- char* eigen_tp_count = std::getenv("NGRAPH_CPU_EIGEN_THREAD_COUNT");
- if (eigen_tp_count != nullptr)
+ int32_t eigen_tp_count =
+     ngraph::getenv_int("NGRAPH_CPU_EIGEN_THREAD_COUNT");
+ if (eigen_tp_count > 0)
{
- const int tp_count = std::atoi(eigen_tp_count);
+ const int tp_count = eigen_tp_count;
if (tp_count < 1 || tp_count > GetNumCores())
{
throw ngraph_error(
"Unexpected value specified for NGRAPH_CPU_EIGEN_THREAD_COUNT "
"(" +
- std::string(eigen_tp_count) +
+ std::to_string(eigen_tp_count) +
"). Please specify a value in range [1-" +
std::to_string(GetNumCores()) + "]");
}
......
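The executor now works with integers throughout, which makes the precedence between the thread-count variables easier to see. Below is a minimal sketch of that precedence, reusing the assumed env_sketch helper from above; the final fallback branch is elided in the diff, so the hardware-based default here is only an assumption. NGRAPH_CPU_EIGEN_THREAD_COUNT can still override the per-pool count, but it must lie in [1, GetNumCores()].

#include <cstdint>
#include <thread>

static int get_num_cores_sketch()
{
    const int32_t omp_num_threads = env_sketch::getenv_int("OMP_NUM_THREADS");
    const int32_t intra_op = env_sketch::getenv_int("NGRAPH_INTRA_OP_PARALLELISM");
    int count = 0;
    if (omp_num_threads > 0)
    {
        count = omp_num_threads; // OMP_NUM_THREADS takes precedence when positive
    }
    else if (intra_op > 0)
    {
        count = intra_op; // otherwise NGRAPH_INTRA_OP_PARALLELISM
    }
    else
    {
        // Assumption: fall back to a hardware-derived core count.
        count = static_cast<int>(std::thread::hardware_concurrency());
    }
    return count < 1 ? 1 : count;
}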
......@@ -41,6 +41,7 @@
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/env_util.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/function.hpp"
#include "ngraph/graph_util.hpp"
......@@ -244,12 +245,11 @@ runtime::cpu::CPU_ExternalFunction::CPU_ExternalFunction(
, m_release_function(release_function)
, m_emit_timing(false)
#if defined(NGRAPH_TBB_ENABLE)
- , m_use_tbb(std::getenv("NGRAPH_CPU_USE_TBB") != nullptr)
+ , m_use_tbb(getenv_bool("NGRAPH_CPU_USE_TBB"))
#endif
#if !defined(NGRAPH_DEX_ONLY)
, m_is_compiled(false)
- , m_direct_execution((std::getenv("NGRAPH_CODEGEN") == nullptr) ||
-     (std::string(std::getenv("NGRAPH_CODEGEN")) == "0"))
+ , m_direct_execution(!getenv_bool("NGRAPH_CODEGEN"))
#else
, m_direct_execution(true)
#endif
......@@ -1005,14 +1005,14 @@ using namespace ngraph::runtime;
{
// check inputs and constants?
if ((!node->is_parameter() && !node->is_constant()) ||
std::getenv("NGRAPH_CPU_CHECK_PARMS_AND_CONSTS"))
getenv_bool("NGRAPH_CPU_CHECK_PARMS_AND_CONSTS"))
{
if (std::getenv("NGRAPH_CPU_NAN_CHECK"))
if (getenv_bool("NGRAPH_CPU_NAN_CHECK"))
{
generate_isnan_isinf_check(writer, node, out, "isnan");
}
if (std::getenv("NGRAPH_CPU_INF_CHECK"))
if (getenv_bool("NGRAPH_CPU_INF_CHECK"))
{
generate_isnan_isinf_check(writer, node, out, "isinf");
}
......@@ -1189,7 +1189,7 @@ void runtime::cpu::CPU_ExternalFunction::register_common_passes(
auto dex = is_direct_execution();
auto is_supported = [dex](const Node& node) {
#ifdef NGRAPH_MLIR_ENABLE
if (std::getenv("NGRAPH_MLIR") != nullptr && std::getenv("NGRAPH_MLIR_CALLBACK") != nullptr)
if (getenv_bool("NGRAPH_MLIR") && getenv_bool("NGRAPH_MLIR_CALLBACK"))
{
if (typeid(ngraph::op::MatMul) == typeid(node) &&
node.get_input_element_type(0) == element::f32)
......@@ -1294,7 +1294,7 @@ void runtime::cpu::CPU_ExternalFunction::register_common_passes(
REGISTER_KNOBBED_PASS(CPUPreFusion, true, runtime::cpu::pass)
// Disable CPUFusion if MLIR is enabled to preserve core ops.
if (std::getenv("NGRAPH_MLIR") == nullptr)
if (!getenv_bool("NGRAPH_MLIR"))
{
REGISTER_KNOBBED_PASS(CPUFusion, true, runtime::cpu::pass)
}
......@@ -1303,7 +1303,7 @@ void runtime::cpu::CPU_ExternalFunction::register_common_passes(
REGISTER_KNOBBED_PASS(CPUCollapseDims, true, runtime::cpu::pass)
#ifdef NGRAPH_MLIR_ENABLE
if (std::getenv("NGRAPH_MLIR") != nullptr)
if (getenv_bool("NGRAPH_MLIR"))
{
REGISTER_KNOBBED_PASS(MLIRSubgraphExtractionPass, /*enable by default*/ true, ngraph::pass)
}
......@@ -1456,7 +1456,7 @@ void runtime::cpu::CPU_ExternalFunction::build(ngraph::pass::PassConfig& pass_co
static StaticInitializers s_static_initializers(s_debug_dir);
m_mkldnn_emitter.reset(new MKLDNNEmitter());
ngraph::pass::Manager pass_manager;
if (std::getenv("NGRAPH_ENABLE_VISUALIZE_TRACING"))
if (getenv_bool("NGRAPH_ENABLE_VISUALIZE_TRACING"))
{
// Enable per_pass_validation if required for debug purpose
pass_manager.set_per_pass_validation(false);
......@@ -1465,7 +1465,7 @@ void runtime::cpu::CPU_ExternalFunction::build(ngraph::pass::PassConfig& pass_co
pass_manager.run_passes(m_function, false);
static runtime::cpu::CPU_DebugTracer debug_tracer;
if (std::getenv("NGRAPH_CPU_DEBUG_TRACER") != nullptr)
if (getenv_bool("NGRAPH_CPU_DEBUG_TRACER"))
{
debug_tracer.set_enable_tracing(true);
}
......@@ -1717,7 +1717,7 @@ void runtime::cpu::CPU_ExternalFunction::build(ngraph::pass::PassConfig& pass_co
m_perf_counters.emplace_back(node, 0, 0);
}
if ((std::getenv("NGRAPH_DEX_DEBUG") != nullptr))
if (getenv_bool("NGRAPH_DEX_DEBUG"))
{
string filename = file_util::path_join(s_debug_dir, m_function_name + "_debug.txt");
std::stringstream strm;
......@@ -1921,8 +1921,8 @@ void runtime::cpu::CPU_ExternalFunction::build(ngraph::pass::PassConfig& pass_co
else
#endif
{
- static const auto ddebug = std::getenv("NGRAPH_DEX_DEBUG");
- if (ddebug != nullptr)
+ static const auto ddebug = getenv_bool("NGRAPH_DEX_DEBUG");
+ if (ddebug)
{
if (ctx->first_iteration)
{
......
......@@ -18,6 +18,7 @@
#include <map>
#include "cpu_tracing.hpp"
#include "ngraph/env_util.hpp"
#ifndef NGRAPH_JSON_DISABLE
void ngraph::runtime::cpu::to_json(nlohmann::json& json, const TraceEvent& event)
......@@ -74,7 +75,7 @@ void ngraph::runtime::cpu::GenerateTimeline(const std::vector<OpAttributes>& op_
bool ngraph::runtime::cpu::IsTracingEnabled()
{
- static bool enabled = (std::getenv("NGRAPH_CPU_TRACING") != nullptr);
+ static bool enabled = getenv_bool("NGRAPH_CPU_TRACING");
return enabled;
}
#else
......
......@@ -16,6 +16,7 @@
#pragma once
#include "ngraph/env_util.hpp"
#include "ngraph/pass/graph_rewrite.hpp"
#include "ngraph/runtime/cpu/cpu_backend_visibility.h"
#include "ngraph/runtime/cpu/mkldnn_utils.hpp"
......@@ -88,7 +89,7 @@ public:
#if MKLDNN_VERSION_MAJOR < 1
construct_fuse_lstm_recurrent_state();
#endif
if (std::getenv("NGRAPH_DECONV_FUSE") != nullptr)
if (getenv_bool("NGRAPH_DECONV_FUSE"))
{
// Note: enable when the deconv perf is better than convbackpropdata
construct_deconvolution_affine_folding();
......
......@@ -25,6 +25,7 @@
#include "cpu_layout.hpp"
#include "ngraph/axis_vector.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/env_util.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/op/add.hpp"
......@@ -342,13 +343,8 @@ void set_layouts_binaryeltwise(ngraph::runtime::cpu::CPU_ExternalFunction* exter
{
vector<memory::desc> i_mds;
vector<memory::desc> o_mds;
- int select = 0;
- char* ngraph_pass_cpu_layout_eltwise = std::getenv("NGRAPH_PASS_CPU_LAYOUT_ELTWISE");
- if (ngraph_pass_cpu_layout_eltwise != nullptr)
- {
-     const int user_select = std::atoi(ngraph_pass_cpu_layout_eltwise);
-     select = (user_select == 0 || user_select == 1) ? user_select : select;
- }
+ const int32_t user_select = getenv_int("NGRAPH_PASS_CPU_LAYOUT_ELTWISE");
+ int select = (user_select == 0 || user_select == 1) ? user_select : 0;
i_mds.push_back(arg_mds[select]);
i_mds.push_back(arg_mds[select]);
o_mds.push_back(arg_mds[select]);
......
......@@ -28,6 +28,7 @@
#include "ngraph/descriptor/input.hpp"
#include "ngraph/descriptor/layout/dense_tensor_layout.hpp"
#include "ngraph/descriptor/output.hpp"
#include "ngraph/env_util.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/function.hpp"
#include "ngraph/graph_util.hpp"
......@@ -388,7 +389,7 @@ void runtime::gpu::GPUInternalFunction::emit()
m_runtime_constructor =
runtime::gpu::make_unique<GPURuntimeConstructor>(m_function_ordered_ops);
if (std::getenv("NGRAPH_GPU_TRACE"))
if (getenv_bool("NGRAPH_GPU_TRACE"))
{
m_trace = std::make_shared<CodeWriter>();
}
......