Unverified Commit 1c96c45c authored by Jayaram Bobba's avatar Jayaram Bobba Committed by GitHub

Merge branch 'master' into jbobba/batchnorm-layout

parents 7bad261f b0648401
...@@ -48,6 +48,7 @@ output/ ...@@ -48,6 +48,7 @@ output/
*.mpg *.mpg
*.cpio *.cpio
*.wav *.wav
*.backup
doc/source/generated doc/source/generated
.cache/ .cache/
nervana_aeon.egg-info/ nervana_aeon.egg-info/
......
This diff is collapsed.
This diff is collapsed.
...@@ -38,7 +38,7 @@ set (SRC ...@@ -38,7 +38,7 @@ set (SRC
ops/avg_pool.cpp ops/avg_pool.cpp
ops/batch_norm.cpp ops/batch_norm.cpp
ops/broadcast.cpp ops/broadcast.cpp
ops/concatenate.cpp ops/concat.cpp
ops/constant.cpp ops/constant.cpp
ops/convert.cpp ops/convert.cpp
ops/convolution.cpp ops/convolution.cpp
......
...@@ -274,6 +274,9 @@ std::unique_ptr<codegen::Module> ...@@ -274,6 +274,9 @@ std::unique_ptr<codegen::Module>
preprocessor_options.DisablePCHValidation = 0; preprocessor_options.DisablePCHValidation = 0;
} }
// Clear warnings and errors
m_compiler->getDiagnosticClient().clear();
// Map code filename to a memoryBuffer // Map code filename to a memoryBuffer
StringRef source_ref(source); StringRef source_ref(source);
unique_ptr<MemoryBuffer> buffer = MemoryBuffer::getMemBufferCopy(source_ref); unique_ptr<MemoryBuffer> buffer = MemoryBuffer::getMemBufferCopy(source_ref);
...@@ -408,24 +411,21 @@ void codegen::StaticCompiler::configure_search_path() ...@@ -408,24 +411,21 @@ void codegen::StaticCompiler::configure_search_path()
void codegen::StaticCompiler::load_headers_from_resource() void codegen::StaticCompiler::load_headers_from_resource()
{ {
const string builtin_root = "/$builtin";
HeaderSearchOptions& hso = m_compiler->getInvocation().getHeaderSearchOpts(); HeaderSearchOptions& hso = m_compiler->getInvocation().getHeaderSearchOpts();
PreprocessorOptions& preprocessor_options = m_compiler->getInvocation().getPreprocessorOpts(); PreprocessorOptions& preprocessor_options = m_compiler->getInvocation().getPreprocessorOpts();
std::set<std::string> header_search_paths; for (const string& search_path : builtin_search_paths)
for (const HeaderInfo& hi : header_info) {
string builtin = builtin_root + search_path;
hso.AddPath(builtin, clang::frontend::System, false, false);
}
for (const pair<string, string>& header_info : builtin_headers)
{ {
string search_path = hi.search_path; string absolute_path = header_info.first;
string absolute_path = file_util::path_join(search_path, hi.header_path); string builtin = builtin_root + absolute_path;
string builtin = "/$builtin" + absolute_path;
std::unique_ptr<llvm::MemoryBuffer> mb( std::unique_ptr<llvm::MemoryBuffer> mb(
llvm::MemoryBuffer::getMemBuffer(hi.header_data, builtin)); llvm::MemoryBuffer::getMemBuffer(header_info.second, builtin));
preprocessor_options.addRemappedFile(builtin, mb.release()); preprocessor_options.addRemappedFile(builtin, mb.release());
if (!contains(header_search_paths, search_path))
{
string builtin = "/$builtin" + search_path;
hso.AddPath(builtin, clang::frontend::System, false, false);
header_search_paths.insert(search_path);
}
} }
} }
......
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
#include "ngraph/ops/avg_pool.hpp" #include "ngraph/ops/avg_pool.hpp"
#include "ngraph/ops/broadcast.hpp" #include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/ceiling.hpp" #include "ngraph/ops/ceiling.hpp"
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/ops/constant.hpp" #include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp" #include "ngraph/ops/convert.hpp"
#include "ngraph/ops/convolution.hpp" #include "ngraph/ops/convolution.hpp"
......
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
#include <cassert> #include <cassert>
#include <memory> #include <memory>
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/ops/slice.hpp" #include "ngraph/ops/slice.hpp"
using namespace std; using namespace std;
......
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <cassert>
#include <memory>
#include "ngraph/ops/concatenate.hpp"
#include "ngraph/ops/slice.hpp"
using namespace std;
using namespace ngraph;
/// \brief Constructs a concatenation operation and computes its output shape.
///
/// \param args The nodes producing the input tensors.
/// \param concatenation_axis The axis along which to concatenate the inputs.
/// \throws ngraph_error if no arguments are given, the axis is out of bounds
///         for the first input's rank, or any input disagrees with the first
///         in rank, element type, or extent on a non-concatenation axis.
op::Concat::Concat(const Nodes& args, size_t concatenation_axis)
    : RequiresTensorViewArgs("Concat", args)
    , m_concatenation_axis(concatenation_axis)
{
    if (m_inputs.empty())
    {
        throw ngraph_error("At least one argument required");
    }

    auto& input_0 = get_inputs().at(0);
    auto input_0_shape = input_0.get_shape();
    if (m_concatenation_axis >= input_0_shape.size())
    {
        throw ngraph_error("Concatenation axis is out of bounds");
    }

    size_t concatenation_axis_length = input_0_shape.at(m_concatenation_axis);
    auto& input_0_element_type = input_0.get_element_type();

    // Validate every remaining input against input 0: same rank, same element
    // type, and same extent on every axis except the concatenation axis, whose
    // extents are summed to give the output extent.
    // Use size_t counters: `auto i = 1` would deduce int and produce a
    // signed/unsigned comparison against get_inputs().size().
    for (size_t i = 1; i < get_inputs().size(); i++)
    {
        auto& input_i = get_inputs().at(i);
        auto input_i_shape = input_i.get_shape();
        if (input_i_shape.size() != input_0_shape.size())
        {
            throw ngraph_error("Arguments to concat do not have same rank");
        }

        if (input_i.get_element_type() != input_0_element_type)
        {
            throw ngraph_error("Argument element types do not match");
        }

        for (size_t j = 0; j < input_i_shape.size(); j++)
        {
            if (j != m_concatenation_axis && input_0_shape.at(j) != input_i_shape.at(j))
            {
                throw ngraph_error(
                    "Arguments to concat do not have same dimension on a non-concatenation axis");
            }
            else if (j == m_concatenation_axis)
            {
                concatenation_axis_length += input_i_shape.at(j);
            }
        }
    }

    // Output shape: input 0's shape with the concatenation axis replaced by
    // the summed extent.
    vector<size_t> concatenated_shape = input_0_shape;
    concatenated_shape.at(m_concatenation_axis) = concatenation_axis_length;
    set_value_type_checked(make_shared<TensorViewType>(input_0_element_type, concatenated_shape));
}
/// \brief Backpropagation for Concat.
///
/// Each argument's adjoint is the slice of `delta` covering the region that
/// argument occupies in the concatenated result. We walk the arguments in
/// order, advancing a cursor along the concatenation axis; all other axes of
/// the slice span the full output extent with unit stride.
void op::Concat::generate_adjoints(autodiff::Adjoints& adjoints, const std::shared_ptr<Node>& delta)
{
    auto result_shape = get_outputs().at(0).get_shape();

    // Full-extent, unit-stride slice template; only the concatenation-axis
    // bounds change per argument.
    Coordinate slice_lower(result_shape.size(), 0);
    Coordinate slice_upper = result_shape;
    Coordinate slice_strides(result_shape.size(), 1);

    size_t cursor = 0;
    for (auto arg : get_input_ops())
    {
        size_t width = arg->get_shape()[m_concatenation_axis];
        slice_lower[m_concatenation_axis] = cursor;
        slice_upper[m_concatenation_axis] = cursor + width;
        adjoints.add_delta(
            arg, make_shared<op::Slice>(delta, slice_lower, slice_upper, slice_strides));
        cursor += width;
    }
}
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <memory>
#include "ngraph/ops/util/requires_tensor_view_args.hpp"
namespace ngraph
{
namespace op
{
/// \brief Concatenation operation.
///
/// Joins its input tensors along a single axis. All inputs must have the
/// same rank and element type, and must match on every axis other than the
/// concatenation axis; the output extent on that axis is the sum of the
/// inputs' extents there.
class Concat : public util::RequiresTensorViewArgs
{
public:
/// \brief Constructs a concatenation operation.
///
/// \param args The nodes producing the input tensors.
/// \param concatenation_axis The axis along which to concatenate the input tensors.
Concat(const Nodes& args, size_t concatenation_axis);
/// \brief Creates a copy of this node bound to `new_args`, preserving the axis.
virtual std::shared_ptr<Node> copy_with_new_args(
const std::vector<std::shared_ptr<Node>>& new_args) const override
{
return std::make_shared<Concat>(new_args, m_concatenation_axis);
}
/// \return The concatenation axis.
size_t get_concatenation_axis() const { return m_concatenation_axis; }
protected:
/// \brief Backprop: each argument's delta is the matching slice of `delta`.
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const std::shared_ptr<Node>& delta) override;
// Axis along which the inputs are joined; fixed at construction.
const size_t m_concatenation_axis;
};
}
}
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
#include "ngraph/ops/batch_norm.hpp" #include "ngraph/ops/batch_norm.hpp"
#include "ngraph/ops/broadcast.hpp" #include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/ceiling.hpp" #include "ngraph/ops/ceiling.hpp"
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/ops/constant.hpp" #include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp" #include "ngraph/ops/convert.hpp"
#include "ngraph/ops/convolution.hpp" #include "ngraph/ops/convolution.hpp"
......
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
#include "ngraph/ops/batch_norm.hpp" #include "ngraph/ops/batch_norm.hpp"
#include "ngraph/ops/broadcast.hpp" #include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/ceiling.hpp" #include "ngraph/ops/ceiling.hpp"
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/ops/constant.hpp" #include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp" #include "ngraph/ops/convert.hpp"
#include "ngraph/ops/convolution.hpp" #include "ngraph/ops/convolution.hpp"
......
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
#include "ngraph/node.hpp" #include "ngraph/node.hpp"
#include "ngraph/ops/broadcast.hpp" #include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/ops/constant.hpp" #include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convolution.hpp" #include "ngraph/ops/convolution.hpp"
#include "ngraph/ops/dot.hpp" #include "ngraph/ops/dot.hpp"
......
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
#include "ngraph/ops/atan.hpp" #include "ngraph/ops/atan.hpp"
#include "ngraph/ops/broadcast.hpp" #include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/ceiling.hpp" #include "ngraph/ops/ceiling.hpp"
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/ops/constant.hpp" #include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp" #include "ngraph/ops/convert.hpp"
#include "ngraph/ops/convolution.hpp" #include "ngraph/ops/convolution.hpp"
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include "ngraph/node.hpp" #include "ngraph/node.hpp"
#include "ngraph/ops/avg_pool.hpp" #include "ngraph/ops/avg_pool.hpp"
#include "ngraph/ops/broadcast.hpp" #include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/ops/constant.hpp" #include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convolution.hpp" #include "ngraph/ops/convolution.hpp"
#include "ngraph/ops/dot.hpp" #include "ngraph/ops/dot.hpp"
......
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
#include "ngraph/ops/asin.hpp" #include "ngraph/ops/asin.hpp"
#include "ngraph/ops/atan.hpp" #include "ngraph/ops/atan.hpp"
#include "ngraph/ops/broadcast.hpp" #include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/ops/constant.hpp" #include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp" #include "ngraph/ops/convert.hpp"
#include "ngraph/ops/cos.hpp" #include "ngraph/ops/cos.hpp"
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include "ngraph/ops/batch_norm.hpp" #include "ngraph/ops/batch_norm.hpp"
#include "ngraph/ops/broadcast.hpp" #include "ngraph/ops/broadcast.hpp"
#include "ngraph/ops/ceiling.hpp" #include "ngraph/ops/ceiling.hpp"
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/ops/constant.hpp" #include "ngraph/ops/constant.hpp"
#include "ngraph/ops/convert.hpp" #include "ngraph/ops/convert.hpp"
#include "ngraph/ops/convolution.hpp" #include "ngraph/ops/convolution.hpp"
...@@ -372,7 +372,7 @@ static shared_ptr<ngraph::Function> ...@@ -372,7 +372,7 @@ static shared_ptr<ngraph::Function>
auto padding_below = node_js.at("padding_below").get<vector<size_t>>(); auto padding_below = node_js.at("padding_below").get<vector<size_t>>();
auto padding_above = node_js.at("padding_above").get<vector<size_t>>(); auto padding_above = node_js.at("padding_above").get<vector<size_t>>();
auto include_padding_in_avg_computation = auto include_padding_in_avg_computation =
node_js.at("include_padding_in_avg_computation").get<bool>(); get_or_default<bool>(node_js, "include_padding_in_avg_computation", false);
node = make_shared<op::AvgPoolBackprop>(forward_arg_shape, node = make_shared<op::AvgPoolBackprop>(forward_arg_shape,
args[0], args[0],
window_shape, window_shape,
......
...@@ -28,4 +28,19 @@ namespace ngraph ...@@ -28,4 +28,19 @@ namespace ngraph
std::string serialize(std::shared_ptr<ngraph::Function>, size_t indent = 0); std::string serialize(std::shared_ptr<ngraph::Function>, size_t indent = 0);
std::shared_ptr<ngraph::Function> deserialize(std::istream&); std::shared_ptr<ngraph::Function> deserialize(std::istream&);
std::shared_ptr<ngraph::Function> deserialize(const std::string&); std::shared_ptr<ngraph::Function> deserialize(const std::string&);
/// \brief Reads j[key] as a T, returning default_value if the key is absent
///        or the stored value cannot be converted to T.
///
/// Returning directly from each path (rather than assigning through a local
/// `T rc;`) removes the requirement that T be default-constructible and lets
/// the conversion result be moved/elided.
template <typename T>
T get_or_default(nlohmann::json& j, const std::string& key, const T& default_value)
{
    try
    {
        return j.at(key).get<T>();
    }
    catch (...)
    {
        // at() throws on a missing key; get<T>() throws on a type mismatch.
        // Both cases fall back to the caller-supplied default.
        return default_value;
    }
}
} }
...@@ -100,8 +100,8 @@ int main(int argc, char** argv) ...@@ -100,8 +100,8 @@ int main(int argc, char** argv)
include_paths.push_back({CLANG_BUILTIN_HEADERS_PATH, {}, true}); include_paths.push_back({CLANG_BUILTIN_HEADERS_PATH, {}, true});
include_paths.push_back({"/usr/include/x86_64-linux-gnu", {"asm", "sys", "bits", "gnu"}}); include_paths.push_back({"/usr/include/x86_64-linux-gnu", {"asm", "sys", "bits", "gnu"}});
include_paths.push_back({"/usr/include", {"asm", "sys", "bits", "gnu"}}); include_paths.push_back(
include_paths.push_back({"/usr/include", {"linux", "asm-generic"}}); {"/usr/include", {"asm", "sys", "bits", "gnu", "linux", "asm-generic"}});
include_paths.push_back({cpp0, {"bits"}}); include_paths.push_back({cpp0, {"bits"}});
include_paths.push_back({"/usr/include/c++/4.8.2/x86_64-redhat-linux", {"bits"}}); include_paths.push_back({"/usr/include/c++/4.8.2/x86_64-redhat-linux", {"bits"}});
include_paths.push_back({cpp1, {"bits", "ext", "debug", "backward"}}); include_paths.push_back({cpp1, {"bits", "ext", "debug", "backward"}});
...@@ -168,63 +168,41 @@ int main(int argc, char** argv) ...@@ -168,63 +168,41 @@ int main(int argc, char** argv)
if (update_needed) if (update_needed)
{ {
size_t total_size = 0;
size_t total_count = 0;
const string prefix = "pReFiX";
ofstream out(output_path); ofstream out(output_path);
out << "#pragma clang diagnostic ignored \"-Weverything\"\n"; out << "#pragma clang diagnostic ignored \"-Weverything\"\n";
out << "#include <vector>\n"; out << "#include <vector>\n";
out << "namespace ngraph\n"; out << "namespace ngraph\n";
out << "{\n"; out << "{\n";
out << " static const uint8_t header_resources[] =\n"; out << " const std::vector<std::string> builtin_search_paths =\n";
out << " {\n"; out << " {\n";
vector<pair<size_t, size_t>> offset_size_list;
size_t offset = 0;
size_t total_size = 0;
size_t total_count = 0;
for (const ResourceInfo& path : include_paths) for (const ResourceInfo& path : include_paths)
{ {
for (const string& header_file : path.files) out << " \"" << path.search_path << "\",\n";
}
out << " };\n";
out << " const std::vector<std::pair<std::string, std::string>> builtin_headers =\n";
out << " {\n";
for (const ResourceInfo& path : include_paths)
{
for (const string& header_path : path.files)
{ {
string header_data = read_file_to_string(header_file); string header_data = read_file_to_string(header_path);
string base_path = header_file.substr(path.search_path.size() + 1); string relative_path = header_path.substr(path.search_path.size() + 1);
header_data = rewrite_header(header_data, base_path); header_data = rewrite_header(header_data, relative_path);
// header_data = uncomment(header_data); // header_data = uncomment(header_data);
total_size += header_data.size(); total_size += header_data.size();
total_count++; total_count++;
// data layout is triplet of strings containing: out << " {";
// 1) search path out << "\"" << header_path << "\",\nR\"" << prefix << "(" << header_data << ")"
// 2) header path within search path << prefix << "\"},\n";
// 3) header data
// all strings are null terminated and the length includes the null
// The + 1 below is to account for the null terminator
dump(out, path.search_path.c_str(), path.search_path.size() + 1);
offset_size_list.push_back({offset, path.search_path.size() + 1});
offset += path.search_path.size() + 1;
dump(out, header_file.c_str(), header_file.size() + 1);
offset_size_list.push_back({offset, header_file.size() + 1});
offset += header_file.size() + 1;
dump(out, header_data.c_str(), header_data.size() + 1);
offset_size_list.push_back({offset, header_data.size() + 1});
offset += header_data.size() + 1;
} }
} }
out << " };\n"; out << " };\n";
out << " struct HeaderInfo\n";
out << " {\n";
out << " const char* search_path;\n";
out << " const char* header_path;\n";
out << " const char* header_data;\n";
out << " };\n";
out << " std::vector<HeaderInfo> header_info\n";
out << " {\n";
for (size_t i = 0; i < offset_size_list.size();)
{
out << " {(char*)(&header_resources[" << offset_size_list[i++].first;
out << "]), (char*)(&header_resources[" << offset_size_list[i++].first;
out << "]), (char*)(&header_resources[" << offset_size_list[i++].first << "])},\n";
}
out << " };\n";
out << "}\n"; out << "}\n";
cout.imbue(locale("")); cout.imbue(locale(""));
cout << "Total size " << total_size << " in " << total_count << " files\n"; cout << "Total size " << total_size << " in " << total_count << " files\n";
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include "ngraph/codegen/execution_engine.hpp" #include "ngraph/codegen/execution_engine.hpp"
#include "ngraph/file_util.hpp" #include "ngraph/file_util.hpp"
#include "ngraph/log.hpp" #include "ngraph/log.hpp"
#include "ngraph/ops/concatenate.hpp" #include "ngraph/ops/concat.hpp"
#include "ngraph/runtime/backend.hpp" #include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/call_frame.hpp" #include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/cpu/cpu_call_frame.hpp" #include "ngraph/runtime/cpu/cpu_call_frame.hpp"
......
...@@ -106,6 +106,18 @@ TEST(serialize, existing_models) ...@@ -106,6 +106,18 @@ TEST(serialize, existing_models)
} }
} }
// Exercises get_or_default: a present key returns the stored value, a missing
// key falls back to the supplied default.
TEST(serialize, default_value)
{
json j = {{"test1", 1}, {"test2", 2}};
// Baseline: direct at().get<int>() on a present key.
int x1 = j.at("test1").get<int>();
EXPECT_EQ(x1, 1);
// Present key: default (0) is ignored, stored value is returned.
int x2 = get_or_default<int>(j, "test2", 0);
EXPECT_EQ(x2, 2);
// Absent key: the default (3) is returned instead of throwing.
int x3 = get_or_default<int>(j, "test3", 3);
EXPECT_EQ(x3, 3);
}
TEST(benchmark, serialize) TEST(benchmark, serialize)
{ {
stopwatch timer; stopwatch timer;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment