Commit a4d4d161 authored Nov 02, 2017 by Jaikrishnan Menon
Merge branch 'jmenon/cpu' into jmenon/cpu_kernels

Conflicts:
    src/ngraph/codegen/compiler.cpp

parents eeb42b94 60ea252d
Showing 9 changed files with 95 additions and 28 deletions
cmake/external_llvm.cmake                       +1   -1
src/ngraph/CMakeLists.txt                       +5   -3
src/ngraph/autodiff/adjoints.cpp                +1   -1
src/ngraph/codegen/compiler.cpp                 +3   -1
src/ngraph/codegen/compiler.hpp                 +0   -1
src/ngraph/pass/assign_layout.hpp               +59  -0
src/ngraph/runtime/cpu/external_function.cpp    +3   -18
test/CMakeLists.txt                             +1   -1
test/autodiff.cpp                               +22  -2
cmake/external_llvm.cmake
@@ -13,7 +13,7 @@
 include(ExternalProject)
 
-if((NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") AND
+if(NGRAPH_CPU_ENABLE AND (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Darwin") AND
    (NOT ${CMAKE_SYSTEM_NAME} MATCHES "Windows"))
     message(STATUS "Fetching LLVM from llvm.org")
     set(LLVM_RELEASE_URL http://releases.llvm.org/5.0.0/clang+llvm-5.0.0-linux-x86_64-ubuntu16.04.tar.xz)
src/ngraph/CMakeLists.txt
@@ -114,13 +114,15 @@ if(LLVM_INCLUDE_DIR AND MKLDNN_INCLUDE_DIR)
         runtime/cpu/external_function.cpp
     )
     # LLVM binary builds are typically built without RTTI
+    # The built-in headers are in a version-specific directory
+    # This must be kept in sync with the LLVM + Clang version in use
     set_source_files_properties(codegen/compiler.cpp PROPERTIES COMPILE_FLAGS "-fno-rtti")
     set_source_files_properties(codegen/compiler.cpp PROPERTIES COMPILE_DEFINITIONS
         "EIGEN_HEADERS_PATH=\"${EIGEN_INCLUDE_DIR}\";CLANG_BUILTIN_HEADERS_PATH=\"${LLVM_LIB_DIR}/clang/5.0.0/include\";NGRAPH_HEADERS_PATH=\"${NGRAPH_INCLUDE_PATH}\"")
-    set(CPU_ENABLE_PCH 0 CACHE STRING "Enable pre-compiled headers in the CPU backend")
-    set(CPU_ENABLE_DEBUGINFO 0 CACHE STRING "Enable debuginfo in the CPU backend")
+    set(NGRAPH_CPU_PCH_ENABLE 0 CACHE STRING "Enable pre-compiled headers in the CPU backend")
+    set(NGRAPH_CPU_DEBUGINFO_ENABLE 0 CACHE STRING "Enable debuginfo in the CPU backend")
     set_source_files_properties(runtime/cpu/external_function.cpp PROPERTIES COMPILE_DEFINITIONS
-        "NGCPU_PCH=${CPU_ENABLE_PCH};NGCPU_DEBUGINFO=${CPU_ENABLE_DEBUGINFO}")
+        "NGCPU_PCH=${NGRAPH_CPU_PCH_ENABLE};NGCPU_DEBUGINFO=${NGRAPH_CPU_DEBUGINFO_ENABLE}")
 endif()
 
 add_library(ngraph SHARED ${SRC})
src/ngraph/autodiff/adjoints.cpp
@@ -160,6 +160,6 @@ void autodiff::Adjoints::add_delta(const std::shared_ptr<Node>& x,
     }
     else
     {
-        m_adjoint_map.insert({x.get(), std::make_shared<op::Add>(adjoint_it->second, delta)});
+        adjoint_it->second = std::make_shared<op::Add>(adjoint_it->second, delta);
     }
 }
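
Note on the adjoints.cpp change: the else branch runs when adjoint_it already refers to an entry in m_adjoint_map, and std::map / std::unordered_map insert() never overwrites an existing key, so the old code built the op::Add sum node but silently failed to store it; assigning through the iterator keeps the accumulated adjoint. A standalone illustration of the container behaviour (plain ints, not ngraph types):

    #include <cassert>
    #include <map>

    int main()
    {
        std::map<int, int> adjoint{{7, 1}};

        // insert() is a no-op when the key already exists,
        // so the intended update (1 + 5) is silently dropped.
        adjoint.insert({7, 1 + 5});
        assert(adjoint.at(7) == 1);

        // Assigning through the iterator actually replaces the stored value.
        auto it = adjoint.find(7);
        it->second = it->second + 5;
        assert(adjoint.at(7) == 6);
    }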
src/ngraph/codegen/compiler.cpp
@@ -131,6 +131,8 @@ std::unique_ptr<llvm::Module> execution_state::compile(const string& source, con
     HSO.AddPath(NGRAPH_HEADERS_PATH, clang::frontend::System, false, false);
 
     // Language options
+    // These are the C++ features needed to compile ngraph headers
+    // and any dependencies like Eigen
     auto LO = Clang->getInvocation().getLangOpts();
     LO->CPlusPlus = 1;
     LO->CPlusPlus11 = 1;
@@ -167,7 +169,7 @@ std::unique_ptr<llvm::Module> execution_state::compile(const string& source, con
     // Enable various target features
     // Most of these are for Eigen
     auto& TO = Clang->getInvocation().getTargetOpts();
     // TODO: This needs to be configurable and selected carefully
     TO.CPU = "broadwell";
     TO.FeaturesAsWritten.emplace_back("+sse4.1");
src/ngraph/codegen/compiler.hpp
@@ -48,7 +48,6 @@ public:
     bool is_precompiled_headers_enabled() { return precompiled_headers_enabled; }
     void set_debuginfo_enabled(bool state) { debuginfo_enabled = state; }
     bool is_debuginfo_enabled() { return debuginfo_enabled; }
 
     std::unique_ptr<llvm::Module> compile(const std::string& source, const std::string& name = "");
     bool add_module(std::unique_ptr<llvm::Module>&);
src/ngraph/pass/assign_layout.hpp  0 → 100644 (new file)

// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------

#pragma once

#include <exception>
#include <sstream>

#include "ngraph/descriptor/output.hpp"
#include "ngraph/pass/pass.hpp"

namespace ngraph
{
    namespace pass
    {
        template <typename LT>
        class AssignLayout : public CallGraphPass
        {
        public:
            virtual bool run_on_call_graph(std::list<std::shared_ptr<Node>>& nodes) override
            {
                for (const std::shared_ptr<Node>& node : nodes)
                {
                    try
                    {
                        for (const descriptor::Output& output : node->get_outputs())
                        {
                            auto tv = output.get_tensor_view();
                            if (nullptr == tv->get_tensor_view_layout())
                            {
                                auto layout = std::make_shared<LT>(*tv);
                                tv->set_tensor_view_layout(layout);
                            }
                        }
                    }
                    catch (const std::exception& e)
                    {
                        std::stringstream ss;
                        ss << "Error with node " << *node << ": ";
                        ss << e.what();
                        throw std::invalid_argument(ss.str());
                    }
                }
                return false;
            }
        };
    }
}
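
For orientation, the new pass is a CallGraphPass templated on the layout type, so a backend picks a concrete layout and registers it with the pass manager. A minimal usage sketch, mirroring the registration this commit adds to the CPU backend's external_function.cpp below (includes and the surrounding compile() body are elided; DenseTensorViewLayout is the concrete layout type the CPU backend uses in this commit):

    // Sketch only: assign every unlaid-out tensor view a dense layout
    // as part of the normal compilation pass pipeline.
    pass::Manager pass_manager;
    pass_manager.register_pass<pass::TopologicalSort>();
    pass_manager.register_pass<pass::PropagateTypes>();
    pass_manager.register_pass<pass::AssignTensors>();
    pass_manager.register_pass<pass::AssignLayout<DenseTensorViewLayout>>();
    pass_manager.run_passes(m_function);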
src/ngraph/runtime/cpu/external_function.cpp
@@ -52,6 +52,7 @@
 #include "ngraph/ops/select.hpp"
 #include "ngraph/ops/subtract.hpp"
 #include "ngraph/ops/tuple.hpp"
+#include "ngraph/pass/assign_layout.hpp"
 #include "ngraph/pass/assign_tensors.hpp"
 #include "ngraph/pass/manager.hpp"
 #include "ngraph/pass/propagate_types.hpp"
@@ -130,29 +131,13 @@ void ExternalFunction::compile(FunctionMap& function_map)
         return;
     }
 
-    // This will be replaced with the pass manager
-    // Get the ordered list of ops in execution order
     pass::Manager pass_manager;
     pass_manager.register_pass<pass::TopologicalSort>();
     pass_manager.register_pass<pass::PropagateTypes>();
     pass_manager.register_pass<pass::AssignTensors>();
-    pass_manager.run_passes(m_function);
-
-    // Turn this into a pass
-    // Assign layouts
     // For now, just make everyone row-major.
-    for (shared_ptr<Node> node : m_function->get_ordered_ops())
-    {
-        for (const descriptor::Output& output : node->get_outputs())
-        {
-            auto tv = output.get_tensor_view();
-            if (nullptr == tv->get_tensor_view_layout())
-            {
-                auto layout = std::make_shared<DenseTensorViewLayout>(*tv);
-                tv->set_tensor_view_layout(layout);
-            }
-        }
-    }
+    pass_manager.register_pass<pass::AssignLayout<DenseTensorViewLayout>>();
+    pass_manager.run_passes(m_function);
 
     // Determine tensor requirements for the call frame
     unordered_map<shared_ptr<ngraph::descriptor::TensorView>, size_t> tensor_index;
test/CMakeLists.txt
@@ -22,6 +22,7 @@ include_directories(
 )
 
 set (SRC
+    autodiff.cpp
     build_graph.cpp
     eigen.cpp
     input_output_assign.cpp
@@ -35,7 +36,6 @@ set (SRC
     topological_sort.cpp
     type_prop.cpp
     util/all_close.cpp
-    util/autodiff.cpp
     util/test_tools.cpp
     util.cpp
     uuid.cpp
test/util/autodiff.cpp → test/autodiff.cpp
@@ -19,12 +19,12 @@
 #include "gtest/gtest.h"
 
-#include "all_close.hpp"
 #include "ngraph/autodiff/backprop_derivative.hpp"
 #include "ngraph/autodiff/backprop_function.hpp"
 #include "ngraph/autodiff/numeric_derivative.hpp"
 #include "ngraph/ngraph.hpp"
-#include "random.hpp"
+#include "util/all_close.hpp"
+#include "util/random.hpp"
 
 using namespace std;
 using namespace ngraph;
@@ -65,6 +65,26 @@ TEST(backwards, add)
         manager, backend, make_graph, {x0, x1}, .01f, .01f));
 }
 
+TEST(backwards, add_nested)
+{
+    auto manager = runtime::Manager::get("NGVM");
+    auto backend = manager->allocate_backend();
+
+    test::Uniform<element::Float32> rng(-1.0f, 1.0f);
+    auto shape = Shape{2, 3};
+    auto x0 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape));
+    auto x1 = rng.initialize(backend->make_parameterized_tensor_view<element::Float32>(shape));
+
+    auto make_graph = [shape]() {
+        auto X0 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
+        auto X1 = make_shared<op::Parameter>(element::Float32::element_type(), shape);
+        return make_shared<Function>((X0 + X1) + (X1 + X0),
+                                     nullptr,
+                                     std::vector<std::shared_ptr<op::Parameter>>{X0, X1});
+    };
+    EXPECT_TRUE(autodiff_numeric_compare<element::Float32>(
+        manager, backend, make_graph, {x0, x1}, .01f, .01f));
+}
+
 TEST(backwards, broadcast0)
 {
     auto manager = runtime::Manager::get("NGVM");
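
A quick check on what the new add_nested test exercises (the arithmetic here is mine, not part of the commit): the graph computes f(X0, X1) = (X0 + X1) + (X1 + X0), so each parameter reaches the output through two paths and the elementwise derivatives are constant, ∂f/∂X0 = ∂f/∂X1 = 2. The backward pass therefore has to accumulate two deltas into each parameter's adjoint, which is exactly the case fixed by the adjoints.cpp change above; autodiff_numeric_compare then checks the symbolic backprop result against a numeric derivative.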