Commit d73f92c4
Authored Mar 22, 2018 by Adam Procter; committed by adstraw, Mar 22, 2018
Remove XLA compatibility figleaves (will be moved to ngraph-tensorflow-bridge) (#704)
Parent: b4c672f2
Showing 9 changed files with 1 addition and 411 deletions:

  src/ngraph/CMakeLists.txt          +0  -1
  src/ngraph/builder/xla_tuple.cpp   +0  -193
  src/ngraph/builder/xla_tuple.hpp   +0  -106
  src/ngraph/ngraph.hpp              +0  -6
  src/ngraph/serializer.cpp          +0  -6
  src/ngraph/util.hpp                +1  -1
  test/CMakeLists.txt                +0  -1
  test/builder_xla.cpp               +0  -96
  test/includes.cpp                  +0  -1
src/ngraph/CMakeLists.txt
@@ -19,7 +19,6 @@ set (SRC
     builder/autobroadcast.cpp
     builder/numpy_transpose.cpp
     builder/reduce_ops.cpp
-    builder/xla_tuple.cpp
     coordinate_transform.cpp
     descriptor/input.cpp
     descriptor/layout/dense_tensor_view_layout.cpp
src/ngraph/builder/xla_tuple.cpp
deleted 100644 → 0
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <memory>
#include "ngraph/builder/xla_tuple.hpp"
#include "ngraph/descriptor/primary_tensor_view.hpp"
#include "ngraph/descriptor/tensor_view.hpp"
#include "ngraph/except.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/type/element_type.hpp"
#include "ngraph/type/type.hpp"
using namespace std;
using namespace ngraph;

xla::op::Tuple::Tuple(const NodeVector& nodes)
    : Node("Tuple", NodeVector{})
    , m_elements(nodes)
{
}

std::shared_ptr<Node> xla::op::Tuple::copy_with_new_args(const NodeVector& new_args) const
{
    return make_shared<Tuple>(new_args);
}

const NodeVector& xla::op::Tuple::get_elements() const
{
    return m_elements;
}

size_t xla::op::Tuple::get_tuple_size() const
{
    return m_elements.size();
}

shared_ptr<Node> xla::op::Tuple::get_tuple_element(size_t i)
{
    return m_elements.at(i);
}

shared_ptr<Node> xla::op::get_tuple_element(shared_ptr<Node> node, size_t i)
{
    shared_ptr<xla::op::Tuple> tuple = dynamic_pointer_cast<xla::op::Tuple>(node);
    if (tuple == nullptr)
    {
        throw ngraph_error("get_tuple_element called on a non-tuple");
    }
    return tuple->get_tuple_element(i);
}

namespace
{
    // Add the node to nodes if it's not a Tuple, otherwise add nodes for the elements of the tuple.
    template <typename T>
    void flatten(vector<shared_ptr<T>>& nodes, shared_ptr<Node> node)
    {
        auto xla_tuple = dynamic_pointer_cast<xla::op::Tuple>(node);
        if (xla_tuple == nullptr)
        {
            auto t_node = dynamic_pointer_cast<T>(node);
            if (t_node == nullptr)
            {
                throw ngraph_error("Invalid node type type encountered");
            }
            nodes.push_back(t_node);
        }
        else
        {
            for (auto element : xla_tuple->get_elements())
            {
                flatten<T>(nodes, element);
            }
        }
    }

    // Collect a vector of the non-Tuple nodes that underly nodes
    template <typename T>
    vector<shared_ptr<T>> flatten(const NodeVector& nodes)
    {
        vector<shared_ptr<T>> result;
        for (auto node : nodes)
        {
            flatten<T>(result, node);
        }
        return result;
    }
}

xla::XLAFunction::XLAFunction(const NodeVector& results,
                              const NodeVector& parameters,
                              const string& name)
    : Function(flatten<Node>(results), flatten<ngraph::op::Parameter>(parameters), name)
{
}

xla::XLATuple::XLATuple(const XLAValues& elements)
    : runtime::TensorView(make_shared<descriptor::PrimaryTensorView>(
          make_shared<ngraph::TensorViewType>(element::f32, Shape{}),
          "XLATuple",
          false,
          false,
          false))
    , m_elements(elements)
{
}

const vector<shared_ptr<runtime::TensorView>>& xla::XLATuple::get_elements() const
{
    return m_elements;
}

size_t xla::XLATuple::get_tuple_size() const
{
    return m_elements.size();
}

shared_ptr<runtime::TensorView> xla::XLATuple::get_tuple_element(size_t i) const
{
    return m_elements.at(i);
}

void xla::XLATuple::write(const void* p, size_t tensor_offset, size_t n)
{
    throw ngraph_error("Cannot write to a tuple");
}

void xla::XLATuple::read(void* p, size_t tensor_offset, size_t n) const
{
    throw ngraph_error("Cannot read from a tuple");
}

std::shared_ptr<runtime::TensorView> xla::get_tuple_element(std::shared_ptr<xla::XLATuple> tuple,
                                                            size_t i)
{
    return tuple->get_tuple_element(i);
}

namespace
{
    // Collect the real tensors, expanding the tensors that are really tuples
    void flatten(runtime::TensorViewPtrs& tensors, shared_ptr<runtime::TensorView> tensor)
    {
        auto xla_tuple = dynamic_pointer_cast<xla::XLATuple>(tensor);
        if (xla_tuple == nullptr)
        {
            tensors.push_back(tensor);
        }
        else
        {
            for (auto element : xla_tuple->get_elements())
            {
                flatten(tensors, element);
            }
        }
    }

    // Return a vector of the real tensors underlying a vector of tensors which may contain tuples.
    runtime::TensorViewPtrs flatten(const runtime::TensorViewPtrs& tensors)
    {
        runtime::TensorViewPtrs result;
        for (auto tensor : tensors)
        {
            flatten(result, tensor);
        }
        return result;
    }
}

void xla::call(shared_ptr<runtime::CallFrame> call_frame,
               const runtime::TensorViewPtrs& outputs,
               const runtime::TensorViewPtrs& inputs)
{
    runtime::TensorViewPtrs flat_outputs(flatten(outputs));
    runtime::TensorViewPtrs flat_inputs(flatten(inputs));
    call_frame->tensor_call(flat_outputs, flat_inputs);
}
src/ngraph/builder/xla_tuple.hpp
deleted 100644 → 0
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include <memory>
#include <vector>
#include "ngraph/function.hpp"
#include "ngraph/node.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/type/element_type.hpp"
namespace ngraph
{
    namespace runtime
    {
        class CallFrame;
    }

    namespace xla
    {
        namespace op
        {
            /// A special Op for constructing graphs with XLA tuples.
            /// Can only be used as an argument to the get_tuple_element function, which returns the node
            /// that was used when the tuple was constructed; the constructed graph should have no Tuple
            /// nodes in it.
            class Tuple : public Node
            {
            public:
                Tuple(const NodeVector& nodes);

                std::shared_ptr<Node> get_tuple_element(size_t i);
                size_t get_tuple_size() const;
                const NodeVector& get_elements() const;

                virtual std::shared_ptr<Node>
                    copy_with_new_args(const NodeVector& new_args) const override;

            protected:
                NodeVector m_elements;
            };

            std::shared_ptr<Node> get_tuple_element(std::shared_ptr<Node> tuple, size_t i);
        }

        /// Extends functions to let results include xla::op::Tuple, and paramaters to include
        /// xla::op::Tuple of op::Parameter trees.
        class XLAFunction : public Function
        {
        public:
            XLAFunction(const NodeVector& results,
                        const NodeVector& parameters,
                        const std::string& name = "");
        };

        using XLAValues = std::vector<std::shared_ptr<runtime::TensorView>>;

        /// An XLATuple is a implemented as an extension of a float scalar so that it fits in nicely
        /// with the nGraph type hierarchy.
        class XLATuple : public runtime::TensorView
        {
        public:
            XLATuple(const XLAValues& elements);

            const ngraph::runtime::TensorViewPtrs& get_elements() const;
            std::shared_ptr<runtime::TensorView> get_tuple_element(size_t i) const;
            size_t get_tuple_size() const;

            virtual void write(const void* p, size_t tensor_offset, size_t n) override;
            virtual void read(void* p, size_t tensor_offset, size_t n) const override;

        protected:
            std::vector<std::shared_ptr<runtime::TensorView>> m_elements;
        };

        /// Convenience function for making a runtime tuple.
        inline std::shared_ptr<XLATuple> make_tuple(const XLAValues& elements)
        {
            return std::make_shared<XLATuple>(elements);
        }

        /// Convenience function for accessing a tuple element.
        std::shared_ptr<runtime::TensorView> get_tuple_element(std::shared_ptr<XLATuple> xla_tuple,
                                                               size_t i);

        /// Invoke a call frame where some arguments might be XLATuples
        void call(std::shared_ptr<runtime::CallFrame> call_frame,
                  const ngraph::runtime::TensorViewPtrs& outputs,
                  const ngraph::runtime::TensorViewPtrs& inputs);
    }
}
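For readers unfamiliar with the deleted API: xla::op::Tuple bundled graph nodes at construction time, xla::op::get_tuple_element recovered the wrapped nodes, and xla::XLAFunction flattened tuples out of its results and parameters. The following is a minimal sketch of that pattern, condensed from the deleted test file (test/builder_xla.cpp) shown further below; the function name tuple_example is illustrative only, and the API is as it existed at this revision.

/* Sketch only: the tuple-building pattern removed by this commit. */
#include "ngraph/builder/xla_tuple.hpp"
#include "ngraph/ngraph.hpp"

using namespace ngraph;

void tuple_example()
{
    Shape shape{2, 2};
    auto pA = std::make_shared<op::Parameter>(element::f32, shape);
    auto pB = std::make_shared<op::Parameter>(element::f32, shape);

    // Bundle the parameters into a single tuple node...
    auto AB = std::make_shared<xla::op::Tuple>(NodeVector{pA, pB});

    // ...then pull the original nodes back out to build the graph body.
    auto A = xla::op::get_tuple_element(AB, 0);
    auto B = xla::op::get_tuple_element(AB, 1);

    // XLAFunction flattens the tuple back into plain results/parameters.
    auto f = std::make_shared<xla::XLAFunction>(NodeVector{A + B}, NodeVector{AB});
}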
src/ngraph/ngraph.hpp
@@ -43,12 +43,6 @@
 /// @brief Convenience functions that create addional graph nodes to implement commonly-used
 /// recipes, for example auto-broadcast.
-/// @namespace ngraph::xla
-/// @brief Code to facilitate nGraph's support for XLA/HLO.
-/// @namespace ngraph::xla::op
-/// @brief Operators specific to nGraph's support for XLA/HLO.
 #include "ngraph/builder/autobroadcast.hpp"
 #include "ngraph/builder/numpy_transpose.hpp"
 #include "ngraph/builder/reduce_ops.hpp"
src/ngraph/serializer.cpp
@@ -842,9 +842,6 @@ static shared_ptr<ngraph::Function>
         {
             node = make_shared<op::Tanh>(args[0]);
         }
-        // else if (node_op == "XLAGetTupleElement")
-        // {
-        // }
         else
         {
             stringstream ss;

@@ -1239,9 +1236,6 @@ static json write(const Node& n, bool binary_constant_data)
     else if (node_op == "Tanh")
     {
     }
-    else if (node_op == "XLAGetTupleElement")
-    {
-    }
     return node;
 }
src/ngraph/util.hpp
@@ -260,7 +260,7 @@ namespace ngraph
     };

     /**
-     * This utility takes forward-propogation and back-propogation XLA functions
+     * This utility takes forward-propogation and back-propagation functions
      * and turns them into clone functions where the intermediate values of
      * the forward prop are added to the output of fprop and the input of the bprop
      * to avoid repeat calcualtions.
test/CMakeLists.txt
@@ -28,7 +28,6 @@ set (SRC
     backend_debug_api.cpp
     builder.cpp
     builder_autobroadcast.cpp
-    builder_xla.cpp
     build_graph.cpp
     copy.cpp
     core_fusion.cpp
test/builder_xla.cpp
deleted 100644 → 0
/*******************************************************************************
* Copyright 2017-2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include <vector>
#include "gtest/gtest.h"
#include "ngraph/builder/xla_tuple.hpp"
#include "ngraph/ngraph.hpp"
#include "util/test_tools.hpp"
using namespace std;
using namespace ngraph;

TEST(builder_xla, simple)
{
    Shape shape{2, 2};
    auto pA = make_shared<op::Parameter>(element::f32, shape);
    auto pB = make_shared<op::Parameter>(element::f32, shape);
    auto pC = make_shared<op::Parameter>(element::f32, shape);

    auto ABC = make_shared<xla::op::Tuple>(NodeVector{pA, pB, pC});

    auto A = xla::op::get_tuple_element(ABC, 0);
    auto B = xla::op::get_tuple_element(ABC, 1);
    auto C = xla::op::get_tuple_element(ABC, 2);
    auto f = make_shared<xla::XLAFunction>(
        NodeVector{make_shared<xla::op::Tuple>(NodeVector{(A + B) * C})}, NodeVector{ABC});

    auto manager = runtime::Manager::get("INTERPRETER");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    // Create some tensors for input/output
    auto a = backend->make_primary_tensor_view(element::f32, shape);
    copy_data(a, vector<float>{1, 2, 3, 4});
    auto b = backend->make_primary_tensor_view(element::f32, shape);
    copy_data(b, vector<float>{5, 6, 7, 8});
    auto c = backend->make_primary_tensor_view(element::f32, shape);
    copy_data(c, vector<float>{9, 10, 11, 12});

    auto abc = xla::make_tuple({a, b, c});
    auto bac = xla::make_tuple({b, a, c});
    auto acb = xla::make_tuple({a, c, b});
    auto result = backend->make_primary_tensor_view(element::f32, shape);
    auto result_tuple = xla::make_tuple({result});

    xla::call(cf, {result_tuple}, {abc});
    EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));

    xla::call(cf, {result_tuple}, {bac});
    EXPECT_EQ((vector<float>{54, 80, 110, 144}), read_vector<float>(result));

    xla::call(cf, {result_tuple}, {acb});
    EXPECT_EQ((vector<float>{50, 72, 98, 128}), read_vector<float>(result));
}

TEST(builder_xla, empty_tuple_interpreter)
{
    auto empty_tuple = make_shared<xla::op::Tuple>(NodeVector{});
    auto f = make_shared<xla::XLAFunction>(NodeVector{empty_tuple}, NodeVector{});

    auto manager = runtime::Manager::get("INTERPRETER");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    xla::call(cf, {}, {});
}

TEST(builder_xla, empty_tuple_cpu)
{
    auto empty_tuple = make_shared<xla::op::Tuple>(NodeVector{});
    auto f = make_shared<xla::XLAFunction>(NodeVector{empty_tuple}, NodeVector{});

    auto manager = runtime::Manager::get("CPU");
    auto external = manager->compile(f);
    auto backend = manager->allocate_backend();
    auto cf = backend->make_call_frame(external);

    xla::call(cf, {}, {});
}
test/includes.cpp
@@ -35,7 +35,6 @@ TEST(DISABLED_include, complete)
     "ngraph/builder/autobroadcast.hpp",
     "ngraph/builder/numpy_transpose.hpp",
     "ngraph/builder/reduce_ops.hpp",
-    "ngraph/builder/xla_tuple.hpp",
     "ngraph/codegen/code_writer.hpp",
     "ngraph/codegen/compiler.hpp",
     "ngraph/codegen/execution_engine.hpp",