Commit fe85a703 authored Feb 01, 2018 by Jaikrishnan Menon

WIP

parent 84ea43b5

Showing 9 changed files with 86 additions and 50 deletions (+86 -50)
src/ngraph/runtime/cpu/cpu_call_frame.cpp                 +18   -0
src/ngraph/runtime/cpu/cpu_call_frame.hpp                  +4   -0
src/ngraph/runtime/cpu/cpu_external_function.cpp          +37   -1
src/ngraph/runtime/cpu/cpu_external_function.hpp           +7   -0
src/ngraph/runtime/cpu/cpu_layout_descriptor.cpp          +10   -0
src/ngraph/runtime/cpu/cpu_layout_descriptor.hpp           +6   -0
src/ngraph/runtime/cpu/cpu_tensor_view.cpp                 +3   -0
src/ngraph/runtime/cpu/pass/cpu_layout.cpp                 +1   -2
src/ngraph/runtime/cpu/pass/cpu_tensor_allocation.hpp      +0  -47   (deleted)
src/ngraph/runtime/cpu/cpu_call_frame.cpp

@@ -34,6 +34,10 @@ void runtime::cpu::CPU_CallFrame::tensor_call(
{
    vector<void*> inputs;
    vector<void*> outputs;

    propagate_layouts(input_tvs, m_external_function->get_parameter_layout_descriptors());
    propagate_layouts(output_tvs, m_external_function->get_result_layout_descriptors());

    for (size_t i = 0; i < input_tvs.size(); i++)
    {
        shared_ptr<runtime::cpu::CPUTensorView> tv =
    ...

@@ -71,6 +75,20 @@ void runtime::cpu::CPU_CallFrame::call(
    tensor_call(inputs, outputs);
}

void runtime::cpu::CPU_CallFrame::propagate_layouts(
    const std::vector<std::shared_ptr<runtime::TensorView>>& tvs,
    const LayoutDescriptorPtrs& layouts) const
{
    if (layouts.size() != tvs.size())
    {
        throw ngraph_error(
            "Error propagating layouts - tensor view and layout descriptor counts do not match");
    }
    for (size_t i = 0; i < tvs.size(); i++)
    {
        assert(layouts[i]);
        tvs[i]->get_descriptor()->set_tensor_view_layout(layouts[i]);
    }
}

vector<runtime::PerformanceCounter> runtime::cpu::CPU_CallFrame::get_performance_data() const
{
    vector<runtime::PerformanceCounter> rc;
    ...
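Taken together with the external-function changes below, the new propagate_layouts step copies the layout descriptors chosen at compile time onto the tensor views handed to the call frame, before the compiled function runs. A minimal standalone sketch of that step, using toy stand-in types (Layout, View, LayoutPtrs are illustrative assumptions, not the nGraph API), showing the invariant being enforced: one non-null descriptor per tensor view, paired by index.

    // Toy sketch (not the nGraph API): propagate pre-computed layouts onto tensor views.
    #include <cassert>
    #include <iostream>
    #include <memory>
    #include <stdexcept>
    #include <vector>

    struct Layout { int format; };                     // stand-in for LayoutDescriptor
    struct View   { std::shared_ptr<Layout> layout; }; // stand-in for runtime::TensorView

    using LayoutPtrs = std::vector<std::shared_ptr<Layout>>;

    void propagate_layouts(const std::vector<std::shared_ptr<View>>& tvs, const LayoutPtrs& layouts)
    {
        if (layouts.size() != tvs.size())
            throw std::runtime_error("tensor view and layout descriptor counts do not match");
        for (size_t i = 0; i < tvs.size(); i++)
        {
            assert(layouts[i]);          // every descriptor must have been assigned
            tvs[i]->layout = layouts[i]; // analogous to set_tensor_view_layout()
        }
    }

    int main()
    {
        LayoutPtrs layouts = {std::make_shared<Layout>(Layout{1}), std::make_shared<Layout>(Layout{2})};
        std::vector<std::shared_ptr<View>> views = {std::make_shared<View>(), std::make_shared<View>()};
        propagate_layouts(views, layouts);
        std::cout << views[0]->layout->format << " " << views[1]->layout->format << "\n"; // prints: 1 2
    }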
src/ngraph/runtime/cpu/cpu_call_frame.hpp

@@ -21,6 +21,7 @@
#include "ngraph/function.hpp"
#include "ngraph/runtime/call_frame.hpp"
#include "ngraph/runtime/tensor_view.hpp"
#include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"

namespace ngraph
{
    ...

@@ -56,6 +57,9 @@ namespace ngraph
            void tensor_call(const std::vector<std::shared_ptr<TensorView>>& inputs,
                             const std::vector<std::shared_ptr<TensorView>>& outputs) override;

            void propagate_layouts(const std::vector<std::shared_ptr<runtime::TensorView>>& tvs,
                                   const LayoutDescriptorPtrs& layouts) const;

            std::vector<ngraph::runtime::PerformanceCounter> get_performance_data() const override;
    ...
src/ngraph/runtime/cpu/cpu_external_function.cpp

@@ -94,7 +94,6 @@
#include "ngraph/runtime/cpu/cpu_tensor_view.hpp"
#include "ngraph/runtime/cpu/ops/matmul_bias.hpp"
#include "ngraph/runtime/cpu/pass/cpu_layout.hpp"
//#include "ngraph/runtime/cpu/pass/cpu_tensor_allocation.hpp"

using namespace std;
using namespace ngraph;
    ...

@@ -735,6 +734,31 @@ using namespace ngraph::runtime;
        writer += "}\n\n";
    }

    // Store layouts assigned for arguments
    for (const auto& parameter : m_function->get_parameters())
    {
        for (size_t i = 0; i < parameter->get_output_size(); ++i)
        {
            auto tv = parameter->get_output_tensor_view(i);
            assert(tv->get_tensor_view_layout());
            parameter_layout_descriptors.emplace_back(
                static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_view_layout()));
        }
    }

    // Store layouts assigned for results
    assert(result_layout_descriptors.empty());
    for (size_t i = 0; i < m_function->get_output_size(); ++i)
    {
        const auto& output = m_function->get_output_op(i);
        for (size_t j = 0; j < output->get_output_size(); ++j)
        {
            auto tv = output->get_output_tensor_view(j);
            assert(tv->get_tensor_view_layout());
            result_layout_descriptors.emplace_back(
                static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_view_layout()));
        }
    }

    // TODO: Cleanup and make this a utility function
    file_util::make_directory(s_output_dir);
    ...

@@ -807,6 +831,18 @@ shared_ptr<ngraph::runtime::CallFrame> runtime::cpu::CPU_ExternalFunction::make_
        m_compiled_function);
}

const runtime::cpu::LayoutDescriptorPtrs&
    runtime::cpu::CPU_ExternalFunction::get_parameter_layout_descriptors()
{
    assert(!parameter_layout_descriptors.empty());
    return parameter_layout_descriptors;
}

const runtime::cpu::LayoutDescriptorPtrs&
    runtime::cpu::CPU_ExternalFunction::get_result_layout_descriptors()
{
    assert(!result_layout_descriptors.empty());
    return result_layout_descriptors;
}

void runtime::cpu::CPU_ExternalFunction::emit_debug_function_entry(codegen::CodeWriter& writer,
                                                                   Node* node,
    ...
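The getters added here assert that the descriptor vectors are non-empty, so they are only meaningful after codegen has populated them (one entry per parameter output and per result output, in order). A rough sketch of that compile-then-query discipline, with hypothetical stand-in types (ExternalFn, Layout, LayoutPtrs are assumptions for illustration, not the real classes):

    // Rough sketch (stand-in types, not nGraph API): layouts are cached during
    // compile() and exposed afterwards through assert-guarded getters.
    #include <cassert>
    #include <memory>
    #include <vector>

    struct Layout { int format; };
    using LayoutPtrs = std::vector<std::shared_ptr<Layout>>;

    class ExternalFn
    {
    public:
        void compile()
        {
            // In the real code this happens while emitting code for each parameter and
            // result, after the layout pass has assigned a layout to every tensor view.
            m_parameter_layouts.push_back(std::make_shared<Layout>(Layout{1}));
            m_result_layouts.push_back(std::make_shared<Layout>(Layout{2}));
        }

        const LayoutPtrs& get_parameter_layout_descriptors()
        {
            assert(!m_parameter_layouts.empty()); // compile() must have run first
            return m_parameter_layouts;
        }

        const LayoutPtrs& get_result_layout_descriptors()
        {
            assert(!m_result_layouts.empty());
            return m_result_layouts;
        }

    private:
        LayoutPtrs m_parameter_layouts;
        LayoutPtrs m_result_layouts;
    };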
src/ngraph/runtime/cpu/cpu_external_function.hpp

@@ -25,6 +25,7 @@
#include "ngraph/codegen/execution_engine.hpp"
#include "ngraph/function.hpp"
#include "ngraph/runtime/cpu/cpu_call_frame.hpp"
#include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
#include "ngraph/runtime/cpu/cpu_tensor_view_wrapper.hpp"
#include "ngraph/runtime/external_function.hpp"
    ...

@@ -55,6 +56,9 @@ namespace ngraph
                                  bool release_function = true);
            std::shared_ptr<ngraph::runtime::CallFrame> make_call_frame();

            const LayoutDescriptorPtrs& get_parameter_layout_descriptors();
            const LayoutDescriptorPtrs& get_result_layout_descriptors();

        protected:
            void compile();
    ...

@@ -79,6 +83,9 @@ namespace ngraph
            bool m_emit_timing;
            bool m_use_tbb;
            std::unordered_map<std::string, std::string> m_variable_name_map;

            LayoutDescriptorPtrs parameter_layout_descriptors;
            LayoutDescriptorPtrs result_layout_descriptors;
        };
    }
}
    ...
src/ngraph/runtime/cpu/cpu_layout_descriptor.cpp

@@ -26,6 +26,13 @@ namespace ngraph
        const AxisVector LayoutDescriptor::Native4DAxisOrder{0, 1, 2, 3};
        const AxisVector LayoutDescriptor::CHWNAxisOrder{1, 2, 3, 0};

        AxisVector LayoutDescriptor::create_native_axis_order(size_t rank)
        {
            AxisVector native_axis_order(rank);
            std::iota(native_axis_order.begin(), native_axis_order.end(), 0);
            return native_axis_order;
        }

        LayoutDescriptor::LayoutDescriptor(const ngraph::descriptor::TensorView& tv,
                                           const AxisVector& tv_axis_order)
            : TensorViewLayout(tv)
    ...

@@ -86,6 +93,9 @@ namespace ngraph
                return false;

            //TODO: Numeric backend-specific properties
            if (mkldnn_format != p_other->mkldnn_format)
                return false;

            return true;
        }
    }
    ...
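create_native_axis_order just fills 0, 1, ..., rank-1 via std::iota, so the rank-4 case reproduces Native4DAxisOrder, while CHWNAxisOrder is a fixed permutation of those axes. A small standalone check, using std::vector<size_t> as a stand-in for AxisVector (an assumption for illustration):

    // Standalone check: iota-based native order vs. the fixed orders above.
    #include <cassert>
    #include <cstddef>
    #include <numeric>
    #include <vector>

    std::vector<size_t> create_native_axis_order(size_t rank) // mirrors the helper above
    {
        std::vector<size_t> order(rank);
        std::iota(order.begin(), order.end(), 0); // 0, 1, ..., rank-1
        return order;
    }

    int main()
    {
        assert((create_native_axis_order(4) == std::vector<size_t>{0, 1, 2, 3})); // Native4DAxisOrder
        const std::vector<size_t> chwn{1, 2, 3, 0};                               // CHWNAxisOrder
        assert(chwn != create_native_axis_order(4)); // CHWN is a non-trivial permutation of the axes
    }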
src/ngraph/runtime/cpu/cpu_layout_descriptor.hpp

@@ -15,6 +15,8 @@
#pragma once

#include <cstdint>
#include <memory>
#include <vector>

#include <mkldnn_types.h>
    ...

@@ -45,9 +47,11 @@ namespace ngraph
                mkldnn_memory_format_t get_mkldnn_format() const { return mkldnn_format; }
                const AxisVector& get_axis_order() const { return axis_order; }

                static const AxisVector Native2DAxisOrder;
                static const AxisVector Native4DAxisOrder;
                static const AxisVector CHWNAxisOrder;
                static AxisVector create_native_axis_order(size_t rank);

            private:
                AxisVector axis_order;
    ...

@@ -58,6 +62,8 @@ namespace ngraph
                // Numeric backend-specific fields
                mkldnn_memory_format_t mkldnn_format;
            };

            typedef std::vector<std::shared_ptr<ngraph::runtime::cpu::LayoutDescriptor>>
                LayoutDescriptorPtrs;
        }
    }
}
src/ngraph/runtime/cpu/cpu_tensor_view.cpp

@@ -37,6 +37,9 @@ runtime::cpu::CPUTensorView::CPUTensorView(const ngraph::element::Type& element_
    , buffer(nullptr)
    , aligned_buffer(nullptr)
{
    // m_descriptor->set_tensor_view_layout(
    //     std::make_shared<ngraph::descriptor::layout::DenseTensorViewLayout>(*m_descriptor));

    buffer_size = shape_size(shape) * element_type.size();
    if (buffer_size)
    {
    ...
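buffer_size here is simply the element count times the element width: for example, a 2x3 tensor of 4-byte floats needs 6 * 4 = 24 bytes. A tiny worked example, with a local product function standing in for ngraph's shape_size (an assumption for illustration):

    // Worked example of the buffer_size computation above.
    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <numeric>
    #include <vector>

    // Stand-in for the shape_size() helper: product of all dimensions.
    size_t shape_size(const std::vector<size_t>& shape)
    {
        return std::accumulate(shape.begin(), shape.end(), size_t{1}, std::multiplies<size_t>());
    }

    int main()
    {
        std::vector<size_t> shape{2, 3};
        size_t element_size = sizeof(float);                   // 4 bytes for f32
        std::cout << shape_size(shape) * element_size << "\n"; // 6 * 4 = 24 bytes
    }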
src/ngraph/runtime/cpu/pass/cpu_layout.cpp

@@ -37,8 +37,7 @@ bool CPULayout::run_on_call_graph(const std::list<std::shared_ptr<Node>>& nodes)
        auto& tensor = tv->get_tensor();
        auto rank = tvt->get_shape().size();

-       AxisVector native_axis_order(rank);
-       std::iota(native_axis_order.begin(), native_axis_order.end(), 0);
+       auto native_axis_order = ngraph::runtime::cpu::LayoutDescriptor::create_native_axis_order(rank);

        if (tensor.is_output() || tensor.is_input() || tensor.is_constant())
        {
    ...
src/ngraph/runtime/cpu/pass/cpu_tensor_allocation.hpp (deleted, 100644 → 0)

// ----------------------------------------------------------------------------
// Copyright 2017 Nervana Systems Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// ----------------------------------------------------------------------------
#pragma once
#include <memory>
#include "ngraph/descriptor/output.hpp"
#include "ngraph/pass/pass.hpp"
namespace ngraph
{
    namespace pass
    {
        namespace cpu
        {
            class CPUTensorAllocation : public ngraph::pass::FunctionPass
            {
            public:
                virtual bool run_on_function(std::shared_ptr<ngraph::Function> function) override
                {
                    for (std::shared_ptr<ngraph::Node> node : function->get_ops())
                    {
                        for (size_t i = 0; i < node->get_output_size(); ++i)
                        {
                            auto tensor_view = node->get_output_tensor_view(i);
                            auto cpu_tensor_view =
                                std::static_pointer_cast<ngraph::runtime::cpu::CPUTensorView>(
                                    tensor_view);
                        }
                    }
                    return false;
                }
            };
        }
    }
}