Commit 98445357 authored by Jayaram Bobba

Changed license headers. Style fixes

parent dd8017f9
@@ -100,7 +100,11 @@ void runtime::cpu::CPU_CallFrame::propagate_layouts(
     }
     for (size_t i = 0; i < tvs.size(); i++)
     {
-        assert(layouts[i]);
+        if (layouts[i] == nullptr)
+        {
+            throw ngraph_error(
+                "Error propagating layouts - layout information missing from tensor view");
+        }
         tvs[i]->get_descriptor()->set_tensor_view_layout(layouts[i]);
     }
 }
...
@@ -14,7 +14,6 @@
  * limitations under the License.
  *******************************************************************************/

-#include <cassert>
 #include <cstdlib>
 #include <fstream>
 #include <memory>
@@ -95,6 +94,7 @@
 #include "ngraph/runtime/cpu/cpu_external_function.hpp"
 #include "ngraph/runtime/cpu/cpu_tensor_view.hpp"
 #include "ngraph/runtime/cpu/cpu_tracing.hpp"
+#include "ngraph/runtime/cpu/mkldnn_utils.hpp"
 #include "ngraph/runtime/cpu/ops/matmul_bias.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
 #include "ngraph/runtime/cpu/pass/cpu_layout.hpp"
@@ -105,7 +105,7 @@ using namespace ngraph;
 static const string s_output_dir = "cpu_codegen";

 // Temporary Memory Pool alignment
-static const size_t MemoryPoolAlignment = 64;
+static const size_t s_memory_pool_alignment = 4096;

 class StaticInitializers
 {
@@ -234,7 +234,7 @@ void runtime::cpu::CPU_ExternalFunction::compile()
     pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
     pass_manager.register_pass<runtime::cpu::pass::CPULayout>();
     pass_manager.register_pass<ngraph::pass::Liveness>();
-    pass_manager.register_pass<ngraph::pass::MemoryLayout>(MemoryPoolAlignment);
+    pass_manager.register_pass<ngraph::pass::MemoryLayout>(s_memory_pool_alignment);
     pass_manager.run_passes(m_function);
@@ -245,11 +245,7 @@ void runtime::cpu::CPU_ExternalFunction::compile()
     {
         for (shared_ptr<Node> node : current_function->get_ordered_ops())
         {
-            if (dynamic_cast<op::Convolution*>(node.get()) ||
-                dynamic_cast<op::ConvolutionBackpropData*>(node.get()) ||
-                dynamic_cast<op::ConvolutionBackpropFilters*>(node.get()) ||
-                dynamic_cast<op::AvgPool*>(node.get()) || dynamic_cast<op::MaxPool*>(node.get()) ||
-                dynamic_cast<op::AvgPoolBackprop*>(node.get()))
+            if (ngraph::runtime::cpu::mkldnn_utils::IsMKLDNNOp(*node))
             {
                 include_mkldnn_headers = true;
             }
@@ -524,7 +520,7 @@ using namespace ngraph::runtime;
         writer << "// Memory pool size is " << temp_pool_size << " bytes\n";
         writer << "// Worst case size is " << worst_case_tmp_size << " bytes\n";
         writer << "ngraph::runtime::AlignedBuffer memory_handler(" << temp_pool_size << ", "
-               << MemoryPoolAlignment << ");\n";
+               << s_memory_pool_alignment << ");\n";
         writer << "size_t pool_base_ptr = (size_t)memory_handler.get_ptr();\n";
         writer << "\n";
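For context, the writer << statements in this hunk emit C++ source text into the generated file. With an illustrative temp_pool_size of 1024 and worst_case_tmp_size of 2048 (made-up values, not taken from this commit), the emitted snippet would now read roughly:

    // Memory pool size is 1024 bytes
    // Worst case size is 2048 bytes
    ngraph::runtime::AlignedBuffer memory_handler(1024, 4096);
    size_t pool_base_ptr = (size_t)memory_handler.get_ptr();

so the only behavioral change here is the pool alignment handed to AlignedBuffer: s_memory_pool_alignment (4096) instead of the old MemoryPoolAlignment (64).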
@@ -772,20 +768,30 @@ using namespace ngraph::runtime;
         for (size_t i = 0; i < parameter->get_output_size(); ++i)
         {
             auto tv = parameter->get_output_tensor_view(i);
-            assert(tv->get_tensor_view_layout());
+            if (tv->get_tensor_view_layout() == nullptr)
+            {
+                throw ngraph_error("layout missing on function parameter's tensor view: " +
+                                   tv->get_name());
+            }
             parameter_layout_descriptors.emplace_back(
                 static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_view_layout()));
         }
     }
     // Store layouts assigned for results
-    assert(result_layout_descriptors.empty());
+    if (!result_layout_descriptors.empty())
+    {
+        throw ngraph_error("Function output layouts should not be pre-assigned");
+    }
     for (size_t i = 0; i < m_function->get_output_size(); ++i)
     {
         const auto& output = m_function->get_output_op(i);
         for (size_t j = 0; j < output->get_output_size(); ++j)
         {
             auto tv = output->get_output_tensor_view(j);
-            assert(tv->get_tensor_view_layout());
+            if (tv->get_tensor_view_layout() == nullptr)
+            {
+                throw ngraph_error("layout missing on function output tensor: " + tv->get_name());
+            }
             result_layout_descriptors.emplace_back(
                 static_pointer_cast<runtime::cpu::LayoutDescriptor>(tv->get_tensor_view_layout()));
         }
...
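A note on the style change above: assert() checks disappear in NDEBUG/release builds, so the layout checks are rewritten as explicit null tests that throw. Below is a minimal standalone sketch of the same guard, with a local stand-in for ngraph_error (in ngraph it derives from std::runtime_error) and a hypothetical helper name, not code from this commit:

    #include <memory>
    #include <stdexcept>
    #include <string>

    // Stand-in for ngraph_error, which in ngraph is a std::runtime_error subclass.
    struct ngraph_error : std::runtime_error
    {
        explicit ngraph_error(const std::string& what_arg) : std::runtime_error(what_arg) {}
    };

    // The guard pattern this commit applies in cpu_call_frame.cpp and
    // cpu_external_function.cpp: an explicit null check that throws instead of
    // assert(), so the diagnostic survives release builds.
    template <typename T>
    void check_layout_present(const std::shared_ptr<T>& layout, const std::string& msg)
    {
        if (layout == nullptr)
        {
            throw ngraph_error(msg);
        }
    }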
-// ----------------------------------------------------------------------------
-// Copyright 2017 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #include <algorithm>
@@ -81,16 +83,24 @@ namespace ngraph
             {
                 const LayoutDescriptor* p_other = dynamic_cast<const LayoutDescriptor*>(&other);
                 if (!p_other)
+                {
                     return false;
+                }

                 if (get_element_type() != p_other->get_element_type())
+                {
                     return false;
+                }

                 if (strides != p_other->strides)
+                {
                     return false;
+                }

                 if (offset != p_other->offset)
+                {
                     return false;
+                }

                 return true;
             }
...
-// ----------------------------------------------------------------------------
-// Copyright 2017 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #pragma once
...
-// ----------------------------------------------------------------------------
-// Copyright 2017 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #include <cstring>
 #include <memory>
...
-// ----------------------------------------------------------------------------
-// Copyright 2017 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #pragma once
...
-// ----------------------------------------------------------------------------
-// Copyright 2018 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #include <fstream>
 #include <map>
...
-// ----------------------------------------------------------------------------
-// Copyright 2018 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #pragma once
...
-// ----------------------------------------------------------------------------
-// Copyright 2017 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #include <typeindex>
 #include <typeinfo>
@@ -29,17 +31,21 @@ namespace ngraph
     {
         namespace cpu
         {
-            namespace MKLDNN
+            namespace mkldnn_utils
             {
 #define TI(x) std::type_index(typeid(x))

-                const std::unordered_set<std::type_index> OpRegistry{
-                    TI(ngraph::op::Convolution), TI(ngraph::op::AvgPool), TI(ngraph::op::MaxPool),
-                };
+                const std::unordered_set<std::type_index> s_op_registry{
+                    TI(ngraph::op::AvgPool),
+                    TI(ngraph::op::AvgPoolBackprop),
+                    TI(ngraph::op::Convolution),
+                    TI(ngraph::op::ConvolutionBackpropData),
+                    TI(ngraph::op::ConvolutionBackpropFilters),
+                    TI(ngraph::op::MaxPool)};

                 bool IsMKLDNNOp(ngraph::Node& op)
                 {
-                    return (OpRegistry.find(TI(op)) != OpRegistry.end());
+                    return (s_op_registry.find(TI(op)) != s_op_registry.end());
                 }

                 mkldnn::memory::format
...
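The renamed mkldnn_utils::IsMKLDNNOp keeps the same lookup technique: one unordered_set keyed by std::type_index, queried with typeid on a Node reference, instead of a chain of dynamic_casts (compare the cpu_external_function.cpp hunk earlier in this commit). A self-contained sketch of that pattern, using placeholder types rather than the actual ngraph classes:

    #include <iostream>
    #include <typeindex>
    #include <typeinfo>
    #include <unordered_set>

    // Placeholder polymorphic hierarchy standing in for ngraph::Node and its op classes.
    struct Node { virtual ~Node() = default; };
    struct Convolution : Node {};
    struct AvgPool : Node {};
    struct Relu : Node {};

    // One registry lookup replaces a chain of dynamic_casts; typeid applied to a
    // reference of polymorphic type yields the dynamic (most-derived) type.
    static const std::unordered_set<std::type_index> s_op_registry{
        std::type_index(typeid(Convolution)), std::type_index(typeid(AvgPool))};

    static bool is_registered_op(const Node& op)
    {
        return s_op_registry.find(std::type_index(typeid(op))) != s_op_registry.end();
    }

    int main()
    {
        Convolution conv;
        Relu relu;
        std::cout << is_registered_op(conv) << " " << is_registered_op(relu) << "\n"; // prints "1 0"
        return 0;
    }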
-// ----------------------------------------------------------------------------
-// Copyright 2017 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #pragma once
@@ -29,7 +31,7 @@ namespace ngraph
     {
         namespace cpu
         {
-            namespace MKLDNN
+            namespace mkldnn_utils
             {
                 bool IsMKLDNNOp(ngraph::Node& op);
                 mkldnn::memory::format
...
-// ----------------------------------------------------------------------------
-// Copyright 2018 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

-#include "convert_layout.hpp"
+#include "ngraph/runtime/cpu/ops/convert_layout.hpp"
 #include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"

 using namespace std;
...
-// ----------------------------------------------------------------------------
-// Copyright 2018 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #pragma once
...
-// ----------------------------------------------------------------------------
-// Copyright 2017 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #include <algorithm>
 #include <memory>
@@ -22,9 +24,10 @@
 #include "ngraph/runtime/cpu/cpu_layout_descriptor.hpp"
 #include "ngraph/runtime/cpu/mkldnn_utils.hpp"

-using namespace ngraph::runtime::cpu::pass;
+//using namespace ngraph::runtime::cpu::pass;
+using namespace ngraph;

-bool CPULayout::run_on_call_graph(const std::list<std::shared_ptr<Node>>& nodes)
+bool runtime::cpu::pass::CPULayout::run_on_call_graph(const std::list<std::shared_ptr<Node>>& nodes)
 {
     for (const auto& node : nodes)
     {
@@ -49,11 +52,11 @@ bool CPULayout::run_on_call_graph(const std::list<std::shared_ptr<Node>>& nodes)
             if (tensor.is_output() || tensor.is_input() || tensor.is_constant())
             {
                 // Set the MKLDNN format to native row-major variants
-                layout->set_mkldnn_format(MKLDNN::CreateNativeDataFormat(*layout));
+                layout->set_mkldnn_format(mkldnn_utils::CreateNativeDataFormat(*layout));
             }
             else
             {
-                if (ngraph::runtime::cpu::MKLDNN::IsMKLDNNOp(*node))
+                if (ngraph::runtime::cpu::mkldnn_utils::IsMKLDNNOp(*node))
                 {
                     // TODO(jmenon): get_inputs is marked as to-be-deprecated
                     // but get_input_ops isn't a suitable API so this needs to be
...
-// ----------------------------------------------------------------------------
-// Copyright 2017 Nervana Systems Inc.
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// ----------------------------------------------------------------------------
+/*******************************************************************************
+* Copyright 2017-2018 Intel Corporation
+*
+* Licensed under the Apache License, Version 2.0 (the "License");
+* you may not use this file except in compliance with the License.
+* You may obtain a copy of the License at
+*
+*     http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*******************************************************************************/

 #pragma once
...