Commit f642bc4c authored by Nick Korovaiko, committed by Robert Kimball

Reshape Sinking (#1701)

* reshape sinking working on mnist_conv

* forgot to add reshape_sinking files

* refactoring of binary case

* Quantize/Dequantize case, fix add case, add assert

* address bob and scott's feedback

* debug

* fix a bug where reshapes are removed too early
parent edc40856
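For context: the round-trip pattern this pass targets looks like the sketch below (illustrative nGraph C++, mirroring the shapes in the new edge_splitting unit test; the variable names are made up):

    // An nhwc tensor is transposed to nchw for a layout-sensitive op and then
    // transposed back; ReshapeSinking moves such transposes until they cancel.
    auto a = std::make_shared<op::Parameter>(element::f32, Shape{16, 28, 28, 1}); // nhwc
    auto to_nchw = std::make_shared<op::Reshape>(a, AxisVector{0, 3, 1, 2}, Shape{16, 1, 28, 28});
    auto abs = std::make_shared<op::Abs>(to_nchw); // stand-in for an nchw-only op
    auto to_nhwc = std::make_shared<op::Reshape>(abs, AxisVector{0, 2, 3, 1}, Shape{16, 28, 28, 1});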
@@ -89,7 +89,6 @@ namespace ngraph
protected:
/// Throws if the node is invalid.
virtual void validate_and_infer_types();
// Called in constructors during transition
void constructor_validate_and_infer_types();
@@ -107,6 +106,7 @@ namespace ngraph
virtual void generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas) {}
public:
virtual void validate_and_infer_types();
// Called after transition
void delayed_validate_and_infer_types();
@@ -110,6 +110,7 @@ set(SRC
pass/cpu_post_layout_optimizations.cpp
pass/cpu_rnn_fusion.cpp
pass/cpu_workspace_insertion.cpp
pass/cpu_reshape_sinking.cpp
)
if (NOT NGRAPH_DEX_ONLY)
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include "cpu_reshape_sinking.hpp"
#include <algorithm>
#include <iostream>
#include <numeric>
#include <set>
#include <unordered_set>
#include "cpu_collapse_dims.hpp"
#include "ngraph/descriptor/input.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/convolution.hpp"
#include "ngraph/op/dequantize.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/quantize.hpp"
#include "ngraph/op/reshape.hpp"
#include "ngraph/op/util/binary_elementwise_arithmetic.hpp"
#include "ngraph/op/util/unary_elementwise_arithmetic.hpp"
#include "ngraph/util.hpp"
using namespace ngraph;
extern template ngraph::AxisVector
ngraph::apply_permutation<ngraph::AxisVector>(ngraph::AxisVector input,
ngraph::AxisVector order);
extern template ngraph::Shape ngraph::apply_permutation<ngraph::Shape>(ngraph::Shape input,
ngraph::AxisVector order);
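//combine_reshapes composes two transpose-style reshapes into a single one.
//E.g. (illustrative): if r1 has input order {0, 3, 1, 2} (nhwc -> nchw) and
//r2 has input order {0, 2, 3, 1} (nchw -> nhwc), the composed permutation is
//the identity {0, 1, 2, 3}, so the combined reshape becomes a removable no-op.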
static std::shared_ptr<op::Reshape> combine_reshapes(std::shared_ptr<op::Reshape> r1,
std::shared_ptr<op::Reshape> r2)
{
auto default_order = ngraph::get_default_order(r1->get_shape());
auto perm_r1 = apply_permutation(default_order, r1->get_input_order());
auto perm_r2 = apply_permutation(perm_r1, r2->get_input_order());
auto rreshape = std::make_shared<op::Reshape>(r2->get_argument(0), perm_r2, r2->get_shape());
return rreshape;
}
static void
insert_reshape(std::shared_ptr<Node> target, std::shared_ptr<Node> reshape, size_t input_index)
{
auto arg = target->get_inputs().at(input_index).get_output().get_node();
auto new_reshape = reshape->copy_with_new_args({arg});
target->get_inputs().at(input_index).replace_output(new_reshape->get_outputs().at(0));
}
static std::string describe_reshape(std::shared_ptr<Node> node)
{
    std::stringstream ss;
    auto reshape = std::dynamic_pointer_cast<op::Reshape>(node);
    ss << reshape->get_name()
       << " ( axis order = " << ngraph::vector_to_string(reshape->get_input_order())
       << " , shape = " << vector_to_string(reshape->get_shape()) << " )"
       << " , arg = " << reshape->get_argument(0)->get_name();
    return ss.str();
}
static void delete_reshape(std::shared_ptr<Node> reshape)
{
NGRAPH_DEBUG << "Removing reshape " << reshape->get_name();
if (!reshape->get_users().empty())
{
ngraph::replace_node(reshape, reshape->get_argument(0));
}
}
static void mark_reshape_for_deletion(std::shared_ptr<Node> reshape,
std::set<std::shared_ptr<Node>>& reshapes_to_delete)
{
NGRAPH_DEBUG << "Marking reshape " << reshape->get_name() << " for deletion";
reshapes_to_delete.insert(reshape);
}
static std::shared_ptr<op::Reshape> create_default_reshape(std::shared_ptr<Node> n)
{
auto default_order = ngraph::get_default_order(n->get_shape());
auto default_reshape = std::make_shared<op::Reshape>(n, default_order, n->get_shape());
return default_reshape;
}
//remap the quantization axes through the reshape's permutation so they
//refer to the tensor in the default (row-major) order
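//E.g. (illustrative): for an arg reshape with input order {0, 3, 1, 2}
//(nhwc -> nchw), the permutation back to default order is {0, 2, 3, 1},
//so a quantization axis set {3} (the nhwc channel axis) maps to {1}
//(the nchw channel axis).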
static AxisSet get_quantization_axes_in_default_order(std::shared_ptr<op::Reshape> arg_reshape,
const AxisSet& old_axis_set)
{
auto perm_to_def = ngraph::get_permutation_to_default_order(arg_reshape->get_input_order());
AxisSet axis_set;
for (auto axis : old_axis_set)
{
axis_set.insert(perm_to_def.at(axis));
}
return axis_set;
}
struct Swimmer
{
descriptor::Input* input;
std::shared_ptr<op::Reshape> reshape;
};
//Swim is used to push/"swim" reshapes toward the parameters.
//This is typically done for binary ops when one operand is in nchw
//while the other one is in nhwc. We prefer nchw since many ngraph ops
//require this format, so keeping things in nchw lets us eliminate
//as many reshapes as possible.
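//E.g. (illustrative): given Parameter -> Abs -> Abs -> Add, swimming an
//nhwc->nchw reshape up from Add's input pushes it through both Abs nodes
//and materializes it right above the Parameter, yielding
//Parameter -> Reshape -> Abs -> Abs -> Add.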
void swim(descriptor::Input* input, std::shared_ptr<op::Reshape> reshape)
{
Swimmer sw{input, reshape};
std::list<Swimmer> work_queue;
work_queue.push_back(sw);
//TODO: if we support more ops (especially, with >1 args)
//we will need to keep track of nodes we visited and their reshapes
    while (!work_queue.empty())
{
auto csw = work_queue.front();
work_queue.pop_front();
auto n = csw.input->get_output().get_node();
NGRAPH_DEBUG << "Processing (swimming) " << n->get_name();
if (auto unary = std::dynamic_pointer_cast<op::util::UnaryElementwiseArithmetic>(n))
{
Swimmer nsw{&unary->get_inputs().at(0), csw.reshape};
work_queue.push_back(nsw);
NGRAPH_DEBUG << "Propagating reshape " << describe_reshape(csw.reshape) << " for "
                         << n->get_name() << " to " << unary->get_argument(0)->get_name();
}
//TODO: Add cases to push through Reshape and BinaryElementwiseArithmetic
else
{
//materialize
auto new_reshape = csw.reshape->copy_with_new_args({n});
NGRAPH_DEBUG << "Materializing new reshape " << describe_reshape(new_reshape);
csw.input->replace_output(new_reshape->get_outputs().at(0));
}
}
}
//convert_binary_to_default_order is used when one of the arguments
//of a binary op isn't in the default format (i.e. nhwc instead of nchw).
//We normalize that argument to nchw by creating a matching reshape on it
//and swimming the reshape toward the parameters as far as we can.
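//E.g. (illustrative): for Add(left, right) where right carries a pending
//transpose and left is already in default order, we create a matching
//reshape on left, swim it toward left's parameters, mark right's pending
//reshape for deletion, and let the Add inherit right's reorder.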
static void convert_binary_to_default_order(
std::shared_ptr<Node> binary,
descriptor::Input& input,
std::shared_ptr<Node> right,
std::unordered_map<std::shared_ptr<Node>, std::shared_ptr<op::Reshape>>& reorders,
std::set<std::shared_ptr<Node>>& reshapes_to_delete)
{
auto left = input.get_output().get_node();
auto perm_to_def =
ngraph::get_permutation_to_default_order(reorders.at(right)->get_input_order());
auto new_shape = apply_permutation(left->get_shape(), perm_to_def);
NGRAPH_DEBUG << "right = " << ngraph::vector_to_string(right->get_shape()) << ", "
<< right->get_name();
auto new_reshape = std::make_shared<op::Reshape>(left, perm_to_def, new_shape);
NGRAPH_DEBUG << "left : About to swim " << describe_reshape(new_reshape) << " up to "
<< left->get_name();
    //this inserts the new reshape on the left input and swims it toward the parameters
swim(&input, new_reshape);
mark_reshape_for_deletion(reorders.at(right), reshapes_to_delete);
reorders[binary] = reorders.at(right);
}
//The goal of ReshapeSinking is to remove round-trip reshapes
//(i.e. nhwc->nchw -> (nchw-only-op) -> nchw->nhwc)
//around nchw-only ops (e.g. Convolution, BatchNorm, Avg/MaxPool).
//This is achieved by either **sinking** reshapes through ops
//toward op::Results, or **swimming** reshapes up toward op::Parameters.
//For each supported op type we either combine two reshapes
//(replacing the existing Reshape), or materialize pending reshapes
//when they can't be propagated through the op.
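//E.g. (illustrative, as in the mnist_conv test below): a TF-imported graph
//contains ... -> Reshape(nchw->nhwc) -> Relu -> Reshape(nhwc->nchw) -> Convolution.
//Sinking the first reshape through Relu lets the two transposes meet and
//combine into an identity reshape, which ReshapeElimination can then remove.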
bool ngraph::runtime::cpu::pass::CPUReshapeSinking::run_on_function(
std::shared_ptr<ngraph::Function> f)
{
std::unordered_map<std::shared_ptr<Node>, std::shared_ptr<op::Reshape>> reorders;
NodeVector results;
std::set<std::shared_ptr<Node>> reshapes_to_delete;
for (auto n : f->get_ordered_ops())
{
NGRAPH_DEBUG << "Processing node " << n->get_name();
if (n->is_output())
{
results.push_back(n);
}
if (auto reshape = std::dynamic_pointer_cast<op::Reshape>(n))
{
auto orig_reshape = reorders.at(n->get_argument(0));
if (!reshape->get_is_transpose())
{
NGRAPH_DEBUG << "Materializing " << describe_reshape(orig_reshape)
<< " for reshape " << reshape->get_name();
insert_reshape(reshape, orig_reshape, 0);
mark_reshape_for_deletion(orig_reshape, reshapes_to_delete);
reorders[reshape] = create_default_reshape(reshape);
}
else
{
//combine both reshapes
auto new_reshape = combine_reshapes(orig_reshape, reshape);
                //remove the original reshape now that it's been combined into a new one
                //(it should be safe to remove an already-detached node)
mark_reshape_for_deletion(orig_reshape, reshapes_to_delete);
//replace reshape with combined one
ngraph::replace_node(reshape, new_reshape);
reorders[new_reshape] = new_reshape;
NGRAPH_DEBUG << "Combining " << describe_reshape(orig_reshape) << " and"
<< describe_reshape(reshape) << " into "
<< describe_reshape(new_reshape);
}
}
else if (auto unary = std::dynamic_pointer_cast<op::util::UnaryElementwiseArithmetic>(n))
{
auto arg_reshape = reorders.at(n->get_argument(0));
NGRAPH_DEBUG << "Propagating " << describe_reshape(arg_reshape) << " for "
<< n->get_name();
            reorders[n] = arg_reshape;
}
else if (auto binary = std::dynamic_pointer_cast<op::util::BinaryElementwiseArithmetic>(n))
{
auto left = n->get_argument(0);
auto right = n->get_argument(1);
if (reorders.at(left)->get_input_order() == reorders.at(right)->get_input_order())
{
NGRAPH_DEBUG << "Propagating " << describe_reshape(reorders.at(left)) << " for "
<< n->get_name();
reorders[n] = reorders.at(left);
}
else if (reorders.at(left)->get_input_order() ==
ngraph::get_default_order(left->get_shape()))
{
convert_binary_to_default_order(
binary, binary->get_inputs().at(0), right, reorders, reshapes_to_delete);
}
else if (reorders.at(right)->get_input_order() ==
ngraph::get_default_order(right->get_shape()))
{
convert_binary_to_default_order(
binary, binary->get_inputs().at(1), left, reorders, reshapes_to_delete);
}
else
{
NGRAPH_DEBUG << "Materializing both reshapes for " << binary->get_name();
NGRAPH_DEBUG << "Left = " << describe_reshape(reorders.at(left));
NGRAPH_DEBUG << "Right = " << describe_reshape(reorders.at(right));
mark_reshape_for_deletion(reorders.at(left), reshapes_to_delete);
mark_reshape_for_deletion(reorders.at(right), reshapes_to_delete);
insert_reshape(binary, reorders.at(left), 0);
insert_reshape(binary, reorders.at(right), 1);
}
}
else if (auto goe = std::dynamic_pointer_cast<op::GetOutputElement>(n))
{
reorders[goe] = create_default_reshape(goe);
}
else if (auto quantize = std::dynamic_pointer_cast<op::Quantize>(n))
{
auto arg_reshape = reorders.at(n->get_argument(0));
AxisSet axes_in_def_order =
get_quantization_axes_in_default_order(arg_reshape, quantize->get_axes());
auto new_quantize = std::make_shared<op::Quantize>(quantize->get_argument(0),
quantize->get_argument(1),
quantize->get_argument(2),
quantize->get_element_type(),
axes_in_def_order,
quantize->get_round_mode());
ngraph::replace_node(quantize, new_quantize);
reorders[new_quantize] = arg_reshape;
}
else if (auto dequantize = std::dynamic_pointer_cast<op::Dequantize>(n))
{
auto arg_reshape = reorders.at(n->get_argument(0));
AxisSet axes_in_def_order =
get_quantization_axes_in_default_order(arg_reshape, dequantize->get_axes());
auto new_dequantize = std::make_shared<op::Dequantize>(dequantize->get_argument(0),
dequantize->get_argument(1),
dequantize->get_argument(2),
dequantize->get_element_type(),
axes_in_def_order);
ngraph::replace_node(dequantize, new_dequantize);
reorders[new_dequantize] = arg_reshape;
}
else
{
            //skip multi-output nodes and deal with their GOEs exclusively
if (n->get_outputs().size() > 1)
{
continue;
}
//TODO: multiple outputs
for (size_t i = 0; i < n->get_arguments().size(); i++)
{
                //materialize all pending reshapes on this node's arguments
auto arg = n->get_argument(i);
if (reorders.count(arg) != 0)
{
NGRAPH_DEBUG << "Materializing " << describe_reshape(reorders.at(arg))
<< " for " << arg->get_name();
mark_reshape_for_deletion(reorders.at(arg), reshapes_to_delete);
insert_reshape(n, reorders.at(arg), i);
//no swimming up
}
}
reorders[n] = create_default_reshape(n);
}
}
    //purge all the reshapes we either sank or swam
for (auto r : reshapes_to_delete)
{
delete_reshape(r);
}
//make sure shapes are always materialized before results
for (auto r : results)
{
NGRAPH_ASSERT(r->get_shape() == r->get_argument(0)->get_shape() &&
r->get_element_type() == r->get_argument(0)->get_element_type())
<< " op::Result = " << *r << ", Arg = " << *r->get_argument(0);
}
    //re-run validation to fix stale shape info wholesale
for (auto n : f->get_ordered_ops())
{
n->validate_and_infer_types();
}
return true;
}
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#pragma once
#include "ngraph/pass/pass.hpp"
namespace ngraph
{
namespace runtime
{
namespace cpu
{
namespace pass
{
class CPUReshapeSinking : public ngraph::pass::FunctionPass
{
public:
bool run_on_function(std::shared_ptr<ngraph::Function> function) override;
};
}
}
}
}
@@ -488,3 +488,13 @@ AxisVector ngraph::get_default_order(size_t rank)
std::iota(begin(default_order), end(default_order), 0);
return default_order;
}
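// get_permutation_to_default_order returns the inverse permutation of the
// given axis order. E.g. for axis_order {0, 3, 1, 2} it returns {0, 2, 3, 1},
// since applying {0, 2, 3, 1} after {0, 3, 1, 2} restores {0, 1, 2, 3}.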
AxisVector ngraph::get_permutation_to_default_order(const AxisVector& axis_order)
{
AxisVector out(axis_order.size());
for (size_t i = 0; i < axis_order.size(); i++)
{
out.at(axis_order[i]) = i;
}
return out;
}
@@ -204,6 +204,8 @@ namespace ngraph
AxisVector get_default_order(size_t rank);
AxisVector get_default_order(const Shape& shape);
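/// Returns the permutation that inverts the given axis order,
/// i.e. the order that maps a permuted tensor back to the default order.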
AxisVector get_permutation_to_default_order(const AxisVector& axis_order);
/*
* Return type struct for cache_fprop, with the modified fprop and bprop
* functions
@@ -69,7 +69,7 @@ add_subdirectory(files)
add_subdirectory(util)
if(NGRAPH_CPU_ENABLE)
-set(SRC ${SRC} backend_performance.cpp cpu_fusion.cpp cpu_test.cpp)
+set(SRC ${SRC} backend_performance.cpp cpu_fusion.cpp cpu_test.cpp cpu_reshape_sinking.cpp)
endif()
if(NGRAPH_GPU_ENABLE)
//*****************************************************************************
// Copyright 2017-2018 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************
#include <algorithm>
#include <cstdio>
#include <iostream>
#include <list>
#include <memory>
#include "gtest/gtest.h"
#include "ngraph/autodiff/adjoints.hpp"
#include "ngraph/file_util.hpp"
#include "ngraph/graph_util.hpp"
#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/batch_norm.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/parameter.hpp"
#include "ngraph/pass/core_fusion.hpp"
#include "ngraph/pass/cse.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/reshape_elimination.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
#include "ngraph/runtime/cpu/pass/cpu_reshape_sinking.hpp"
#include "ngraph/serializer.hpp"
#include "ngraph/util.hpp"
#include "nlohmann/json.hpp"
#include "util/all_close.hpp"
#include "util/autodiff/backprop_function.hpp"
#include "util/autodiff/numeric_compare.hpp"
#include "util/ndarray.hpp"
#include "util/random.hpp"
#include "util/test_tools.hpp"
using namespace ngraph;
using namespace std;
TEST(cpu_reshape_sinking, edge_splitting)
{
    //checks that Reshapes are pushed through op::Abs but stopped by op::Sum
Shape shape_nhwc{16, 28, 28, 1};
Shape shape_nchw{16, 1, 28, 28};
auto a = make_shared<op::Parameter>(element::i32, shape_nhwc);
auto reshape = make_shared<op::Reshape>(a, AxisVector{0, 3, 1, 2}, shape_nchw);
auto absn = make_shared<op::Abs>(reshape);
auto absn2 = make_shared<op::Abs>(absn);
auto sum = make_shared<op::Sum>(reshape, AxisSet{0, 1, 2, 3});
auto func = make_shared<Function>(NodeVector{absn2, sum}, op::ParameterVector{a});
pass::Manager pass_manager;
    //pass_manager.register_pass<pass::VisualizeTree>("before.pdf");
pass_manager.register_pass<runtime::cpu::pass::CPUReshapeSinking>();
pass_manager.register_pass<pass::ReshapeElimination>();
pass_manager.register_pass<pass::CommonSubexpressionElimination>();
pass_manager.register_pass<pass::VisualizeTree>("after.pdf");
pass_manager.run_passes(func);
ASSERT_EQ(func->get_results().at(1)->get_argument(0), sum);
auto new_reshape =
std::dynamic_pointer_cast<op::Reshape>(func->get_results().at(0)->get_argument(0));
ASSERT_TRUE(new_reshape);
ASSERT_EQ(new_reshape->get_shape(), shape_nchw);
}
TEST(cpu_reshape_sinking, mnist_conv)
{
const string json_path = file_util::path_join(SERIALIZED_ZOO, "tf_conv_mnist_nhwc.json");
const string json_string = file_util::read_file_to_string(json_path);
stringstream ss(json_string);
shared_ptr<Function> func = ngraph::deserialize(ss);
pass::Manager pass_manager;
size_t before_count = count_ops_of_type<op::Reshape>(func);
//pass_manager.register_pass<pass::VisualizeTree>("before.pdf");
pass_manager.register_pass<runtime::cpu::pass::CPUReshapeSinking>();
pass_manager.register_pass<pass::ReshapeElimination>();
pass_manager.register_pass<pass::CommonSubexpressionElimination>();
//pass_manager.register_pass<pass::CoreFusion>();
//pass_manager.register_pass<runtime::cpu::pass::CPUFusion>();
//pass_manager.register_pass<pass::VisualizeTree>("after.pdf");
pass_manager.run_passes(func);
    size_t after_count = count_ops_of_type<op::Reshape>(func);
    ASSERT_LE(after_count, before_count);
}
[{
"name" : "Function_0",
"ops" : [
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_12",
"op" : "Parameter",
"outputs" : ["Parameter_12_0"],
"shape" : [ 2, 224, 224, 3 ]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_11",
"op" : "Parameter",
"outputs" : ["Parameter_11_0"],
"shape" : [10]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_10",
"op" : "Parameter",
"outputs" : ["Parameter_10_0"],
"shape" : [ 37632, 10 ]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_9",
"op" : "Parameter",
"outputs" : ["Parameter_9_0"],
"shape" : [3]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_8",
"op" : "Parameter",
"outputs" : ["Parameter_8_0"],
"shape" : [3]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_7",
"op" : "Parameter",
"outputs" : ["Parameter_7_0"],
"shape" : [3]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_6",
"op" : "Parameter",
"outputs" : ["Parameter_6_0"],
"shape" : [3]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_5",
"op" : "Parameter",
"outputs" : ["Parameter_5_0"],
"shape" : [ 3, 3, 3, 3 ]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_4",
"op" : "Parameter",
"outputs" : ["Parameter_4_0"],
"shape" : [3]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_3",
"op" : "Parameter",
"outputs" : ["Parameter_3_0"],
"shape" : [3]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_2",
"op" : "Parameter",
"outputs" : ["Parameter_2_0"],
"shape" : [3]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_1",
"op" : "Parameter",
"outputs" : ["Parameter_1_0"],
"shape" : [3]
},
{
"cacheable" : false,
"element_type" : "float",
"inputs" : [],
"name" : "Parameter_0",
"op" : "Parameter",
"outputs" : ["Parameter_0_0"],
"shape" : [ 3, 3, 3, 3 ]
},
{
"input_order" : [ 0, 3, 1, 2 ],
"inputs" : ["Parameter_12"],
"name" : "Reshape_13",
"op" : "Reshape",
"output_shape" : [ 2, 3, 224, 224 ],
"outputs" : ["Reshape_13_0"]
},
{
"axes" : [0],
"inputs" : ["Parameter_11"],
"name" : "Broadcast_36",
"op" : "Broadcast",
"outputs" : ["Broadcast_36_0"],
"shape" : [ 2, 10 ]
},
{
"input_order" : [ 3, 2, 0, 1 ],
"inputs" : ["Parameter_5"],
"name" : "Reshape_22",
"op" : "Reshape",
"output_shape" : [ 3, 3, 3, 3 ],
"outputs" : ["Reshape_22_0"]
},
{
"input_order" : [ 3, 2, 0, 1 ],
"inputs" : ["Parameter_0"],
"name" : "Reshape_14",
"op" : "Reshape",
"output_shape" : [ 3, 3, 3, 3 ],
"outputs" : ["Reshape_14_0"]
},
{
"data_dilation_strides" : [ 1, 1 ],
"inputs" : [ "Reshape_13", "Reshape_14" ],
"name" : "Convolution_15",
"op" : "Convolution",
"outputs" : ["Convolution_15_0"],
"padding_above" : [ 1, 1 ],
"padding_below" : [ 1, 1 ],
"window_dilation_strides" : [ 1, 1 ],
"window_movement_strides" : [ 1, 1 ]
},
{
"input_order" : [ 0, 2, 3, 1 ],
"inputs" : ["Convolution_15"],
"name" : "Reshape_16",
"op" : "Reshape",
"output_shape" : [ 2, 224, 224, 3 ],
"outputs" : ["Reshape_16_0"]
},
{
"input_order" : [ 0, 3, 1, 2 ],
"inputs" : ["Reshape_16"],
"name" : "Reshape_17",
"op" : "Reshape",
"output_shape" : [ 2, 3, 224, 224 ],
"outputs" : ["Reshape_17_0"]
},
{
"eps" : 1.0009999641624745e-05,
"inputs" : [
"Parameter_1", "Parameter_2", "Reshape_17", "Parameter_3",
"Parameter_4"
],
"name" : "BatchNorm_18",
"op" : "BatchNorm",
"outputs" : ["BatchNorm_18_0"],
"training" : false
},
{
"input_order" : [ 0, 2, 3, 1 ],
"inputs" : ["BatchNorm_18"],
"name" : "Reshape_19",
"op" : "Reshape",
"output_shape" : [ 2, 224, 224, 3 ],
"outputs" : ["Reshape_19_0"]
},
{
"inputs" : ["Reshape_19"],
"name" : "Relu_20",
"op" : "Relu",
"outputs" : ["Relu_20_0"]
},
{
"input_order" : [ 0, 3, 1, 2 ],
"inputs" : ["Relu_20"],
"name" : "Reshape_21",
"op" : "Reshape",
"output_shape" : [ 2, 3, 224, 224 ],
"outputs" : ["Reshape_21_0"]
},
{
"data_dilation_strides" : [ 1, 1 ],
"inputs" : [ "Reshape_21", "Reshape_22" ],
"name" : "Convolution_23",
"op" : "Convolution",
"outputs" : ["Convolution_23_0"],
"padding_above" : [ 1, 1 ],
"padding_below" : [ 1, 1 ],
"window_dilation_strides" : [ 1, 1 ],
"window_movement_strides" : [ 1, 1 ]
},
{
"input_order" : [ 0, 2, 3, 1 ],
"inputs" : ["Convolution_23"],
"name" : "Reshape_24",
"op" : "Reshape",
"output_shape" : [ 2, 224, 224, 3 ],
"outputs" : ["Reshape_24_0"]
},
{
"input_order" : [ 0, 3, 1, 2 ],
"inputs" : ["Reshape_24"],
"name" : "Reshape_25",
"op" : "Reshape",
"output_shape" : [ 2, 3, 224, 224 ],
"outputs" : ["Reshape_25_0"]
},
{
"eps" : 1.0009999641624745e-05,
"inputs" : [
"Parameter_6", "Parameter_7", "Reshape_25", "Parameter_8",
"Parameter_9"
],
"name" : "BatchNorm_26",
"op" : "BatchNorm",
"outputs" : ["BatchNorm_26_0"],
"training" : false
},
{
"input_order" : [ 0, 2, 3, 1 ],
"inputs" : ["BatchNorm_26"],
"name" : "Reshape_27",
"op" : "Reshape",
"output_shape" : [ 2, 224, 224, 3 ],
"outputs" : ["Reshape_27_0"]
},
{
"inputs" : [ "Reshape_27", "Parameter_12" ],
"name" : "Add_28",
"op" : "Add",
"outputs" : ["Add_28_0"]
},
{
"inputs" : ["Add_28"],
"name" : "Relu_29",
"op" : "Relu",
"outputs" : ["Relu_29_0"]
},
{
"input_order" : [ 0, 3, 1, 2 ],
"inputs" : ["Relu_29"],
"name" : "Reshape_30",
"op" : "Reshape",
"output_shape" : [ 2, 3, 224, 224 ],
"outputs" : ["Reshape_30_0"]
},
{
"include_padding_in_avg_computation" : false,
"inputs" : ["Reshape_30"],
"name" : "AvgPool_31",
"op" : "AvgPool",
"outputs" : ["AvgPool_31_0"],
"padding_above" : [ 0, 0 ],
"padding_below" : [ 0, 0 ],
"window_movement_strides" : [ 2, 2 ],
"window_shape" : [ 2, 2 ]
},
{
"input_order" : [ 0, 2, 3, 1 ],
"inputs" : ["AvgPool_31"],
"name" : "Reshape_32",
"op" : "Reshape",
"output_shape" : [ 2, 112, 112, 3 ],
"outputs" : ["Reshape_32_0"]
},
{
"input_order" : [ 0, 1, 2, 3 ],
"inputs" : ["Reshape_32"],
"name" : "Reshape_34",
"op" : "Reshape",
"output_shape" : [ 2, 37632 ],
"outputs" : ["Reshape_34_0"]
},
{
"inputs" : [ "Reshape_34", "Parameter_10" ],
"name" : "Dot_35",
"op" : "Dot",
"outputs" : ["Dot_35_0"],
"reduction_axes_count" : 1
},
{
"inputs" : [ "Dot_35", "Broadcast_36" ],
"name" : "Add_37",
"op" : "Add",
"outputs" : ["Add_37_0"]
},
{
"inputs" : ["Add_37"],
"name" : "Result_38",
"op" : "Result",
"outputs" : ["Result_38_0"]
}
],
"parameters" : [
"Parameter_0", "Parameter_1", "Parameter_2", "Parameter_3", "Parameter_4",
"Parameter_5", "Parameter_6", "Parameter_7", "Parameter_8", "Parameter_9",
"Parameter_10", "Parameter_11", "Parameter_12"
],
"result" : ["Result_38"]
}]
\ No newline at end of file