Commit 5a7d60a1 authored by Louis Feng, committed by Adam Procter

NGRAPH-1605 Sigmoid multiply fusion (#964)

parent 83206a0a
...@@ -219,6 +219,7 @@ if (NGRAPH_CPU_ENABLE AND LLVM_INCLUDE_DIR AND MKLDNN_INCLUDE_DIR)
runtime/cpu/op/conv_relu.cpp
runtime/cpu/op/convert_layout.cpp
runtime/cpu/op/sigmoid.cpp
runtime/cpu/op/sigmoid_mul.cpp
runtime/cpu/op/rnn.cpp
runtime/cpu/op/lstm.cpp
runtime/cpu/op/matmul_bias.cpp
...
...@@ -102,6 +102,7 @@
#include "ngraph/runtime/cpu/op/max_pool_with_indices.hpp"
#include "ngraph/runtime/cpu/op/rnn.hpp"
#include "ngraph/runtime/cpu/op/sigmoid.hpp"
#include "ngraph/runtime/cpu/op/sigmoid_mul.hpp"
#include "ngraph/type/element_type.hpp"
#include "ngraph/util.hpp"
...@@ -3895,6 +3896,158 @@ namespace ngraph
<< to_string(sigmoid_index) << ");\n";
}
std::string
generate_sigmoid_mul_func(const ngraph::op::SigmoidMultiply::FunctionType type,
const std::string& input,
const std::string& out_numer,
const std::string& out_denom,
bool derivative)
{
std::string func_block;
switch (type)
{
case ngraph::op::SigmoidMultiply::FunctionType::Logistic:
func_block = "auto e_x = exp(" + input + ");\n";
func_block += out_numer + " = e_x;\n";
func_block += out_denom + " = e_x+1;\n";
if (derivative)
{
func_block += "d_" + out_numer + " = " + out_numer + ";\n";
func_block +=
"d_" + out_denom + " = " + out_denom + " * " + out_denom + ";\n";
}
break;
case ngraph::op::SigmoidMultiply::FunctionType::Tanh:
func_block = "auto e_2x = exp(2.0*" + input + ");\n";
func_block += out_numer + " = e_2x-1;\n";
func_block += out_denom + " = e_2x+1;\n";
if (derivative)
{
func_block += "d_" + out_numer + " = 4.0*e_2x;\n";
func_block +=
"d_" + out_denom + " = " + out_denom + " * " + out_denom + ";\n";
}
break;
case ngraph::op::SigmoidMultiply::FunctionType::Identity:
func_block = out_numer + " = " + input + ";\n";
func_block += out_denom + " = 1;\n";
if (derivative)
{
func_block += "d_" + out_numer + " = 1;\n";
func_block += "d_" + out_denom + " = 1;\n";
}
break;
}
if (func_block.empty())
{
throw ngraph_error(
"generate_sigmoid_mul_func input function type not supported");
}
return func_block;
}
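// The helper above expresses each activation as a numerator/denominator pair so
// the fused multiply can be evaluated as a single rational expression:
//   Logistic: f(x) = e^x / (e^x + 1),             f'(x) = e^x / (e^x + 1)^2,
//             hence d_numer = numer and d_denom = denom * denom.
//   Tanh:     f(x) = (e^{2x} - 1) / (e^{2x} + 1), f'(x) = 4*e^{2x} / (e^{2x} + 1)^2,
//             hence d_numer = 4.0 * e_2x and d_denom = denom * denom.
//   Identity: f(x) = x / 1,                       f'(x) = 1 / 1.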
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::SigmoidMultiply)
{
auto sigmoid_mul = static_cast<const ngraph::op::SigmoidMultiply*>(node);
std::string numer_0 = "numer_0";
std::string denom_0 = "denom_0";
std::string numer_1 = "numer_1";
std::string denom_1 = "denom_1";
std::string input_0_func_string =
generate_sigmoid_mul_func(sigmoid_mul->get_input_func_type(0),
args[0].get_name() + "[i]",
numer_0,
denom_0,
false);
std::string input_1_func_string =
generate_sigmoid_mul_func(sigmoid_mul->get_input_func_type(1),
args[1].get_name() + "[i]",
numer_1,
denom_1,
false);
writer.block_begin();
writer << "#pragma omp parallel for simd\n";
writer << "for (size_t i=0; i<" << out[0].get_size() << "; i++)\n";
writer.block_begin();
writer << "float " << numer_0 << ";\n";
writer << "float " << denom_0 << ";\n";
writer.block_begin();
writer << input_0_func_string;
writer.block_end();
writer << "float " << numer_1 << ";\n";
writer << "float " << denom_1 << ";\n";
writer.block_begin();
writer << input_1_func_string;
writer.block_end();
writer << out[0].get_name()
<< "[i] = (" + numer_0 + " * " + numer_1 + ") / (" + denom_0 + " * " +
denom_1 + ");\n";
writer.block_end();
writer.block_end();
}
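// For a hypothetical Tanh (input 0) x Logistic (input 1) pair, the loop emitted
// above evaluates, per element,
//   out[i] = (numer_0 * numer_1) / (denom_0 * denom_1)
//          = ((e^{2x} - 1) * e^y) / ((e^{2x} + 1) * (e^y + 1))
//          = tanh(x) * sigmoid(y)
// where x and y stand for the i-th elements of the two inputs, so both activations
// and the multiply share one "#pragma omp parallel for simd" loop instead of
// running as three separate kernels.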
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::SigmoidMultiplyBackprop)
{
// math: we have sigmoid functions f(x) and g(y) multiplied, z = f(x) * g(y)
// dz/dx = dz/df * df/dx = g(y) * f'(x)
// dz/dy = dz/dg * dg/dy = f(x) * g'(y)
auto sigmoid_mul_backprop =
static_cast<const ngraph::op::SigmoidMultiplyBackprop*>(node);
const TensorViewWrapper& data_0 = args[0];
const TensorViewWrapper& data_1 = args[1];
const TensorViewWrapper& delta = args[2];
const TensorViewWrapper& input_0_delta = out[0];
const TensorViewWrapper& input_1_delta = out[1];
std::string numer_0 = "numer_0";
std::string denom_0 = "denom_0";
std::string numer_1 = "numer_1";
std::string denom_1 = "denom_1";
std::string d_numer_0 = "d_numer_0";
std::string d_denom_0 = "d_denom_0";
std::string d_numer_1 = "d_numer_1";
std::string d_denom_1 = "d_denom_1";
std::string input_0_func_string =
generate_sigmoid_mul_func(sigmoid_mul_backprop->get_input_func_type(0),
data_0.get_name() + "[i]",
numer_0,
denom_0,
true);
std::string input_1_func_string =
generate_sigmoid_mul_func(sigmoid_mul_backprop->get_input_func_type(1),
data_1.get_name() + "[i]",
numer_1,
denom_1,
true);
writer.block_begin();
writer << "#pragma omp parallel for simd\n";
writer << "for (size_t i=0; i<" << input_0_delta.get_size() << "; i++)\n";
writer.block_begin();
writer << "float " << numer_0 << ";\n";
writer << "float " << denom_0 << ";\n";
writer << "float " << d_numer_0 << ";\n";
writer << "float " << d_denom_0 << ";\n";
writer.block_begin();
writer << input_0_func_string;
writer.block_end();
writer << "float " << numer_1 << ";\n";
writer << "float " << denom_1 << ";\n";
writer << "float " << d_numer_1 << ";\n";
writer << "float " << d_denom_1 << ";\n";
writer.block_begin();
writer << input_1_func_string;
writer.block_end();
writer << input_0_delta.get_name()
<< "[i] = " + delta.get_name() + "[i]*(" + numer_1 + "*" + d_numer_0 +
")/(" + denom_1 + "*" + d_denom_0 + ");\n";
writer << input_1_delta.get_name()
<< "[i] = " + delta.get_name() + "[i]*(" + numer_0 + "*" + d_numer_1 +
")/(" + denom_0 + "*" + d_denom_1 + ");\n";
writer.block_end();
writer.block_end();
}
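// Putting the derivative pieces together for z = f(x) * g(y), the backprop loop
// emitted above computes, per element,
//   input_0_delta[i] = delta[i] * g(y) * f'(x)
//                    = delta[i] * (numer_1 * d_numer_0) / (denom_1 * d_denom_0)
// and symmetrically for input_1_delta[i], so both gradients reuse the same
// numerator/denominator terms evaluated once per iteration.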
template <>
void CPU_Emitter::EMITTER_DECL(ngraph::op::Softmax)
{
...
...@@ -125,6 +125,7 @@
#include "ngraph/runtime/cpu/op/max_pool_with_indices.hpp"
#include "ngraph/runtime/cpu/op/rnn.hpp"
#include "ngraph/runtime/cpu/op/sigmoid.hpp"
#include "ngraph/runtime/cpu/op/sigmoid_mul.hpp"
#include "ngraph/runtime/cpu/pass/cpu_assignment.hpp"
#include "ngraph/runtime/cpu/pass/cpu_concat_inputs.hpp"
#include "ngraph/runtime/cpu/pass/cpu_fusion.hpp"
...@@ -289,6 +290,9 @@ static const runtime::cpu::OpMap dispatcher{
{TI(ngraph::op::ReluBackprop), &runtime::cpu::CPU_Emitter::emit<op::ReluBackprop>},
{TI(ngraph::op::Rnn), &runtime::cpu::CPU_Emitter::emit<op::Rnn>},
{TI(ngraph::op::Sigmoid), &runtime::cpu::CPU_Emitter::emit<op::Sigmoid>},
{TI(ngraph::op::SigmoidMultiply), &runtime::cpu::CPU_Emitter::emit<op::SigmoidMultiply>},
{TI(ngraph::op::SigmoidMultiplyBackprop),
&runtime::cpu::CPU_Emitter::emit<op::SigmoidMultiplyBackprop>},
{TI(ngraph::op::Softmax), &runtime::cpu::CPU_Emitter::emit<op::Softmax>},
{TI(ngraph::op::SigmoidBackprop), &runtime::cpu::CPU_Emitter::emit<op::SigmoidBackprop>},
{TI(ngraph::op::And), &runtime::cpu::CPU_Emitter::emit<op::And>},
...
/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#include "sigmoid_mul.hpp"
#include "ngraph/log.hpp"
#include "ngraph/op/add.hpp"
#include "ngraph/op/broadcast.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/op/tanh.hpp"
#include "ngraph/runtime/cpu/op/sigmoid.hpp"
#include "ngraph/util.hpp"
using namespace std;
using namespace ngraph;
ngraph::op::SigmoidMultiply::FunctionType
op::SigmoidMultiply::identify_node_type(const std::shared_ptr<ngraph::Node>& node)
{
if (std::dynamic_pointer_cast<ngraph::op::Tanh>(node) != nullptr)
{
return ngraph::op::SigmoidMultiply::FunctionType::Tanh;
}
else if (std::dynamic_pointer_cast<ngraph::op::Sigmoid>(node) != nullptr)
{
return ngraph::op::SigmoidMultiply::FunctionType::Logistic;
}
else if (std::dynamic_pointer_cast<ngraph::op::Broadcast>(node) != nullptr)
{
return ngraph::op::SigmoidMultiply::FunctionType::Identity;
}
else if (std::dynamic_pointer_cast<ngraph::op::Add>(node) != nullptr)
{
return ngraph::op::SigmoidMultiply::FunctionType::Identity;
}
else
{
throw ngraph::ngraph_error("SigmoidMultiply input function type not supported: " +
node->get_name());
}
}
op::SigmoidMultiply::SigmoidMultiply(shared_ptr<Node> input_0,
shared_ptr<Node> input_1,
const FunctionType input_0_type,
const FunctionType input_1_type)
: RequiresTensorViewArgs("SigmoidMultiply", {input_0, input_1})
{
if (input_0->get_element_type() != input_1->get_element_type())
{
throw ngraph_error("SigmoidMultiply input element type mismatch");
}
if (input_0->get_shape() != input_1->get_shape())
{
throw ngraph_error("SigmoidMultiply input shape mismatch: " +
vector_to_string(input_0->get_shape()) + " != " +
vector_to_string(input_1->get_shape()));
}
m_input_type[0] = input_0_type;
m_input_type[1] = input_1_type;
add_output(input_0->get_element_type(), input_0->get_shape());
}
shared_ptr<Node> op::SigmoidMultiply::copy_with_new_args(const NodeVector& new_args) const
{
if (new_args.size() != 2)
{
throw ngraph_error("SigmoidMultiply incorrect number of new arguments");
}
// WARNING: the new args are implicitly expected to match the original input function types.
return make_shared<SigmoidMultiply>(
new_args.at(0), new_args.at(1), m_input_type[0], m_input_type[1]);
}
void op::SigmoidMultiply::generate_adjoints(autodiff::Adjoints& adjoints, const NodeVector& deltas)
{
auto delta = deltas.at(0);
auto input_0 = get_argument(0);
auto input_1 = get_argument(1);
auto sigmoid_mul_backprop =
make_shared<op::SigmoidMultiplyBackprop>(input_0, input_1, delta, m_input_type);
auto input_0_delta = make_shared<op::GetOutputElement>(sigmoid_mul_backprop, 0);
auto input_1_delta = make_shared<op::GetOutputElement>(sigmoid_mul_backprop, 1);
adjoints.add_delta(input_0, input_0_delta);
adjoints.add_delta(input_1, input_1_delta);
}
op::SigmoidMultiplyBackprop::SigmoidMultiplyBackprop(std::shared_ptr<Node> input_0,
std::shared_ptr<Node> input_1,
shared_ptr<Node> delta,
const std::array<FunctionType, 2>& input_type)
: RequiresTensorViewArgs("SigmoidMultiplyBackprop", {input_0, input_1, delta})
, m_input_type(input_type)
{
if (input_0->get_element_type() != input_1->get_element_type())
{
throw ngraph_error("Argument element types for SigmoidMultiply backprop do not match");
}
if (input_0->get_shape() != input_1->get_shape())
{
throw ngraph_error("Argument shapes for SigmoidMultiply backprop do not match");
}
if (input_0->get_element_type() != delta->get_element_type())
{
throw ngraph_error(
"Argument and delta element types for SigmoidMultiply backprop do not match");
}
if (input_0->get_shape() != delta->get_shape())
{
throw ngraph_error("Argument and delta shape for SigmoidMultiply backprop do not match");
}
add_output(get_input_element_type(0), get_input_shape(0));
add_output(get_input_element_type(1), get_input_shape(1));
}
shared_ptr<Node> op::SigmoidMultiplyBackprop::copy_with_new_args(const NodeVector& new_args) const
{
if (new_args.size() != 3)
{
throw ngraph_error("Incorrect number of new arguments");
}
return make_shared<SigmoidMultiplyBackprop>(
new_args.at(0), new_args.at(1), new_args.at(2), m_input_type);
}
/*******************************************************************************
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
#pragma once
#include "ngraph/op/util/requires_tensor_view_args.hpp"
#include "ngraph/util.hpp"
#include <array>
namespace ngraph
{
namespace op
{
/// \brief Fused sigmoid functions (logistic and tanh) with elementwise multiplication, forward prop.
class SigmoidMultiply : public util::RequiresTensorViewArgs
{
public:
/// Defines valid function types
enum class FunctionType
{
Logistic,
Tanh,
Identity
};
/// Input nodes are expected to be the raw inputs; the corresponding
/// FunctionType is applied to each input inside the fused operation.
SigmoidMultiply(std::shared_ptr<Node> input_0,
std::shared_ptr<Node> input_1,
const FunctionType input_0_type,
const FunctionType input_1_type);
/// WARNING: copy_with_new_args() implicitly expects the new args to match the original input function types.
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
virtual void generate_adjoints(autodiff::Adjoints& adjoints,
const NodeVector& deltas) override;
FunctionType get_input_func_type(const unsigned int index) const
{
return m_input_type[index];
}
/// Identifies the corresponding FunctionType for the input node.
static FunctionType identify_node_type(const std::shared_ptr<ngraph::Node>& node);
private:
std::array<FunctionType, 2> m_input_type;
};
/// \brief Elementwise SigmoidMultiplyBackprop operation.
///
class SigmoidMultiplyBackprop : public util::RequiresTensorViewArgs
{
public:
typedef SigmoidMultiply::FunctionType FunctionType;
/// \brief Constructs a SigmoidMultiplyBackprop operation.
///
/// \param input_0 Forward input node 0.
/// \param input_1 Forward input node 1.
/// \param delta Backprop delta node.
/// \param input_type Function type for the input nodes.
SigmoidMultiplyBackprop(std::shared_ptr<Node> input_0,
std::shared_ptr<Node> input_1,
std::shared_ptr<ngraph::Node> delta,
const std::array<FunctionType, 2>& input_type);
virtual std::shared_ptr<Node>
copy_with_new_args(const NodeVector& new_args) const override;
FunctionType get_input_func_type(const unsigned int index) const
{
return m_input_type[index];
}
private:
std::array<FunctionType, 2> m_input_type;
};
}
}
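For orientation, here is a minimal, hypothetical sketch of how the op declared above could be built by hand; the parameter names and shape are invented for the example, and in practice the node is created by the CPUFusion pass further below. Note that the constructor takes the pre-activation inputs, with the activation to apply selected per input via FunctionType.

// Hypothetical illustration only; not part of this commit's sources.
#include "ngraph/op/parameter.hpp"
#include "ngraph/op/tanh.hpp"
#include "ngraph/runtime/cpu/op/sigmoid.hpp"
#include "ngraph/runtime/cpu/op/sigmoid_mul.hpp"

using namespace ngraph;

std::shared_ptr<op::SigmoidMultiply> make_fused_example()
{
    auto x = std::make_shared<op::Parameter>(element::f32, Shape{2, 4});
    auto y = std::make_shared<op::Parameter>(element::f32, Shape{2, 4});

    // identify_node_type maps Sigmoid -> Logistic, Tanh -> Tanh,
    // and Add/Broadcast -> Identity (anything else throws).
    auto type_0 = op::SigmoidMultiply::identify_node_type(std::make_shared<op::Sigmoid>(x));
    auto type_1 = op::SigmoidMultiply::identify_node_type(std::make_shared<op::Tanh>(y));

    // The fused node consumes x and y directly; sigmoid(x) and tanh(y)
    // are computed inside the emitted kernel.
    return std::make_shared<op::SigmoidMultiply>(x, y, type_0, type_1);
}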
...@@ -41,6 +41,7 @@
#include "ngraph/op/sqrt.hpp"
#include "ngraph/op/subtract.hpp"
#include "ngraph/op/sum.hpp"
#include "ngraph/op/tanh.hpp"
#include "ngraph/pattern/matcher.hpp"
#include "ngraph/pattern/op/label.hpp"
#include "ngraph/pattern/op/skip.hpp"
...@@ -49,6 +50,7 @@
#include "ngraph/runtime/cpu/op/conv_relu.hpp"
#include "ngraph/runtime/cpu/op/matmul_bias.hpp"
#include "ngraph/runtime/cpu/op/sigmoid.hpp"
#include "ngraph/runtime/cpu/op/sigmoid_mul.hpp"
static bool init_cblas_arg(std::shared_ptr<ngraph::Node> reshape,
std::shared_ptr<ngraph::Node> arg,
...@@ -1071,3 +1073,66 @@ void ngraph::runtime::cpu::pass::CPUFusion::construct_conv_bias_relu()
auto m = std::make_shared<pattern::Matcher>(prelu, callback);
this->add_matcher(m);
}
void ngraph::runtime::cpu::pass::CPUFusion::construct_sigmoid_multiply()
{
// Construct predicate to match sigmoid and tanh
auto sigmoid_pred = [](std::shared_ptr<Node> n) {
return (std::dynamic_pointer_cast<op::Sigmoid>(n) != nullptr) ||
(std::dynamic_pointer_cast<op::Tanh>(n) != nullptr);
};
// Construct predicate to match other valid nodes
auto other_pred = [](std::shared_ptr<Node> n) {
return (std::dynamic_pointer_cast<op::Sigmoid>(n) != nullptr) ||
(std::dynamic_pointer_cast<op::Tanh>(n) != nullptr) ||
(std::dynamic_pointer_cast<op::Add>(n) != nullptr) ||
(std::dynamic_pointer_cast<op::Broadcast>(n) != nullptr);
};
auto sigmoid_0 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1}, sigmoid_pred);
auto sigmoid_1 = std::make_shared<pattern::op::Label>(element::f32, Shape{1, 1}, other_pred);
auto elem_mul = std::make_shared<op::Multiply>(sigmoid_0, sigmoid_1);
ngraph::pattern::graph_rewrite_callback callback = [sigmoid_0, sigmoid_1](pattern::Matcher& m) {
NGRAPH_DEBUG << "In a callback for construct_sigmoid_multiply pattern against "
<< m.get_match_root()->get_name();
auto pattern_map = m.get_pattern_map();
if (m.get_match_root()->get_element_type() != element::f32)
{
NGRAPH_DEBUG << "mpattern = " << m.get_match_root()->get_name()
<< " type is not float!";
return false;
}
using FunctionType = op::SigmoidMultiply::FunctionType;
const int max_inputs{2};
std::array<std::shared_ptr<ngraph::Node>, max_inputs> match_nodes{
{pattern_map[sigmoid_0], pattern_map[sigmoid_1]}};
std::array<std::shared_ptr<ngraph::Node>, max_inputs> input_nodes;
std::array<FunctionType, max_inputs> input_type;
for (int i = 0; i < max_inputs; ++i)
{
input_type[i] = op::SigmoidMultiply::identify_node_type(match_nodes[i]);
if (input_type[i] != FunctionType::Identity)
{
if (match_nodes[i]->get_users().size() > 1)
{
NGRAPH_DEBUG << "input node has multiple users, skipping fusion.";
return false;
}
input_nodes[i] = match_nodes[i]->get_argument(0);
}
else
{
input_nodes[i] = match_nodes[i];
}
}
auto sigmoid_mul_node = std::make_shared<op::SigmoidMultiply>(
input_nodes[0], input_nodes[1], input_type[0], input_type[1]);
ngraph::replace_node(m.get_match_root(), sigmoid_mul_node);
return true;
};
auto m = std::make_shared<ngraph::pattern::Matcher>(elem_mul, callback);
this->add_matcher(m);
}
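To make the rewrite concrete, here is a sketch of the transformation under the assumption that input 0 matched a Tanh and input 1 matched an Add (a, b, c are hypothetical node names):

Before:  Multiply(Tanh(a), Add(b, c))
After:   SigmoidMultiply(a, Add(b, c), FunctionType::Tanh, FunctionType::Identity)

The Tanh operand is replaced by its pre-activation argument a, while the Add operand is tagged Identity and kept as-is. The callback only fuses f32 tensors, and it skips the fusion when a matched Sigmoid/Tanh node has other users, since that activation's output would still be needed outside the fused node.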
...@@ -69,6 +69,7 @@ public:
if (fusions & DIFFERENTIABLE_FUSIONS)
{
construct_conv_bias();
construct_sigmoid_multiply();
}
}
...@@ -80,6 +81,7 @@ private:
void construct_fprop_bn();
void construct_sigmoid();
void construct_sigmoid_bprop();
void construct_sigmoid_multiply();
void construct_zero_padded_reshaped_conv();
void construct_zero_padded_conv();
void construct_zero_padded_conv_backprop_filters();
...
This diff is collapsed.
This diff is collapsed.