//*****************************************************************************
// Copyright 2017-2019 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//*****************************************************************************

#include <memory>

#include "gtest/gtest.h"

#include "ngraph/log.hpp"
#include "ngraph/ngraph.hpp"
#include "ngraph/op/get_output_element.hpp"
#include "ngraph/pass/manager.hpp"
#include "ngraph/pass/visualize_tree.hpp"
#include "ngraph/runtime/backend.hpp"
#include "ngraph/runtime/backend_manager.hpp"
#include "ngraph/runtime/hybrid/hybrid_backend.hpp"
#include "ngraph/runtime/hybrid/hybrid_util.hpp"
#include "ngraph/runtime/hybrid/op/function_call.hpp"
#include "ngraph/runtime/interpreter/int_backend.hpp"
#include "util/all_close.hpp"
#include "util/all_close_f.hpp"
#include "util/ndarray.hpp"
#include "util/test_control.hpp"
#include "util/test_tools.hpp"

using namespace std;
using namespace ngraph;

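// Builds a BackendConstructor whose backend list pairs an INTBackend that
// reports "Add" and "Max" as unsupported with the CPU backend, so a
// HybridBackend created from it has to split graphs between the two.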
static runtime::BackendConstructor* hybrid_creator()
{
    class HybridBackendConstructor : public runtime::BackendConstructor
    {
    public:
        std::shared_ptr<runtime::Backend> create(const std::string& config) override
        {
            vector<string> unsupported_0 = {"Add", "Max"};
            vector<string> unsupported_1 = {"Multiply"};
            vector<shared_ptr<runtime::Backend>> backend_list = {
                make_shared<runtime::interpreter::INTBackend>(unsupported_0),
                runtime::Backend::create("CPU")};

            return make_shared<runtime::hybrid::HybridBackend>(backend_list);
        }
    };

    static unique_ptr<runtime::BackendConstructor> s_backend_constructor(
        new HybridBackendConstructor());
    return s_backend_constructor.get();
}

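// Wraps a two-output inner Function in a hybrid FunctionCall node, runs it on
// a HybridBackend backed by a single interpreter backend, and dumps the
// resulting graph with the VisualizeTree pass.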
TEST(HYBRID, function_call)
{
    vector<shared_ptr<runtime::Backend>> backend_list = {
        make_shared<runtime::interpreter::INTBackend>()};
    auto backend = make_shared<runtime::hybrid::HybridBackend>(backend_list);

    Shape shape{};
    shared_ptr<Function> inner_function;
    auto inner_A = make_shared<op::Parameter>(element::f32, shape);
    auto inner_B = make_shared<op::Parameter>(element::f32, shape);
    auto inner_C = make_shared<op::Parameter>(element::f32, shape);
    auto inner_R1 = (inner_A + inner_B) * inner_C;
    auto inner_R2 = (inner_A + inner_C) * inner_C;
    NodeVector inner_Result{inner_R1, inner_R2};
    inner_function =
        make_shared<Function>(inner_Result, ParameterVector{inner_A, inner_B, inner_C});

    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto C = make_shared<op::Parameter>(element::f32, shape);
    NodeVector fcall_args{A, B, C};
    auto H = make_shared<runtime::hybrid::op::FunctionCall>(
        inner_Result, fcall_args, *inner_function, backend_list[0]);
    auto G0 = make_shared<ngraph::op::GetOutputElement>(H, 0);
    auto G1 = make_shared<ngraph::op::GetOutputElement>(H, 1);
    NodeVector out{G0, G1};
    auto J = G0 + G1;
    auto f = make_shared<Function>(out, ParameterVector{A, B, C});

    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> c = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> r0 = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> r1 = backend->create_tensor(element::f32, shape);

    copy_data(a, vector<float>{2});
    copy_data(b, vector<float>{3});
    copy_data(c, vector<float>{4});

    auto exec = backend->compile(f);
    exec->call({r0, r1}, {a, b, c});
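    // Added checks; expected values computed by hand from the inputs above:
    //   r0 = (a + b) * c = (2 + 3) * 4 = 20
    //   r1 = (a + c) * c = (2 + 4) * 4 = 24
    EXPECT_TRUE(test::all_close_f(read_vector<float>(r0), vector<float>{20}));
    EXPECT_TRUE(test::all_close_f(read_vector<float>(r1), vector<float>{24}));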

    ngraph::pass::Manager pass_manager;
    pass_manager.register_pass<ngraph::pass::VisualizeTree>("test.png");
    pass_manager.run_passes(f);
}

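// Registers the hybrid backend under the name "H1" and runs a four-input
// graph; the Add nodes are unsupported on the interpreter backend, so the
// HybridBackend must hand them to the CPU backend.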
TEST(HYBRID, abc)
{
    const string backend_name = "H1";
    runtime::BackendManager::register_backend(backend_name, hybrid_creator());

    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::f32, shape);
    auto B = make_shared<op::Parameter>(element::f32, shape);
    auto C = make_shared<op::Parameter>(element::f32, shape);
    auto D = make_shared<op::Parameter>(element::f32, shape);
    auto t1 = A * B;
    auto t2 = t1 * D;
    auto t3 = (t2 + C);
    auto t4 = (t3 + A) * t1;
    NodeVector result({t3, t4});
    auto f = make_shared<Function>(result, ParameterVector{A, B, C, D});

    shared_ptr<runtime::Backend> backend = runtime::Backend::create("H1");
    static_pointer_cast<runtime::hybrid::HybridBackend>(backend)->set_debug_enabled(true);

    // Create some tensors for input/output
    shared_ptr<runtime::Tensor> a = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> b = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> c = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> d = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> result1 = backend->create_tensor(element::f32, shape);
    shared_ptr<runtime::Tensor> result2 = backend->create_tensor(element::f32, shape);

    copy_data(a, vector<float>{1, 2, 3, 4});
    copy_data(b, vector<float>{5, 6, 7, 8});
    copy_data(c, vector<float>{9, 10, 11, 12});
    copy_data(d, vector<float>{4, 3, 2, 1});

    auto handle = backend->compile(f);
    handle->call_with_validate({result1, result2}, {a, b, c, d});
    EXPECT_TRUE(
        test::all_close_f(read_vector<float>(result2), (vector<float>{150, 576, 1176, 1536})));
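    // Added check: result1 holds t3 = (A * B * D) + C; with the inputs above
    // that is {1*5*4+9, 2*6*3+10, 3*7*2+11, 4*8*1+12} = {29, 46, 53, 44}.
    EXPECT_TRUE(
        test::all_close_f(read_vector<float>(result1), (vector<float>{29, 46, 53, 44})));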
}

// Reduces a 2x2 i8 tensor with Max over both axes; Max is reported as
// unsupported by the interpreter backend, so the "H1" hybrid backend has to
// run it on CPU.
TEST(HYBRID, simple)
{
    const string backend_name = "H1";
    runtime::BackendManager::register_backend(backend_name, hybrid_creator());

    Shape shape{2, 2};
    auto A = make_shared<op::Parameter>(element::i8, shape);
    auto f = make_shared<Function>(make_shared<op::Max>(A, AxisSet{0, 1}), ParameterVector{A});

    shared_ptr<runtime::Backend> backend = runtime::Backend::create("H1");
    static_pointer_cast<runtime::hybrid::HybridBackend>(backend)->set_debug_enabled(true);

    // Create some tensors for input/output
    auto a = backend->create_tensor(element::i8, shape);
    copy_data(a, vector<int8_t>{1, 2, 3, 4});
    auto result = backend->create_tensor(element::i8, Shape{});

    auto handle = backend->compile(f);
    handle->call_with_validate({result}, {a});
    EXPECT_EQ((vector<int8_t>{4}), read_vector<int8_t>(result));
}