/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"
#include <opencv2/core/ocl.hpp>
#include <iostream>
#include "npy_blob.hpp"
#include <opencv2/dnn/all_layers.hpp>
#include <opencv2/ts/ocl_test.hpp>

namespace cvtest
{

using namespace cv;
using namespace cv::dnn;

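// Build the path to a file in the dnn/layers folder of the OpenCV extra test data
// directory, adding a separator if the directory path lacks a trailing slash.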
template<typename TString>
static String _tf(TString filename)
{
    String basetestdir = getOpenCVExtraDir();
    size_t len = basetestdir.size();
    if(len > 0 && basetestdir[len-1] != '/' && basetestdir[len-1] != '\\')
        return (basetestdir + "/dnn/layers/") + filename;
    return (basetestdir + "dnn/layers/") + filename;
}

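// Run a single layer on copies of the given input blobs and collect its output blobs.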
void runLayer(Ptr<Layer> layer, std::vector<Mat> &inpBlobs, std::vector<Mat> &outBlobs)
{
    size_t i, ninputs = inpBlobs.size();
    std::vector<Mat> inp_(ninputs);
    std::vector<Mat*> inp(ninputs);
    std::vector<Mat> outp;

    for( i = 0; i < ninputs; i++ )
    {
        inp_[i] = inpBlobs[i].clone();
        inp[i] = &inp_[i];
    }

    layer->allocate(inp, outp);
    layer->forward(inp, outp);

    size_t noutputs = outp.size();
    outBlobs.resize(noutputs);
    for( i = 0; i < noutputs; i++ )
        outBlobs[i] = outp[i];
}


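// Import <basename>.prototxt (plus <basename>.caffemodel when useCaffeModel is set),
// feed it either the common blob.npy input or <basename>.input.npy, and compare the
// network output with the reference blob stored in <basename>.npy.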
void testLayerUsingCaffeModels(String basename, bool useCaffeModel = false, bool useCommonInputBlob = true)
{
    String prototxt = _tf(basename + ".prototxt");
    String caffemodel = _tf(basename + ".caffemodel");

    String inpfile = (useCommonInputBlob) ? _tf("blob.npy") : _tf(basename + ".input.npy");
    String outfile = _tf(basename + ".npy");

    cv::setNumThreads(cv::getNumberOfCPUs());

    Net net;
    {
        Ptr<Importer> importer = createCaffeImporter(prototxt, (useCaffeModel) ? caffemodel : String());
        ASSERT_TRUE(importer != NULL);
        importer->populateNet(net);
    }

    Mat inp = blobFromNPY(inpfile);
    Mat ref = blobFromNPY(outfile);

    net.setBlob(".input", inp);
    net.forward();
    Mat out = net.getBlob("output");

    normAssert(ref, out);
}

TEST(Layer_Test_Softmax, Accuracy)
{
     testLayerUsingCaffeModels("layer_softmax");
}

TEST(Layer_Test_LRN_spatial, Accuracy)
{
     testLayerUsingCaffeModels("layer_lrn_spatial");
}

TEST(Layer_Test_LRN_channels, Accuracy)
{
     testLayerUsingCaffeModels("layer_lrn_channels");
}

TEST(Layer_Test_Convolution, Accuracy)
{
     testLayerUsingCaffeModels("layer_convolution", true);
}

TEST(Layer_Test_DeConvolution, Accuracy)
{
     testLayerUsingCaffeModels("layer_deconvolution", true, false);
}

TEST(Layer_Test_InnerProduct, Accuracy)
{
     testLayerUsingCaffeModels("layer_inner_product", true);
}

TEST(Layer_Test_Pooling_max, Accuracy)
{
     testLayerUsingCaffeModels("layer_pooling_max");
}

TEST(Layer_Test_Pooling_ave, Accuracy)
{
     testLayerUsingCaffeModels("layer_pooling_ave");
}

TEST(Layer_Test_MVN, Accuracy)
{
     testLayerUsingCaffeModels("layer_mvn");
}

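// Reshape with axis = 2 and num_axes = 1 must squeeze the singleton dimension: 4x3x1x2 -> 4x3x2.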
TEST(Layer_Test_Reshape, squeeze)
{
    LayerParams params;
    params.set("axis", 2);
    params.set("num_axes", 1);

    int sz[] = {4, 3, 1, 2};
    Mat inp(4, sz, CV_32F);
    std::vector<Mat*> inpVec(1, &inp);
    std::vector<Mat> outVec;

    Ptr<Layer> rl = LayerFactory::createLayerInstance("Reshape", params);
    rl->allocate(inpVec, outVec);
    rl->forward(inpVec, outVec);

    Mat& out = outVec[0];
    std::vector<int> shape(out.size.p, out.size.p + out.dims);
    int sh0[] = {4, 3, 2};
    std::vector<int> shape0(sh0, sh0+3);
    EXPECT_TRUE(shapeEqual(shape, shape0));
}

TEST(Layer_Test_BatchNorm, Accuracy)
{
     testLayerUsingCaffeModels("layer_batch_norm", true);
}

//template<typename XMat>
//static void test_Layer_Concat()
//{
//    Matx21f a(1.f, 1.f), b(2.f, 2.f), c(3.f, 3.f);
//    std::vector<Blob> res(1), src = { Blob(XMat(a)), Blob(XMat(b)), Blob(XMat(c)) };
//    Blob ref(XMat(Matx23f(1.f, 2.f, 3.f, 1.f, 2.f, 3.f)));
//
//    runLayer(ConcatLayer::create(1), src, res);
//    normAssert(ref, res[0]);
//}
//TEST(Layer_Concat, Accuracy)
//{
//    test_Layer_Concat<Mat>();
//}
//OCL_TEST(Layer_Concat, Accuracy)
//{
//    OCL_ON(test_Layer_Concat<Mat>());
//}

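// Push random data through a net whose Reshape/Split/Slice layers should leave the data
// unchanged, then compare the final output with the original input.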
static void test_Reshape_Split_Slice_layers()
{
    Net net;
    {
        Ptr<Importer> importer = createCaffeImporter(_tf("reshape_and_slice_routines.prototxt"));
        ASSERT_TRUE(importer != NULL);
        importer->populateNet(net);
    }

    Mat input(6, 12, CV_32F);
    RNG rng(0);
    rng.fill(input, RNG::UNIFORM, -1, 1);

    net.setBlob(".input", input);
    net.forward();
    Mat output = net.getBlob("output");

    normAssert(input, output);
}
TEST(Layer_Test_Reshape_Split_Slice, Accuracy)
{
    test_Reshape_Split_Slice_layers();
}

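// Fixture that builds an LSTM layer with all-ones gate weights Wh (4*numOut x numOut),
// Wx (4*numOut x numInp) and bias b (4*numOut x 1) for the given input/output shapes.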
class Layer_LSTM_Test : public ::testing::Test
{
public:
    int numInp, numOut;
    Mat Wh, Wx, b;
    Ptr<LSTMLayer> layer;
    std::vector<Mat> inputs, outputs;

    Layer_LSTM_Test() {}

    void init(const std::vector<int> &inpShape_, const std::vector<int> &outShape_)
    {
        numInp = (int)shapeTotal(inpShape_);
        numOut = (int)shapeTotal(outShape_);

        Wh = Mat::ones(4 * numOut, numOut, CV_32F);
        Wx = Mat::ones(4 * numOut, numInp, CV_32F);
        b  = Mat::ones(4 * numOut, 1, CV_32F);

        layer = LSTMLayer::create(LayerParams());
        layer->setWeights(Wh, Wx, b);
        layer->setOutShape(outShape_);
    }
};

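// Check the shapes of the produced outputs and of the internal C/H states,
// and the mapping of input/output names to indices.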
TEST_F(Layer_LSTM_Test, get_set_test)
{
    const int TN = 4;
    std::vector<int> inpShape = makeShape(5, 3, 2);
    std::vector<int> outShape = makeShape(3, 1, 2);
    std::vector<int> inpResShape = concatShape(makeShape(TN), inpShape);
    std::vector<int> outResShape = concatShape(makeShape(TN), outShape);

    init(inpShape, outShape);
    layer->setProduceCellOutput(true);
    layer->setUseTimstampsDim(false);
    layer->setOutShape(outShape);

    Mat C((int)outResShape.size(), &outResShape[0], CV_32F);
    randu(C, -1., 1.);
    Mat H = C.clone();
    randu(H, -1., 1.);
    layer->setC(C);
    layer->setH(H);

    Mat inp((int)inpResShape.size(), &inpResShape[0], CV_32F);
    randu(inp, -1., 1.);

    inputs.push_back(inp);
    runLayer(layer, inputs, outputs);

    EXPECT_EQ(2u, outputs.size());

    printShape("outResShape", outResShape);
    printShape("out0", getShape(outputs[0]));
    printShape("out1", getShape(outputs[1]));
    printShape("C", getShape(layer->getC()));
    printShape("H", getShape(layer->getH()));

    EXPECT_TRUE(shapeEqual(outResShape, getShape(outputs[0])));
    EXPECT_TRUE(shapeEqual(outResShape, getShape(outputs[1])));

    EXPECT_TRUE(shapeEqual(outResShape, getShape(layer->getC())));
    EXPECT_TRUE(shapeEqual(outResShape, getShape(layer->getH())));

    EXPECT_EQ(0, layer->inputNameToIndex("x"));
    EXPECT_EQ(0, layer->outputNameToIndex("h"));
    EXPECT_EQ(1, layer->outputNameToIndex("c"));
}

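// Compare the LSTM layer's hidden state output with reference weights and blobs
// exported from Caffe (lstm.prototxt.*.npy).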
TEST(Layer_LSTM_Test_Accuracy_with_, CaffeRecurrent)
{
    Ptr<LSTMLayer> layer = LSTMLayer::create(LayerParams());

    Mat Wx = blobFromNPY(_tf("lstm.prototxt.w_0.npy"));
    Mat Wh = blobFromNPY(_tf("lstm.prototxt.w_2.npy"));
    Mat b  = blobFromNPY(_tf("lstm.prototxt.w_1.npy"));
    layer->setWeights(Wh, Wx, b);

    Mat inp = blobFromNPY(_tf("recurrent.input.npy"));
    std::vector<Mat> inputs(1, inp), outputs;
    runLayer(layer, inputs, outputs);

    Mat h_t_reference = blobFromNPY(_tf("lstm.prototxt.h_1.npy"));
    normAssert(h_t_reference, outputs[0]);
}

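// Compare the plain RNN layer's output with reference weights and blobs
// exported from Caffe (rnn.prototxt.*.npy).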
TEST(Layer_RNN_Test_Accuracy_with_, CaffeRecurrent)
{
    Ptr<RNNLayer> layer = RNNLayer::create(LayerParams());

    layer->setWeights(
                blobFromNPY(_tf("rnn.prototxt.w_0.npy")),
                blobFromNPY(_tf("rnn.prototxt.w_1.npy")),
                blobFromNPY(_tf("rnn.prototxt.w_2.npy")),
                blobFromNPY(_tf("rnn.prototxt.w_3.npy")),
                blobFromNPY(_tf("rnn.prototxt.w_4.npy")) );

    std::vector<Mat> output, input(1, blobFromNPY(_tf("recurrent.input.npy")));
    runLayer(layer, input, output);

    Mat h_ref = blobFromNPY(_tf("rnn.prototxt.h_1.npy"));
    normAssert(h_ref, output[0]);
}


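// Fixture that builds an RNN layer with all-ones weights for fixed sizes:
// nT time steps, nS samples, nX inputs, nH hidden units and nO outputs.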
class Layer_RNN_Test : public ::testing::Test
{
public:
    int nX, nH, nO, nT, nS;
    Mat Whh, Wxh, bh, Who, bo;
    Ptr<RNNLayer> layer;

    std::vector<Mat> inputs, outputs;

    Layer_RNN_Test()
    {
        nT = 3;
        nS = 5;
        nX = 31;
        nH = 64;
        nO = 100;

        Whh = Mat::ones(nH, nH, CV_32F);
        Wxh = Mat::ones(nH, nX, CV_32F);
        bh  = Mat::ones(nH, 1, CV_32F);
        Who = Mat::ones(nO, nH, CV_32F);
        bo  = Mat::ones(nO, 1, CV_32F);

        layer = RNNLayer::create(LayerParams());
        layer->setProduceHiddenOutput(true);
        layer->setWeights(Wxh, bh, Whh, Who, bo);
    }
};

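// With hidden output enabled the layer must produce two blobs:
// the top output of shape (nT, nS, nO) and the hidden state of shape (nT, nS, nH).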
TEST_F(Layer_RNN_Test, get_set_test)
{
    int sz[] = { nT, nS, 1, nX };
    Mat inp(4, sz, CV_32F);
    randu(inp, -1., 1.);
    inputs.push_back(inp);
    runLayer(layer, inputs, outputs);

    EXPECT_EQ(outputs.size(), 2u);
    EXPECT_TRUE(shapeEqual(getShape(outputs[0]), makeShape(nT, nS, nO)));
    EXPECT_TRUE(shapeEqual(getShape(outputs[1]), makeShape(nT, nS, nH)));
}

}