Commit d0a9683f authored by Vitaliy Lyudvichenko

Moving Caffe loaders into a separate file

parent e713af1a
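In outline: the per-layer factory functions (createPoolingLayerFromCaffe, createLRNLayerFromCaffe, createReLULayerFromCaffe, and so on) previously defined beside each layer implementation are consolidated into a single function template, createLayerFromCaffe<LayerType>, declared in caffe/layer_loaders.hpp and defined in the new source file shown first below. Parameter-free activations share the generic definition, layers that read Caffe parameters get explicit specializations, and explicit instantiations keep the definitions linkable from init.cpp. A condensed sketch of the pattern, assuming OpenCV's Layer, LayerParams and Ptr types and using only names that appear in this diff:

template <typename PublicLayer> //generic form: parameter-free activations
Ptr<Layer> createLayerFromCaffe(LayerParams&)
{
return Ptr<Layer>(PublicLayer::create());
}
template<> //specialized form: layers that parse Caffe parameters
Ptr<Layer> createLayerFromCaffe<ReLULayer>(LayerParams &params)
{
return Ptr<Layer>(ReLULayer::create(params.get<float>("negative_slope", 0.f)));
}
//explicit instantiation so the generic form is emitted for TanH
template Ptr<Layer> createLayerFromCaffe<TanHLayer>(LayerParams&);
//registration in init.cpp then names one instantiation per Caffe type, e.g.:
//REG_RUNTIME_LAYER_FUNC(ReLU, createLayerFromCaffe<ReLULayer>);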
#include "../precomp.hpp"
#include "layer_loaders.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#include "../layers/layers_common.hpp"
namespace cv
{
namespace dnn
{
template<> //Pooling specialization
Ptr<Layer> createLayerFromCaffe<PoolingLayer>(LayerParams &params)
{
int type;
Size kernel, stride, pad;
if (params.has("pool"))
{
String pool = params.get<String>("pool").toLowerCase();
if (pool == "max")
type = PoolingLayer::MAX;
else if (pool == "ave")
type = PoolingLayer::AVE;
else if (pool == "stochastic")
type = PoolingLayer::STOCHASTIC;
else
CV_Error(Error::StsBadArg, "Unknown pooling type \"" + pool + "\"");
}
else
{
type = PoolingLayer::MAX;
}
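//kernel, stride and pad are parsed with the shared Caffe convolution-parameter helper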
getCaffeConvParams(params, kernel, pad, stride);
return Ptr<Layer>(PoolingLayer::create(type, kernel, stride, pad));
}
template<> //Softmax specialization
Ptr<Layer> createLayerFromCaffe<SoftmaxLayer>(LayerParams &params)
{
int axis = params.get<int>("axis", 1);
return Ptr<Layer>(SoftmaxLayer::create(axis));
}
template<> //InnerProduct specialization
Ptr<Layer> createLayerFromCaffe<InnerProductLayer>(LayerParams &params)
{
const std::vector<Blob> &blobs = params.blobs;
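//blobs[0] holds the weight matrix; blobs[1], when bias_term is set, holds the bias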
CV_Assert(1 <= blobs.size() && blobs.size() <= 2);
int numOutputs = params.get<int>("num_output");
int innerSize = (int)blobs[0].total() / numOutputs;
bool bias = params.get<bool>("bias_term", true);
int axis = params.get<int>("axis", 1);
CV_Assert(blobs[0].dims() >= 2 && (size_t)(innerSize * numOutputs) == blobs[0].total());
CV_Assert(!bias || (blobs.size() == 2 && (size_t)numOutputs == blobs[1].total()));
Ptr<InnerProductLayer> l = InnerProductLayer::create(axis);
l->setParamsFrom(params);
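//reshape the loaded blobs into their canonical 2D shapes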
l->blobs[0].reshape(Shape(numOutputs, innerSize));
if (bias)
l->blobs[1].reshape(Shape(1, numOutputs));
return Ptr<Layer>(l);
}
template<> //LRNLayer specialization
Ptr<Layer> createLayerFromCaffe<LRNLayer>(LayerParams& params)
{
int type;
String nrmType = params.get<String>("norm_region", "ACROSS_CHANNELS");
if (nrmType == "ACROSS_CHANNELS")
type = LRNLayer::CHANNEL_NRM;
else if (nrmType == "WITHIN_CHANNEL")
type = LRNLayer::SPATIAL_NRM;
else
CV_Error(Error::StsBadArg, "Unknown region type \"" + nrmType + "\"");
int size = params.get<int>("local_size", 5);
if (size % 2 != 1 || size <= 0)
CV_Error(Error::StsBadArg, "LRN layer supports only positive odd values for local_size");
double alpha = params.get<double>("alpha", 1);
double beta = params.get<double>("beta", 0.75);
return Ptr<Layer>(LRNLayer::create(type, size, alpha, beta));
}
//Activation layers
template <typename ActivationLayer> //Intended for parameters-free activations
Ptr<Layer> createLayerFromCaffe(LayerParams&)
{
return Ptr<Layer>(ActivationLayer::create());
}
template<> //ReLU specialization
Ptr<Layer> createLayerFromCaffe<ReLULayer>(LayerParams& params)
{
float negative_slope = params.get<float>("negative_slope", 0.f);
return Ptr<Layer>(ReLULayer::create(negative_slope));
}
template<> //Power specialization
Ptr<Layer> createLayerFromCaffe<PowerLayer>(LayerParams& params)
{
float power = params.get<float>("power", 1.0f);
float scale = params.get<float>("scale", 1.0f);
float shift = params.get<float>("shift", 0.0f);
return Ptr<Layer>(PowerLayer::create(power, scale, shift));
}
//Explicit instantiation
template Ptr<Layer> createLayerFromCaffe<SoftmaxLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<InnerProductLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<LRNLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<ReLULayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<SigmoidLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<TanHLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<AbsLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<BNLLLayer>(LayerParams&);
template Ptr<Layer> createLayerFromCaffe<PowerLayer>(LayerParams&);
}
}
\ No newline at end of file
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_DNN_CAFFE_LAYER_LOADERS_HPP__
#define __OPENCV_DNN_CAFFE_LAYER_LOADERS_HPP__
#include <opencv2/dnn/all_layers.hpp>
namespace cv
{
namespace dnn
{
//Common template for Caffe layer loaders
template <typename PublicLayer>
Ptr<Layer> createLayerFromCaffe(LayerParams&);
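//Generic definition, specializations and explicit instantiations are provided in layer_loaders.cpp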
}
}
#endif
\ No newline at end of file
@@ -54,6 +54,8 @@
#include "layers/softmax_layer.hpp"
#include "layers/split_layer.hpp"
#include "caffe/layer_loaders.hpp"
namespace cv
{
namespace dnn
@@ -77,21 +79,21 @@ void initModule()
return;
REG_RUNTIME_LAYER_CLASS(Slice, SliceLayer)
REG_STATIC_LAYER_FUNC(Softmax, createSoftmaxLayerFromCaffe)
REG_RUNTIME_LAYER_CLASS(Split, SplitLayer)
REG_RUNTIME_LAYER_CLASS(Reshape, ReshapeLayer)
REG_STATIC_LAYER_FUNC(Flatten, createFlattenLayer)
REG_RUNTIME_LAYER_FUNC(Pooling, createPoolingLayerFromCaffe)
REG_RUNTIME_LAYER_CLASS(MVN, MVNLayer)
REG_RUNTIME_LAYER_FUNC(LRN, createLRNLayerFromCaffe)
REG_RUNTIME_LAYER_FUNC(InnerProduct, createInnerProductLayerFromCaffe)
REG_RUNTIME_LAYER_FUNC(Flatten, createFlattenLayer);
REG_RUNTIME_LAYER_FUNC(Pooling, createLayerFromCaffe<PoolingLayer>);
REG_RUNTIME_LAYER_FUNC(LRN, createLayerFromCaffe<LRNLayer>);
REG_RUNTIME_LAYER_FUNC(InnerProduct, createLayerFromCaffe<InnerProductLayer>);
REG_STATIC_LAYER_FUNC(Softmax, createLayerFromCaffe<SoftmaxLayer>);
REG_RUNTIME_LAYER_FUNC(ReLU, createReLULayerFromCaffe)
REG_RUNTIME_LAYER_FUNC(Sigmoid, createSigmoidLayerFromCaffe)
REG_RUNTIME_LAYER_FUNC(TanH, createTanHLayerFromCaffe)
REG_RUNTIME_LAYER_FUNC(BNLL, createBNLLLayerFromCaffe)
REG_RUNTIME_LAYER_FUNC(AbsVal, createAbsLayerFromCaffe)
REG_RUNTIME_LAYER_FUNC(Power, createPowerLayerFromCaffe)
REG_RUNTIME_LAYER_FUNC(ReLU, createLayerFromCaffe<ReLULayer>);
REG_RUNTIME_LAYER_FUNC(Sigmoid, createLayerFromCaffe<SigmoidLayer>);
REG_RUNTIME_LAYER_FUNC(TanH, createLayerFromCaffe<TanHLayer>);
REG_RUNTIME_LAYER_FUNC(BNLL, createLayerFromCaffe<BNLLLayer>);
REG_RUNTIME_LAYER_FUNC(AbsVal, createLayerFromCaffe<AbsLayer>);
REG_RUNTIME_LAYER_FUNC(Power, createLayerFromCaffe<PowerLayer>);
REG_RUNTIME_LAYER_CLASS(Dropout, BlankLayer)
REG_RUNTIME_LAYER_FUNC(Convolution, createConvolutionLayerFromCaffe)
@@ -42,48 +42,5 @@ Ptr<PowerLayer> PowerLayer::create(double power /*= 1*/, double scale /*= 1*/, d
return Ptr<PowerLayer>(new ElementWiseLayer<PowerFunctor>(f));
}
Ptr<Layer> createReLULayerFromCaffe(LayerParams &params)
{
float negative_slope;
if (params.has("negative_slope"))
negative_slope = params.get<float>("negative_slope");
else
negative_slope = 0.f;
return Ptr<Layer>(ReLULayer::create(negative_slope));
}
Ptr<Layer> createSigmoidLayerFromCaffe(LayerParams&)
{
return Ptr<Layer>(SigmoidLayer::create());
}
Ptr<Layer> createTanHLayerFromCaffe(LayerParams&)
{
return Ptr<Layer>(TanHLayer::create());
}
Ptr<Layer> createAbsLayerFromCaffe(LayerParams&)
{
return Ptr<Layer>(AbsLayer::create());
}
Ptr<Layer> createBNLLLayerFromCaffe(LayerParams&)
{
return Ptr<Layer>(BNLLLayer::create());
}
Ptr<Layer> createPowerLayerFromCaffe(LayerParams &params)
{
float power = params.get<float>("power", 1.0f);
float scale = params.get<float>("scale", 1.0f);
float shift = params.get<float>("shift", 0.0f);
return Ptr<Layer>(PowerLayer::create(power, scale, shift));
}
}
}
\ No newline at end of file
@@ -164,10 +164,12 @@ public:
}
};
#ifdef HAVE_OPENCL
static String oclGetTMacro(const UMat &m)
{
return String("-DT=") + ocl::typeToStr(m.type()) + String(" ");
}
#endif
struct ReLUFunctor
{
@@ -311,23 +313,6 @@ struct PowerFunctor
#endif
};
template <typename ActivationLayer>
Ptr<Layer> createLayerFromCaffe(LayerParams&)
{
return Ptr<Layer>(ActivationLayer::create());
}
Ptr<Layer> createReLULayerFromCaffe(LayerParams &params);
Ptr<Layer> createSigmoidLayerFromCaffe(LayerParams&);
Ptr<Layer> createTanHLayerFromCaffe(LayerParams&);
Ptr<Layer> createAbsLayerFromCaffe(LayerParams&);
Ptr<Layer> createBNLLLayerFromCaffe(LayerParams&);
Ptr<Layer> createPowerLayerFromCaffe(LayerParams &params);
}
}
#endif
@@ -123,27 +123,5 @@ Ptr<InnerProductLayer> InnerProductLayer::create(int axis)
return Ptr<InnerProductLayer>(new FullyConnectedLayerImpl(axis));
}
Ptr<Layer> createInnerProductLayerFromCaffe(LayerParams &params)
{
const std::vector<Blob> &blobs = params.blobs;
CV_Assert(1 <= blobs.size() && blobs.size() <= 2);
int numOutputs = params.get<int>("num_output");
int innerSize = (int)blobs[0].total() / numOutputs;
bool bias = params.get<bool>("bias_term", true);
int axis = params.get<int>("axis", 1);
CV_Assert(blobs[0].dims() >= 2 && (size_t)(innerSize * numOutputs) == blobs[0].total());
CV_Assert(!bias || (blobs.size() == 2 && (size_t)numOutputs == blobs[1].total()));
Ptr<InnerProductLayer> l = InnerProductLayer::create(axis);
l->setParamsFrom(params);
l->blobs[0].reshape(Shape(numOutputs, innerSize));
if (bias)
l->blobs[1].reshape(Shape(1, numOutputs));
return Ptr<Layer>(l);
}
}
}
@@ -66,8 +66,6 @@ public:
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
Ptr<Layer> createInnerProductLayerFromCaffe(LayerParams &params);
}
}
#endif
@@ -245,26 +245,5 @@ Ptr<LRNLayer> LRNLayer::create(int type, int size, double alpha, double beta)
return Ptr<LRNLayer>(new LRNLayerImpl(type, size, alpha, beta));
}
Ptr<Layer> createLRNLayerFromCaffe(LayerParams &params)
{
int type;
String nrmType = params.get<String>("norm_region", "ACROSS_CHANNELS");
if (nrmType == "ACROSS_CHANNELS")
type = LRNLayer::CHANNEL_NRM;
else if (nrmType == "WITHIN_CHANNEL")
type = LRNLayer::SPATIAL_NRM;
else
CV_Error(Error::StsBadArg, "Unknown region type \"" + nrmType + "\"");
int size = params.get<int>("local_size", 5);
if (size % 2 != 1 || size <= 0)
CV_Error(Error::StsBadArg, "LRN layer supports only positive odd values for local_size");
double alpha = params.get<double>("alpha", 1);
double beta = params.get<double>("beta", 0.75);
return Ptr<Layer>(LRNLayer::create(type, size, alpha, beta));
}
}
}
@@ -48,29 +48,27 @@ namespace cv
{
namespace dnn
{
class LRNLayerImpl : public LRNLayer
{
bool useOpenCL;
Blob buf;
void channelNoramlization(Blob &src, Blob &dst);
template<typename XMat>
void channelNoramlization_(Blob &src, Blob &dst);
bool channelNoramlization_ocl(const UMat &src, UMat &dst);
void spatialNormalization(Blob &src, Blob &dst);
template<typename XMat>
void spatialNormalization_(Blob &src, Blob &dst);
class LRNLayerImpl : public LRNLayer
{
bool useOpenCL;
Blob buf;
public:
void channelNoramlization(Blob &src, Blob &dst);
template<typename XMat>
void channelNoramlization_(Blob &src, Blob &dst);
bool channelNoramlization_ocl(const UMat &src, UMat &dst);
LRNLayerImpl(int type = CHANNEL_NRM, int size = 5, double alpha = 1, double beta = 0.75);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
void spatialNormalization(Blob &src, Blob &dst);
template<typename XMat>
void spatialNormalization_(Blob &src, Blob &dst);
public:
Ptr<Layer> createLRNLayerFromCaffe(LayerParams &params);
LRNLayerImpl(int type = CHANNEL_NRM, int size = 5, double alpha = 1, double beta = 0.75);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
@@ -266,32 +266,5 @@ Ptr<PoolingLayer> PoolingLayer::create(int type, Size kernel, Size stride, Size
return Ptr<PoolingLayer>(new PoolingLayerImpl(type, kernel, stride, pad));
}
Ptr<Layer> createPoolingLayerFromCaffe(LayerParams &params)
{
int type;
Size kernel, stride, pad;
if (params.has("pool"))
{
String pool = params.get<String>("pool").toLowerCase();
if (pool == "max")
type = PoolingLayer::MAX;
else if (pool == "ave")
type = PoolingLayer::AVE;
else if (pool == "stochastic")
type = PoolingLayer::STOCHASTIC;
else
CV_Error(Error::StsBadArg, "Unknown pooling type \"" + pool + "\"");
}
else
{
type = PoolingLayer::MAX;
}
getCaffeConvParams(params, kernel, pad, stride);
return Ptr<Layer>(new PoolingLayerImpl(type, kernel, stride, pad));
}
}
}
@@ -48,33 +48,32 @@ namespace cv
{
namespace dnn
{
class PoolingLayerImpl : public PoolingLayer
{
bool useOpenCL;
Size inp, out;
void computeOutputShape(Size inpSz);
class PoolingLayerImpl : public PoolingLayer
{
bool useOpenCL;
Size inp, out;
bool pooling_ocl(const char *kname, const Blob &src, Blob &dst, Blob *mask = NULL);
void computeOutputShape(Size inpSz);
void maxPooling(Blob &src, Blob &dst);
void maxPooling_cpu(Blob &src, Blob &dst);
bool maxPooling_ocl(Blob &src, Blob &dst);
bool pooling_ocl(const char *kname, const Blob &src, Blob &dst, Blob *mask = NULL);
void avePooling(Blob &src, Blob &dst);
void avePooling_cpu(Blob &src, Blob &dst);
bool avePooling_ocl(Blob &src, Blob &dst);
void maxPooling(Blob &src, Blob &dst);
void maxPooling_cpu(Blob &src, Blob &dst);
bool maxPooling_ocl(Blob &src, Blob &dst);
public:
void avePooling(Blob &src, Blob &dst);
void avePooling_cpu(Blob &src, Blob &dst);
bool avePooling_ocl(Blob &src, Blob &dst);
PoolingLayerImpl();
PoolingLayerImpl(int type, Size kernel, Size stride, Size pad);
public:
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
PoolingLayerImpl();
PoolingLayerImpl(int type, Size kernel, Size stride, Size pad);
Ptr<Layer> createPoolingLayerFromCaffe(LayerParams &params);
void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
}
}
@@ -220,11 +220,5 @@ Ptr<SoftmaxLayer> SoftmaxLayer::create(int axis)
return Ptr<SoftmaxLayer>(new SoftMaxLayerImpl(axis));
}
Ptr<Layer> createSoftmaxLayerFromCaffe(LayerParams &params)
{
int axis = params.get<int>("axis", 1);
return Ptr<Layer>(SoftmaxLayer::create(axis));
}
}
}
@@ -67,8 +67,6 @@ public:
void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
};
Ptr<Layer> createSoftmaxLayerFromCaffe(LayerParams &params);
}
}
#endif