Commit 0af7f9ea authored by Vitaliy Lyudvichenko

Add OCL support and a public interface for the Pooling layer

parent 601afeed
@@ -209,7 +209,7 @@ namespace dnn
     {
     public:
-        Size kernel, pad, stride;
+        Size kernel, stride, pad;
     };

     class CV_EXPORTS_W ConvolutionLayer : public BaseConvolutionLayer
@@ -232,7 +232,7 @@ namespace dnn
     {
     public:
-        enum
+        enum Type
         {
             CHANNEL_NRM,
             SPATIAL_NRM
@@ -241,9 +241,26 @@ namespace dnn
         int size;
         double alpha, beta;
+
+        static Ptr<LRNLayer> create(int type = CHANNEL_NRM, int size = 5, double alpha = 1, double beta = 0.75);
     };

+    class CV_EXPORTS_W PoolingLayer : public Layer
+    {
+    public:
+        enum Type
+        {
+            MAX,
+            AVE,
+            STOCHASTIC
+        };
+
+        int type;
+        Size kernel, stride, pad;
+
+        static Ptr<PoolingLayer> create(int type = MAX, Size kernel = Size(2, 2), Size pad = Size(0, 0), Size stride = Size(1, 1));
+    };
+
     //! @}
     //! @}
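For context, a minimal sketch of how client code could use the new public factories (parameter values other than the declared defaults are purely illustrative, and the surrounding function is not part of this commit):

```cpp
#include <opencv2/dnn/all_layers.hpp>

using namespace cv;
using namespace cv::dnn;

void buildLayers()  // illustrative only
{
    // LRN across channels with the declared default parameters.
    Ptr<LRNLayer> lrn = LRNLayer::create(LRNLayer::CHANNEL_NRM, 5, 1, 0.75);

    // 2x2 max pooling with stride 2 and no padding.
    Ptr<PoolingLayer> pool = PoolingLayer::create(PoolingLayer::MAX,
                                                  Size(2, 2),  // kernel
                                                  Size(0, 0),  // pad
                                                  Size(2, 2)); // stride
}
```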
@@ -81,7 +81,7 @@ void initModule()
     REG_RUNTIME_LAYER_CLASS(Split, SplitLayer)
     REG_RUNTIME_LAYER_CLASS(Reshape, ReshapeLayer)
     REG_STATIC_LAYER_FUNC(Flatten, createFlattenLayer)
-    REG_RUNTIME_LAYER_CLASS(Pooling, PoolingLayerImpl)
+    REG_RUNTIME_LAYER_FUNC(Pooling, createPoolingLayerFromCaffe)
     REG_RUNTIME_LAYER_CLASS(MVN, MVNLayer)
     REG_RUNTIME_LAYER_FUNC(LRN, createLRNLayerFromCaffe)
     REG_RUNTIME_LAYER_CLASS(InnerProduct, FullyConnectedLayer)
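Pooling registration switches from a class registration to a factory function, matching how LRN is already registered, so that Caffe parameters can be translated into the new typed constructor. The factory itself lives in the collapsed implementation file (presumably pooling_layer.cpp); the sketch below is a guess at its shape, modeled on the visible createLRNLayerFromCaffe. The Caffe parameter names ("pool", "kernel_size", "pad", "stride") and their defaults are assumptions here.

```cpp
#include <opencv2/dnn.hpp>
#include <opencv2/dnn/all_layers.hpp>

using namespace cv;
using namespace cv::dnn;

// Hypothetical sketch of the registered factory; not the committed code.
static Ptr<Layer> createPoolingLayerFromCaffeSketch(LayerParams &params)
{
    int type = PoolingLayer::MAX;
    if (params.has("pool"))
    {
        String pool = params.get<String>("pool");
        if (pool == "MAX")      type = PoolingLayer::MAX;
        else if (pool == "AVE") type = PoolingLayer::AVE;
        else CV_Error(Error::StsBadArg, "Unsupported pooling type \"" + pool + "\"");
    }

    int kernel = params.get<int>("kernel_size", 2);
    int pad    = params.get<int>("pad", 0);
    int stride = params.get<int>("stride", 1);

    // Forward to the new public factory declared in all_layers.hpp.
    return PoolingLayer::create(type, Size(kernel, kernel),
                                Size(pad, pad), Size(stride, stride));
}
```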
@@ -53,17 +53,18 @@ namespace cv
 namespace dnn
 {

-LRNLayerImpl::LRNLayerImpl()
+LRNLayerImpl::LRNLayerImpl(int type_, int size_, double alpha_, double beta_)
 {
-    size = 5;
-    alpha = 1;
-    beta = 0.75;
-    type = CHANNEL_NRM;
+    type = type_;
+    size = size_;
+    alpha = alpha_;
+    beta = beta_;
 }

 void LRNLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
 {
     CV_Assert(inputs.size() == 1 && inputs[0]->dims() == 4);
+    CV_Assert(type == CHANNEL_NRM || type == SPATIAL_NRM);

     useOpenCL = cv::ocl::useOpenCL();

     if (type == SPATIAL_NRM && !useOpenCL)
@@ -154,6 +155,7 @@ void LRNLayerImpl::channelNoramlization_(Blob &srcBlob, Blob &dstBlob)

 bool LRNLayerImpl::channelNoramlization_ocl(const UMat &src, UMat &dst)
 {
+#ifdef HAVE_OPENCL
     if (src.offset != 0 || dst.offset != 0) //TODO: add offset
         return false;
@@ -187,6 +189,9 @@ bool LRNLayerImpl::channelNoramlization_ocl(const UMat &src, UMat &dst)
         return false;

     return true;
+#else
+    return false;
+#endif
 }
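The new preprocessor guard makes the _ocl variant compile to a stub that returns false when OpenCL support is absent, so callers uniformly treat false as "fall back to the CPU path". Reduced to its core, the idiom looks like this (the function name is hypothetical):

```cpp
#include <opencv2/core/ocl.hpp>

// Hypothetical example of the guard idiom used above: the OpenCL variant
// always exists at link time, but without HAVE_OPENCL (or on any runtime
// failure) it returns false and the caller runs the CPU implementation.
static bool forward_ocl(const cv::UMat &src, cv::UMat &dst)
{
#ifdef HAVE_OPENCL
    // ... build and enqueue kernels here; report failures via 'false' ...
    return true;
#else
    (void)src; (void)dst;  // silence unused-parameter warnings
    return false;
#endif
}
```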
 void LRNLayerImpl::spatialNormalization(Blob &src, Blob &dst)
@@ -232,27 +237,31 @@ void LRNLayerImpl::spatialNormalization_(Blob &srcBlob, Blob &dstBlob)
     }
 }
-Ptr<Layer> createLRNLayerFromCaffe(LayerParams &params)
+Ptr<LRNLayer> LRNLayer::create(int type, int size, double alpha, double beta)
 {
-    LRNLayerImpl *l = new LRNLayerImpl();
+    return Ptr<LRNLayer>(new LRNLayerImpl(type, size, alpha, beta));
+}
+
+Ptr<Layer> createLRNLayerFromCaffe(LayerParams &params)
+{
+    int type;
     String nrmType = params.get<String>("norm_region", "ACROSS_CHANNELS");
     if (nrmType == "ACROSS_CHANNELS")
-        l->type = LRNLayer::CHANNEL_NRM;
+        type = LRNLayer::CHANNEL_NRM;
     else if (nrmType == "WITHIN_CHANNEL")
-        l->type = LRNLayer::SPATIAL_NRM;
+        type = LRNLayer::SPATIAL_NRM;
     else
         CV_Error(Error::StsBadArg, "Unknown region type \"" + nrmType + "\"");

     int size = params.get<int>("local_size", 5);
     if (size % 2 != 1 || size <= 0)
         CV_Error(Error::StsBadArg, "LRN layer supports only positive odd values for local_size");

-    l->size = size;
-    l->alpha = params.get<double>("alpha", 1);
-    l->beta = params.get<double>("beta", 0.75);
+    double alpha = params.get<double>("alpha", 1);
+    double beta = params.get<double>("beta", 0.75);

-    return Ptr<Layer>(l);
+    return Ptr<Layer>(new LRNLayerImpl(type, size, alpha, beta));
 }
 }
@@ -64,7 +64,7 @@ namespace dnn
 public:

-    LRNLayerImpl();
+    LRNLayerImpl(int type = CHANNEL_NRM, int size = 5, double alpha = 1, double beta = 0.75);

     void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
     void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
 };
This diff is collapsed.
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
@@ -42,33 +42,40 @@
 #ifndef __OPENCV_DNN_LAYERS_POOLING_LAYER_HPP__
 #define __OPENCV_DNN_LAYERS_POOLING_LAYER_HPP__
 #include "../precomp.hpp"
+#include <opencv2/dnn/all_layers.hpp>

 namespace cv
 {
 namespace dnn
 {
-    class PoolingLayer : public Layer
+    class PoolingLayerImpl : public PoolingLayer
     {
-        enum
-        {
-            MAX,
-            AVE,
-            STOCHASTIC
-        };
-
-        int type;
-        Size kernel, pad, stride;
+        bool useOpenCL;
         Size inp, out;

         void computeOutputShape(Size inpSz);
-        void maxPooling(Blob &input, Blob &output);
-        void avePooling(Blob &input, Blob &output);
+
+        bool pooling_ocl(const char *kname, const Blob &src, Blob &dst, Blob *mask = NULL);
+
+        void maxPooling(Blob &src, Blob &dst);
+        void maxPooling_cpu(Blob &src, Blob &dst);
+        bool maxPooling_ocl(Blob &src, Blob &dst);
+
+        void avePooling(Blob &src, Blob &dst);
+        void avePooling_cpu(Blob &src, Blob &dst);
+        bool avePooling_ocl(Blob &src, Blob &dst);

     public:
-        PoolingLayer(LayerParams &params);
+        PoolingLayerImpl();
+        PoolingLayerImpl(int type, Size kernel, Size pad, Size stride);

         void allocate(const std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
         void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs);
     };
+
+    Ptr<Layer> createPoolingLayerFromCaffe(LayerParams &params);
 }
 }
 #endif
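The body of computeOutputShape is not visible here (the implementation file's diff is collapsed above), but Caffe-compatible pooling layers derive the output size with the formula below. A sketch under that assumption, written as a free function so it is self-contained:

```cpp
#include <cmath>
#include <opencv2/core.hpp>

// Sketch of the Caffe-style output-shape rule that computeOutputShape(Size)
// presumably implements; treat this as an assumption, not the committed code.
static cv::Size pooledShape(cv::Size inp, cv::Size kernel, cv::Size pad, cv::Size stride)
{
    cv::Size out;
    out.height = (int)std::ceil((float)(inp.height + 2 * pad.height - kernel.height) / stride.height) + 1;
    out.width  = (int)std::ceil((float)(inp.width  + 2 * pad.width  - kernel.width)  / stride.width)  + 1;

    if (pad.height || pad.width)
    {
        // Caffe clips so the last pooling window starts inside the padded image.
        if ((out.height - 1) * stride.height >= inp.height + pad.height)
            --out.height;
        if ((out.width - 1) * stride.width >= inp.width + pad.width)
            --out.width;
    }
    return out;
}
```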
/*************************************************************************************
* Copyright (c) 2015, Advanced Micro Devices, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
* INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
**************************************************************************************/
__kernel void MaxPoolForward(const int nthreads, __global T* bottom_data,
                             const int num, const int channels,
                             const int height, const int width,
                             const int pooled_height, const int pooled_width,
                             const int kernel_h, const int kernel_w,
                             const int stride_h, const int stride_w,
                             const int pad_h, const int pad_w,
                             __global T* top_data
#ifdef MASK
                             , __global int* mask, __global T* top_mask
#endif
                             )
{
    // Grid-stride loop: each work-item may process several output elements.
    int tmp = get_global_size(0);
    for (int index = get_global_id(0); index < nthreads; index += tmp) {
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;
        int hstart = ph * stride_h - pad_h;
        int wstart = pw * stride_w - pad_w;
        const int hend = min(hstart + kernel_h, height);
        const int wend = min(wstart + kernel_w, width);
        hstart = max(hstart, 0);
        wstart = max(wstart, 0);
        T maxval = -FLT_MAX;
        int maxidx = -1;
        // Use a local pointer rather than advancing bottom_data itself, so
        // later iterations of the grid-stride loop index from the buffer start.
        __global const T* slice = bottom_data + (n * channels + c) * height * width;
        for (int h = hstart; h < hend; ++h) {
            for (int w = wstart; w < wend; ++w) {
                if (slice[h * width + w] > maxval) {
                    maxidx = h * width + w;
                    maxval = slice[maxidx];
                }
            }
        }
        top_data[index] = maxval;
#ifdef MASK
        if (mask) {
            mask[index] = maxidx;
        } else {
            top_mask[index] = maxidx;
        }
#endif
    }
}
__kernel void AvePoolForward(const int nthreads, __global T* bottom_data,
                             const int num, const int channels,
                             const int height, const int width,
                             const int pooled_height, const int pooled_width,
                             const int kernel_h, const int kernel_w,
                             const int stride_h, const int stride_w,
                             const int pad_h, const int pad_w,
                             __global T* top_data)
{
    // Grid-stride loop, as in MaxPoolForward.
    int tmp = get_global_size(0);
    for (int index = get_global_id(0); index < nthreads; index += tmp) {
        int pw = index % pooled_width;
        int ph = (index / pooled_width) % pooled_height;
        int c = (index / pooled_width / pooled_height) % channels;
        int n = index / pooled_width / pooled_height / channels;
        int hstart = ph * stride_h - pad_h;
        int wstart = pw * stride_w - pad_w;
        // The divisor counts padded positions too (Caffe semantics)...
        int hend = min(hstart + kernel_h, height + pad_h);
        int wend = min(wstart + kernel_w, width + pad_w);
        const int pool_size = (hend - hstart) * (wend - wstart);
        // ...but the summation is clipped to the real image area.
        hstart = max(hstart, 0);
        wstart = max(wstart, 0);
        hend = min(hend, height);
        wend = min(wend, width);
        T aveval = 0;
        // Local pointer instead of mutating bottom_data (see MaxPoolForward).
        __global const T* slice = bottom_data + (n * channels + c) * height * width;
        for (int h = hstart; h < hend; ++h) {
            for (int w = wstart; w < wend; ++w) {
                aveval += slice[h * width + w];
            }
        }
        top_data[index] = aveval / pool_size;
    }
}
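The host code that compiles and enqueues these kernels is in the collapsed implementation file. As a rough sketch, a launch through OpenCV's cv::ocl wrappers could look like the following; the way the .cl source is embedded, the "-DT=float" build option, and all parameter plumbing are assumptions:

```cpp
#include <opencv2/core/ocl.hpp>

// Hypothetical host-side launch of MaxPoolForward. 'poolingClSource' stands
// in for the embedded contents of pooling.cl; OpenCV builds normally compile
// .cl files into generated ocl::<module>::*_oclsrc program sources.
static bool launchMaxPool(const cv::UMat &bottom, cv::UMat &top,
                          int num, int channels, int height, int width,
                          int pooledH, int pooledW, cv::Size kernel,
                          cv::Size stride, cv::Size pad,
                          const cv::ocl::ProgramSource &poolingClSource)
{
    int nthreads = num * channels * pooledH * pooledW;
    cv::ocl::Kernel k("MaxPoolForward", poolingClSource, "-DT=float");
    if (k.empty())
        return false;  // no device / build failure -> caller uses CPU path

    k.args(nthreads, cv::ocl::KernelArg::PtrReadOnly(bottom),
           num, channels, height, width, pooledH, pooledW,
           kernel.height, kernel.width, stride.height, stride.width,
           pad.height, pad.width,
           cv::ocl::KernelArg::PtrWriteOnly(top));

    size_t globalSize = (size_t)nthreads;
    return k.run(1, &globalSize, NULL, true);  // blocking run
}
```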
@@ -142,12 +142,24 @@ TEST(Layer_Test_InnerProduct, Accuracy)

 TEST(Layer_Test_Pooling_max, Accuracy)
 {
-    testLayerUsingCaffeModels("layer_pooling_max");
+    OCL_OFF(testLayerUsingCaffeModels("layer_pooling_max"));
+    OCL_ON();
+}
+OCL_TEST(Layer_Test_Pooling_max, Accuracy)
+{
+    OCL_ON(testLayerUsingCaffeModels("layer_pooling_max"));
+    OCL_OFF();
 }
 TEST(Layer_Test_Pooling_ave, Accuracy)
 {
-    testLayerUsingCaffeModels("layer_pooling_ave");
+    OCL_OFF(testLayerUsingCaffeModels("layer_pooling_ave"));
+    OCL_ON();
+}
+OCL_TEST(Layer_Test_Pooling_ave, Accuracy)
+{
+    OCL_ON(testLayerUsingCaffeModels("layer_pooling_ave"));
+    OCL_OFF();
 }

 TEST(Layer_Test_MVN, Accuracy)
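OCL_OFF pins the accuracy test to the CPU path, while the OCL_TEST variant exercises the OpenCL kernels. In OpenCV's test framework these macros essentially toggle the global OpenCL switch around the wrapped call, roughly as follows (simplified form of the definitions in opencv2/ts/ocl_test.hpp):

```cpp
// Simplified: flip the global OpenCL flag before running the wrapped statement.
#define OCL_OFF(fn) cv::ocl::setUseOpenCL(false); fn
#define OCL_ON(fn)  cv::ocl::setUseOpenCL(true);  fn
```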