Commit 942e9205 authored by Vitaliy Lyudvichenko

Fix the reshape layer to pass the Torch test

parent 231546b7
@@ -364,9 +364,15 @@ BlobShape computeShapeByReshapeMask(const BlobShape &srcShape, const BlobShape &
 {
     if (srcRange == Range::all())
         srcRange = Range(0, srcShape.dims());
+    else
+    {
+        int sz = srcRange.size();
+        srcRange.start = srcShape.canonicalAxis(srcRange.start);
+        srcRange.end = (srcRange.end == INT_MAX) ? srcShape.dims() : srcRange.start + sz;
+    }
 
     CV_Assert(0 <= srcRange.start && srcRange.start <= srcRange.end && srcRange.end <= srcShape.dims());
-    Shape dstShape(srcShape.dims() - srcRange.size() + maskShape.dims(), nullptr);
+    BlobShape dstShape(srcShape.dims() - srcRange.size() + maskShape.dims(), (const int*)NULL);
 
     std::copy(srcShape.ptr(), srcShape.ptr() + srcRange.start, dstShape.ptr());
     std::copy(srcShape.ptr() + srcRange.end, srcShape.ptr() + srcShape.dims(), dstShape.ptr() + srcRange.start + maskShape.dims());
......
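For reference, the range canonicalization added above can be exercised in isolation. The following standalone sketch is a hypothetical stand-in, not the patch's code: it assumes only OpenCV core, and approximates `BlobShape::canonicalAxis` by wrapping negative axes.

#include <opencv2/core.hpp>
#include <climits>
#include <cstdio>

// Sketch of the logic above: resolve an all/open-ended/negative Range
// against a shape with `dims` dimensions. Negative-axis wrapping here is
// an assumption about canonicalAxis, not the library's implementation.
static cv::Range canonicalizeRange(cv::Range r, int dims)
{
    if (r == cv::Range::all())
        return cv::Range(0, dims);
    int sz = r.size();
    if (r.start < 0) r.start += dims;          // wrap a negative axis
    r.end = (r.end == INT_MAX) ? dims : r.start + sz;
    return r;
}

int main()
{
    cv::Range r = canonicalizeRange(cv::Range(1, INT_MAX), 4);
    std::printf("[%d, %d)\n", r.start, r.end); // prints [1, 4)
}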
#include "../precomp.hpp"
#include "layer_loaders.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#include <climits>
namespace cv
{
@@ -180,13 +181,13 @@ Ptr<Layer> createLayerFromCaffe<ReshapeLayer>(LayerParams &params)
     int axis = params.get<int>("axis", 0);
     int numAxes = params.get<int>("num_axes", -1);
     CV_Assert(numAxes >= -1);
-    Range applyingRange = (numAxes == -1) ? Range::all() : Range(axis, axis + numAxes);
+    Range applyingRange = (numAxes == -1) ? Range(axis, INT_MAX) : Range(axis, axis + numAxes);
 
     Shape newShape;
     if (params.has("dim"))
     {
         const DictValue &paramShape = params.get("dim");
-        newShape = Shape(paramShape.size(), nullptr);
+        newShape = Shape::all(paramShape.size());
         for (int i = 0; i < paramShape.size(); i++)
             newShape[i] = paramShape.get<int>(i);
     }
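The first change matters whenever `axis != 0`: `Range::all()` discarded the axis entirely, while `Range(axis, INT_MAX)` preserves it and leaves the open end to be clamped later, as shown in the sketch above. This matches Caffe's `num_axes = -1`, meaning "from `axis` to the last dimension". A minimal sketch of the mapping (not the library API, just an illustration):

#include <opencv2/core.hpp>
#include <climits>

// Map Caffe's (axis, num_axes) reshape parameters to the half-open range
// of dimensions the new-shape mask applies to. The INT_MAX end is clamped
// to the blob's dimensionality when the output shape is computed.
static cv::Range applyingRangeFor(int axis, int numAxes)
{
    CV_Assert(numAxes >= -1);
    return (numAxes == -1) ? cv::Range(axis, INT_MAX)
                           : cv::Range(axis, axis + numAxes);
}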
@@ -290,4 +291,4 @@ template Ptr<Layer> createLayerFromCaffe<BNLLLayer>(LayerParams&);
 template Ptr<Layer> createLayerFromCaffe<PowerLayer>(LayerParams&);
 
 }
-}
\ No newline at end of file
+}
@@ -89,10 +89,12 @@ void FullyConnectedLayerImpl::allocate(const std::vector<Blob*> &input, std::vec
 
 void FullyConnectedLayerImpl::forward(std::vector<Blob*> &input, std::vector<Blob> &output)
 {
-    if (!useOpenCL)
-        forward_<Mat>(input, output);
-    else
+    #ifdef HAVE_OPENCL
+    if (useOpenCL)
         forward_<UMat>(input, output);
+    else
+    #endif
+        forward_<Mat>(input, output);
 }
 
 template<typename XMat>
......
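The reordering above also makes the function compile when HAVE_OPENCL is undefined: the `Mat` path becomes the unconditional fallback, and the `UMat` branch disappears from the build entirely. A self-contained sketch of the same dispatch pattern (function names are illustrative, not from the patch):

#include <opencv2/core.hpp>

template<typename XMat>
static void forwardImpl_(const XMat &src, XMat &dst)
{
    cv::add(src, src, dst);  // placeholder for the real layer math
}

static void forwardDispatch(const cv::Mat &src, cv::Mat &dst, bool useOpenCL)
{
#ifdef HAVE_OPENCL
    if (useOpenCL)
    {
        cv::UMat usrc = src.getUMat(cv::ACCESS_READ), udst;
        forwardImpl_<cv::UMat>(usrc, udst);
        udst.copyTo(dst);
        return;
    }
#endif
    forwardImpl_<cv::Mat>(src, dst);  // CPU fallback, always compiled
}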
@@ -47,7 +47,6 @@
 #include <opencv2/core/ocl.hpp>
 #include <opencv2/dnn/shape_utils.hpp>
 #include <algorithm>
-#include <type_traits>
 
 namespace cv
 {
@@ -205,6 +204,21 @@ void LRNLayerImpl::spatialNormalization(Blob &src, Blob &dst)
         spatialNormalization_<UMat>(src, dst);
 }
 
+//TODO: fix cv::boxFilter with BORDER_ISOLATED flag in CPU mode
+template<>
+void LRNLayerImpl::sqrBoxFilter_<Mat>(const Mat &src, Mat &dst)
+{
+    Mat bufMat = buf.getRef<Mat>();
+    src.copyTo(bufMat);
+    cv::sqrBoxFilter(bufMat, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT);
+}
+
+template<>
+void LRNLayerImpl::sqrBoxFilter_<UMat>(const UMat &src, UMat &dst)
+{
+    cv::sqrBoxFilter(src, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT | BORDER_ISOLATED);
+}
+
 template<typename XMat>
 void LRNLayerImpl::spatialNormalization_(Blob &srcBlob, Blob &dstBlob)
 {
@@ -221,17 +235,7 @@ void LRNLayerImpl::spatialNormalization_(Blob &srcBlob, Blob &dstBlob)
             XMat src = getPlane(srcMat, n, cn);
             XMat dst = getPlane(dstMat, n, cn);
 
-            if (std::is_same<XMat, UMat>::value)
-            {
-                cv::sqrBoxFilter(src, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT | BORDER_ISOLATED);
-            }
-            else
-            {
-                //TODO: fix cv::boxFilter with BORDER_ISOLATED flag in CPU mode
-                Mat bufMat = buf.getRef<Mat>();
-                src.copyTo(bufMat);
-                cv::sqrBoxFilter(bufMat, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT);
-            }
+            sqrBoxFilter_(src, dst);
 
             dst.convertTo(dst, dst.type(), alpha/(size*size), 1);
             cv::pow(dst, beta, dst);
......
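The `Mat` specialization above sidesteps the TODO by filtering a scratch copy: on a detached buffer, plain BORDER_CONSTANT gives the result that BORDER_CONSTANT | BORDER_ISOLATED would give on the original submatrix view, since there is no parent matrix left to read from. A runnable sketch of that workaround (hypothetical helper, assuming float input):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Sketch of the CPU path above: copyTo() detaches the plane from its
// parent blob, so the unnormalized square-box filter cannot read pixels
// of neighbouring planes even without the BORDER_ISOLATED flag.
static void sqrBoxCPU(const cv::Mat &plane, cv::Mat &dst, int size)
{
    cv::Mat buf;
    plane.copyTo(buf);
    cv::sqrBoxFilter(buf, dst, -1 /*same depth*/, cv::Size(size, size),
                     cv::Point(-1, -1), false, cv::BORDER_CONSTANT);
}

int main()
{
    cv::Mat plane(8, 8, CV_32F, cv::Scalar(2.f)), dst;
    sqrBoxCPU(plane, dst, 3);  // interior values: 3*3 * 2*2 = 36
}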
@@ -62,6 +62,8 @@ class LRNLayerImpl : public LRNLayer
     void spatialNormalization(Blob &src, Blob &dst);
     template<typename XMat>
     void spatialNormalization_(Blob &src, Blob &dst);
+    template<typename XMat>
+    void sqrBoxFilter_(const XMat &src, XMat &dst);
 
 public:
......
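Declaring `sqrBoxFilter_` as a member template here and fully specializing it for `Mat` and `UMat` in the .cpp file replaces the former runtime `std::is_same` branch with compile-time dispatch, which is also why the `<type_traits>` include could be dropped. The shape of that pattern, reduced to a sketch with an illustrative class name:

#include <opencv2/core.hpp>

struct Normalizer
{
    template<typename XMat>
    void filter(const XMat &src, XMat &dst);  // declared for all XMat
};

// Full specializations live at namespace scope in the .cpp file;
// each backend gets its own body, selected at compile time.
template<>
void Normalizer::filter<cv::Mat>(const cv::Mat &src, cv::Mat &dst)
{ src.convertTo(dst, -1); }   // CPU-specific code path

template<>
void Normalizer::filter<cv::UMat>(const cv::UMat &src, cv::UMat &dst)
{ src.copyTo(dst); }          // OpenCL-specific code path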
@@ -52,6 +52,12 @@ namespace dnn {
 #if defined(ENABLE_TORCH_IMPORTER) && ENABLE_TORCH_IMPORTER
 #include "THDiskFile.h"
 
+#ifdef NDEBUG
+static bool dbgPrint = false;
+#else
+static bool dbgPrint = true;
+#endif
+
 enum LuaType
 {
     TYPE_NIL = 0,
@@ -290,7 +296,8 @@ struct TorchImporter : public ::cv::dnn::Importer
         }
 
         String key = readString();
-        std::cout << i << "th key: " << key << "\n";
+        if (dbgPrint)
+            std::cout << i << "th key: " << key << "\n";
 
         fpos = THFile_position(file);
         int vtype = readInt();
@@ -334,13 +341,16 @@ struct TorchImporter : public ::cv::dnn::Importer
         }
 
         //Debug output
-        std::cout << "scalarParams:\n";
-        std::cout << scalarParams;
+        if (dbgPrint)
+        {
+            std::cout << "scalarParams:\n";
+            std::cout << scalarParams;
 
-        std::cout << "#" << tensorParams.size() << " tensorParams:\n";
-        std::map<String,Blob>::const_iterator it;
-        for (it = tensorParams.begin(); it != tensorParams.end(); it++)
-            std::cout << it->first << ": Tensor " << it->second.shape() << "\n";
+            std::cout << "#" << tensorParams.size() << " tensorParams:\n";
+            std::map<String,Blob>::const_iterator it;
+            for (it = tensorParams.begin(); it != tensorParams.end(); it++)
+                std::cout << it->first << ": Tensor " << it->second.shape() << "\n";
+        }
     }
 
     void readTorchTensor(int indexTensor, int typeTensor)
@@ -435,7 +445,9 @@ struct TorchImporter : public ::cv::dnn::Importer
         String className = readTorchClassName();
         String nnName;
 
-        std::cout << "Class: " << className << std::endl;
+        if (dbgPrint)
+            std::cout << "Class: " << className << std::endl;
+
         int type;
         if ( (type = parseTensorType(className)) >= 0 ) //is Tensor
......
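All of the importer's `std::cout` tracing is now gated on `dbgPrint`, which is fixed at compile time by the standard NDEBUG macro, so release builds stay silent without touching each call site. A minimal sketch of the same gate (the printed class name is a sample value, not from the patch):

#include <iostream>

// Compile-time debug switch mirroring the pattern above: NDEBUG is defined
// in release builds, so only debug builds print.
#ifdef NDEBUG
static const bool dbgPrint = false;
#else
static const bool dbgPrint = true;
#endif

int main()
{
    if (dbgPrint)
        std::cout << "Class: " << "nn.Sequential" << std::endl;
}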