Commit e33da71a authored by Vitaliy Lyudvichenko

Add setTo() method for Blob, fix UMat allocation

parent 909a0022
@@ -164,13 +164,21 @@ namespace dnn
    /** @brief Creates blob with specified @p shape and @p type. */
    void create(const BlobShape &shape, int type = CV_32F, int allocFlags = ALLOC_MAT);

-   /** @brief Creates blob from cv::Mat or cv::UMat without copying the data */
+   /** @brief Creates blob from Mat or UMat without copying the data.
+    *  @details If @p in is a Mat, the Mat data is populated; otherwise the UMat data is.
+    */
    void fill(InputArray in);

    /** @brief Creates blob from user data.
     *  @details If @p deepCopy is false then CPU data will not be allocated.
     */
    void fill(const BlobShape &shape, int type, void *data, bool deepCopy = true);

+   /** @brief Sets @p value to the blob's last used data (if @p allocFlags == -1).
+    *  @details If @p allocFlags != -1, the destination data (Mat or UMat) is determined by flags from the AllocFlag enum, as in create().
+    */
+   void setTo(InputArray value, int allocFlags = -1);

    Mat& matRef(bool writeOnly = true);     //!< Returns reference to cv::Mat containing blob data.
    const Mat& matRefConst() const;         //!< Returns reference to cv::Mat containing blob data, for read-only purposes.
    UMat &umatRef(bool writeOnly = true);   //!< Returns reference to cv::UMat containing blob data.
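
For orientation, a minimal usage sketch of the API declared above (a sketch only, assuming the experimental cv::dnn module as of this commit; error handling omitted):

    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    void blobApiSketch()
    {
        Blob b;
        b.create(BlobShape(1, 3, 224, 224), CV_32F, Blob::ALLOC_MAT); // host-side storage only
        b.setTo(0);                    // allocFlags == -1: writes to the last used data (the Mat here)
        b.setTo(1, Blob::ALLOC_BOTH);  // writes both Mat and UMat, leaving them in sync
        Mat &data = b.matRef(false);   // writeOnly == false: existing contents are preserved
        (void)data;
    }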
@@ -101,6 +101,26 @@ namespace dnn
#endif
}
+void Blob::fill(InputArray in)
+{
+#ifdef CV_DNN_UMAT
+    CV_Assert(in.isMat() || in.isUMat());
+    if (in.isMat())
+    {
+        m = in.getMat();
+        state = HEAD_AT_MAT;
+    }
+    else
+    {
+        um = in.getUMat();
+        state = HEAD_AT_UMAT;
+    }
+#else
+    CV_Assert(in.isMat());
+    m = in.getMat();
+#endif
+}
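
Since fill() only stores a Mat/UMat header, the blob aliases the caller's buffer rather than owning a copy. A small sketch of that consequence (hypothetical snippet, not part of the commit):

    Mat src = Mat::zeros(4, 4, CV_32F);
    Blob b;
    b.fill(src);                   // no copy: the blob's Mat shares src's data
    src.at<float>(0, 0) = 42.f;    // later writes to src are therefore visible through the blob
    CV_Assert(b.matRefConst().at<float>(0, 0) == 42.f);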
static inline int getMatChannels(const Mat &mat)
{
    return (mat.dims <= 2) ? mat.channels() : mat.size[0];
@@ -226,6 +246,47 @@ namespace dnn
    CV_DNN_UMAT_ONLY(state = HEAD_AT_MAT);
}

+void Blob::setTo(InputArray value, int allocFlags)
+{
+#ifdef CV_DNN_UMAT
+    if (allocFlags == -1)
+    {
+        if (state == HEAD_AT_UMAT)
+            um.setTo(value);
+        else if (state == HEAD_AT_MAT)
+            m.setTo(value);
+        else // SYNCED or UNINITIALIZED
+        {
+            um.setTo(value);
+            m.setTo(value);
+            if (state == UNINITIALIZED)
+                state = SYNCED;
+        }
+    }
+    else if (allocFlags == ALLOC_BOTH)
+    {
+        m.setTo(value);
+        um.setTo(value);
+        state = SYNCED;
+    }
+    else if (allocFlags == ALLOC_MAT)
+    {
+        matRef().setTo(value);
+    }
+    else if (allocFlags == ALLOC_UMAT)
+    {
+        umatRef().setTo(value);
+    }
+    else
+    {
+        CV_Error(Error::StsBadArg, "allocFlags should be -1 or one of Blob::AllocFlag values");
+    }
+#else
+    m.setTo(value);
+#endif
+}
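
A behavioral sketch of the branches above, assuming a build with CV_DNN_UMAT defined (names as declared in this commit):

    Blob b;
    b.create(BlobShape(2, 3), CV_32F, Blob::ALLOC_MAT); // create() leaves the Mat as head
    b.setTo(0);                    // allocFlags == -1: head is the Mat, so only m is written
    b.setTo(1, Blob::ALLOC_UMAT);  // goes through umatRef(), making the UMat the head
    b.setTo(2, Blob::ALLOC_BOTH);  // writes both sides and marks the blob SYNCED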
void Blob::updateMat(bool syncData) const
{
#ifdef CV_DNN_UMAT
@@ -53,8 +53,6 @@ namespace cv
namespace dnn
{

typedef BlobShape Shape;

ConvolutionLayerImpl::ConvolutionLayerImpl()
{
    tryUseOpenCL = true;
@@ -104,7 +102,7 @@ void ConvolutionLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vecto
        CV_Assert(inputs[i]->rows() == input.rows() && inputs[i]->cols() == input.cols());
    }

-   int allocFlags = useOpenCL ? Blob::ALLOC_BOTH : Blob::ALLOC_MAT;
+   int allocFlags = useOpenCL ? Blob::ALLOC_UMAT : Blob::ALLOC_MAT;

    if (!is1x1())
    {
@@ -114,13 +112,13 @@ void ConvolutionLayerImpl::allocate(const std::vector<Blob*> &inputs, std::vecto
    if (bias)
    {
        biasOnesBlob.create(Shape(1, topH * topW), input.type(), allocFlags);
-       biasOnesBlob.matRef().setTo(1);
+       biasOnesBlob.setTo(1);
    }

    outputs.resize(inputs.size());
    for (size_t i = 0; i < inputs.size(); i++)
    {
-       outputs[i].create(Shape(inputs[i]->num(), topCn, topH, topW));
+       outputs[i].create(Shape(inputs[i]->num(), topCn, topH, topW), input.type(), allocFlags);
    }
}
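
For context on biasOnesBlob: the bias is applied as a rank-1 update, multiplying the per-channel bias column by a row of ones so it broadcasts over all topH * topW output positions. An illustrative sketch (the sizes and variable names below are hypothetical, not the layer's actual internals):

    int outCn = 64, outPlaneSize = 28 * 28;            // stand-ins for topCn and topH * topW
    Mat biases(outCn, 1, CV_32F, Scalar(0.1f));        // per-channel bias column
    Mat ones = Mat::ones(1, outPlaneSize, CV_32F);     // what biasOnesBlob.setTo(1) prepares
    Mat dst = Mat::zeros(outCn, outPlaneSize, CV_32F); // convolution output: channels x pixels
    dst += biases * ones;                              // each bias is broadcast over its channel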
@@ -86,7 +86,11 @@ public:
    {
        outputs.resize(inputs.size());

        for (size_t i = 0; i < inputs.size(); i++)
+       {
            outputs[i].shareFrom(*inputs[i]); //no data copy
+           //hotfix: shareFrom() doesn't properly handle Mat/UMat switching
+           outputs[i].matRef() = inputs[i]->matRefConst();
+       }
    }
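
For reference, one way shareFrom() could honor the Mat/UMat head itself, using the members this commit introduces (a hypothetical sketch, not the module's actual implementation):

    void Blob::shareFrom(const Blob &blob)
    {
    #ifdef CV_DNN_UMAT
        if (blob.state == HEAD_AT_UMAT)
        {
            um = blob.um;          // share the device-side buffer
            state = HEAD_AT_UMAT;
        }
        else                       // HEAD_AT_MAT, SYNCED or UNINITIALIZED
        {
            m = blob.m;            // share the host-side buffer
            state = HEAD_AT_MAT;
        }
    #else
        m = blob.m;
    #endif
    }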
    void forward(std::vector<Blob*> &inputs, std::vector<Blob> &outputs)
@@ -76,10 +76,7 @@ void FullyConnectedLayerImpl::allocate(const std::vector<Blob*> &input, std::vec
    int allocFlags = useOpenCL ? Blob::ALLOC_UMAT : Blob::ALLOC_MAT;

    biasOnesBlob.create(Shape(outerSize, 1), dtype, allocFlags);
-   if (useOpenCL)
-       biasOnesBlob.getRef<UMat>().setTo(1);
-   else
-       biasOnesBlob.getRef<Mat>().setTo(1);
+   biasOnesBlob.setTo(1);

    output.resize(input.size());
    for (size_t i = 0; i < input.size(); i++)
@@ -261,7 +261,7 @@ Ptr<Layer> createLRNLayerFromCaffe(LayerParams &params)
    double alpha = params.get<double>("alpha", 1);
    double beta = params.get<double>("beta", 0.75);

-   return Ptr<Layer>(new LRNLayerImpl(type, size, alpha, beta));
+   return Ptr<Layer>(LRNLayer::create(type, size, alpha, beta));
}

}