Commit bfdc06cb authored by Alexander Alekhin
parent 91e16573
if(IOS OR WINRT)
ocv_module_disable(superres)
endif()
set(the_description "Super Resolution")
if(HAVE_CUDA)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wshadow)
endif()
ocv_define_module(superres opencv_imgproc opencv_video
OPTIONAL opencv_videoio opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc opencv_cudaoptflow opencv_cudacodec
WRAP python)
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_SUPERRES_HPP
#define OPENCV_SUPERRES_HPP
#include "opencv2/core.hpp"
#include "opencv2/superres/optical_flow.hpp"
/**
@defgroup superres Super Resolution
The Super Resolution module contains a set of functions and classes that can be used to solve the
problem of resolution enhancement. A few methods are implemented; most of them are described in
the papers @cite Farsiu03 and @cite Mitzel09.
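A minimal usage sketch (the file name "input.avi" is only an illustrative placeholder):
@code
cv::Ptr<cv::superres::FrameSource> frameSource = cv::superres::createFrameSource_Video("input.avi");
cv::Ptr<cv::superres::SuperResolution> superRes = cv::superres::createSuperResolution_BTVL1();
superRes->setInput(frameSource);
cv::Mat result;
superRes->nextFrame(result); // result receives the first super-resolved frame
@endcode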
*/
namespace cv
{
namespace superres
{
//! @addtogroup superres
//! @{
class CV_EXPORTS FrameSource
{
public:
virtual ~FrameSource();
virtual void nextFrame(OutputArray frame) = 0;
virtual void reset() = 0;
};
CV_EXPORTS Ptr<FrameSource> createFrameSource_Empty();
CV_EXPORTS Ptr<FrameSource> createFrameSource_Video(const String& fileName);
CV_EXPORTS Ptr<FrameSource> createFrameSource_Video_CUDA(const String& fileName);
CV_EXPORTS Ptr<FrameSource> createFrameSource_Camera(int deviceId = 0);
/** @brief Base class for Super Resolution algorithms.
The class is only used to define the common interface for the whole family of Super Resolution
algorithms.
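A typical processing loop sketch, where superRes is a configured SuperResolution instance whose
input was already set with setInput(); an empty output frame signals that the source is exhausted:
@code
cv::Mat frame;
for (;;)
{
    superRes->nextFrame(frame);
    if (frame.empty())
        break;
    // ... use the super-resolved frame ...
}
@endcode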
*/
class CV_EXPORTS SuperResolution : public cv::Algorithm, public FrameSource
{
public:
/** @brief Set input frame source for Super Resolution algorithm.
@param frameSource Input frame source
*/
void setInput(const Ptr<FrameSource>& frameSource);
/** @brief Process next frame from input and return output result.
@param frame Output result
*/
void nextFrame(OutputArray frame) CV_OVERRIDE;
void reset() CV_OVERRIDE;
/** @brief Clear all inner buffers.
*/
virtual void collectGarbage();
//! @brief Scale factor
/** @see setScale */
virtual int getScale() const = 0;
/** @copybrief getScale @see getScale */
virtual void setScale(int val) = 0;
//! @brief Iteration count
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
//! @brief Asymptotic value of steepest descent method
/** @see setTau */
virtual double getTau() const = 0;
/** @copybrief getTau @see getTau */
virtual void setTau(double val) = 0;
//! @brief Weight parameter to balance data term and smoothness term
/** @see setLabmda */
virtual double getLabmda() const = 0;
/** @copybrief getLabmda @see getLabmda */
virtual void setLabmda(double val) = 0;
//! @brief Parameter of spatial distribution in Bilateral-TV
/** @see setAlpha */
virtual double getAlpha() const = 0;
/** @copybrief getAlpha @see getAlpha */
virtual void setAlpha(double val) = 0;
//! @brief Kernel size of Bilateral-TV filter
/** @see setKernelSize */
virtual int getKernelSize() const = 0;
/** @copybrief getKernelSize @see getKernelSize */
virtual void setKernelSize(int val) = 0;
//! @brief Gaussian blur kernel size
/** @see setBlurKernelSize */
virtual int getBlurKernelSize() const = 0;
/** @copybrief getBlurKernelSize @see getBlurKernelSize */
virtual void setBlurKernelSize(int val) = 0;
//! @brief Gaussian blur sigma
/** @see setBlurSigma */
virtual double getBlurSigma() const = 0;
/** @copybrief getBlurSigma @see getBlurSigma */
virtual void setBlurSigma(double val) = 0;
//! @brief Radius of the temporal search area
/** @see setTemporalAreaRadius */
virtual int getTemporalAreaRadius() const = 0;
/** @copybrief getTemporalAreaRadius @see getTemporalAreaRadius */
virtual void setTemporalAreaRadius(int val) = 0;
//! @brief Dense optical flow algorithm
/** @see setOpticalFlow */
virtual Ptr<cv::superres::DenseOpticalFlowExt> getOpticalFlow() const = 0;
/** @copybrief getOpticalFlow @see getOpticalFlow */
virtual void setOpticalFlow(const Ptr<cv::superres::DenseOpticalFlowExt> &val) = 0;
protected:
SuperResolution();
virtual void initImpl(Ptr<FrameSource>& frameSource) = 0;
virtual void processImpl(Ptr<FrameSource>& frameSource, OutputArray output) = 0;
bool isUmat_;
private:
Ptr<FrameSource> frameSource_;
bool firstCall_;
};
/** @brief Create Bilateral TV-L1 Super Resolution.
This class implements the Super Resolution algorithm described in the papers @cite Farsiu03 and
@cite Mitzel09.
Here are important members of the class that control the algorithm, which you can set after
constructing the class instance:
- **int scale** Scale factor.
- **int iterations** Iteration count.
- **double tau** Asymptotic value of steepest descent method.
- **double lambda** Weight parameter to balance data term and smoothness term.
- **double alpha** Parameter of spatial distribution in Bilateral-TV.
- **int btvKernelSize** Kernel size of Bilateral-TV filter.
- **int blurKernelSize** Gaussian blur kernel size.
- **double blurSigma** Gaussian blur sigma.
- **int temporalAreaRadius** Radius of the temporal search area.
- **Ptr\<DenseOpticalFlowExt\> opticalFlow** Dense optical flow algorithm.
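A configuration sketch (the values below are purely illustrative, not tuned recommendations):
@code
cv::Ptr<cv::superres::SuperResolution> superRes = cv::superres::createSuperResolution_BTVL1();
superRes->setScale(2);                // upsampling factor
superRes->setIterations(100);         // number of BTV-L1 iterations
superRes->setTemporalAreaRadius(4);   // neighbouring frames used on each side
superRes->setOpticalFlow(cv::superres::createOptFlow_Farneback());
@endcode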
*/
CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1();
CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_CUDA();
//! @} superres
}
}
#endif // OPENCV_SUPERRES_HPP
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_SUPERRES_OPTICAL_FLOW_HPP
#define OPENCV_SUPERRES_OPTICAL_FLOW_HPP
#include "opencv2/core.hpp"
namespace cv
{
namespace superres
{
//! @addtogroup superres
//! @{
class CV_EXPORTS DenseOpticalFlowExt : public cv::Algorithm
{
public:
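/** @brief Computes a dense optical flow field between two frames.
If flow2 is needed, the x and y components of the flow are returned separately in flow1 and flow2;
otherwise flow1 receives a single two-channel (x, y) flow field.
*/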
virtual void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2 = noArray()) = 0;
virtual void collectGarbage() = 0;
};
class CV_EXPORTS FarnebackOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
/** @see setPyrScale */
virtual double getPyrScale() const = 0;
/** @copybrief getPyrScale @see getPyrScale */
virtual void setPyrScale(double val) = 0;
/** @see setLevelsNumber */
virtual int getLevelsNumber() const = 0;
/** @copybrief getLevelsNumber @see getLevelsNumber */
virtual void setLevelsNumber(int val) = 0;
/** @see setWindowSize */
virtual int getWindowSize() const = 0;
/** @copybrief getWindowSize @see getWindowSize */
virtual void setWindowSize(int val) = 0;
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
/** @see setPolyN */
virtual int getPolyN() const = 0;
/** @copybrief getPolyN @see getPolyN */
virtual void setPolyN(int val) = 0;
/** @see setPolySigma */
virtual double getPolySigma() const = 0;
/** @copybrief getPolySigma @see getPolySigma */
virtual void setPolySigma(double val) = 0;
/** @see setFlags */
virtual int getFlags() const = 0;
/** @copybrief getFlags @see getFlags */
virtual void setFlags(int val) = 0;
};
CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback();
CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback_CUDA();
// CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
class CV_EXPORTS DualTVL1OpticalFlow : public virtual DenseOpticalFlowExt
{
public:
/** @see setTau */
virtual double getTau() const = 0;
/** @copybrief getTau @see getTau */
virtual void setTau(double val) = 0;
/** @see setLambda */
virtual double getLambda() const = 0;
/** @copybrief getLambda @see getLambda */
virtual void setLambda(double val) = 0;
/** @see setTheta */
virtual double getTheta() const = 0;
/** @copybrief getTheta @see getTheta */
virtual void setTheta(double val) = 0;
/** @see setScalesNumber */
virtual int getScalesNumber() const = 0;
/** @copybrief getScalesNumber @see getScalesNumber */
virtual void setScalesNumber(int val) = 0;
/** @see setWarpingsNumber */
virtual int getWarpingsNumber() const = 0;
/** @copybrief getWarpingsNumber @see getWarpingsNumber */
virtual void setWarpingsNumber(int val) = 0;
/** @see setEpsilon */
virtual double getEpsilon() const = 0;
/** @copybrief getEpsilon @see getEpsilon */
virtual void setEpsilon(double val) = 0;
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
/** @see setUseInitialFlow */
virtual bool getUseInitialFlow() const = 0;
/** @copybrief getUseInitialFlow @see getUseInitialFlow */
virtual void setUseInitialFlow(bool val) = 0;
};
CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1_CUDA();
class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
//! @brief Flow smoothness
/** @see setAlpha */
virtual double getAlpha() const = 0;
/** @copybrief getAlpha @see getAlpha */
virtual void setAlpha(double val) = 0;
//! @brief Gradient constancy importance
/** @see setGamma */
virtual double getGamma() const = 0;
/** @copybrief getGamma @see getGamma */
virtual void setGamma(double val) = 0;
//! @brief Pyramid scale factor
/** @see setScaleFactor */
virtual double getScaleFactor() const = 0;
/** @copybrief getScaleFactor @see getScaleFactor */
virtual void setScaleFactor(double val) = 0;
//! @brief Number of lagged non-linearity iterations (inner loop)
/** @see setInnerIterations */
virtual int getInnerIterations() const = 0;
/** @copybrief getInnerIterations @see getInnerIterations */
virtual void setInnerIterations(int val) = 0;
//! @brief Number of warping iterations (number of pyramid levels)
/** @see setOuterIterations */
virtual int getOuterIterations() const = 0;
/** @copybrief getOuterIterations @see getOuterIterations */
virtual void setOuterIterations(int val) = 0;
//! @brief Number of linear system solver iterations
/** @see setSolverIterations */
virtual int getSolverIterations() const = 0;
/** @copybrief getSolverIterations @see getSolverIterations */
virtual void setSolverIterations(int val) = 0;
};
CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();
class PyrLKOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
/** @see setWindowSize */
virtual int getWindowSize() const = 0;
/** @copybrief getWindowSize @see getWindowSize */
virtual void setWindowSize(int val) = 0;
/** @see setMaxLevel */
virtual int getMaxLevel() const = 0;
/** @copybrief getMaxLevel @see getMaxLevel */
virtual void setMaxLevel(int val) = 0;
/** @see setIterations */
virtual int getIterations() const = 0;
/** @copybrief getIterations @see getIterations */
virtual void setIterations(int val) = 0;
};
CV_EXPORTS Ptr<PyrLKOpticalFlow> createOptFlow_PyrLK_CUDA();
//! @}
}
}
#endif // OPENCV_SUPERRES_OPTICAL_FLOW_HPP
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
using namespace perf;
static const char * impls[] = {
#ifdef HAVE_CUDA
"cuda",
#endif
"plain"
};
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
CV_PERF_TEST_MAIN_WITH_IMPLS(superres, impls, printCudaInfo())
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PERF_PRECOMP_HPP__
#define __OPENCV_PERF_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/ts/cuda_perf.hpp"
#include "opencv2/superres.hpp"
#include "opencv2/superres/optical_flow.hpp"
#endif
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
namespace opencv_test
{
using namespace perf;
using namespace cv::superres;
using namespace cv::cuda;
namespace
{
class OneFrameSource_CPU : public FrameSource
{
public:
explicit OneFrameSource_CPU(const Mat& frame) : frame_(frame) {}
void nextFrame(OutputArray frame)
{
frame.getMatRef() = frame_;
}
void reset()
{
}
private:
Mat frame_;
};
class OneFrameSource_CUDA : public FrameSource
{
public:
explicit OneFrameSource_CUDA(const GpuMat& frame) : frame_(frame) {}
void nextFrame(OutputArray frame)
{
frame.getGpuMatRef() = frame_;
}
void reset()
{
}
private:
GpuMat frame_;
};
class ZeroOpticalFlow : public DenseOpticalFlowExt
{
public:
virtual void calc(InputArray frame0, InputArray, OutputArray flow1, OutputArray flow2)
{
cv::Size size = frame0.size();
if (!flow2.needed())
{
flow1.create(size, CV_32FC2);
flow1.setTo(cv::Scalar::all(0));
}
else
{
flow1.create(size, CV_32FC1);
flow2.create(size, CV_32FC1);
flow1.setTo(cv::Scalar::all(0));
flow2.setTo(cv::Scalar::all(0));
}
}
virtual void collectGarbage()
{
}
};
} // namespace
PERF_TEST_P(Size_MatType, SuperResolution_BTVL1,
Combine(Values(szSmall64, szSmall128),
Values(MatType(CV_8UC1), MatType(CV_8UC3))))
{
declare.time(5 * 60);
const Size size = get<0>(GetParam());
const int type = get<1>(GetParam());
Mat frame(size, type);
declare.in(frame, WARMUP_RNG);
const int scale = 2;
const int iterations = 50;
const int temporalAreaRadius = 1;
Ptr<DenseOpticalFlowExt> opticalFlow(new ZeroOpticalFlow);
if (PERF_RUN_CUDA())
{
Ptr<SuperResolution> superRes = createSuperResolution_BTVL1_CUDA();
superRes->setScale(scale);
superRes->setIterations(iterations);
superRes->setTemporalAreaRadius(temporalAreaRadius);
superRes->setOpticalFlow(opticalFlow);
superRes->setInput(makePtr<OneFrameSource_CUDA>(GpuMat(frame)));
GpuMat dst;
superRes->nextFrame(dst);
TEST_CYCLE_N(10) superRes->nextFrame(dst);
CUDA_SANITY_CHECK(dst, 2);
}
else
{
Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();
superRes->setScale(scale);
superRes->setIterations(iterations);
superRes->setTemporalAreaRadius(temporalAreaRadius);
superRes->setOpticalFlow(opticalFlow);
superRes->setInput(makePtr<OneFrameSource_CPU>(frame));
Mat dst;
superRes->nextFrame(dst);
TEST_CYCLE_N(10) superRes->nextFrame(dst);
CPU_SANITY_CHECK(dst);
}
}
#ifdef HAVE_OPENCL
namespace ocl {
typedef Size_MatType SuperResolution_BTVL1;
OCL_PERF_TEST_P(SuperResolution_BTVL1, BTVL1,
Combine(Values(szSmall64, szSmall128),
Values(MatType(CV_8UC1), MatType(CV_8UC3))))
{
Size_MatType_t params = GetParam();
const Size size = get<0>(params);
const int type = get<1>(params);
Mat frame(size, type);
UMat dst(1, 1, 0);
declare.in(frame, WARMUP_RNG);
const int scale = 2;
const int iterations = 50;
const int temporalAreaRadius = 1;
Ptr<DenseOpticalFlowExt> opticalFlow(new ZeroOpticalFlow);
Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();
superRes->setScale(scale);
superRes->setIterations(iterations);
superRes->setTemporalAreaRadius(temporalAreaRadius);
superRes->setOpticalFlow(opticalFlow);
superRes->setInput(makePtr<OneFrameSource_CPU>(frame));
// skip first frame
superRes->nextFrame(dst);
OCL_TEST_CYCLE_N(10) superRes->nextFrame(dst);
SANITY_CHECK_NOTHING();
}
} // namespace ocl
#endif // HAVE_OPENCL
} // namespace
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// S. Farsiu , D. Robinson, M. Elad, P. Milanfar. Fast and robust multiframe super resolution.
// Dennis Mitzel, Thomas Pock, Thomas Schoenemann, Daniel Cremers. Video Super Resolution using Duality Based TV-L1 Optical Flow.
#include "precomp.hpp"
#include "opencl_kernels_superres.hpp"
using namespace cv;
using namespace cv::superres;
using namespace cv::superres::detail;
namespace
{
#ifdef HAVE_OPENCL
bool ocl_calcRelativeMotions(InputArrayOfArrays _forwardMotions, InputArrayOfArrays _backwardMotions,
OutputArrayOfArrays _relForwardMotions, OutputArrayOfArrays _relBackwardMotions,
int baseIdx, const Size & size)
{
std::vector<UMat> & forwardMotions = *(std::vector<UMat> *)_forwardMotions.getObj(),
& backwardMotions = *(std::vector<UMat> *)_backwardMotions.getObj(),
& relForwardMotions = *(std::vector<UMat> *)_relForwardMotions.getObj(),
& relBackwardMotions = *(std::vector<UMat> *)_relBackwardMotions.getObj();
const int count = static_cast<int>(forwardMotions.size());
relForwardMotions.resize(count);
relForwardMotions[baseIdx].create(size, CV_32FC2);
relForwardMotions[baseIdx].setTo(Scalar::all(0));
relBackwardMotions.resize(count);
relBackwardMotions[baseIdx].create(size, CV_32FC2);
relBackwardMotions[baseIdx].setTo(Scalar::all(0));
for (int i = baseIdx - 1; i >= 0; --i)
{
add(relForwardMotions[i + 1], forwardMotions[i], relForwardMotions[i]);
add(relBackwardMotions[i + 1], backwardMotions[i + 1], relBackwardMotions[i]);
}
for (int i = baseIdx + 1; i < count; ++i)
{
add(relForwardMotions[i - 1], backwardMotions[i], relForwardMotions[i]);
add(relBackwardMotions[i - 1], forwardMotions[i - 1], relBackwardMotions[i]);
}
return true;
}
#endif
void calcRelativeMotions(InputArrayOfArrays _forwardMotions, InputArrayOfArrays _backwardMotions,
OutputArrayOfArrays _relForwardMotions, OutputArrayOfArrays _relBackwardMotions,
int baseIdx, const Size & size)
{
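// Chain the pairwise frame-to-frame motions into motions relative to the base frame:
// the base index gets zero motion, frames before/after it accumulate the per-step motions.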
CV_OCL_RUN(_forwardMotions.isUMatVector() && _backwardMotions.isUMatVector() &&
_relForwardMotions.isUMatVector() && _relBackwardMotions.isUMatVector(),
ocl_calcRelativeMotions(_forwardMotions, _backwardMotions, _relForwardMotions,
_relBackwardMotions, baseIdx, size))
std::vector<Mat> & forwardMotions = *(std::vector<Mat> *)_forwardMotions.getObj(),
& backwardMotions = *(std::vector<Mat> *)_backwardMotions.getObj(),
& relForwardMotions = *(std::vector<Mat> *)_relForwardMotions.getObj(),
& relBackwardMotions = *(std::vector<Mat> *)_relBackwardMotions.getObj();
const int count = static_cast<int>(forwardMotions.size());
relForwardMotions.resize(count);
relForwardMotions[baseIdx].create(size, CV_32FC2);
relForwardMotions[baseIdx].setTo(Scalar::all(0));
relBackwardMotions.resize(count);
relBackwardMotions[baseIdx].create(size, CV_32FC2);
relBackwardMotions[baseIdx].setTo(Scalar::all(0));
for (int i = baseIdx - 1; i >= 0; --i)
{
add(relForwardMotions[i + 1], forwardMotions[i], relForwardMotions[i]);
add(relBackwardMotions[i + 1], backwardMotions[i + 1], relBackwardMotions[i]);
}
for (int i = baseIdx + 1; i < count; ++i)
{
add(relForwardMotions[i - 1], backwardMotions[i], relForwardMotions[i]);
add(relBackwardMotions[i - 1], forwardMotions[i - 1], relBackwardMotions[i]);
}
}
#ifdef HAVE_OPENCL
bool ocl_upscaleMotions(InputArrayOfArrays _lowResMotions, OutputArrayOfArrays _highResMotions, int scale)
{
std::vector<UMat> & lowResMotions = *(std::vector<UMat> *)_lowResMotions.getObj(),
& highResMotions = *(std::vector<UMat> *)_highResMotions.getObj();
highResMotions.resize(lowResMotions.size());
for (size_t i = 0; i < lowResMotions.size(); ++i)
{
resize(lowResMotions[i], highResMotions[i], Size(), scale, scale, INTER_LINEAR); // TODO
multiply(highResMotions[i], Scalar::all(scale), highResMotions[i]);
}
return true;
}
#endif
void upscaleMotions(InputArrayOfArrays _lowResMotions, OutputArrayOfArrays _highResMotions, int scale)
{
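// Resize each low-resolution flow field to the high-resolution grid and scale the motion
// vectors themselves by the same factor so the displacements stay consistent.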
CV_OCL_RUN(_lowResMotions.isUMatVector() && _highResMotions.isUMatVector(),
ocl_upscaleMotions(_lowResMotions, _highResMotions, scale))
std::vector<Mat> & lowResMotions = *(std::vector<Mat> *)_lowResMotions.getObj(),
& highResMotions = *(std::vector<Mat> *)_highResMotions.getObj();
highResMotions.resize(lowResMotions.size());
for (size_t i = 0; i < lowResMotions.size(); ++i)
{
resize(lowResMotions[i], highResMotions[i], Size(), scale, scale, INTER_CUBIC);
multiply(highResMotions[i], Scalar::all(scale), highResMotions[i]);
}
}
#ifdef HAVE_OPENCL
bool ocl_buildMotionMaps(InputArray _forwardMotion, InputArray _backwardMotion,
OutputArray _forwardMap, OutputArray _backwardMap)
{
ocl::Kernel k("buildMotionMaps", ocl::superres::superres_btvl1_oclsrc);
if (k.empty())
return false;
UMat forwardMotion = _forwardMotion.getUMat(), backwardMotion = _backwardMotion.getUMat();
Size size = forwardMotion.size();
_forwardMap.create(size, CV_32FC2);
_backwardMap.create(size, CV_32FC2);
UMat forwardMap = _forwardMap.getUMat(), backwardMap = _backwardMap.getUMat();
k.args(ocl::KernelArg::ReadOnlyNoSize(forwardMotion),
ocl::KernelArg::ReadOnlyNoSize(backwardMotion),
ocl::KernelArg::WriteOnlyNoSize(forwardMap),
ocl::KernelArg::WriteOnly(backwardMap));
size_t globalsize[2] = { (size_t)size.width, (size_t)size.height };
return k.run(2, globalsize, NULL, false);
}
#endif
void buildMotionMaps(InputArray _forwardMotion, InputArray _backwardMotion,
OutputArray _forwardMap, OutputArray _backwardMap)
{
CV_OCL_RUN(_forwardMap.isUMat() && _backwardMap.isUMat(),
ocl_buildMotionMaps(_forwardMotion, _backwardMotion, _forwardMap,
_backwardMap));
Mat forwardMotion = _forwardMotion.getMat(), backwardMotion = _backwardMotion.getMat();
_forwardMap.create(forwardMotion.size(), CV_32FC2);
_backwardMap.create(forwardMotion.size(), CV_32FC2);
Mat forwardMap = _forwardMap.getMat(), backwardMap = _backwardMap.getMat();
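// remap() pulls pixels from its source, so the map used to warp forward in time is built from
// the backward motion field (and vice versa): map(x, y) = (x, y) + oppositeMotion(x, y).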
for (int y = 0; y < forwardMotion.rows; ++y)
{
const Point2f* forwardMotionRow = forwardMotion.ptr<Point2f>(y);
const Point2f* backwardMotionRow = backwardMotion.ptr<Point2f>(y);
Point2f* forwardMapRow = forwardMap.ptr<Point2f>(y);
Point2f* backwardMapRow = backwardMap.ptr<Point2f>(y);
for (int x = 0; x < forwardMotion.cols; ++x)
{
Point2f base(static_cast<float>(x), static_cast<float>(y));
forwardMapRow[x] = base + backwardMotionRow[x];
backwardMapRow[x] = base + forwardMotionRow[x];
}
}
}
template <typename T>
void upscaleImpl(InputArray _src, OutputArray _dst, int scale)
{
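// Zero-insertion upsampling: each source pixel is copied to the top-left corner of its
// scale x scale block and the remaining positions stay zero (the transpose of decimation, Dt).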
Mat src = _src.getMat();
_dst.create(src.rows * scale, src.cols * scale, src.type());
_dst.setTo(Scalar::all(0));
Mat dst = _dst.getMat();
for (int y = 0, Y = 0; y < src.rows; ++y, Y += scale)
{
const T * const srcRow = src.ptr<T>(y);
T * const dstRow = dst.ptr<T>(Y);
for (int x = 0, X = 0; x < src.cols; ++x, X += scale)
dstRow[X] = srcRow[x];
}
}
#ifdef HAVE_OPENCL
static bool ocl_upscale(InputArray _src, OutputArray _dst, int scale)
{
int type = _src.type(), cn = CV_MAT_CN(type);
ocl::Kernel k("upscale", ocl::superres::superres_btvl1_oclsrc,
format("-D cn=%d", cn));
if (k.empty())
return false;
UMat src = _src.getUMat();
_dst.create(src.rows * scale, src.cols * scale, type);
_dst.setTo(Scalar::all(0));
UMat dst = _dst.getUMat();
k.args(ocl::KernelArg::ReadOnly(src),
ocl::KernelArg::ReadWriteNoSize(dst), scale);
size_t globalsize[2] = { (size_t)src.cols, (size_t)src.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
typedef struct _Point4f { float ar[4]; } Point4f;
void upscale(InputArray _src, OutputArray _dst, int scale)
{
int cn = _src.channels();
CV_Assert( cn == 1 || cn == 3 || cn == 4 );
CV_OCL_RUN(_dst.isUMat(),
ocl_upscale(_src, _dst, scale))
typedef void (*func_t)(InputArray src, OutputArray dst, int scale);
static const func_t funcs[] =
{
0, upscaleImpl<float>, 0, upscaleImpl<Point3f>, upscaleImpl<Point4f>
};
const func_t func = funcs[cn];
CV_Assert(func != 0);
func(_src, _dst, scale);
}
inline float diffSign(float a, float b)
{
return a > b ? 1.0f : a < b ? -1.0f : 0.0f;
}
Point3f diffSign(Point3f a, Point3f b)
{
return Point3f(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f
);
}
#ifdef HAVE_OPENCL
static bool ocl_diffSign(InputArray _src1, OutputArray _src2, OutputArray _dst)
{
ocl::Kernel k("diffSign", ocl::superres::superres_btvl1_oclsrc);
if (k.empty())
return false;
UMat src1 = _src1.getUMat(), src2 = _src2.getUMat();
_dst.create(src1.size(), src1.type());
UMat dst = _dst.getUMat();
int cn = src1.channels();
k.args(ocl::KernelArg::ReadOnlyNoSize(src1),
ocl::KernelArg::ReadOnlyNoSize(src2),
ocl::KernelArg::WriteOnly(dst, cn));
size_t globalsize[2] = { (size_t)src1.cols * cn, (size_t)src1.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
void diffSign(InputArray _src1, OutputArray _src2, OutputArray _dst)
{
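// Element-wise sign(src1 - src2): every output value is +1, -1 or 0, i.e. the subgradient of
// the L1 data term used by the BTV-L1 iterations.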
CV_OCL_RUN(_dst.isUMat(),
ocl_diffSign(_src1, _src2, _dst))
Mat src1 = _src1.getMat(), src2 = _src2.getMat();
_dst.create(src1.size(), src1.type());
Mat dst = _dst.getMat();
const int count = src1.cols * src1.channels();
for (int y = 0; y < src1.rows; ++y)
{
const float * const src1Ptr = src1.ptr<float>(y);
const float * const src2Ptr = src2.ptr<float>(y);
float* dstPtr = dst.ptr<float>(y);
for (int x = 0; x < count; ++x)
dstPtr[x] = diffSign(src1Ptr[x], src2Ptr[x]);
}
}
void calcBtvWeights(int btvKernelSize, double alpha, std::vector<float>& btvWeights)
{
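// Bilateral-TV weights decay exponentially with the offset magnitude: w(m, l) = alpha^(|m| + |l|).
// Only the half-plane of (m, l) offsets visited by the regularization loop below is filled in.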
const size_t size = btvKernelSize * btvKernelSize;
btvWeights.resize(size);
const int ksize = (btvKernelSize - 1) / 2;
const float alpha_f = static_cast<float>(alpha);
for (int m = 0, ind = 0; m <= ksize; ++m)
{
for (int l = ksize; l + m >= 0; --l, ++ind)
btvWeights[ind] = pow(alpha_f, std::abs(m) + std::abs(l));
}
}
template <typename T>
struct BtvRegularizationBody : ParallelLoopBody
{
void operator ()(const Range& range) const CV_OVERRIDE;
Mat src;
mutable Mat dst;
int ksize;
const float* btvWeights;
};
template <typename T>
void BtvRegularizationBody<T>::operator ()(const Range& range) const
{
for (int i = range.start; i < range.end; ++i)
{
const T * const srcRow = src.ptr<T>(i);
T * const dstRow = dst.ptr<T>(i);
for(int j = ksize; j < src.cols - ksize; ++j)
{
const T srcVal = srcRow[j];
for (int m = 0, ind = 0; m <= ksize; ++m)
{
const T* srcRow2 = src.ptr<T>(i - m);
const T* srcRow3 = src.ptr<T>(i + m);
for (int l = ksize; l + m >= 0; --l, ++ind)
dstRow[j] += btvWeights[ind] * (diffSign(srcVal, srcRow3[j + l])
- diffSign(srcRow2[j - l], srcVal));
}
}
}
}
template <typename T>
void calcBtvRegularizationImpl(InputArray _src, OutputArray _dst, int btvKernelSize, const std::vector<float>& btvWeights)
{
Mat src = _src.getMat();
_dst.create(src.size(), src.type());
_dst.setTo(Scalar::all(0));
Mat dst = _dst.getMat();
const int ksize = (btvKernelSize - 1) / 2;
BtvRegularizationBody<T> body;
body.src = src;
body.dst = dst;
body.ksize = ksize;
body.btvWeights = &btvWeights[0];
parallel_for_(Range(ksize, src.rows - ksize), body);
}
#ifdef HAVE_OPENCL
static bool ocl_calcBtvRegularization(InputArray _src, OutputArray _dst, int btvKernelSize, const UMat & ubtvWeights)
{
int cn = _src.channels();
ocl::Kernel k("calcBtvRegularization", ocl::superres::superres_btvl1_oclsrc,
format("-D cn=%d", cn));
if (k.empty())
return false;
UMat src = _src.getUMat();
_dst.create(src.size(), src.type());
_dst.setTo(Scalar::all(0));
UMat dst = _dst.getUMat();
const int ksize = (btvKernelSize - 1) / 2;
k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst),
ksize, ocl::KernelArg::PtrReadOnly(ubtvWeights));
size_t globalsize[2] = { (size_t)src.cols, (size_t)src.rows };
return k.run(2, globalsize, NULL, false);
}
#endif
void calcBtvRegularization(InputArray _src, OutputArray _dst, int btvKernelSize,
const std::vector<float>& btvWeights, const UMat & ubtvWeights)
{
CV_OCL_RUN(_dst.isUMat(),
ocl_calcBtvRegularization(_src, _dst, btvKernelSize, ubtvWeights))
CV_UNUSED(ubtvWeights);
if (_src.channels() == 1)
{
calcBtvRegularizationImpl<float>(_src, _dst, btvKernelSize, btvWeights);
}
else if (_src.channels() == 3)
{
calcBtvRegularizationImpl<Point3f>(_src, _dst, btvKernelSize, btvWeights);
}
else
{
CV_Error(Error::StsBadArg, "Unsupported number of channels in _src");
}
}
class BTVL1_Base : public cv::superres::SuperResolution
{
public:
BTVL1_Base();
void process(InputArrayOfArrays src, OutputArray dst, InputArrayOfArrays forwardMotions,
InputArrayOfArrays backwardMotions, int baseIdx);
void collectGarbage() CV_OVERRIDE;
inline int getScale() const CV_OVERRIDE { return scale_; }
inline void setScale(int val) CV_OVERRIDE { scale_ = val; }
inline int getIterations() const CV_OVERRIDE { return iterations_; }
inline void setIterations(int val) CV_OVERRIDE { iterations_ = val; }
inline double getTau() const CV_OVERRIDE { return tau_; }
inline void setTau(double val) CV_OVERRIDE { tau_ = val; }
inline double getLabmda() const CV_OVERRIDE { return lambda_; }
inline void setLabmda(double val) CV_OVERRIDE { lambda_ = val; }
inline double getAlpha() const CV_OVERRIDE { return alpha_; }
inline void setAlpha(double val) CV_OVERRIDE { alpha_ = val; }
inline int getKernelSize() const CV_OVERRIDE { return btvKernelSize_; }
inline void setKernelSize(int val) CV_OVERRIDE { btvKernelSize_ = val; }
inline int getBlurKernelSize() const CV_OVERRIDE { return blurKernelSize_; }
inline void setBlurKernelSize(int val) CV_OVERRIDE { blurKernelSize_ = val; }
inline double getBlurSigma() const CV_OVERRIDE { return blurSigma_; }
inline void setBlurSigma(double val) CV_OVERRIDE { blurSigma_ = val; }
inline int getTemporalAreaRadius() const CV_OVERRIDE { return temporalAreaRadius_; }
inline void setTemporalAreaRadius(int val) CV_OVERRIDE { temporalAreaRadius_ = val; }
inline Ptr<cv::superres::DenseOpticalFlowExt> getOpticalFlow() const CV_OVERRIDE { return opticalFlow_; }
inline void setOpticalFlow(const Ptr<cv::superres::DenseOpticalFlowExt>& val) CV_OVERRIDE { opticalFlow_ = val; }
protected:
int scale_;
int iterations_;
double tau_;
double lambda_;
double alpha_;
int btvKernelSize_;
int blurKernelSize_;
double blurSigma_;
int temporalAreaRadius_; // not used in some implementations
Ptr<cv::superres::DenseOpticalFlowExt> opticalFlow_;
private:
bool ocl_process(InputArrayOfArrays src, OutputArray dst, InputArrayOfArrays forwardMotions,
InputArrayOfArrays backwardMotions, int baseIdx);
//Ptr<FilterEngine> filter_;
int curBlurKernelSize_;
double curBlurSigma_;
int curSrcType_;
std::vector<float> btvWeights_;
UMat ubtvWeights_;
int curBtvKernelSize_;
double curAlpha_;
// Mat
std::vector<Mat> lowResForwardMotions_;
std::vector<Mat> lowResBackwardMotions_;
std::vector<Mat> highResForwardMotions_;
std::vector<Mat> highResBackwardMotions_;
std::vector<Mat> forwardMaps_;
std::vector<Mat> backwardMaps_;
Mat highRes_;
Mat diffTerm_, regTerm_;
Mat a_, b_, c_;
#ifdef HAVE_OPENCL
// UMat
std::vector<UMat> ulowResForwardMotions_;
std::vector<UMat> ulowResBackwardMotions_;
std::vector<UMat> uhighResForwardMotions_;
std::vector<UMat> uhighResBackwardMotions_;
std::vector<UMat> uforwardMaps_;
std::vector<UMat> ubackwardMaps_;
UMat uhighRes_;
UMat udiffTerm_, uregTerm_;
UMat ua_, ub_, uc_;
#endif
};
BTVL1_Base::BTVL1_Base()
{
scale_ = 4;
iterations_ = 180;
lambda_ = 0.03;
tau_ = 1.3;
alpha_ = 0.7;
btvKernelSize_ = 7;
blurKernelSize_ = 5;
blurSigma_ = 0.0;
temporalAreaRadius_ = 0;
opticalFlow_ = createOptFlow_Farneback();
curBlurKernelSize_ = -1;
curBlurSigma_ = -1.0;
curSrcType_ = -1;
curBtvKernelSize_ = -1;
curAlpha_ = -1.0;
}
#ifdef HAVE_OPENCL
bool BTVL1_Base::ocl_process(InputArrayOfArrays _src, OutputArray _dst, InputArrayOfArrays _forwardMotions,
InputArrayOfArrays _backwardMotions, int baseIdx)
{
std::vector<UMat> & src = *(std::vector<UMat> *)_src.getObj(),
& forwardMotions = *(std::vector<UMat> *)_forwardMotions.getObj(),
& backwardMotions = *(std::vector<UMat> *)_backwardMotions.getObj();
// update blur filter and btv weights
if (blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
{
//filter_ = createGaussianFilter(src[0].type(), Size(blurKernelSize_, blurKernelSize_), blurSigma_);
curBlurKernelSize_ = blurKernelSize_;
curBlurSigma_ = blurSigma_;
curSrcType_ = src[0].type();
}
if (btvWeights_.empty() || btvKernelSize_ != curBtvKernelSize_ || alpha_ != curAlpha_)
{
calcBtvWeights(btvKernelSize_, alpha_, btvWeights_);
Mat(btvWeights_, true).copyTo(ubtvWeights_);
curBtvKernelSize_ = btvKernelSize_;
curAlpha_ = alpha_;
}
// calc high res motions
calcRelativeMotions(forwardMotions, backwardMotions, ulowResForwardMotions_, ulowResBackwardMotions_, baseIdx, src[0].size());
upscaleMotions(ulowResForwardMotions_, uhighResForwardMotions_, scale_);
upscaleMotions(ulowResBackwardMotions_, uhighResBackwardMotions_, scale_);
uforwardMaps_.resize(uhighResForwardMotions_.size());
ubackwardMaps_.resize(uhighResForwardMotions_.size());
for (size_t i = 0; i < uhighResForwardMotions_.size(); ++i)
buildMotionMaps(uhighResForwardMotions_[i], uhighResBackwardMotions_[i], uforwardMaps_[i], ubackwardMaps_[i]);
// initial estimation
const Size lowResSize = src[0].size();
const Size highResSize(lowResSize.width * scale_, lowResSize.height * scale_);
resize(src[baseIdx], uhighRes_, highResSize, 0, 0, INTER_LINEAR); // TODO
// iterations
udiffTerm_.create(highResSize, uhighRes_.type());
ua_.create(highResSize, uhighRes_.type());
ub_.create(highResSize, uhighRes_.type());
uc_.create(lowResSize, uhighRes_.type());
for (int i = 0; i < iterations_; ++i)
{
udiffTerm_.setTo(Scalar::all(0));
for (size_t k = 0; k < src.size(); ++k)
{
// a = M * Ih
remap(uhighRes_, ua_, ubackwardMaps_[k], noArray(), INTER_NEAREST);
// b = HM * Ih
GaussianBlur(ua_, ub_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
// c = DHM * Ih
resize(ub_, uc_, lowResSize, 0, 0, INTER_NEAREST);
diffSign(src[k], uc_, uc_);
// a = Dt * diff
upscale(uc_, ua_, scale_);
// b = HtDt * diff
GaussianBlur(ua_, ub_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
// a = MtHtDt * diff
remap(ub_, ua_, uforwardMaps_[k], noArray(), INTER_NEAREST);
add(udiffTerm_, ua_, udiffTerm_);
}
if (lambda_ > 0)
{
calcBtvRegularization(uhighRes_, uregTerm_, btvKernelSize_, btvWeights_, ubtvWeights_);
addWeighted(udiffTerm_, 1.0, uregTerm_, -lambda_, 0.0, udiffTerm_);
}
addWeighted(uhighRes_, 1.0, udiffTerm_, tau_, 0.0, uhighRes_);
}
Rect inner(btvKernelSize_, btvKernelSize_, uhighRes_.cols - 2 * btvKernelSize_, uhighRes_.rows - 2 * btvKernelSize_);
uhighRes_(inner).copyTo(_dst);
return true;
}
#endif
void BTVL1_Base::process(InputArrayOfArrays _src, OutputArray _dst, InputArrayOfArrays _forwardMotions,
InputArrayOfArrays _backwardMotions, int baseIdx)
{
CV_INSTRUMENT_REGION();
CV_Assert( scale_ > 1 );
CV_Assert( iterations_ > 0 );
CV_Assert( tau_ > 0.0 );
CV_Assert( alpha_ > 0.0 );
CV_Assert( btvKernelSize_ > 0 );
CV_Assert( blurKernelSize_ > 0 );
CV_Assert( blurSigma_ >= 0.0 );
CV_OCL_RUN(_src.isUMatVector() && _dst.isUMat() && _forwardMotions.isUMatVector() &&
_backwardMotions.isUMatVector(),
ocl_process(_src, _dst, _forwardMotions, _backwardMotions, baseIdx))
std::vector<Mat> & src = *(std::vector<Mat> *)_src.getObj(),
& forwardMotions = *(std::vector<Mat> *)_forwardMotions.getObj(),
& backwardMotions = *(std::vector<Mat> *)_backwardMotions.getObj();
// update blur filter and btv weights
if (blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
{
//filter_ = createGaussianFilter(src[0].type(), Size(blurKernelSize_, blurKernelSize_), blurSigma_);
curBlurKernelSize_ = blurKernelSize_;
curBlurSigma_ = blurSigma_;
curSrcType_ = src[0].type();
}
if (btvWeights_.empty() || btvKernelSize_ != curBtvKernelSize_ || alpha_ != curAlpha_)
{
calcBtvWeights(btvKernelSize_, alpha_, btvWeights_);
curBtvKernelSize_ = btvKernelSize_;
curAlpha_ = alpha_;
}
// calc high res motions
calcRelativeMotions(forwardMotions, backwardMotions, lowResForwardMotions_, lowResBackwardMotions_, baseIdx, src[0].size());
upscaleMotions(lowResForwardMotions_, highResForwardMotions_, scale_);
upscaleMotions(lowResBackwardMotions_, highResBackwardMotions_, scale_);
forwardMaps_.resize(highResForwardMotions_.size());
backwardMaps_.resize(highResForwardMotions_.size());
for (size_t i = 0; i < highResForwardMotions_.size(); ++i)
buildMotionMaps(highResForwardMotions_[i], highResBackwardMotions_[i], forwardMaps_[i], backwardMaps_[i]);
// initial estimation
const Size lowResSize = src[0].size();
const Size highResSize(lowResSize.width * scale_, lowResSize.height * scale_);
resize(src[baseIdx], highRes_, highResSize, 0, 0, INTER_CUBIC);
// iterations
diffTerm_.create(highResSize, highRes_.type());
a_.create(highResSize, highRes_.type());
b_.create(highResSize, highRes_.type());
c_.create(lowResSize, highRes_.type());
for (int i = 0; i < iterations_; ++i)
{
diffTerm_.setTo(Scalar::all(0));
for (size_t k = 0; k < src.size(); ++k)
{
// a = M * Ih
remap(highRes_, a_, backwardMaps_[k], noArray(), INTER_NEAREST);
// b = HM * Ih
GaussianBlur(a_, b_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
// c = DHM * Ih
resize(b_, c_, lowResSize, 0, 0, INTER_NEAREST);
diffSign(src[k], c_, c_);
// a = Dt * diff
upscale(c_, a_, scale_);
// b = HtDt * diff
GaussianBlur(a_, b_, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
// a = MtHtDt * diff
remap(b_, a_, forwardMaps_[k], noArray(), INTER_NEAREST);
add(diffTerm_, a_, diffTerm_);
}
if (lambda_ > 0)
{
calcBtvRegularization(highRes_, regTerm_, btvKernelSize_, btvWeights_, ubtvWeights_);
addWeighted(diffTerm_, 1.0, regTerm_, -lambda_, 0.0, diffTerm_);
}
addWeighted(highRes_, 1.0, diffTerm_, tau_, 0.0, highRes_);
}
Rect inner(btvKernelSize_, btvKernelSize_, highRes_.cols - 2 * btvKernelSize_, highRes_.rows - 2 * btvKernelSize_);
highRes_(inner).copyTo(_dst);
}
void BTVL1_Base::collectGarbage()
{
// Mat
lowResForwardMotions_.clear();
lowResBackwardMotions_.clear();
highResForwardMotions_.clear();
highResBackwardMotions_.clear();
forwardMaps_.clear();
backwardMaps_.clear();
highRes_.release();
diffTerm_.release();
regTerm_.release();
a_.release();
b_.release();
c_.release();
#ifdef HAVE_OPENCL
// UMat
ulowResForwardMotions_.clear();
ulowResBackwardMotions_.clear();
uhighResForwardMotions_.clear();
uhighResBackwardMotions_.clear();
uforwardMaps_.clear();
ubackwardMaps_.clear();
uhighRes_.release();
udiffTerm_.release();
uregTerm_.release();
ua_.release();
ub_.release();
uc_.release();
#endif
}
////////////////////////////////////////////////////////////////////
class BTVL1 CV_FINAL : public BTVL1_Base
{
public:
BTVL1();
void collectGarbage() CV_OVERRIDE;
protected:
void initImpl(Ptr<FrameSource>& frameSource) CV_OVERRIDE;
bool ocl_initImpl(Ptr<FrameSource>& frameSource);
void processImpl(Ptr<FrameSource>& frameSource, OutputArray output) CV_OVERRIDE;
bool ocl_processImpl(Ptr<FrameSource>& frameSource, OutputArray output);
private:
void readNextFrame(Ptr<FrameSource>& frameSource);
bool ocl_readNextFrame(Ptr<FrameSource>& frameSource);
void processFrame(int idx);
bool ocl_processFrame(int idx);
int storePos_;
int procPos_;
int outPos_;
// Mat
Mat curFrame_;
Mat prevFrame_;
std::vector<Mat> frames_;
std::vector<Mat> forwardMotions_;
std::vector<Mat> backwardMotions_;
std::vector<Mat> outputs_;
std::vector<Mat> srcFrames_;
std::vector<Mat> srcForwardMotions_;
std::vector<Mat> srcBackwardMotions_;
Mat finalOutput_;
#ifdef HAVE_OPENCL
// UMat
UMat ucurFrame_;
UMat uprevFrame_;
std::vector<UMat> uframes_;
std::vector<UMat> uforwardMotions_;
std::vector<UMat> ubackwardMotions_;
std::vector<UMat> uoutputs_;
std::vector<UMat> usrcFrames_;
std::vector<UMat> usrcForwardMotions_;
std::vector<UMat> usrcBackwardMotions_;
#endif
};
BTVL1::BTVL1()
{
temporalAreaRadius_ = 4;
procPos_ = 0;
outPos_ = 0;
storePos_ = 0;
}
void BTVL1::collectGarbage()
{
// Mat
curFrame_.release();
prevFrame_.release();
frames_.clear();
forwardMotions_.clear();
backwardMotions_.clear();
outputs_.clear();
srcFrames_.clear();
srcForwardMotions_.clear();
srcBackwardMotions_.clear();
finalOutput_.release();
#ifdef HAVE_OPENCL
// UMat
ucurFrame_.release();
uprevFrame_.release();
uframes_.clear();
uforwardMotions_.clear();
ubackwardMotions_.clear();
uoutputs_.clear();
usrcFrames_.clear();
usrcForwardMotions_.clear();
usrcBackwardMotions_.clear();
#endif
SuperResolution::collectGarbage();
BTVL1_Base::collectGarbage();
}
#ifdef HAVE_OPENCL
bool BTVL1::ocl_initImpl(Ptr<FrameSource>& frameSource)
{
const int cacheSize = 2 * temporalAreaRadius_ + 1;
uframes_.resize(cacheSize);
uforwardMotions_.resize(cacheSize);
ubackwardMotions_.resize(cacheSize);
uoutputs_.resize(cacheSize);
storePos_ = -1;
for (int t = -temporalAreaRadius_; t <= temporalAreaRadius_; ++t)
readNextFrame(frameSource);
for (int i = 0; i <= temporalAreaRadius_; ++i)
processFrame(i);
procPos_ = temporalAreaRadius_;
outPos_ = -1;
return true;
}
#endif
void BTVL1::initImpl(Ptr<FrameSource>& frameSource)
{
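// Prime a sliding window of 2 * temporalAreaRadius_ + 1 cached frames (plus their pairwise
// motions) so every processed frame can use up to temporalAreaRadius_ neighbours on each side.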
const int cacheSize = 2 * temporalAreaRadius_ + 1;
frames_.resize(cacheSize);
forwardMotions_.resize(cacheSize);
backwardMotions_.resize(cacheSize);
outputs_.resize(cacheSize);
CV_OCL_RUN(isUmat_,
ocl_initImpl(frameSource))
storePos_ = -1;
for (int t = -temporalAreaRadius_; t <= temporalAreaRadius_; ++t)
readNextFrame(frameSource);
for (int i = 0; i <= temporalAreaRadius_; ++i)
processFrame(i);
procPos_ = temporalAreaRadius_;
outPos_ = -1;
}
#ifdef HAVE_OPENCL
bool BTVL1::ocl_processImpl(Ptr<FrameSource>& /*frameSource*/, OutputArray _output)
{
const UMat& curOutput = at(outPos_, uoutputs_);
curOutput.convertTo(_output, CV_8U);
return true;
}
#endif
void BTVL1::processImpl(Ptr<FrameSource>& frameSource, OutputArray _output)
{
CV_INSTRUMENT_REGION();
if (outPos_ >= storePos_)
{
_output.release();
return;
}
readNextFrame(frameSource);
if (procPos_ < storePos_)
{
++procPos_;
processFrame(procPos_);
}
++outPos_;
CV_OCL_RUN(isUmat_,
ocl_processImpl(frameSource, _output))
const Mat& curOutput = at(outPos_, outputs_);
if (_output.kind() < _InputArray::OPENGL_BUFFER || _output.isUMat())
curOutput.convertTo(_output, CV_8U);
else
{
curOutput.convertTo(finalOutput_, CV_8U);
arrCopy(finalOutput_, _output);
}
}
#ifdef HAVE_OPENCL
bool BTVL1::ocl_readNextFrame(Ptr<FrameSource>& /*frameSource*/)
{
ucurFrame_.convertTo(at(storePos_, uframes_), CV_32F);
if (storePos_ > 0)
{
opticalFlow_->calc(uprevFrame_, ucurFrame_, at(storePos_ - 1, uforwardMotions_));
opticalFlow_->calc(ucurFrame_, uprevFrame_, at(storePos_, ubackwardMotions_));
}
ucurFrame_.copyTo(uprevFrame_);
return true;
}
#endif
void BTVL1::readNextFrame(Ptr<FrameSource>& frameSource)
{
CV_INSTRUMENT_REGION();
frameSource->nextFrame(curFrame_);
if (curFrame_.empty())
return;
#ifdef HAVE_OPENCL
if (isUmat_)
curFrame_.copyTo(ucurFrame_);
#endif
++storePos_;
CV_OCL_RUN(isUmat_,
ocl_readNextFrame(frameSource))
curFrame_.convertTo(at(storePos_, frames_), CV_32F);
if (storePos_ > 0)
{
opticalFlow_->calc(prevFrame_, curFrame_, at(storePos_ - 1, forwardMotions_));
opticalFlow_->calc(curFrame_, prevFrame_, at(storePos_, backwardMotions_));
}
curFrame_.copyTo(prevFrame_);
}
#ifdef HAVE_OPENCL
bool BTVL1::ocl_processFrame(int idx)
{
const int startIdx = std::max(idx - temporalAreaRadius_, 0);
const int procIdx = idx;
const int endIdx = std::min(startIdx + 2 * temporalAreaRadius_, storePos_);
const int count = endIdx - startIdx + 1;
usrcFrames_.resize(count);
usrcForwardMotions_.resize(count);
usrcBackwardMotions_.resize(count);
int baseIdx = -1;
for (int i = startIdx, k = 0; i <= endIdx; ++i, ++k)
{
if (i == procIdx)
baseIdx = k;
usrcFrames_[k] = at(i, uframes_);
if (i < endIdx)
usrcForwardMotions_[k] = at(i, uforwardMotions_);
if (i > startIdx)
usrcBackwardMotions_[k] = at(i, ubackwardMotions_);
}
process(usrcFrames_, at(idx, uoutputs_), usrcForwardMotions_, usrcBackwardMotions_, baseIdx);
return true;
}
#endif
void BTVL1::processFrame(int idx)
{
CV_INSTRUMENT_REGION();
CV_OCL_RUN(isUmat_,
ocl_processFrame(idx))
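// Gather the cached frames around idx and remember where idx sits inside that window (baseIdx);
// the neighbouring frames and their motions drive the multi-frame reconstruction in process().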
const int startIdx = std::max(idx - temporalAreaRadius_, 0);
const int procIdx = idx;
const int endIdx = std::min(startIdx + 2 * temporalAreaRadius_, storePos_);
const int count = endIdx - startIdx + 1;
srcFrames_.resize(count);
srcForwardMotions_.resize(count);
srcBackwardMotions_.resize(count);
int baseIdx = -1;
for (int i = startIdx, k = 0; i <= endIdx; ++i, ++k)
{
if (i == procIdx)
baseIdx = k;
srcFrames_[k] = at(i, frames_);
if (i < endIdx)
srcForwardMotions_[k] = at(i, forwardMotions_);
if (i > startIdx)
srcBackwardMotions_[k] = at(i, backwardMotions_);
}
process(srcFrames_, at(idx, outputs_), srcForwardMotions_, srcBackwardMotions_, baseIdx);
}
}
Ptr<cv::superres::SuperResolution> cv::superres::createSuperResolution_BTVL1()
{
return makePtr<BTVL1>();
}
// S. Farsiu , D. Robinson, M. Elad, P. Milanfar. Fast and robust multiframe super resolution.
// Dennis Mitzel, Thomas Pock, Thomas Schoenemann, Daniel Cremers. Video Super Resolution using Duality Based TV-L1 Optical Flow.
#include "precomp.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::superres;
using namespace cv::superres::detail;
#if !defined(HAVE_CUDA) || !defined(HAVE_OPENCV_CUDAARITHM) || !defined(HAVE_OPENCV_CUDAWARPING) || !defined(HAVE_OPENCV_CUDAFILTERS)
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_CUDA()
{
CV_Error(Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
}
#else // HAVE_CUDA
namespace btv_l1_cudev
{
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf backwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY);
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, cudaStream_t stream);
void loadBtvWeights(const float* weights, size_t count);
template <int cn> void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize);
}
namespace
{
void calcRelativeMotions(const std::vector<std::pair<GpuMat, GpuMat> >& forwardMotions, const std::vector<std::pair<GpuMat, GpuMat> >& backwardMotions,
std::vector<std::pair<GpuMat, GpuMat> >& relForwardMotions, std::vector<std::pair<GpuMat, GpuMat> >& relBackwardMotions,
int baseIdx, Size size)
{
const int count = static_cast<int>(forwardMotions.size());
relForwardMotions.resize(count);
relForwardMotions[baseIdx].first.create(size, CV_32FC1);
relForwardMotions[baseIdx].first.setTo(Scalar::all(0));
relForwardMotions[baseIdx].second.create(size, CV_32FC1);
relForwardMotions[baseIdx].second.setTo(Scalar::all(0));
relBackwardMotions.resize(count);
relBackwardMotions[baseIdx].first.create(size, CV_32FC1);
relBackwardMotions[baseIdx].first.setTo(Scalar::all(0));
relBackwardMotions[baseIdx].second.create(size, CV_32FC1);
relBackwardMotions[baseIdx].second.setTo(Scalar::all(0));
for (int i = baseIdx - 1; i >= 0; --i)
{
cuda::add(relForwardMotions[i + 1].first, forwardMotions[i].first, relForwardMotions[i].first);
cuda::add(relForwardMotions[i + 1].second, forwardMotions[i].second, relForwardMotions[i].second);
cuda::add(relBackwardMotions[i + 1].first, backwardMotions[i + 1].first, relBackwardMotions[i].first);
cuda::add(relBackwardMotions[i + 1].second, backwardMotions[i + 1].second, relBackwardMotions[i].second);
}
for (int i = baseIdx + 1; i < count; ++i)
{
cuda::add(relForwardMotions[i - 1].first, backwardMotions[i].first, relForwardMotions[i].first);
cuda::add(relForwardMotions[i - 1].second, backwardMotions[i].second, relForwardMotions[i].second);
cuda::add(relBackwardMotions[i - 1].first, forwardMotions[i - 1].first, relBackwardMotions[i].first);
cuda::add(relBackwardMotions[i - 1].second, forwardMotions[i - 1].second, relBackwardMotions[i].second);
}
}
void upscaleMotions(const std::vector<std::pair<GpuMat, GpuMat> >& lowResMotions, std::vector<std::pair<GpuMat, GpuMat> >& highResMotions, int scale)
{
highResMotions.resize(lowResMotions.size());
for (size_t i = 0; i < lowResMotions.size(); ++i)
{
cuda::resize(lowResMotions[i].first, highResMotions[i].first, Size(), scale, scale, INTER_CUBIC);
cuda::resize(lowResMotions[i].second, highResMotions[i].second, Size(), scale, scale, INTER_CUBIC);
cuda::multiply(highResMotions[i].first, Scalar::all(scale), highResMotions[i].first);
cuda::multiply(highResMotions[i].second, Scalar::all(scale), highResMotions[i].second);
}
}
void buildMotionMaps(const std::pair<GpuMat, GpuMat>& forwardMotion, const std::pair<GpuMat, GpuMat>& backwardMotion,
std::pair<GpuMat, GpuMat>& forwardMap, std::pair<GpuMat, GpuMat>& backwardMap)
{
forwardMap.first.create(forwardMotion.first.size(), CV_32FC1);
forwardMap.second.create(forwardMotion.first.size(), CV_32FC1);
backwardMap.first.create(forwardMotion.first.size(), CV_32FC1);
backwardMap.second.create(forwardMotion.first.size(), CV_32FC1);
btv_l1_cudev::buildMotionMaps(forwardMotion.first, forwardMotion.second,
backwardMotion.first, backwardMotion.second,
forwardMap.first, forwardMap.second,
backwardMap.first, backwardMap.second);
}
void upscale(const GpuMat& src, GpuMat& dst, int scale, Stream& stream)
{
typedef void (*func_t)(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
static const func_t funcs[] =
{
0, btv_l1_cudev::upscale<1>, 0, btv_l1_cudev::upscale<3>, btv_l1_cudev::upscale<4>
};
CV_Assert( src.channels() == 1 || src.channels() == 3 || src.channels() == 4 );
dst.create(src.rows * scale, src.cols * scale, src.type());
dst.setTo(Scalar::all(0));
const func_t func = funcs[src.channels()];
func(src, dst, scale, StreamAccessor::getStream(stream));
}
void diffSign(const GpuMat& src1, const GpuMat& src2, GpuMat& dst, Stream& stream)
{
dst.create(src1.size(), src1.type());
btv_l1_cudev::diffSign(src1.reshape(1), src2.reshape(1), dst.reshape(1), StreamAccessor::getStream(stream));
}
void calcBtvWeights(int btvKernelSize, double alpha, std::vector<float>& btvWeights)
{
const size_t size = btvKernelSize * btvKernelSize;
btvWeights.resize(size);
const int ksize = (btvKernelSize - 1) / 2;
const float alpha_f = static_cast<float>(alpha);
for (int m = 0, ind = 0; m <= ksize; ++m)
{
for (int l = ksize; l + m >= 0; --l, ++ind)
btvWeights[ind] = pow(alpha_f, std::abs(m) + std::abs(l));
}
btv_l1_cudev::loadBtvWeights(&btvWeights[0], size);
}
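// For example (illustrative), with btvKernelSize = 3 (so ksize = 1) the traversal above
// fills five weights: m = 0 gives l = 1, 0 -> alpha^1, alpha^0; m = 1 gives
// l = 1, 0, -1 -> alpha^2, alpha^1, alpha^2. calcBtvRegularizationKernel walks the
// (m, l) offsets in the same order, so c_btvRegWeights[count] matches each offset pair.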
void calcBtvRegularization(const GpuMat& src, GpuMat& dst, int btvKernelSize)
{
typedef void (*func_t)(PtrStepSzb src, PtrStepSzb dst, int ksize);
static const func_t funcs[] =
{
0,
btv_l1_cudev::calcBtvRegularization<1>,
0,
btv_l1_cudev::calcBtvRegularization<3>,
btv_l1_cudev::calcBtvRegularization<4>
};
dst.create(src.size(), src.type());
dst.setTo(Scalar::all(0));
const int ksize = (btvKernelSize - 1) / 2;
funcs[src.channels()](src, dst, ksize);
}
class BTVL1_CUDA_Base : public cv::superres::SuperResolution
{
public:
BTVL1_CUDA_Base();
void process(const std::vector<GpuMat>& src, GpuMat& dst,
const std::vector<std::pair<GpuMat, GpuMat> >& forwardMotions, const std::vector<std::pair<GpuMat, GpuMat> >& backwardMotions,
int baseIdx);
void collectGarbage();
inline int getScale() const CV_OVERRIDE { return scale_; }
inline void setScale(int val) CV_OVERRIDE { scale_ = val; }
inline int getIterations() const CV_OVERRIDE { return iterations_; }
inline void setIterations(int val) CV_OVERRIDE { iterations_ = val; }
inline double getTau() const CV_OVERRIDE { return tau_; }
inline void setTau(double val) CV_OVERRIDE { tau_ = val; }
inline double getLabmda() const CV_OVERRIDE { return lambda_; }
inline void setLabmda(double val) CV_OVERRIDE { lambda_ = val; }
inline double getAlpha() const CV_OVERRIDE { return alpha_; }
inline void setAlpha(double val) CV_OVERRIDE { alpha_ = val; }
inline int getKernelSize() const CV_OVERRIDE { return btvKernelSize_; }
inline void setKernelSize(int val) CV_OVERRIDE { btvKernelSize_ = val; }
inline int getBlurKernelSize() const CV_OVERRIDE { return blurKernelSize_; }
inline void setBlurKernelSize(int val) CV_OVERRIDE { blurKernelSize_ = val; }
inline double getBlurSigma() const CV_OVERRIDE { return blurSigma_; }
inline void setBlurSigma(double val) CV_OVERRIDE { blurSigma_ = val; }
inline int getTemporalAreaRadius() const CV_OVERRIDE { return temporalAreaRadius_; }
inline void setTemporalAreaRadius(int val) CV_OVERRIDE { temporalAreaRadius_ = val; }
inline Ptr<cv::superres::DenseOpticalFlowExt> getOpticalFlow() const CV_OVERRIDE { return opticalFlow_; }
inline void setOpticalFlow(const Ptr<cv::superres::DenseOpticalFlowExt>& val) CV_OVERRIDE { opticalFlow_ = val; }
protected:
int scale_;
int iterations_;
double lambda_;
double tau_;
double alpha_;
int btvKernelSize_;
int blurKernelSize_;
double blurSigma_;
int temporalAreaRadius_;
Ptr<cv::superres::DenseOpticalFlowExt> opticalFlow_;
private:
std::vector<Ptr<cuda::Filter> > filters_;
int curBlurKernelSize_;
double curBlurSigma_;
int curSrcType_;
std::vector<float> btvWeights_;
int curBtvKernelSize_;
double curAlpha_;
std::vector<std::pair<GpuMat, GpuMat> > lowResForwardMotions_;
std::vector<std::pair<GpuMat, GpuMat> > lowResBackwardMotions_;
std::vector<std::pair<GpuMat, GpuMat> > highResForwardMotions_;
std::vector<std::pair<GpuMat, GpuMat> > highResBackwardMotions_;
std::vector<std::pair<GpuMat, GpuMat> > forwardMaps_;
std::vector<std::pair<GpuMat, GpuMat> > backwardMaps_;
GpuMat highRes_;
std::vector<Stream> streams_;
std::vector<GpuMat> diffTerms_;
std::vector<GpuMat> a_, b_, c_;
GpuMat regTerm_;
};
BTVL1_CUDA_Base::BTVL1_CUDA_Base()
{
scale_ = 4;
iterations_ = 180;
lambda_ = 0.03;
tau_ = 1.3;
alpha_ = 0.7;
btvKernelSize_ = 7;
blurKernelSize_ = 5;
blurSigma_ = 0.0;
#ifdef HAVE_OPENCV_CUDAOPTFLOW
opticalFlow_ = createOptFlow_Farneback_CUDA();
#else
opticalFlow_ = createOptFlow_Farneback();
#endif
temporalAreaRadius_ = 0;
curBlurKernelSize_ = -1;
curBlurSigma_ = -1.0;
curSrcType_ = -1;
curBtvKernelSize_ = -1;
curAlpha_ = -1.0;
}
void BTVL1_CUDA_Base::process(const std::vector<GpuMat>& src, GpuMat& dst,
const std::vector<std::pair<GpuMat, GpuMat> >& forwardMotions, const std::vector<std::pair<GpuMat, GpuMat> >& backwardMotions,
int baseIdx)
{
CV_Assert( scale_ > 1 );
CV_Assert( iterations_ > 0 );
CV_Assert( tau_ > 0.0 );
CV_Assert( alpha_ > 0.0 );
CV_Assert( btvKernelSize_ > 0 && btvKernelSize_ <= 16 );
CV_Assert( blurKernelSize_ > 0 );
CV_Assert( blurSigma_ >= 0.0 );
// update blur filter and btv weights
if (filters_.size() != src.size() || blurKernelSize_ != curBlurKernelSize_ || blurSigma_ != curBlurSigma_ || src[0].type() != curSrcType_)
{
filters_.resize(src.size());
for (size_t i = 0; i < src.size(); ++i)
filters_[i] = cuda::createGaussianFilter(src[0].type(), -1, Size(blurKernelSize_, blurKernelSize_), blurSigma_);
curBlurKernelSize_ = blurKernelSize_;
curBlurSigma_ = blurSigma_;
curSrcType_ = src[0].type();
}
if (btvWeights_.empty() || btvKernelSize_ != curBtvKernelSize_ || alpha_ != curAlpha_)
{
calcBtvWeights(btvKernelSize_, alpha_, btvWeights_);
curBtvKernelSize_ = btvKernelSize_;
curAlpha_ = alpha_;
}
// calc motions between input frames
calcRelativeMotions(forwardMotions, backwardMotions, lowResForwardMotions_, lowResBackwardMotions_, baseIdx, src[0].size());
upscaleMotions(lowResForwardMotions_, highResForwardMotions_, scale_);
upscaleMotions(lowResBackwardMotions_, highResBackwardMotions_, scale_);
forwardMaps_.resize(highResForwardMotions_.size());
backwardMaps_.resize(highResForwardMotions_.size());
for (size_t i = 0; i < highResForwardMotions_.size(); ++i)
buildMotionMaps(highResForwardMotions_[i], highResBackwardMotions_[i], forwardMaps_[i], backwardMaps_[i]);
// initial estimation
const Size lowResSize = src[0].size();
const Size highResSize(lowResSize.width * scale_, lowResSize.height * scale_);
cuda::resize(src[baseIdx], highRes_, highResSize, 0, 0, INTER_CUBIC);
// iterations
streams_.resize(src.size());
diffTerms_.resize(src.size());
a_.resize(src.size());
b_.resize(src.size());
c_.resize(src.size());
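// Each pass below is, in effect, one explicit gradient step of the BTV-L1 energy:
//   highRes += tau * sum_k M_k^T H^T D^T sign(lowRes_k - D H M_k highRes)
//              - tau * lambda * btvRegularization(highRes)
// where M_k warps by the relative motion, H is the Gaussian blur and D the decimation.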
for (int i = 0; i < iterations_; ++i)
{
for (size_t k = 0; k < src.size(); ++k)
{
// a = M * Ih
cuda::remap(highRes_, a_[k], backwardMaps_[k].first, backwardMaps_[k].second, INTER_NEAREST, BORDER_REPLICATE, Scalar(), streams_[k]);
// b = HM * Ih
filters_[k]->apply(a_[k], b_[k], streams_[k]);
// c = DHF * Ih
cuda::resize(b_[k], c_[k], lowResSize, 0, 0, INTER_NEAREST, streams_[k]);
diffSign(src[k], c_[k], c_[k], streams_[k]);
// a = Dt * diff
upscale(c_[k], a_[k], scale_, streams_[k]);
// b = HtDt * diff
filters_[k]->apply(a_[k], b_[k], streams_[k]);
// diffTerm = MtHtDt * diff
cuda::remap(b_[k], diffTerms_[k], forwardMaps_[k].first, forwardMaps_[k].second, INTER_NEAREST, BORDER_REPLICATE, Scalar(), streams_[k]);
}
if (lambda_ > 0)
{
calcBtvRegularization(highRes_, regTerm_, btvKernelSize_);
cuda::addWeighted(highRes_, 1.0, regTerm_, -tau_ * lambda_, 0.0, highRes_);
}
for (size_t k = 0; k < src.size(); ++k)
{
streams_[k].waitForCompletion();
cuda::addWeighted(highRes_, 1.0, diffTerms_[k], tau_, 0.0, highRes_);
}
}
Rect inner(btvKernelSize_, btvKernelSize_, highRes_.cols - 2 * btvKernelSize_, highRes_.rows - 2 * btvKernelSize_);
highRes_(inner).copyTo(dst);
}
void BTVL1_CUDA_Base::collectGarbage()
{
filters_.clear();
lowResForwardMotions_.clear();
lowResBackwardMotions_.clear();
highResForwardMotions_.clear();
highResBackwardMotions_.clear();
forwardMaps_.clear();
backwardMaps_.clear();
highRes_.release();
diffTerms_.clear();
a_.clear();
b_.clear();
c_.clear();
regTerm_.release();
}
////////////////////////////////////////////////////////////
class BTVL1_CUDA : public BTVL1_CUDA_Base
{
public:
BTVL1_CUDA();
void collectGarbage();
protected:
void initImpl(Ptr<FrameSource>& frameSource);
void processImpl(Ptr<FrameSource>& frameSource, OutputArray output);
private:
void readNextFrame(Ptr<FrameSource>& frameSource);
void processFrame(int idx);
GpuMat curFrame_;
GpuMat prevFrame_;
std::vector<GpuMat> frames_;
std::vector<std::pair<GpuMat, GpuMat> > forwardMotions_;
std::vector<std::pair<GpuMat, GpuMat> > backwardMotions_;
std::vector<GpuMat> outputs_;
int storePos_;
int procPos_;
int outPos_;
std::vector<GpuMat> srcFrames_;
std::vector<std::pair<GpuMat, GpuMat> > srcForwardMotions_;
std::vector<std::pair<GpuMat, GpuMat> > srcBackwardMotions_;
GpuMat finalOutput_;
};
BTVL1_CUDA::BTVL1_CUDA()
{
temporalAreaRadius_ = 4;
}
void BTVL1_CUDA::collectGarbage()
{
curFrame_.release();
prevFrame_.release();
frames_.clear();
forwardMotions_.clear();
backwardMotions_.clear();
outputs_.clear();
srcFrames_.clear();
srcForwardMotions_.clear();
srcBackwardMotions_.clear();
finalOutput_.release();
SuperResolution::collectGarbage();
BTVL1_CUDA_Base::collectGarbage();
}
void BTVL1_CUDA::initImpl(Ptr<FrameSource>& frameSource)
{
const int cacheSize = 2 * temporalAreaRadius_ + 1;
frames_.resize(cacheSize);
forwardMotions_.resize(cacheSize);
backwardMotions_.resize(cacheSize);
outputs_.resize(cacheSize);
storePos_ = -1;
for (int t = -temporalAreaRadius_; t <= temporalAreaRadius_; ++t)
readNextFrame(frameSource);
for (int i = 0; i <= temporalAreaRadius_; ++i)
processFrame(i);
procPos_ = temporalAreaRadius_;
outPos_ = -1;
}
void BTVL1_CUDA::processImpl(Ptr<FrameSource>& frameSource, OutputArray _output)
{
if (outPos_ >= storePos_)
{
_output.release();
return;
}
readNextFrame(frameSource);
if (procPos_ < storePos_)
{
++procPos_;
processFrame(procPos_);
}
++outPos_;
const GpuMat& curOutput = at(outPos_, outputs_);
if (_output.kind() == _InputArray::CUDA_GPU_MAT)
curOutput.convertTo(_output.getGpuMatRef(), CV_8U);
else
{
curOutput.convertTo(finalOutput_, CV_8U);
arrCopy(finalOutput_, _output);
}
}
void BTVL1_CUDA::readNextFrame(Ptr<FrameSource>& frameSource)
{
frameSource->nextFrame(curFrame_);
if (curFrame_.empty())
return;
++storePos_;
curFrame_.convertTo(at(storePos_, frames_), CV_32F);
if (storePos_ > 0)
{
std::pair<GpuMat, GpuMat>& forwardMotion = at(storePos_ - 1, forwardMotions_);
std::pair<GpuMat, GpuMat>& backwardMotion = at(storePos_, backwardMotions_);
opticalFlow_->calc(prevFrame_, curFrame_, forwardMotion.first, forwardMotion.second);
opticalFlow_->calc(curFrame_, prevFrame_, backwardMotion.first, backwardMotion.second);
}
curFrame_.copyTo(prevFrame_);
}
void BTVL1_CUDA::processFrame(int idx)
{
const int startIdx = std::max(idx - temporalAreaRadius_, 0);
const int procIdx = idx;
const int endIdx = std::min(startIdx + 2 * temporalAreaRadius_, storePos_);
const int count = endIdx - startIdx + 1;
srcFrames_.resize(count);
srcForwardMotions_.resize(count);
srcBackwardMotions_.resize(count);
int baseIdx = -1;
for (int i = startIdx, k = 0; i <= endIdx; ++i, ++k)
{
if (i == procIdx)
baseIdx = k;
srcFrames_[k] = at(i, frames_);
if (i < endIdx)
srcForwardMotions_[k] = at(i, forwardMotions_);
if (i > startIdx)
srcBackwardMotions_[k] = at(i, backwardMotions_);
}
process(srcFrames_, at(idx, outputs_), srcForwardMotions_, srcBackwardMotions_, baseIdx);
}
}
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1_CUDA()
{
return makePtr<BTVL1_CUDA>();
}
#endif // HAVE_CUDA
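// Minimal usage sketch for the CUDA backend above (illustrative only; it assumes a
// CUDA-enabled build and uses "input.avi" as a placeholder path; not compiled):
#if 0
void example_btvl1_cuda()
{
cv::Ptr<cv::superres::SuperResolution> sr = cv::superres::createSuperResolution_BTVL1_CUDA();
sr->setScale(4);
sr->setTemporalAreaRadius(4);  // same value as the BTVL1_CUDA() constructor default
sr->setInput(cv::superres::createFrameSource_Video_CUDA("input.avi"));
cv::cuda::GpuMat highRes;      // GpuMat output avoids the extra device-to-host copy
sr->nextFrame(highRes);
}
#endif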
#include "opencv2/opencv_modules.hpp"
#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) && defined(HAVE_OPENCV_CUDAFILTERS)
#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/transform.hpp"
#include "opencv2/core/cuda/vec_traits.hpp"
#include "opencv2/core/cuda/vec_math.hpp"
using namespace cv::cuda;
using namespace cv::cuda::device;
namespace btv_l1_cudev
{
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf backwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY);
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, cudaStream_t stream);
void loadBtvWeights(const float* weights, size_t count);
template <int cn> void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize);
}
namespace btv_l1_cudev
{
__global__ void buildMotionMapsKernel(const PtrStepSzf forwardMotionX, const PtrStepf forwardMotionY,
PtrStepf backwardMotionX, PtrStepf backwardMotionY,
PtrStepf forwardMapX, PtrStepf forwardMapY,
PtrStepf backwardMapX, PtrStepf backwardMapY)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= forwardMotionX.cols || y >= forwardMotionX.rows)
return;
const float fx = forwardMotionX(y, x);
const float fy = forwardMotionY(y, x);
const float bx = backwardMotionX(y, x);
const float by = backwardMotionY(y, x);
forwardMapX(y, x) = x + bx;
forwardMapY(y, x) = y + by;
backwardMapX(y, x) = x + fx;
backwardMapY(y, x) = y + fy;
}
void buildMotionMaps(PtrStepSzf forwardMotionX, PtrStepSzf forwardMotionY,
PtrStepSzf backwardMotionX, PtrStepSzf backwardMotionY,
PtrStepSzf forwardMapX, PtrStepSzf forwardMapY,
PtrStepSzf backwardMapX, PtrStepSzf backwardMapY)
{
const dim3 block(32, 8);
const dim3 grid(divUp(forwardMapX.cols, block.x), divUp(forwardMapX.rows, block.y));
buildMotionMapsKernel<<<grid, block>>>(forwardMotionX, forwardMotionY,
backwardMotionX, backwardMotionY,
forwardMapX, forwardMapY,
backwardMapX, backwardMapY);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template <typename T>
__global__ void upscaleKernel(const PtrStepSz<T> src, PtrStep<T> dst, const int scale)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= src.cols || y >= src.rows)
return;
dst(y * scale, x * scale) = src(y, x);
}
template <int cn>
void upscale(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream)
{
typedef typename TypeVec<float, cn>::vec_type src_t;
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
upscaleKernel<src_t><<<grid, block, 0, stream>>>((PtrStepSz<src_t>) src, (PtrStepSz<src_t>) dst, scale);
cudaSafeCall( cudaGetLastError() );
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
}
template void upscale<1>(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
template void upscale<3>(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
template void upscale<4>(const PtrStepSzb src, PtrStepSzb dst, int scale, cudaStream_t stream);
__device__ __forceinline__ float diffSign(float a, float b)
{
return a > b ? 1.0f : a < b ? -1.0f : 0.0f;
}
__device__ __forceinline__ float3 diffSign(const float3& a, const float3& b)
{
return make_float3(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f
);
}
__device__ __forceinline__ float4 diffSign(const float4& a, const float4& b)
{
return make_float4(
a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f,
a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f,
a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f,
0.0f
);
}
struct DiffSign : binary_function<float, float, float>
{
__device__ __forceinline__ float operator ()(float a, float b) const
{
return diffSign(a, b);
}
};
}
namespace cv { namespace cuda { namespace device
{
template <> struct TransformFunctorTraits<btv_l1_cudev::DiffSign> : DefaultTransformFunctorTraits<btv_l1_cudev::DiffSign>
{
enum { smart_block_dim_y = 8 };
enum { smart_shift = 4 };
};
}}}
namespace btv_l1_cudev
{
void diffSign(PtrStepSzf src1, PtrStepSzf src2, PtrStepSzf dst, cudaStream_t stream)
{
transform(src1, src2, dst, DiffSign(), WithOutMask(), stream);
}
__constant__ float c_btvRegWeights[16*16];
template <typename T>
__global__ void calcBtvRegularizationKernel(const PtrStepSz<T> src, PtrStep<T> dst, const int ksize)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x + ksize;
const int y = blockIdx.y * blockDim.y + threadIdx.y + ksize;
if (y >= src.rows - ksize || x >= src.cols - ksize)
return;
const T srcVal = src(y, x);
T dstVal = VecTraits<T>::all(0);
for (int m = 0, count = 0; m <= ksize; ++m)
{
for (int l = ksize; l + m >= 0; --l, ++count)
dstVal = dstVal + c_btvRegWeights[count] * (diffSign(srcVal, src(y + m, x + l)) - diffSign(src(y - m, x - l), srcVal));
}
dst(y, x) = dstVal;
}
void loadBtvWeights(const float* weights, size_t count)
{
cudaSafeCall( cudaMemcpyToSymbol(c_btvRegWeights, weights, count * sizeof(float)) );
}
template <int cn>
void calcBtvRegularization(PtrStepSzb src, PtrStepSzb dst, int ksize)
{
typedef typename TypeVec<float, cn>::vec_type src_t;
const dim3 block(32, 8);
const dim3 grid(divUp(src.cols, block.x), divUp(src.rows, block.y));
calcBtvRegularizationKernel<src_t><<<grid, block>>>((PtrStepSz<src_t>) src, (PtrStepSz<src_t>) dst, ksize);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
}
template void calcBtvRegularization<1>(PtrStepSzb src, PtrStepSzb dst, int ksize);
template void calcBtvRegularization<3>(PtrStepSzb src, PtrStepSzb dst, int ksize);
template void calcBtvRegularization<4>(PtrStepSzb src, PtrStepSzb dst, int ksize);
}
#endif
#include "precomp.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::superres;
using namespace cv::superres::detail;
cv::superres::FrameSource::~FrameSource()
{
}
//////////////////////////////////////////////////////
// EmptyFrameSource
namespace
{
class EmptyFrameSource : public FrameSource
{
public:
void nextFrame(OutputArray frame) CV_OVERRIDE;
void reset() CV_OVERRIDE;
};
void EmptyFrameSource::nextFrame(OutputArray frame)
{
frame.release();
}
void EmptyFrameSource::reset()
{
}
}
Ptr<FrameSource> cv::superres::createFrameSource_Empty()
{
return makePtr<EmptyFrameSource>();
}
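// FrameSource is a small interface; a custom source can be sketched as below
// (illustrative only; the SolidColorSource name and behaviour are assumptions):
#if 0
class SolidColorSource : public cv::superres::FrameSource
{
public:
void nextFrame(cv::OutputArray frame) CV_OVERRIDE
{
cv::Mat(480, 640, CV_8UC3, cv::Scalar(0, 128, 255)).copyTo(frame);
}
void reset() CV_OVERRIDE {}
};
#endif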
//////////////////////////////////////////////////////
// VideoFrameSource & CameraFrameSource
#ifndef HAVE_OPENCV_VIDEOIO
Ptr<FrameSource> cv::superres::createFrameSource_Video(const String& fileName)
{
CV_UNUSED(fileName);
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<FrameSource>();
}
Ptr<FrameSource> cv::superres::createFrameSource_Camera(int deviceId)
{
CV_UNUSED(deviceId);
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<FrameSource>();
}
#else // HAVE_OPENCV_VIDEOIO
namespace
{
class CaptureFrameSource : public FrameSource
{
public:
void nextFrame(OutputArray frame) CV_OVERRIDE;
protected:
VideoCapture vc_;
private:
Mat frame_;
};
void CaptureFrameSource::nextFrame(OutputArray _frame)
{
if (_frame.kind() == _InputArray::MAT)
vc_ >> _frame.getMatRef();
else if(_frame.kind() == _InputArray::CUDA_GPU_MAT)
{
vc_ >> frame_;
arrCopy(frame_, _frame);
}
else if (_frame.isUMat())
vc_ >> *(UMat *)_frame.getObj();
else
{
// should never get here
CV_Error(Error::StsBadArg, "Failed to detect input frame kind" );
}
}
class VideoFrameSource : public CaptureFrameSource
{
public:
VideoFrameSource(const String& fileName);
void reset() CV_OVERRIDE;
private:
String fileName_;
};
VideoFrameSource::VideoFrameSource(const String& fileName) : fileName_(fileName)
{
reset();
}
void VideoFrameSource::reset()
{
vc_.release();
vc_.open(fileName_);
CV_Assert( vc_.isOpened() );
}
class CameraFrameSource : public CaptureFrameSource
{
public:
CameraFrameSource(int deviceId);
void reset() CV_OVERRIDE;
private:
int deviceId_;
};
CameraFrameSource::CameraFrameSource(int deviceId) : deviceId_(deviceId)
{
reset();
}
void CameraFrameSource::reset()
{
vc_.release();
vc_.open(deviceId_);
CV_Assert( vc_.isOpened() );
}
}
Ptr<FrameSource> cv::superres::createFrameSource_Video(const String& fileName)
{
return makePtr<VideoFrameSource>(fileName);
}
Ptr<FrameSource> cv::superres::createFrameSource_Camera(int deviceId)
{
return makePtr<CameraFrameSource>(deviceId);
}
#endif // HAVE_OPENCV_VIDEOIO
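// Sketch of driving a capture-backed source directly (illustrative only; device 0 is
// an assumption and requires a videoio-enabled build):
#if 0
void example_camera_source()
{
cv::Ptr<cv::superres::FrameSource> source = cv::superres::createFrameSource_Camera(0);
cv::Mat frame;
source->nextFrame(frame);  // an empty output means no frame was available
}
#endif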
//////////////////////////////////////////////////////
// VideoFrameSource_CUDA
#ifndef HAVE_OPENCV_CUDACODEC
Ptr<FrameSource> cv::superres::createFrameSource_Video_CUDA(const String& fileName)
{
CV_UNUSED(fileName);
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
}
#else // HAVE_OPENCV_CUDACODEC
namespace
{
class VideoFrameSource_CUDA : public FrameSource
{
public:
VideoFrameSource_CUDA(const String& fileName);
void nextFrame(OutputArray frame);
void reset();
private:
String fileName_;
Ptr<cudacodec::VideoReader> reader_;
GpuMat frame_;
};
VideoFrameSource_CUDA::VideoFrameSource_CUDA(const String& fileName) : fileName_(fileName)
{
reset();
}
void VideoFrameSource_CUDA::nextFrame(OutputArray _frame)
{
if (_frame.kind() == _InputArray::CUDA_GPU_MAT)
{
bool res = reader_->nextFrame(_frame.getGpuMatRef());
if (!res)
_frame.release();
}
else
{
bool res = reader_->nextFrame(frame_);
if (!res)
_frame.release();
else
arrCopy(frame_, _frame);
}
}
void VideoFrameSource_CUDA::reset()
{
reader_ = cudacodec::createVideoReader(fileName_);
}
}
Ptr<FrameSource> cv::superres::createFrameSource_Video_CUDA(const String& fileName)
{
return makePtr<VideoFrameSource_CUDA>(fileName);
}
#endif // HAVE_OPENCV_CUDACODEC
#include "precomp.hpp"
using namespace cv;
using namespace cv::cuda;
Mat cv::superres::arrGetMat(InputArray arr, Mat& buf)
{
switch (arr.kind())
{
case _InputArray::CUDA_GPU_MAT:
arr.getGpuMat().download(buf);
return buf;
case _InputArray::OPENGL_BUFFER:
arr.getOGlBuffer().copyTo(buf);
return buf;
default:
return arr.getMat();
}
}
UMat cv::superres::arrGetUMat(InputArray arr, UMat& buf)
{
switch (arr.kind())
{
case _InputArray::CUDA_GPU_MAT:
arr.getGpuMat().download(buf);
return buf;
case _InputArray::OPENGL_BUFFER:
arr.getOGlBuffer().copyTo(buf);
return buf;
default:
return arr.getUMat();
}
}
GpuMat cv::superres::arrGetGpuMat(InputArray arr, GpuMat& buf)
{
switch (arr.kind())
{
case _InputArray::CUDA_GPU_MAT:
return arr.getGpuMat();
case _InputArray::OPENGL_BUFFER:
arr.getOGlBuffer().copyTo(buf);
return buf;
default:
buf.upload(arr.getMat());
return buf;
}
}
namespace
{
void mat2mat(InputArray src, OutputArray dst)
{
src.getMat().copyTo(dst);
}
void arr2buf(InputArray src, OutputArray dst)
{
dst.getOGlBufferRef().copyFrom(src);
}
void mat2gpu(InputArray src, OutputArray dst)
{
dst.getGpuMatRef().upload(src.getMat());
}
void buf2arr(InputArray src, OutputArray dst)
{
src.getOGlBuffer().copyTo(dst);
}
void gpu2mat(InputArray src, OutputArray dst)
{
GpuMat d = src.getGpuMat();
dst.create(d.size(), d.type());
Mat m = dst.getMat();
d.download(m);
}
void gpu2gpu(InputArray src, OutputArray dst)
{
src.getGpuMat().copyTo(dst.getGpuMatRef());
}
}
void cv::superres::arrCopy(InputArray src, OutputArray dst)
{
if (dst.isUMat() || src.isUMat())
{
src.copyTo(dst);
return;
}
typedef void (*func_t)(InputArray src, OutputArray dst);
static const func_t funcs[10][10] =
{
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
{ 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
{ 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
{ 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
{ 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
{ 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
{ 0, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, 0, buf2arr },
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
{ 0, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, arr2buf, 0 , gpu2gpu },
};
const int src_kind = src.kind() >> _InputArray::KIND_SHIFT;
const int dst_kind = dst.kind() >> _InputArray::KIND_SHIFT;
CV_Assert( src_kind >= 0 && src_kind < 10 );
CV_Assert( dst_kind >= 0 && dst_kind < 10 );
const func_t func = funcs[src_kind][dst_kind];
CV_Assert( func != 0 );
func(src, dst);
}
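// Small sketch of how arrCopy dispatches on the array kinds (illustrative only;
// assumes a CUDA-capable build for the GpuMat leg):
#if 0
void example_arrCopy()
{
cv::Mat host(480, 640, CV_8UC3, cv::Scalar::all(0));
cv::cuda::GpuMat device;
cv::superres::arrCopy(host, device);      // routed to mat2gpu: uploads to the GPU
cv::Mat roundTrip;
cv::superres::arrCopy(device, roundTrip); // routed to gpu2mat: downloads back
}
#endif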
namespace
{
void convertToCn(InputArray src, OutputArray dst, int cn)
{
int scn = src.channels();
CV_Assert( scn == 1 || scn == 3 || scn == 4 );
CV_Assert( cn == 1 || cn == 3 || cn == 4 );
static const int codes[5][5] =
{
{ -1, -1, -1, -1, -1 },
{ -1, -1, -1, COLOR_GRAY2BGR, COLOR_GRAY2BGRA },
{ -1, -1, -1, -1, -1 },
{ -1, COLOR_BGR2GRAY, -1, -1, COLOR_BGR2BGRA },
{ -1, COLOR_BGRA2GRAY, -1, COLOR_BGRA2BGR, -1 }
};
const int code = codes[scn][cn];
CV_Assert( code >= 0 );
switch (src.kind())
{
case _InputArray::CUDA_GPU_MAT:
#ifdef HAVE_OPENCV_CUDAIMGPROC
cuda::cvtColor(src.getGpuMat(), dst.getGpuMatRef(), code, cn);
#else
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
#endif
break;
default:
cv::cvtColor(src, dst, code, cn);
break;
}
}
void convertToDepth(InputArray src, OutputArray dst, int depth)
{
const int sdepth = src.depth();
CV_Assert( sdepth <= CV_64F );
CV_Assert( depth == CV_8U || depth == CV_32F );
static const double maxVals[CV_64F + 1] =
{
(double)std::numeric_limits<uchar>::max(),
(double)std::numeric_limits<schar>::max(),
(double)std::numeric_limits<ushort>::max(),
(double)std::numeric_limits<short>::max(),
(double)std::numeric_limits<int>::max(),
1.0,
1.0,
};
const double scale = maxVals[depth] / maxVals[sdepth];
switch (src.kind())
{
case _InputArray::CUDA_GPU_MAT:
src.getGpuMat().convertTo(dst.getGpuMatRef(), depth, scale);
break;
case _InputArray::UMAT:
src.getUMat().convertTo(dst, depth, scale);
break;
default:
src.getMat().convertTo(dst, depth, scale);
break;
}
}
}
Mat cv::superres::convertToType(const Mat& src, int type, Mat& buf0, Mat& buf1)
{
CV_INSTRUMENT_REGION();
if (src.type() == type)
return src;
const int depth = CV_MAT_DEPTH(type);
const int cn = CV_MAT_CN(type);
if (src.depth() == depth)
{
convertToCn(src, buf0, cn);
return buf0;
}
if (src.channels() == cn)
{
convertToDepth(src, buf1, depth);
return buf1;
}
convertToCn(src, buf0, cn);
convertToDepth(buf0, buf1, depth);
return buf1;
}
UMat cv::superres::convertToType(const UMat& src, int type, UMat& buf0, UMat& buf1)
{
CV_INSTRUMENT_REGION();
if (src.type() == type)
return src;
const int depth = CV_MAT_DEPTH(type);
const int cn = CV_MAT_CN(type);
if (src.depth() == depth)
{
convertToCn(src, buf0, cn);
return buf0;
}
if (src.channels() == cn)
{
convertToDepth(src, buf1, depth);
return buf1;
}
convertToCn(src, buf0, cn);
convertToDepth(buf0, buf1, depth);
return buf1;
}
GpuMat cv::superres::convertToType(const GpuMat& src, int type, GpuMat& buf0, GpuMat& buf1)
{
if (src.type() == type)
return src;
const int depth = CV_MAT_DEPTH(type);
const int cn = CV_MAT_CN(type);
if (src.depth() == depth)
{
convertToCn(src, buf0, cn);
return buf0;
}
if (src.channels() == cn)
{
convertToDepth(src, buf1, depth);
return buf1;
}
convertToCn(src, buf0, cn);
convertToDepth(buf0, buf1, depth);
return buf1;
}
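// Sketch of the conversion helper in use (illustrative only): converting a BGR 8-bit
// frame to a single-channel float image.
#if 0
void example_convertToType()
{
cv::Mat frame(480, 640, CV_8UC3, cv::Scalar::all(128)), buf0, buf1;
cv::Mat gray32f = cv::superres::convertToType(frame, CV_32FC1, buf0, buf1);
// depth and channel count both differ, so the call first runs convertToCn
// (BGR -> GRAY) and then convertToDepth (8U -> 32F, rescaled to [0, 1]).
}
#endif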
#ifndef __OPENCV_SUPERRES_INPUT_ARRAY_UTILITY_HPP__
#define __OPENCV_SUPERRES_INPUT_ARRAY_UTILITY_HPP__
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
namespace cv
{
namespace superres
{
CV_EXPORTS Mat arrGetMat(InputArray arr, Mat& buf);
CV_EXPORTS UMat arrGetUMat(InputArray arr, UMat& buf);
CV_EXPORTS cuda::GpuMat arrGetGpuMat(InputArray arr, cuda::GpuMat& buf);
CV_EXPORTS void arrCopy(InputArray src, OutputArray dst);
CV_EXPORTS Mat convertToType(const Mat& src, int type, Mat& buf0, Mat& buf1);
CV_EXPORTS UMat convertToType(const UMat& src, int type, UMat& buf0, UMat& buf1);
CV_EXPORTS cuda::GpuMat convertToType(const cuda::GpuMat& src, int type, cuda::GpuMat& buf0, cuda::GpuMat& buf1);
}
}
#endif // __OPENCV_SUPERRES_INPUT_ARRAY_UTILITY_HPP__
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Jin Ma jin@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef cn
#define cn 1
#endif
#define sz (int)sizeof(float)
#define src_elem_at(_src, y, step, x) *(__global const float *)(_src + mad24(y, step, (x) * sz))
#define dst_elem_at(_dst, y, step, x) *(__global float *)(_dst + mad24(y, step, (x) * sz))
__kernel void buildMotionMaps(__global const uchar * forwardMotionPtr, int forwardMotion_step, int forwardMotion_offset,
__global const uchar * backwardMotionPtr, int backwardMotion_step, int backwardMotion_offset,
__global const uchar * forwardMapPtr, int forwardMap_step, int forwardMap_offset,
__global const uchar * backwardMapPtr, int backwardMap_step, int backwardMap_offset,
int rows, int cols)
{
int x = get_global_id(0);
int y = get_global_id(1);
if (x < cols && y < rows)
{
int forwardMotion_index = mad24(forwardMotion_step, y, (int)sizeof(float2) * x + forwardMotion_offset);
int backwardMotion_index = mad24(backwardMotion_step, y, (int)sizeof(float2) * x + backwardMotion_offset);
int forwardMap_index = mad24(forwardMap_step, y, (int)sizeof(float2) * x + forwardMap_offset);
int backwardMap_index = mad24(backwardMap_step, y, (int)sizeof(float2) * x + backwardMap_offset);
float2 forwardMotion = *(__global const float2 *)(forwardMotionPtr + forwardMotion_index);
float2 backwardMotion = *(__global const float2 *)(backwardMotionPtr + backwardMotion_index);
__global float2 * forwardMap = (__global float2 *)(forwardMapPtr + forwardMap_index);
__global float2 * backwardMap = (__global float2 *)(backwardMapPtr + backwardMap_index);
float2 basePoint = (float2)(x, y);
forwardMap[0] = basePoint + backwardMotion;
backwardMap[0] = basePoint + forwardMotion;
}
}
__kernel void upscale(__global const uchar * srcptr, int src_step, int src_offset, int src_rows, int src_cols,
__global uchar * dstptr, int dst_step, int dst_offset, int scale)
{
int x = get_global_id(0);
int y = get_global_id(1);
if (x < src_cols && y < src_rows)
{
int src_index = mad24(y, src_step, sz * x * cn + src_offset);
int dst_index = mad24(y * scale, dst_step, sz * x * scale * cn + dst_offset);
__global const float * src = (__global const float *)(srcptr + src_index);
__global float * dst = (__global float *)(dstptr + dst_index);
#pragma unroll
for (int c = 0; c < cn; ++c)
dst[c] = src[c];
}
}
inline float diffSign1(float a, float b)
{
return a > b ? 1.0f : a < b ? -1.0f : 0.0f;
}
inline float3 diffSign3(float3 a, float3 b)
{
float3 pos;
pos.x = a.x > b.x ? 1.0f : a.x < b.x ? -1.0f : 0.0f;
pos.y = a.y > b.y ? 1.0f : a.y < b.y ? -1.0f : 0.0f;
pos.z = a.z > b.z ? 1.0f : a.z < b.z ? -1.0f : 0.0f;
return pos;
}
__kernel void diffSign(__global const uchar * src1, int src1_step, int src1_offset,
__global const uchar * src2, int src2_step, int src2_offset,
__global uchar * dst, int dst_step, int dst_offset, int rows, int cols)
{
int x = get_global_id(0);
int y = get_global_id(1);
if (x < cols && y < rows)
*(__global float *)(dst + mad24(y, dst_step, sz * x + dst_offset)) =
diffSign1(*(__global const float *)(src1 + mad24(y, src1_step, sz * x + src1_offset)),
*(__global const float *)(src2 + mad24(y, src2_step, sz * x + src2_offset)));
}
__kernel void calcBtvRegularization(__global const uchar * src, int src_step, int src_offset,
__global uchar * dst, int dst_step, int dst_offset, int dst_rows, int dst_cols,
int ksize, __constant float * c_btvRegWeights)
{
int x = get_global_id(0) + ksize;
int y = get_global_id(1) + ksize;
if (y < dst_rows - ksize && x < dst_cols - ksize)
{
src += src_offset;
#if cn == 1
const float srcVal = src_elem_at(src, y, src_step, x);
float dstVal = 0.0f;
for (int m = 0, count = 0; m <= ksize; ++m)
for (int l = ksize; l + m >= 0; --l, ++count)
{
dstVal += c_btvRegWeights[count] * (diffSign1(srcVal, src_elem_at(src, y + m, src_step, x + l))
- diffSign1(src_elem_at(src, y - m, src_step, x - l), srcVal));
}
dst_elem_at(dst, y, dst_step, x) = dstVal;
#elif cn == 3
__global const float * src0ptr = (__global const float *)(src + mad24(y, src_step, 3 * sz * x + src_offset));
float3 srcVal = (float3)(src0ptr[0], src0ptr[1], src0ptr[2]), dstVal = 0.f;
for (int m = 0, count = 0; m <= ksize; ++m)
{
for (int l = ksize; l + m >= 0; --l, ++count)
{
__global const float * src1ptr = (__global const float *)(src + mad24(y + m, src_step, 3 * sz * (x + l) + src_offset));
__global const float * src2ptr = (__global const float *)(src + mad24(y - m, src_step, 3 * sz * (x - l) + src_offset));
float3 src1 = (float3)(src1ptr[0], src1ptr[1], src1ptr[2]);
float3 src2 = (float3)(src2ptr[0], src2ptr[1], src2ptr[2]);
dstVal += c_btvRegWeights[count] * (diffSign3(srcVal, src1) - diffSign3(src2, srcVal));
}
}
__global float * dstptr = (__global float *)(dst + mad24(y, dst_step, 3 * sz * x + dst_offset + 0));
dstptr[0] = dstVal.x;
dstptr[1] = dstVal.y;
dstptr[2] = dstVal.z;
#else
#error "Number of channels should be either 1 or 3"
#endif
}
}
#include "precomp.hpp"
#include "opencv2/core/opencl/ocl_defs.hpp"
using namespace cv;
using namespace cv::cuda;
using namespace cv::superres;
using namespace cv::superres::detail;
///////////////////////////////////////////////////////////////////
// CpuOpticalFlow
namespace
{
class CpuOpticalFlow : public virtual cv::superres::DenseOpticalFlowExt
{
public:
explicit CpuOpticalFlow(int work_type);
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) CV_OVERRIDE;
void collectGarbage() CV_OVERRIDE;
protected:
virtual void impl(InputArray input0, InputArray input1, OutputArray dst) = 0;
private:
#ifdef HAVE_OPENCL
bool ocl_calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
#endif
int work_type_;
// Mat
Mat buf_[6];
Mat flow_;
Mat flows_[2];
// UMat
UMat ubuf_[6];
UMat uflow_;
std::vector<UMat> uflows_;
};
CpuOpticalFlow::CpuOpticalFlow(int work_type) :
work_type_(work_type)
{
}
#ifdef HAVE_OPENCL
bool CpuOpticalFlow::ocl_calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
UMat frame0 = arrGetUMat(_frame0, ubuf_[0]);
UMat frame1 = arrGetUMat(_frame1, ubuf_[1]);
CV_Assert( frame1.type() == frame0.type() );
CV_Assert( frame1.size() == frame0.size() );
UMat input0 = convertToType(frame0, work_type_, ubuf_[2], ubuf_[3]);
UMat input1 = convertToType(frame1, work_type_, ubuf_[4], ubuf_[5]);
if (!_flow2.needed())
{
impl(input0, input1, _flow1);
return true;
}
impl(input0, input1, uflow_);
// _flow2 is needed here (the early return above handled the single-output case),
// so split the packed flow field and copy each channel out.
split(uflow_, uflows_);
arrCopy(uflows_[0], _flow1);
arrCopy(uflows_[1], _flow2);
return true;
}
#endif
void CpuOpticalFlow::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
CV_INSTRUMENT_REGION();
CV_OCL_RUN(_flow1.isUMat() && (_flow2.isUMat() || !_flow2.needed()),
ocl_calc(_frame0, _frame1, _flow1, _flow2))
Mat frame0 = arrGetMat(_frame0, buf_[0]);
Mat frame1 = arrGetMat(_frame1, buf_[1]);
CV_Assert( frame1.type() == frame0.type() );
CV_Assert( frame1.size() == frame0.size() );
Mat input0 = convertToType(frame0, work_type_, buf_[2], buf_[3]);
Mat input1 = convertToType(frame1, work_type_, buf_[4], buf_[5]);
if (!_flow2.needed() && _flow1.kind() < _InputArray::OPENGL_BUFFER)
{
impl(input0, input1, _flow1);
return;
}
impl(input0, input1, flow_);
if (!_flow2.needed())
arrCopy(flow_, _flow1);
else
{
split(flow_, flows_);
arrCopy(flows_[0], _flow1);
arrCopy(flows_[1], _flow2);
}
}
void CpuOpticalFlow::collectGarbage()
{
// Mat
for (int i = 0; i < 6; ++i)
buf_[i].release();
flow_.release();
flows_[0].release();
flows_[1].release();
// UMat
for (int i = 0; i < 6; ++i)
ubuf_[i].release();
uflow_.release();
uflows_.clear();
}
}
///////////////////////////////////////////////////////////////////
// Farneback
namespace
{
class Farneback CV_FINAL : public CpuOpticalFlow, public cv::superres::FarnebackOpticalFlow
{
public:
Farneback();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) CV_OVERRIDE;
void collectGarbage() CV_OVERRIDE;
inline double getPyrScale() const CV_OVERRIDE { return pyrScale_; }
inline void setPyrScale(double val) CV_OVERRIDE { pyrScale_ = val; }
inline int getLevelsNumber() const CV_OVERRIDE { return numLevels_; }
inline void setLevelsNumber(int val) CV_OVERRIDE { numLevels_ = val; }
inline int getWindowSize() const CV_OVERRIDE { return winSize_; }
inline void setWindowSize(int val) CV_OVERRIDE { winSize_ = val; }
inline int getIterations() const CV_OVERRIDE { return numIters_; }
inline void setIterations(int val) CV_OVERRIDE { numIters_ = val; }
inline int getPolyN() const CV_OVERRIDE { return polyN_; }
inline void setPolyN(int val) CV_OVERRIDE { polyN_ = val; }
inline double getPolySigma() const CV_OVERRIDE { return polySigma_; }
inline void setPolySigma(double val) CV_OVERRIDE { polySigma_ = val; }
inline int getFlags() const CV_OVERRIDE { return flags_; }
inline void setFlags(int val) CV_OVERRIDE { flags_ = val; }
protected:
void impl(InputArray input0, InputArray input1, OutputArray dst) CV_OVERRIDE;
private:
double pyrScale_;
int numLevels_;
int winSize_;
int numIters_;
int polyN_;
double polySigma_;
int flags_;
};
Farneback::Farneback() : CpuOpticalFlow(CV_8UC1)
{
pyrScale_ = 0.5;
numLevels_ = 5;
winSize_ = 13;
numIters_ = 10;
polyN_ = 5;
polySigma_ = 1.1;
flags_ = 0;
}
void Farneback::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
CV_INSTRUMENT_REGION();
CpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void Farneback::collectGarbage()
{
CpuOpticalFlow::collectGarbage();
}
void Farneback::impl(InputArray input0, InputArray input1, OutputArray dst)
{
calcOpticalFlowFarneback(input0, input1, InputOutputArray(dst), pyrScale_,
numLevels_, winSize_, numIters_,
polyN_, polySigma_, flags_);
}
}
Ptr<cv::superres::FarnebackOpticalFlow> cv::superres::createOptFlow_Farneback()
{
return makePtr<Farneback>();
}
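// Minimal sketch of the DenseOpticalFlowExt interface in use (illustrative only;
// frame0 and frame1 are assumed to be same-size 8-bit images):
#if 0
void example_farneback_flow(const cv::Mat& frame0, const cv::Mat& frame1)
{
cv::Ptr<cv::superres::FarnebackOpticalFlow> flow = cv::superres::createOptFlow_Farneback();
cv::Mat flowX, flowY;
flow->calc(frame0, frame1, flowX, flowY);  // two outputs -> the packed flow is split
}
#endif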
///////////////////////////////////////////////////////////////////
// Simple
/*
namespace
{
class Simple : public CpuOpticalFlow
{
public:
AlgorithmInfo* info() const;
Simple();
protected:
void impl(InputArray input0, InputArray input1, OutputArray dst);
private:
int layers_;
int averagingBlockSize_;
int maxFlow_;
double sigmaDist_;
double sigmaColor_;
int postProcessWindow_;
double sigmaDistFix_;
double sigmaColorFix_;
double occThr_;
int upscaleAveragingRadius_;
double upscaleSigmaDist_;
double upscaleSigmaColor_;
double speedUpThr_;
};
CV_INIT_ALGORITHM(Simple, "DenseOpticalFlowExt.Simple",
obj.info()->addParam(obj, "layers", obj.layers_);
obj.info()->addParam(obj, "averagingBlockSize", obj.averagingBlockSize_);
obj.info()->addParam(obj, "maxFlow", obj.maxFlow_);
obj.info()->addParam(obj, "sigmaDist", obj.sigmaDist_);
obj.info()->addParam(obj, "sigmaColor", obj.sigmaColor_);
obj.info()->addParam(obj, "postProcessWindow", obj.postProcessWindow_);
obj.info()->addParam(obj, "sigmaDistFix", obj.sigmaDistFix_);
obj.info()->addParam(obj, "sigmaColorFix", obj.sigmaColorFix_);
obj.info()->addParam(obj, "occThr", obj.occThr_);
obj.info()->addParam(obj, "upscaleAveragingRadius", obj.upscaleAveragingRadius_);
obj.info()->addParam(obj, "upscaleSigmaDist", obj.upscaleSigmaDist_);
obj.info()->addParam(obj, "upscaleSigmaColor", obj.upscaleSigmaColor_);
obj.info()->addParam(obj, "speedUpThr", obj.speedUpThr_))
Simple::Simple() : CpuOpticalFlow(CV_8UC3)
{
layers_ = 3;
averagingBlockSize_ = 2;
maxFlow_ = 4;
sigmaDist_ = 4.1;
sigmaColor_ = 25.5;
postProcessWindow_ = 18;
sigmaDistFix_ = 55.0;
sigmaColorFix_ = 25.5;
occThr_ = 0.35;
upscaleAveragingRadius_ = 18;
upscaleSigmaDist_ = 55.0;
upscaleSigmaColor_ = 25.5;
speedUpThr_ = 10;
}
void Simple::impl(InputArray _input0, InputArray _input1, OutputArray _dst)
{
calcOpticalFlowSF(_input0, _input1, _dst,
layers_,
averagingBlockSize_,
maxFlow_,
sigmaDist_,
sigmaColor_,
postProcessWindow_,
sigmaDistFix_,
sigmaColorFix_,
occThr_,
upscaleAveragingRadius_,
upscaleSigmaDist_,
upscaleSigmaColor_,
speedUpThr_);
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Simple()
{
return makePtr<Simple>();
}*/
///////////////////////////////////////////////////////////////////
// DualTVL1
namespace
{
class DualTVL1 CV_FINAL : public CpuOpticalFlow, public virtual cv::superres::DualTVL1OpticalFlow
{
public:
DualTVL1();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) CV_OVERRIDE;
void collectGarbage() CV_OVERRIDE;
inline double getTau() const CV_OVERRIDE { return (*alg_).getTau(); }
inline void setTau(double val) CV_OVERRIDE { (*alg_).setTau(val); }
inline double getLambda() const CV_OVERRIDE { return (*alg_).getLambda(); }
inline void setLambda(double val) CV_OVERRIDE { (*alg_).setLambda(val); }
inline double getTheta() const CV_OVERRIDE { return (*alg_).getTheta(); }
inline void setTheta(double val) CV_OVERRIDE { (*alg_).setTheta(val); }
inline int getScalesNumber() const CV_OVERRIDE { return (*alg_).getScalesNumber(); }
inline void setScalesNumber(int val) CV_OVERRIDE { (*alg_).setScalesNumber(val); }
inline int getWarpingsNumber() const CV_OVERRIDE { return (*alg_).getWarpingsNumber(); }
inline void setWarpingsNumber(int val) CV_OVERRIDE { (*alg_).setWarpingsNumber(val); }
inline double getEpsilon() const CV_OVERRIDE { return (*alg_).getEpsilon(); }
inline void setEpsilon(double val) CV_OVERRIDE { (*alg_).setEpsilon(val); }
inline int getIterations() const CV_OVERRIDE { return (*alg_).getOuterIterations(); }
inline void setIterations(int val) CV_OVERRIDE { (*alg_).setOuterIterations(val); }
inline bool getUseInitialFlow() const CV_OVERRIDE { return (*alg_).getUseInitialFlow(); }
inline void setUseInitialFlow(bool val) CV_OVERRIDE { (*alg_).setUseInitialFlow(val); }
protected:
void impl(InputArray input0, InputArray input1, OutputArray dst) CV_OVERRIDE;
private:
Ptr<cv::DualTVL1OpticalFlow> alg_;
};
DualTVL1::DualTVL1() : CpuOpticalFlow(CV_8UC1)
{
alg_ = cv::createOptFlow_DualTVL1();
}
void DualTVL1::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
CV_INSTRUMENT_REGION();
CpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void DualTVL1::impl(InputArray input0, InputArray input1, OutputArray dst)
{
alg_->calc(input0, input1, (InputOutputArray)dst);
}
void DualTVL1::collectGarbage()
{
alg_->collectGarbage();
CpuOpticalFlow::collectGarbage();
}
}
Ptr<cv::superres::DualTVL1OpticalFlow> cv::superres::createOptFlow_DualTVL1()
{
return makePtr<DualTVL1>();
}
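// Usage sketch (illustrative only): the wrapper above simply forwards its parameters to
// the underlying cv::DualTVL1OpticalFlow instance, and is the kind of object that can be
// plugged into the BTV-L1 super-resolution pipeline:
//
//     cv::Ptr<cv::superres::DualTVL1OpticalFlow> tvl1 = cv::superres::createOptFlow_DualTVL1();
//     tvl1->setEpsilon(0.02);      // looser stopping criterion -> faster, less accurate
//     tvl1->setScalesNumber(3);
//     cv::Ptr<cv::superres::SuperResolution> sr = cv::superres::createSuperResolution_BTVL1();
//     sr->setOpticalFlow(tvl1);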
///////////////////////////////////////////////////////////////////
// GpuOpticalFlow
#ifndef HAVE_OPENCV_CUDAOPTFLOW
Ptr<cv::superres::FarnebackOpticalFlow> cv::superres::createOptFlow_Farneback_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
}
Ptr<cv::superres::DualTVL1OpticalFlow> cv::superres::createOptFlow_DualTVL1_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
}
Ptr<cv::superres::BroxOpticalFlow> cv::superres::createOptFlow_Brox_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
}
Ptr<cv::superres::PyrLKOpticalFlow> cv::superres::createOptFlow_PyrLK_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
}
#else // HAVE_OPENCV_CUDAOPTFLOW
namespace
{
class GpuOpticalFlow : public virtual cv::superres::DenseOpticalFlowExt
{
public:
explicit GpuOpticalFlow(int work_type);
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) CV_OVERRIDE;
void collectGarbage() CV_OVERRIDE;
protected:
virtual void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) = 0;
private:
int work_type_;
GpuMat buf_[6];
GpuMat u_, v_, flow_;
};
GpuOpticalFlow::GpuOpticalFlow(int work_type) : work_type_(work_type)
{
}
void GpuOpticalFlow::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
CV_INSTRUMENT_REGION();
GpuMat frame0 = arrGetGpuMat(_frame0, buf_[0]);
GpuMat frame1 = arrGetGpuMat(_frame1, buf_[1]);
CV_Assert( frame1.type() == frame0.type() );
CV_Assert( frame1.size() == frame0.size() );
GpuMat input0 = convertToType(frame0, work_type_, buf_[2], buf_[3]);
GpuMat input1 = convertToType(frame1, work_type_, buf_[4], buf_[5]);
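// Fast path: when the caller asks for both components and supplies GpuMat outputs, write
// the horizontal/vertical flow directly into them. Otherwise compute into the internal
// u_/v_ buffers and either copy them out separately or merge them into a single
// 2-channel flow field returned through _flow1.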
if (_flow2.needed() && _flow1.kind() == _InputArray::CUDA_GPU_MAT && _flow2.kind() == _InputArray::CUDA_GPU_MAT)
{
impl(input0, input1, _flow1.getGpuMatRef(), _flow2.getGpuMatRef());
return;
}
impl(input0, input1, u_, v_);
if (_flow2.needed())
{
arrCopy(u_, _flow1);
arrCopy(v_, _flow2);
}
else
{
GpuMat src[] = {u_, v_};
merge(src, 2, flow_);
arrCopy(flow_, _flow1);
}
}
void GpuOpticalFlow::collectGarbage()
{
for (int i = 0; i < 6; ++i)
buf_[i].release();
u_.release();
v_.release();
flow_.release();
}
}
///////////////////////////////////////////////////////////////////
// Brox_CUDA
namespace
{
class Brox_CUDA : public GpuOpticalFlow, public virtual cv::superres::BroxOpticalFlow
{
public:
Brox_CUDA();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) CV_OVERRIDE;
void collectGarbage() CV_OVERRIDE;
inline double getAlpha() const CV_OVERRIDE { return alpha_; }
inline void setAlpha(double val) CV_OVERRIDE { alpha_ = val; }
inline double getGamma() const CV_OVERRIDE { return gamma_; }
inline void setGamma(double val) CV_OVERRIDE { gamma_ = val; }
inline double getScaleFactor() const CV_OVERRIDE { return scaleFactor_; }
inline void setScaleFactor(double val) CV_OVERRIDE { scaleFactor_ = val; }
inline int getInnerIterations() const CV_OVERRIDE { return innerIterations_; }
inline void setInnerIterations(int val) CV_OVERRIDE { innerIterations_ = val; }
inline int getOuterIterations() const CV_OVERRIDE { return outerIterations_; }
inline void setOuterIterations(int val) CV_OVERRIDE { outerIterations_ = val; }
inline int getSolverIterations() const CV_OVERRIDE { return solverIterations_; }
inline void setSolverIterations(int val) CV_OVERRIDE { solverIterations_ = val; }
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) CV_OVERRIDE;
private:
double alpha_;
double gamma_;
double scaleFactor_;
int innerIterations_;
int outerIterations_;
int solverIterations_;
Ptr<cuda::BroxOpticalFlow> alg_;
};
Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1)
{
alg_ = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10);
alpha_ = alg_->getFlowSmoothness();
gamma_ = alg_->getGradientConstancyImportance();
scaleFactor_ = alg_->getPyramidScaleFactor();
innerIterations_ = alg_->getInnerIterations();
outerIterations_ = alg_->getOuterIterations();
solverIterations_ = alg_->getSolverIterations();
}
void Brox_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void Brox_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_->setFlowSmoothness(alpha_);
alg_->setGradientConstancyImportance(gamma_);
alg_->setPyramidScaleFactor(scaleFactor_);
alg_->setInnerIterations(innerIterations_);
alg_->setOuterIterations(outerIterations_);
alg_->setSolverIterations(solverIterations_);
GpuMat flow;
alg_->calc(input0, input1, flow);
GpuMat flows[2];
cuda::split(flow, flows);
dst1 = flows[0];
dst2 = flows[1];
}
void Brox_CUDA::collectGarbage()
{
alg_ = cuda::BroxOpticalFlow::create(alpha_, gamma_, scaleFactor_, innerIterations_, outerIterations_, solverIterations_);
GpuOpticalFlow::collectGarbage();
}
}
Ptr<cv::superres::BroxOpticalFlow> cv::superres::createOptFlow_Brox_CUDA()
{
return makePtr<Brox_CUDA>();
}
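// Usage sketch (illustrative only; dFrame0/dFrame1 are placeholder GpuMat frames): the
// GpuOpticalFlow base converts the inputs to the single-channel floating-point format
// Brox expects before impl() runs, and the result can stay on the GPU.
//
//     cv::Ptr<cv::superres::BroxOpticalFlow> brox = cv::superres::createOptFlow_Brox_CUDA();
//     brox->setScaleFactor(0.5);
//     cv::cuda::GpuMat dFlowX, dFlowY;
//     brox->calc(dFrame0, dFrame1, dFlowX, dFlowY);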
///////////////////////////////////////////////////////////////////
// PyrLK_CUDA
namespace
{
class PyrLK_CUDA : public GpuOpticalFlow, public cv::superres::PyrLKOpticalFlow
{
public:
PyrLK_CUDA();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) CV_OVERRIDE;
void collectGarbage() CV_OVERRIDE;
inline int getWindowSize() const CV_OVERRIDE { return winSize_; }
inline void setWindowSize(int val) CV_OVERRIDE { winSize_ = val; }
inline int getMaxLevel() const CV_OVERRIDE { return maxLevel_; }
inline void setMaxLevel(int val) CV_OVERRIDE { maxLevel_ = val; }
inline int getIterations() const CV_OVERRIDE { return iterations_; }
inline void setIterations(int val) CV_OVERRIDE { iterations_ = val; }
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) CV_OVERRIDE;
private:
int winSize_;
int maxLevel_;
int iterations_;
Ptr<cuda::DensePyrLKOpticalFlow> alg_;
};
PyrLK_CUDA::PyrLK_CUDA() : GpuOpticalFlow(CV_8UC1)
{
alg_ = cuda::DensePyrLKOpticalFlow::create();
winSize_ = alg_->getWinSize().width;
maxLevel_ = alg_->getMaxLevel();
iterations_ = alg_->getNumIters();
}
void PyrLK_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void PyrLK_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_->setWinSize(Size(winSize_, winSize_));
alg_->setMaxLevel(maxLevel_);
alg_->setNumIters(iterations_);
GpuMat flow;
alg_->calc(input0, input1, flow);
GpuMat flows[2];
cuda::split(flow, flows);
dst1 = flows[0];
dst2 = flows[1];
}
void PyrLK_CUDA::collectGarbage()
{
alg_ = cuda::DensePyrLKOpticalFlow::create();
GpuOpticalFlow::collectGarbage();
}
}
Ptr<cv::superres::PyrLKOpticalFlow> cv::superres::createOptFlow_PyrLK_CUDA()
{
return makePtr<PyrLK_CUDA>();
}
///////////////////////////////////////////////////////////////////
// Farneback_CUDA
namespace
{
class Farneback_CUDA : public GpuOpticalFlow, public cv::superres::FarnebackOpticalFlow
{
public:
Farneback_CUDA();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) CV_OVERRIDE;
void collectGarbage() CV_OVERRIDE;
inline double getPyrScale() const CV_OVERRIDE { return pyrScale_; }
inline void setPyrScale(double val) CV_OVERRIDE { pyrScale_ = val; }
inline int getLevelsNumber() const CV_OVERRIDE { return numLevels_; }
inline void setLevelsNumber(int val) CV_OVERRIDE { numLevels_ = val; }
inline int getWindowSize() const CV_OVERRIDE { return winSize_; }
inline void setWindowSize(int val) CV_OVERRIDE { winSize_ = val; }
inline int getIterations() const CV_OVERRIDE { return numIters_; }
inline void setIterations(int val) CV_OVERRIDE { numIters_ = val; }
inline int getPolyN() const CV_OVERRIDE { return polyN_; }
inline void setPolyN(int val) CV_OVERRIDE { polyN_ = val; }
inline double getPolySigma() const CV_OVERRIDE { return polySigma_; }
inline void setPolySigma(double val) CV_OVERRIDE { polySigma_ = val; }
inline int getFlags() const CV_OVERRIDE { return flags_; }
inline void setFlags(int val) CV_OVERRIDE { flags_ = val; }
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) CV_OVERRIDE;
private:
double pyrScale_;
int numLevels_;
int winSize_;
int numIters_;
int polyN_;
double polySigma_;
int flags_;
Ptr<cuda::FarnebackOpticalFlow> alg_;
};
Farneback_CUDA::Farneback_CUDA() : GpuOpticalFlow(CV_8UC1)
{
alg_ = cuda::FarnebackOpticalFlow::create();
pyrScale_ = alg_->getPyrScale();
numLevels_ = alg_->getNumLevels();
winSize_ = alg_->getWinSize();
numIters_ = alg_->getNumIters();
polyN_ = alg_->getPolyN();
polySigma_ = alg_->getPolySigma();
flags_ = alg_->getFlags();
}
void Farneback_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void Farneback_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_->setPyrScale(pyrScale_);
alg_->setNumLevels(numLevels_);
alg_->setWinSize(winSize_);
alg_->setNumIters(numIters_);
alg_->setPolyN(polyN_);
alg_->setPolySigma(polySigma_);
alg_->setFlags(flags_);
GpuMat flow;
alg_->calc(input0, input1, flow);
GpuMat flows[2];
cuda::split(flow, flows);
dst1 = flows[0];
dst2 = flows[1];
}
void Farneback_CUDA::collectGarbage()
{
alg_ = cuda::FarnebackOpticalFlow::create();
GpuOpticalFlow::collectGarbage();
}
}
Ptr<cv::superres::FarnebackOpticalFlow> cv::superres::createOptFlow_Farneback_CUDA()
{
return makePtr<Farneback_CUDA>();
}
///////////////////////////////////////////////////////////////////
// DualTVL1_CUDA
namespace
{
class DualTVL1_CUDA : public GpuOpticalFlow, public cv::superres::DualTVL1OpticalFlow
{
public:
DualTVL1_CUDA();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2) CV_OVERRIDE;
void collectGarbage() CV_OVERRIDE;
inline double getTau() const CV_OVERRIDE { return tau_; }
inline void setTau(double val) CV_OVERRIDE { tau_ = val; }
inline double getLambda() const CV_OVERRIDE { return lambda_; }
inline void setLambda(double val) CV_OVERRIDE { lambda_ = val; }
inline double getTheta() const CV_OVERRIDE { return theta_; }
inline void setTheta(double val) CV_OVERRIDE { theta_ = val; }
inline int getScalesNumber() const CV_OVERRIDE { return nscales_; }
inline void setScalesNumber(int val) CV_OVERRIDE { nscales_ = val; }
inline int getWarpingsNumber() const CV_OVERRIDE { return warps_; }
inline void setWarpingsNumber(int val) CV_OVERRIDE { warps_ = val; }
inline double getEpsilon() const CV_OVERRIDE { return epsilon_; }
inline void setEpsilon(double val) CV_OVERRIDE { epsilon_ = val; }
inline int getIterations() const CV_OVERRIDE { return iterations_; }
inline void setIterations(int val) CV_OVERRIDE { iterations_ = val; }
inline bool getUseInitialFlow() const CV_OVERRIDE { return useInitialFlow_; }
inline void setUseInitialFlow(bool val) CV_OVERRIDE { useInitialFlow_ = val; }
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2) CV_OVERRIDE;
private:
double tau_;
double lambda_;
double theta_;
int nscales_;
int warps_;
double epsilon_;
int iterations_;
bool useInitialFlow_;
Ptr<cuda::OpticalFlowDual_TVL1> alg_;
};
DualTVL1_CUDA::DualTVL1_CUDA() : GpuOpticalFlow(CV_8UC1)
{
alg_ = cuda::OpticalFlowDual_TVL1::create();
tau_ = alg_->getTau();
lambda_ = alg_->getLambda();
theta_ = alg_->getTheta();
nscales_ = alg_->getNumScales();
warps_ = alg_->getNumWarps();
epsilon_ = alg_->getEpsilon();
iterations_ = alg_->getNumIterations();
useInitialFlow_ = alg_->getUseInitialFlow();
}
void DualTVL1_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void DualTVL1_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_->setTau(tau_);
alg_->setLambda(lambda_);
alg_->setTheta(theta_);
alg_->setNumScales(nscales_);
alg_->setNumWarps(warps_);
alg_->setEpsilon(epsilon_);
alg_->setNumIterations(iterations_);
alg_->setUseInitialFlow(useInitialFlow_);
GpuMat flow;
alg_->calc(input0, input1, flow);
GpuMat flows[2];
cuda::split(flow, flows);
dst1 = flows[0];
dst2 = flows[1];
}
void DualTVL1_CUDA::collectGarbage()
{
alg_ = cuda::OpticalFlowDual_TVL1::create();
GpuOpticalFlow::collectGarbage();
}
}
Ptr<cv::superres::DualTVL1OpticalFlow> cv::superres::createOptFlow_DualTVL1_CUDA()
{
return makePtr<DualTVL1_CUDA>();
}
#endif // HAVE_OPENCV_CUDAOPTFLOW
#ifndef __OPENCV_PRECOMP_H__
#define __OPENCV_PRECOMP_H__
#include <vector>
#include <limits>
#include "opencv2/opencv_modules.hpp"
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/core/opengl.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/core/private.hpp"
#include "opencv2/core/private.cuda.hpp"
#include "opencv2/core/ocl.hpp"
#ifdef HAVE_OPENCV_CUDAARITHM
# include "opencv2/cudaarithm.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAWARPING
# include "opencv2/cudawarping.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAFILTERS
# include "opencv2/cudafilters.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
#ifdef HAVE_OPENCV_CUDAOPTFLOW
# include "opencv2/cudaoptflow.hpp"
#endif
#ifdef HAVE_OPENCV_CUDACODEC
# include "opencv2/cudacodec.hpp"
#endif
#ifdef HAVE_OPENCV_VIDEOIO
#include "opencv2/videoio.hpp"
#endif
#include "opencv2/superres.hpp"
#include "opencv2/superres/optical_flow.hpp"
#include "input_array_utility.hpp"
#include "ring_buffer.hpp"
#include "opencv2/core/private.hpp"
#endif /* __OPENCV_PRECOMP_H__ */
#ifndef __RING_BUFFER_HPP__
#define __RING_BUFFER_HPP__
#include "precomp.hpp"
namespace cv
{
namespace superres
{
namespace detail
{
template <typename T, class A>
inline const T& at(int index, const std::vector<T, A>& items)
{
const int len = static_cast<int>(items.size());
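// Wrap the index into [0, len) so the vector behaves as a ring buffer: negative indices
// are shifted up by a whole number of lengths, and indices past the end wrap around with
// a plain modulo.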
if (index < 0)
index -= ((index - len + 1) / len) * len;
if (index >= len)
index %= len;
return items[index];
}
template <typename T, class A>
inline T& at(int index, std::vector<T, A>& items)
{
const int len = static_cast<int>(items.size());
if (index < 0)
index -= ((index - len + 1) / len) * len;
if (index >= len)
index %= len;
return items[index];
}
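// Usage sketch (illustrative only; storePos is a placeholder running index):
//
//     std::vector<cv::Mat> frames(5);
//     cv::Mat& prev = at(storePos - 1, frames);   // wraps to frames[4] when storePos == 0
//     cv::Mat& next = at(storePos + 1, frames);   // wraps to frames[0] when storePos == 4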
}
}
}
#endif // __RING_BUFFER_HPP__
#include "precomp.hpp"
using namespace cv;
using namespace cv::superres;
cv::superres::SuperResolution::SuperResolution()
{
frameSource_ = createFrameSource_Empty();
firstCall_ = true;
isUmat_ = false;
}
void cv::superres::SuperResolution::setInput(const Ptr<FrameSource>& frameSource)
{
frameSource_ = frameSource;
firstCall_ = true;
isUmat_ = false;
}
void cv::superres::SuperResolution::nextFrame(OutputArray frame)
{
CV_INSTRUMENT_REGION();
isUmat_ = frame.isUMat();
if (firstCall_)
{
initImpl(frameSource_);
firstCall_ = false;
}
processImpl(frameSource_, frame);
}
void cv::superres::SuperResolution::reset()
{
frameSource_->reset();
firstCall_ = true;
isUmat_ = false;
}
void cv::superres::SuperResolution::collectGarbage()
{
}
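// End-to-end usage sketch (illustrative only; "input.avi" is a placeholder path): the
// driver above lazily initialises the algorithm on the first nextFrame() call and then
// keeps pulling frames from the configured source.
//
//     cv::Ptr<cv::superres::SuperResolution> sr = cv::superres::createSuperResolution_BTVL1();
//     sr->setScale(2);
//     sr->setInput(cv::superres::createFrameSource_Video("input.avi"));
//     for (cv::Mat result;;)
//     {
//         sr->nextFrame(result);
//         if (result.empty())
//             break;
//         // ... display or write result
//     }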
#include "test_precomp.hpp"
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
CV_TEST_MAIN("superres")
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/superres.hpp"
#endif
#include "test_precomp.hpp"
#include "cvconfig.h"
#include "../src/input_array_utility.hpp"
#include "opencv2/ts/ocl_test.hpp"
namespace opencv_test {
#ifdef HAVE_VIDEO_INPUT
namespace {
class AllignedFrameSource : public cv::superres::FrameSource
{
public:
AllignedFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale);
void nextFrame(cv::OutputArray frame);
void reset();
private:
cv::Ptr<cv::superres::FrameSource> base_;
cv::Mat origFrame_;
int scale_;
};
AllignedFrameSource::AllignedFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale) :
base_(base), scale_(scale)
{
CV_Assert( base_ );
}
void AllignedFrameSource::nextFrame(cv::OutputArray frame)
{
base_->nextFrame(origFrame_);
if (origFrame_.rows % scale_ == 0 && origFrame_.cols % scale_ == 0)
cv::superres::arrCopy(origFrame_, frame);
else
{
cv::Rect ROI(0, 0, (origFrame_.cols / scale_) * scale_, (origFrame_.rows / scale_) * scale_);
cv::superres::arrCopy(origFrame_(ROI), frame);
}
}
void AllignedFrameSource::reset()
{
base_->reset();
}
class DegradeFrameSource : public cv::superres::FrameSource
{
public:
DegradeFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale);
void nextFrame(cv::OutputArray frame);
void reset();
private:
cv::Ptr<cv::superres::FrameSource> base_;
cv::Mat origFrame_;
cv::Mat blurred_;
cv::Mat deg_;
double iscale_;
};
DegradeFrameSource::DegradeFrameSource(const cv::Ptr<cv::superres::FrameSource>& base, int scale) :
base_(base), iscale_(1.0 / scale)
{
CV_Assert( base_ );
}
static void addGaussNoise(cv::OutputArray _image, double sigma)
{
int type = _image.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
cv::Mat noise(_image.size(), CV_32FC(cn));
cvtest::TS::ptr()->get_rng().fill(noise, cv::RNG::NORMAL, 0.0, sigma);
cv::addWeighted(_image, 1.0, noise, 1.0, 0.0, _image, depth);
}
static void addSpikeNoise(cv::OutputArray _image, int frequency)
{
cv::Mat_<uchar> mask(_image.size(), 0);
for (int y = 0; y < mask.rows; ++y)
for (int x = 0; x < mask.cols; ++x)
if (cvtest::TS::ptr()->get_rng().uniform(0, frequency) < 1)
mask(y, x) = 255;
_image.setTo(cv::Scalar::all(255), mask);
}
void DegradeFrameSource::nextFrame(cv::OutputArray frame)
{
base_->nextFrame(origFrame_);
cv::GaussianBlur(origFrame_, blurred_, cv::Size(5, 5), 0);
cv::resize(blurred_, deg_, cv::Size(), iscale_, iscale_, cv::INTER_NEAREST);
addGaussNoise(deg_, 10.0);
addSpikeNoise(deg_, 500);
cv::superres::arrCopy(deg_, frame);
}
void DegradeFrameSource::reset()
{
base_->reset();
}
double MSSIM(cv::InputArray _i1, cv::InputArray _i2)
{
const double C1 = 6.5025;
const double C2 = 58.5225;
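// C1 and C2 are the standard SSIM stabilisation constants for 8-bit data:
// (0.01 * 255)^2 and (0.03 * 255)^2 respectively.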
const int depth = CV_32F;
cv::Mat I1, I2;
_i1.getMat().convertTo(I1, depth);
_i2.getMat().convertTo(I2, depth);
cv::Mat I2_2 = I2.mul(I2); // I2^2
cv::Mat I1_2 = I1.mul(I1); // I1^2
cv::Mat I1_I2 = I1.mul(I2); // I1 * I2
cv::Mat mu1, mu2;
cv::GaussianBlur(I1, mu1, cv::Size(11, 11), 1.5);
cv::GaussianBlur(I2, mu2, cv::Size(11, 11), 1.5);
cv::Mat mu1_2 = mu1.mul(mu1);
cv::Mat mu2_2 = mu2.mul(mu2);
cv::Mat mu1_mu2 = mu1.mul(mu2);
cv::Mat sigma1_2, sigma2_2, sigma12;
cv::GaussianBlur(I1_2, sigma1_2, cv::Size(11, 11), 1.5);
sigma1_2 -= mu1_2;
cv::GaussianBlur(I2_2, sigma2_2, cv::Size(11, 11), 1.5);
sigma2_2 -= mu2_2;
cv::GaussianBlur(I1_I2, sigma12, cv::Size(11, 11), 1.5);
sigma12 -= mu1_mu2;
cv::Mat t1, t2;
cv::Mat numerator;
cv::Mat denominator;
// t3 = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))
t1 = 2 * mu1_mu2 + C1;
t2 = 2 * sigma12 + C2;
numerator = t1.mul(t2);
// t1 =((mu1_2 + mu2_2 + C1).*(sigma1_2 + sigma2_2 + C2))
t1 = mu1_2 + mu2_2 + C1;
t2 = sigma1_2 + sigma2_2 + C2;
denominator = t1.mul(t2);
// ssim_map = numerator./denominator;
cv::Mat ssim_map;
cv::divide(numerator, denominator, ssim_map);
// mssim = average of ssim map
cv::Scalar mssim = cv::mean(ssim_map);
if (_i1.channels() == 1)
return mssim[0];
return (mssim[0] + mssim[1] + mssim[2]) / 3; // average over the three colour channels (element 3 of the Scalar is unused)
}
class SuperResolution : public testing::Test
{
public:
template <typename T>
void RunTest(cv::Ptr<cv::superres::SuperResolution> superRes);
};
template <typename T>
void SuperResolution::RunTest(cv::Ptr<cv::superres::SuperResolution> superRes)
{
const std::string inputVideoName = cvtest::TS::ptr()->get_data_path() + "car.avi";
const int scale = 2;
const int iterations = 100;
const int temporalAreaRadius = 2;
ASSERT_FALSE( superRes.empty() );
const int btvKernelSize = superRes->getKernelSize();
superRes->setScale(scale);
superRes->setIterations(iterations);
superRes->setTemporalAreaRadius(temporalAreaRadius);
cv::Ptr<cv::superres::FrameSource> goldSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale));
cv::Ptr<cv::superres::FrameSource> lowResSource(new DegradeFrameSource(
cv::makePtr<AllignedFrameSource>(cv::superres::createFrameSource_Video(inputVideoName), scale), scale));
// skip first frame
cv::Mat frame;
lowResSource->nextFrame(frame);
goldSource->nextFrame(frame);
cv::Rect inner(btvKernelSize, btvKernelSize, frame.cols - 2 * btvKernelSize, frame.rows - 2 * btvKernelSize);
superRes->setInput(lowResSource);
double srAvgMSSIM = 0.0;
const int count = 10;
cv::Mat goldFrame;
T superResFrame;
for (int i = 0; i < count; ++i)
{
goldSource->nextFrame(goldFrame);
ASSERT_FALSE( goldFrame.empty() );
superRes->nextFrame(superResFrame);
ASSERT_FALSE( superResFrame.empty() );
const double srMSSIM = MSSIM(goldFrame(inner), superResFrame);
srAvgMSSIM += srMSSIM;
}
srAvgMSSIM /= count;
EXPECT_GE( srAvgMSSIM, 0.5 );
}
TEST_F(SuperResolution, BTVL1)
{
RunTest<cv::Mat>(cv::superres::createSuperResolution_BTVL1());
}
#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) && defined(HAVE_OPENCV_CUDAFILTERS)
TEST_F(SuperResolution, BTVL1_CUDA)
{
RunTest<cv::Mat>(cv::superres::createSuperResolution_BTVL1_CUDA());
}
#endif
} // namespace
#ifdef HAVE_OPENCL
namespace ocl {
OCL_TEST_F(SuperResolution, BTVL1)
{
RunTest<cv::UMat>(cv::superres::createSuperResolution_BTVL1());
}
} // namespace opencv_test::ocl
#endif
#endif // HAVE_VIDEO_INPUT
} // namespace opencv_test