Commit bfb390e8 authored by Vladislav Vinogradov's avatar Vladislav Vinogradov

added gpu::FGDStatModel (Background/foreground segmentation)

parent 3c16c9c9
......@@ -3,7 +3,7 @@ if(ANDROID OR IOS)
endif()
set(the_description "GPU-accelerated Computer Vision")
ocv_add_module(gpu opencv_imgproc opencv_calib3d opencv_objdetect opencv_video opencv_nonfree)
ocv_add_module(gpu opencv_imgproc opencv_calib3d opencv_objdetect opencv_video opencv_nonfree opencv_legacy)
ocv_module_include_directories("${CMAKE_CURRENT_SOURCE_DIR}/src/cuda")
......@@ -50,20 +50,20 @@ if (HAVE_CUDA)
ocv_cuda_compile(cuda_objs ${lib_cuda} ${ncv_cuda})
#CUDA_BUILD_CLEAN_TARGET()
set(cuda_link_libs ${CUDA_LIBRARIES} ${CUDA_npp_LIBRARY})
if(NOT APPLE)
unset(CUDA_nvcuvid_LIBRARY CACHE)
find_cuda_helper_libs(nvcuvid)
set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvid_LIBRARY})
endif()
if(WIN32)
unset(CUDA_nvcuvenc_LIBRARY CACHE)
find_cuda_helper_libs(nvcuvenc)
set(cuda_link_libs ${cuda_link_libs} ${CUDA_nvcuvenc_LIBRARY})
endif()
if(NOT APPLE AND WITH_FFMPEG)
......
......@@ -294,6 +294,104 @@ Interpolates frames (images) using provided optical flow (displacement field).
gpu::FGDStatModel
-----------------
.. ocv:class:: gpu::FGDStatModel
Class used for background/foreground segmentation. ::
class FGDStatModel
{
public:
struct Params
{
...
};
explicit FGDStatModel(int out_cn = 3);
explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3);
~FGDStatModel();
void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params());
void release();
int update(const cv::gpu::GpuMat& curFrame);
// 8UC3 or 8UC4 reference background image
cv::gpu::GpuMat background;
// 8UC1 foreground image
cv::gpu::GpuMat foreground;
std::vector< std::vector<cv::Point> > foreground_regions;
};
The class discriminates between foreground and background pixels by building and maintaining a model of the background. Any pixel that does not fit this model is deemed foreground. The class implements the algorithm described in [FGD2003]_.
The results are available through the class fields:
.. ocv:member:: cv::gpu::GpuMat background
The output background image.
.. ocv:member:: cv::gpu::GpuMat foreground
The output foreground mask as an 8-bit binary image.
.. ocv:member:: std::vector< std::vector<cv::Point> > foreground_regions
The output foreground regions calculated by :ocv:func:`findContours`.
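A minimal usage sketch, assuming a ``cv::VideoCapture`` named ``cap`` supplies the frames: ::

    cv::Mat frame;
    cap >> frame; // first frame, CV_8UC3 or CV_8UC4

    cv::gpu::GpuMat d_frame(frame);
    cv::gpu::FGDStatModel model(d_frame);

    cv::Mat foreground;
    for (;;)
    {
        cap >> frame;
        if (frame.empty())
            break;

        d_frame.upload(frame);
        model.update(d_frame);
        model.foreground.download(foreground); // 8UC1 binary mask
    }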
gpu::FGDStatModel::FGDStatModel
-------------------------------
Constructors.
.. ocv:function:: gpu::FGDStatModel::FGDStatModel(int out_cn = 3)
.. ocv:function:: gpu::FGDStatModel::FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3)
:param firstFrame: First frame from the video stream. Supports 3- and 4-channel input ( ``CV_8UC3`` and ``CV_8UC4`` ).
:param params: Algorithm parameters. See [FGD2003]_ for details.
:param out_cn: Number of channels in the output result and internal buffers. Can be 3 or 4. The 4-channel version requires more memory but works a bit faster.
.. seealso:: :ocv:func:`gpu::FGDStatModel::create`
gpu::FGDStatModel::create
-------------------------
Initializes background model.
.. ocv:function:: void gpu::FGDStatModel::create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params())
:param firstFrame: First frame from the video stream. Supports 3- and 4-channel input ( ``CV_8UC3`` and ``CV_8UC4`` ).
:param params: Algorithm parameters. See [FGD2003]_ for details.
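The defaults follow [FGD2003]_, and individual fields can be overridden before the call. A minimal sketch, assuming ``d_firstFrame`` is a ``GpuMat`` holding the first frame (the field names come from the ``Params`` struct): ::

    cv::gpu::FGDStatModel::Params params;   // default parameters
    params.perform_morphing = 2;            // more erode-dilate-erode cleanup iterations
    params.minArea = 25.0f;                 // discard smaller foreground blobs

    cv::gpu::FGDStatModel model;
    model.create(d_firstFrame, params);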
gpu::FGDStatModel::release
--------------------------
Releases all internal buffers' memory.
.. ocv:function:: void gpu::FGDStatModel::release()
gpu::FGDStatModel::update
--------------------------
Updates the background model and returns the number of foreground regions.
.. ocv:function:: int gpu::FGDStatModel::update(const cv::gpu::GpuMat& curFrame)
:param curFrame: Next video frame.
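Besides the foreground mask, the detected regions are exposed as contours in ``foreground_regions`` and can be visualized with :ocv:func:`drawContours`. A sketch, assuming the ``model`` and ``d_frame`` objects from the examples above: ::

    model.update(d_frame);

    cv::Mat result;
    model.background.download(result);

    for (size_t i = 0; i < model.foreground_regions.size(); ++i)
        cv::drawContours(result, model.foreground_regions, (int)i, cv::Scalar(0, 255, 0));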
gpu::VideoWriter_GPU
---------------------
Video writer class.
......@@ -731,3 +829,4 @@ Parse next video frame. Implementation must call this method after new frame was
.. [Brox2004] T. Brox, A. Bruhn, N. Papenberg, J. Weickert. *High accuracy optical flow estimation based on a theory for warping*. ECCV 2004.
.. [FGD2003] Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian. *Foreground Object Detection from Videos Containing Complex Background*. ACM MM2003 9p, 2003.
......@@ -1891,7 +1891,71 @@ CV_EXPORTS void interpolateFrames(const GpuMat& frame0, const GpuMat& frame1,
CV_EXPORTS void createOpticalFlowNeedleMap(const GpuMat& u, const GpuMat& v, GpuMat& vertex, GpuMat& colors);
////////////////////////////////// Video Encoding //////////////////////////////////////////
//////////////////////// Background/foreground segmentation ////////////////////////
// Foreground Object Detection from Videos Containing Complex Background.
// Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
// ACM MM2003 9p
class CV_EXPORTS FGDStatModel
{
public:
struct CV_EXPORTS Params
{
int Lc; // Quantized levels per 'color' component. Power of two, typically 32, 64 or 128.
int N1c; // Number of color vectors used to model normal background color variation at a given pixel.
int N2c; // Number of color vectors retained at a given pixel. Must be > N1c, typically ~ 5/3 of N1c.
// Used to allow the first N1c vectors to adapt over time to changing background.
int Lcc; // Quantized levels per 'color co-occurrence' component. Power of two, typically 16, 32 or 64.
int N1cc; // Number of color co-occurrence vectors used to model normal background color variation at a given pixel.
int N2cc; // Number of color co-occurrence vectors retained at a given pixel. Must be > N1cc, typically ~ 5/3 of N1cc.
// Used to allow the first N1cc vectors to adapt over time to changing background.
bool is_obj_without_holes; // If TRUE we ignore holes within foreground blobs. Defaults to TRUE.
int perform_morphing; // Number of erode-dilate-erode foreground-blob cleanup iterations.
// These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1.
float alpha1; // How quickly we forget old background pixel values seen. Typically set to 0.1.
float alpha2; // "Controls speed of feature learning". Depends on T. Typical value circa 0.005.
float alpha3; // Alternate to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1.
float delta; // Affects color and color co-occurrence quantization, typically set to 2.
float T; // A percentage value which determines when new features can be recognized as new background. (Typically 0.9).
float minArea; // Discard foreground blobs whose bounding box is smaller than this threshold.
// default Params
Params();
};
// out_cn - number of channels in the output result (can be 3 or 4)
// the 4-channel version requires more memory, but is a bit faster
explicit FGDStatModel(int out_cn = 3);
explicit FGDStatModel(const cv::gpu::GpuMat& firstFrame, const Params& params = Params(), int out_cn = 3);
~FGDStatModel();
void create(const cv::gpu::GpuMat& firstFrame, const Params& params = Params());
void release();
int update(const cv::gpu::GpuMat& curFrame);
// 8UC3 or 8UC4 reference background image
cv::gpu::GpuMat background;
// 8UC1 foreground image
cv::gpu::GpuMat foreground;
std::vector< std::vector<cv::Point> > foreground_regions;
private:
FGDStatModel(const FGDStatModel&);
FGDStatModel& operator=(const FGDStatModel&);
class Impl;
std::auto_ptr<Impl> impl_;
};
////////////////////////////////// Video Encoding //////////////////////////////////
// Works only under Windows
// Supports only H264 video codec and AVI files
......
......@@ -271,4 +271,120 @@ GPU_PERF_TEST_1(FarnebackOpticalFlowTest, cv::gpu::DeviceInfo)
INSTANTIATE_TEST_CASE_P(Video, FarnebackOpticalFlowTest, ALL_DEVICES);
//////////////////////////////////////////////////////
// FGDStatModel
GPU_PERF_TEST(FGDStatModel, cv::gpu::DeviceInfo, std::string)
{
cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
std::string inputFile = perf::TestBase::getDataPath(std::string("gpu/video/") + GET_PARAM(1));
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
cv::gpu::GpuMat d_frame(frame);
cv::gpu::FGDStatModel d_model(4);
d_model.create(d_frame);
declare.time(10);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
startTimer(); next();
d_model.update(d_frame);
stopTimer();
}
}
INSTANTIATE_TEST_CASE_P(Video, FGDStatModel, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
//////////////////////////////////////////////////////
// VideoWriter
#ifdef WIN32
GPU_PERF_TEST(VideoWriter, cv::gpu::DeviceInfo, std::string)
{
const double FPS = 25.0;
cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
std::string inputFile = perf::TestBase::getDataPath(std::string("gpu/video/") + GET_PARAM(1));
std::string outputFile = inputFile.substr(0, inputFile.find('.')) + "_test.avi";
cv::VideoCapture reader(inputFile);
ASSERT_TRUE( reader.isOpened() );
cv::gpu::VideoWriter_GPU d_writer;
cv::Mat frame;
cv::gpu::GpuMat d_frame;
declare.time(10);
for (int i = 0; i < 10; ++i)
{
reader >> frame;
ASSERT_FALSE(frame.empty());
d_frame.upload(frame);
if (!d_writer.isOpened())
d_writer.open(outputFile, frame.size(), FPS);
startTimer(); next();
d_writer.write(d_frame);
stopTimer();
}
}
INSTANTIATE_TEST_CASE_P(Video, VideoWriter, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
#endif // WIN32
//////////////////////////////////////////////////////
// VideoReader
GPU_PERF_TEST(VideoReader, cv::gpu::DeviceInfo, std::string)
{
cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
std::string inputFile = perf::TestBase::getDataPath(std::string("gpu/video/") + GET_PARAM(1));
cv::gpu::VideoReader_GPU reader(inputFile);
ASSERT_TRUE( reader.isOpened() );
cv::gpu::GpuMat frame;
reader.read(frame);
declare.time(20);
TEST_CYCLE_N(10)
{
reader.read(frame);
}
}
INSTANTIATE_TEST_CASE_P(Video, VideoReader, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
#endif
......@@ -16,6 +16,7 @@
#include "opencv2/video/video.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "perf_utility.hpp"
......
......@@ -105,4 +105,118 @@ GPU_PERF_TEST_1(FarnebackOpticalFlowTest, cv::gpu::DeviceInfo)
INSTANTIATE_TEST_CASE_P(Video, FarnebackOpticalFlowTest, ALL_DEVICES);
//////////////////////////////////////////////////////
// FGDStatModel
namespace cv
{
template<> void Ptr<CvBGStatModel>::delete_obj()
{
cvReleaseBGStatModel(&obj);
}
}
GPU_PERF_TEST(FGDStatModel, cv::gpu::DeviceInfo, std::string)
{
std::string inputFile = perf::TestBase::getDataPath(std::string("gpu/video/") + GET_PARAM(1));
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
declare.time(60);
for (int i = 0; i < 10; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
startTimer();
next();
cvUpdateBGStatModel(&ipl_frame, model);
stopTimer();
}
}
INSTANTIATE_TEST_CASE_P(Video, FGDStatModel, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
//////////////////////////////////////////////////////
// VideoWriter
#ifdef WIN32
GPU_PERF_TEST(VideoWriter, cv::gpu::DeviceInfo, std::string)
{
const double FPS = 25.0;
std::string inputFile = perf::TestBase::getDataPath(std::string("gpu/video/") + GET_PARAM(1));
std::string outputFile = inputFile.substr(0, inputFile.find('.')) + "_test.avi";
cv::VideoCapture reader(inputFile);
ASSERT_TRUE( reader.isOpened() );
cv::VideoWriter writer;
cv::Mat frame;
declare.time(30);
for (int i = 0; i < 10; ++i)
{
reader >> frame;
ASSERT_FALSE(frame.empty());
if (!writer.isOpened())
writer.open(outputFile, CV_FOURCC('H', '2', '6', '4'), frame.size(), FPS);
startTimer(); next();
writer.write(frame);
stopTimer();
}
}
INSTANTIATE_TEST_CASE_P(Video, VideoWriter, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
#endif // WIN32
//////////////////////////////////////////////////////
// VideoReader
GPU_PERF_TEST(VideoReader, cv::gpu::DeviceInfo, std::string)
{
std::string inputFile = perf::TestBase::getDataPath(std::string("gpu/video/") + GET_PARAM(1));
cv::VideoCapture reader(inputFile);
ASSERT_TRUE( reader.isOpened() );
cv::Mat frame;
reader >> frame;
declare.time(20);
TEST_CYCLE_N(10)
{
reader >> frame;
}
}
INSTANTIATE_TEST_CASE_P(Video, VideoReader, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
#endif
#ifndef __FGD_BGFG_COMMON_HPP__
#define __FGD_BGFG_COMMON_HPP__
#include "opencv2/core/devmem2d.hpp"
namespace bgfg
{
struct BGPixelStat
{
public:
#ifdef __CUDACC__
__device__ float& Pbc(int i, int j);
__device__ float& Pbcc(int i, int j);
__device__ unsigned char& is_trained_st_model(int i, int j);
__device__ unsigned char& is_trained_dyn_model(int i, int j);
__device__ float& PV_C(int i, int j, int k);
__device__ float& PVB_C(int i, int j, int k);
template <typename T> __device__ T& V_C(int i, int j, int k);
__device__ float& PV_CC(int i, int j, int k);
__device__ float& PVB_CC(int i, int j, int k);
template <typename T> __device__ T& V1_CC(int i, int j, int k);
template <typename T> __device__ T& V2_CC(int i, int j, int k);
#endif
int rows_;
unsigned char* Pbc_data_;
size_t Pbc_step_;
unsigned char* Pbcc_data_;
size_t Pbcc_step_;
unsigned char* is_trained_st_model_data_;
size_t is_trained_st_model_step_;
unsigned char* is_trained_dyn_model_data_;
size_t is_trained_dyn_model_step_;
unsigned char* ctable_Pv_data_;
size_t ctable_Pv_step_;
unsigned char* ctable_Pvb_data_;
size_t ctable_Pvb_step_;
unsigned char* ctable_v_data_;
size_t ctable_v_step_;
unsigned char* cctable_Pv_data_;
size_t cctable_Pv_step_;
unsigned char* cctable_Pvb_data_;
size_t cctable_Pvb_step_;
unsigned char* cctable_v1_data_;
size_t cctable_v1_step_;
unsigned char* cctable_v2_data_;
size_t cctable_v2_step_;
};
#ifdef __CUDACC__
__device__ __forceinline__ float& BGPixelStat::Pbc(int i, int j)
{
return *((float*)(Pbc_data_ + i * Pbc_step_) + j);
}
__device__ __forceinline__ float& BGPixelStat::Pbcc(int i, int j)
{
return *((float*)(Pbcc_data_ + i * Pbcc_step_) + j);
}
__device__ __forceinline__ unsigned char& BGPixelStat::is_trained_st_model(int i, int j)
{
return *((unsigned char*)(is_trained_st_model_data_ + i * is_trained_st_model_step_) + j);
}
__device__ __forceinline__ unsigned char& BGPixelStat::is_trained_dyn_model(int i, int j)
{
return *((unsigned char*)(is_trained_dyn_model_data_ + i * is_trained_dyn_model_step_) + j);
}
__device__ __forceinline__ float& BGPixelStat::PV_C(int i, int j, int k)
{
return *((float*)(ctable_Pv_data_ + ((k * rows_) + i) * ctable_Pv_step_) + j);
}
__device__ __forceinline__ float& BGPixelStat::PVB_C(int i, int j, int k)
{
return *((float*)(ctable_Pvb_data_ + ((k * rows_) + i) * ctable_Pvb_step_) + j);
}
template <typename T> __device__ __forceinline__ T& BGPixelStat::V_C(int i, int j, int k)
{
return *((T*)(ctable_v_data_ + ((k * rows_) + i) * ctable_v_step_) + j);
}
__device__ __forceinline__ float& BGPixelStat::PV_CC(int i, int j, int k)
{
return *((float*)(cctable_Pv_data_ + ((k * rows_) + i) * cctable_Pv_step_) + j);
}
__device__ __forceinline__ float& BGPixelStat::PVB_CC(int i, int j, int k)
{
return *((float*)(cctable_Pvb_data_ + ((k * rows_) + i) * cctable_Pvb_step_) + j);
}
template <typename T> __device__ __forceinline__ T& BGPixelStat::V1_CC(int i, int j, int k)
{
return *((T*)(cctable_v1_data_ + ((k * rows_) + i) * cctable_v1_step_) + j);
}
template <typename T> __device__ __forceinline__ T& BGPixelStat::V2_CC(int i, int j, int k)
{
return *((T*)(cctable_v2_data_ + ((k * rows_) + i) * cctable_v2_step_) + j);
}
#endif
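// The accessors above index pitched device memory: each table stores step_ bytes
// per row, and the three-dimensional tables stack their k-th planes vertically,
// so element (i, j) of plane k lives at row (k * rows_) + i of one pitched buffer.
// A host-side allocation sketch consistent with this layout (an illustration only,
// not part of this header; cudaMallocPitch is the standard CUDA runtime API, and
// rows, cols and N are assumed frame dimensions and table depth):
//
//     BGPixelStat stat;
//     stat.rows_ = rows;
//
//     float* d_Pbc = 0;
//     size_t pitch = 0;
//     cudaMallocPitch((void**)&d_Pbc, &pitch, cols * sizeof(float), rows);
//     stat.Pbc_data_ = (unsigned char*)d_Pbc;
//     stat.Pbc_step_ = pitch;
//
//     // A 3D table with N planes needs rows * N pitched rows:
//     float* d_Pv = 0;
//     cudaMallocPitch((void**)&d_Pv, &pitch, cols * sizeof(float), rows * N);
//     stat.ctable_Pv_data_ = (unsigned char*)d_Pv;
//     stat.ctable_Pv_step_ = pitch;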
const int PARTIAL_HISTOGRAM_COUNT = 240;
const int HISTOGRAM_BIN_COUNT = 256;
template <typename PT, typename CT>
void calcDiffHistogram_gpu(cv::gpu::DevMem2Db prevFrame, cv::gpu::DevMem2Db curFrame,
unsigned int* hist0, unsigned int* hist1, unsigned int* hist2,
unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2,
int cc, cudaStream_t stream);
template <typename PT, typename CT>
void calcDiffThreshMask_gpu(cv::gpu::DevMem2Db prevFrame, cv::gpu::DevMem2Db curFrame, uchar3 bestThres, cv::gpu::DevMem2Db changeMask, cudaStream_t stream);
void setBGPixelStat(const BGPixelStat& stat);
template <typename PT, typename CT, typename OT>
void bgfgClassification_gpu(cv::gpu::DevMem2Db prevFrame, cv::gpu::DevMem2Db curFrame,
cv::gpu::DevMem2Db Ftd, cv::gpu::DevMem2Db Fbd, cv::gpu::DevMem2Db foreground,
int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
template <typename PT, typename CT, typename OT>
void updateBackgroundModel_gpu(cv::gpu::DevMem2Db prevFrame, cv::gpu::DevMem2Db curFrame,
cv::gpu::DevMem2Db Ftd, cv::gpu::DevMem2Db Fbd, cv::gpu::DevMem2Db foreground, cv::gpu::DevMem2Db background,
int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T,
cudaStream_t stream);
}
#endif // __FGD_BGFG_COMMON_HPP__
......@@ -68,6 +68,7 @@
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/core/internal.hpp"
#include "opencv2/video/video.hpp"
......
......@@ -62,6 +62,7 @@
#include "opencv2/ts/ts_perf.hpp"
#include "opencv2/gpu/gpu.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "utility.hpp"
#include "interpolation.hpp"
......
......@@ -3365,7 +3365,7 @@ TEST_P(Reduce, Rows)
cv::Mat dst_gold;
cv::reduce(src, dst_gold, 0, reduceOp, dst_depth);
EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 1e-2);
EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
}
TEST_P(Reduce, Cols)
......@@ -3381,7 +3381,7 @@ TEST_P(Reduce, Cols)
dst_gold.rows = 1;
dst_gold.step = dst_gold.cols * dst_gold.elemSize();
EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 1e-2);
EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
}
INSTANTIATE_TEST_CASE_P(GPU_Core, Reduce, testing::Combine(
......
......@@ -41,11 +41,9 @@
#include "precomp.hpp"
namespace {
//#define DUMP
/////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////
// BroxOpticalFlow
#define BROX_OPTICAL_FLOW_DUMP_FILE "opticalflow/brox_optical_flow.bin"
......@@ -130,7 +128,7 @@ TEST_P(BroxOpticalFlow, Regression)
INSTANTIATE_TEST_CASE_P(GPU_Video, BroxOpticalFlow, ALL_DEVICES);
/////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////
// GoodFeaturesToTrack
IMPLEMENT_PARAM_CLASS(MinDistance, double)
......@@ -207,7 +205,7 @@ INSTANTIATE_TEST_CASE_P(GPU_Video, GoodFeaturesToTrack, testing::Combine(
ALL_DEVICES,
testing::Values(MinDistance(0.0), MinDistance(3.0))));
/////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////
// PyrLKOpticalFlow
IMPLEMENT_PARAM_CLASS(UseGray, bool)
......@@ -306,7 +304,7 @@ INSTANTIATE_TEST_CASE_P(GPU_Video, PyrLKOpticalFlow, testing::Combine(
ALL_DEVICES,
testing::Values(UseGray(true), UseGray(false))));
/////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////
// FarnebackOpticalFlow
IMPLEMENT_PARAM_CLASS(PyrScale, double)
......@@ -413,7 +411,87 @@ TEST_P(OpticalFlowNan, Regression)
INSTANTIATE_TEST_CASE_P(GPU_Video, OpticalFlowNan, ALL_DEVICES);
/////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////
// FGDStatModel
namespace cv
{
template<> void Ptr<CvBGStatModel>::delete_obj()
{
cvReleaseBGStatModel(&obj);
}
}
PARAM_TEST_CASE(FGDStatModel, cv::gpu::DeviceInfo, std::string, Channels)
{
};
TEST_P(FGDStatModel, Accuracy)
{
cv::gpu::DeviceInfo devInfo = GET_PARAM(0);
cv::gpu::setDevice(devInfo.deviceID());
std::string inputFile = std::string(cvtest::TS::ptr()->get_data_path()) + "video/" + GET_PARAM(1);
int out_cn = GET_PARAM(2);
cv::VideoCapture cap(inputFile);
ASSERT_TRUE(cap.isOpened());
cv::Mat frame;
cap >> frame;
ASSERT_FALSE(frame.empty());
IplImage ipl_frame = frame;
cv::Ptr<CvBGStatModel> model(cvCreateFGDStatModel(&ipl_frame));
cv::gpu::GpuMat d_frame(frame);
cv::gpu::FGDStatModel d_model(out_cn);
d_model.create(d_frame);
cv::Mat h_background;
cv::Mat h_foreground;
cv::Mat h_background3;
cv::Mat backgroundDiff;
cv::Mat foregroundDiff;
for (int i = 0; i < 5; ++i)
{
cap >> frame;
ASSERT_FALSE(frame.empty());
ipl_frame = frame;
int gold_count = cvUpdateBGStatModel(&ipl_frame, model);
d_frame.upload(frame);
int count = d_model.update(d_frame);
ASSERT_EQ(gold_count, count);
cv::Mat gold_background(model->background);
cv::Mat gold_foreground(model->foreground);
if (out_cn == 3)
d_model.background.download(h_background3);
else
{
d_model.background.download(h_background);
cv::cvtColor(h_background, h_background3, cv::COLOR_BGRA2BGR);
}
d_model.foreground.download(h_foreground);
EXPECT_MAT_NEAR(gold_background, h_background3, 1.0);
EXPECT_MAT_NEAR(gold_foreground, h_foreground, 0.0);
}
}
INSTANTIATE_TEST_CASE_P(GPU_Video, FGDStatModel, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("768x576.avi")),
testing::Values(Channels(3), Channels(4))));
//////////////////////////////////////////////////////
// VideoWriter
#ifdef WIN32
......@@ -447,17 +525,13 @@ TEST_P(VideoWriter, Regression)
cv::gpu::VideoWriter_GPU d_writer;
cv::Mat frame;
std::vector<cv::Mat> frames;
cv::gpu::GpuMat d_frame;
for (int i = 1; i < 10; ++i)
for (int i = 0; i < 10; ++i)
{
reader >> frame;
ASSERT_FALSE(frame.empty());
if (frame.empty())
break;
frames.push_back(frame.clone());
d_frame.upload(frame);
if (!d_writer.isOpened())
......@@ -481,11 +555,11 @@ TEST_P(VideoWriter, Regression)
INSTANTIATE_TEST_CASE_P(GPU_Video, VideoWriter, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("VID00003-20100701-2204.mpg"), std::string("big_buck_bunny.mpg"))));
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));
#endif // WIN32
/////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////
// VideoReader
PARAM_TEST_CASE(VideoReader, cv::gpu::DeviceInfo, std::string)
......@@ -511,7 +585,7 @@ TEST_P(VideoReader, Regression)
cv::gpu::GpuMat frame;
for (int i = 0; i < 5; ++i)
for (int i = 0; i < 10; ++i)
{
ASSERT_TRUE( reader.read(frame) );
ASSERT_FALSE( frame.empty() );
......@@ -523,6 +597,4 @@ TEST_P(VideoReader, Regression)
INSTANTIATE_TEST_CASE_P(GPU_Video, VideoReader, testing::Combine(
ALL_DEVICES,
testing::Values(std::string("VID00003-20100701-2204.mpg"))));
} // namespace
testing::Values(std::string("768x576.avi"), std::string("1920x1080.avi"))));