Commit 3a1f85d4 authored by Andrey Kamaev

Remerged the 2.4.0 branch

parent 77717e1a
#include "cap_ffmpeg_impl_v2.hpp"
#include "cap_ffmpeg_impl.hpp"
@@ -56,7 +56,7 @@ version_patch = re.search("^\W*#\W*define\W+CV_SUBMINOR_VERSION\W+(\d+)\W*$", ver
# The short X.Y version.
version = version_major + '.' + version_minor
# The full version, including alpha/beta/rc tags.
release = version_major + '.' + version_minor + '.' + version_patch + "-beta"
release = version_major + '.' + version_minor + '.' + version_patch
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
......
@@ -49,6 +49,8 @@
#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, CAMERA_LOG_TAG, __VA_ARGS__))
#define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, CAMERA_LOG_TAG, __VA_ARGS__))
#include <dlfcn.h>
using namespace android;
void debugShowFPS()
@@ -200,18 +202,25 @@ protected:
return strstr(supported_modes, mode) != 0;
}
float getFocusDistance(int focus_distance_type){
if (focus_distance_type >= 0 && focus_distance_type < 3) {
float getFocusDistance(int focus_distance_type)
{
#if !defined(ANDROID_r2_2_0)
if (focus_distance_type >= 0 && focus_distance_type < 3)
{
float focus_distances[3];
const char* output = params.get(CameraParameters::KEY_FOCUS_DISTANCES);
int val_num = CameraHandler::split_float(output, focus_distances, ',', 3);
if(val_num == 3){
if(val_num == 3)
{
return focus_distances[focus_distance_type];
} else {
}
else
{
LOGE("Invalid focus distances.");
}
}
return -1;
#endif
return -1;
}
static int getModeNum(const char** modes, const int modes_num, const char* mode_name)
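getFocusDistance() above parses the comma-separated value of KEY_FOCUS_DISTANCES ("near,optimal,far") through CameraHandler::split_float, which this hunk does not show. A minimal standalone stand-in for that helper, under the assumption that it simply splits a delimited list of floats:

#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for CameraHandler::split_float: parse up to max_count
// floats separated by 'delim' from str into out; returns the number parsed.
static int split_float(const char* str, float* out, char delim, int max_count)
{
    int n = 0;
    while (str && n < max_count)
    {
        char* end = 0;
        out[n++] = std::strtof(str, &end);
        str = (end && *end == delim) ? end + 1 : 0;
    }
    return n;
}

int main()
{
    float d[3];
    int n = split_float("0.95,1.9,Infinity", d, ',', 3);
    std::printf("%d values: %f %f %f\n", n, d[0], d[1], d[2]);
    return 0;
}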
@@ -299,7 +308,9 @@ const char* CameraHandler::flashModesNames[ANDROID_CAMERA_FLASH_MODES_NUM] =
const char* CameraHandler::focusModesNames[ANDROID_CAMERA_FOCUS_MODES_NUM] =
{
CameraParameters::FOCUS_MODE_AUTO,
#if !defined(ANDROID_r2_2_0)
CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO,
#endif
CameraParameters::FOCUS_MODE_EDOF,
CameraParameters::FOCUS_MODE_FIXED,
CameraParameters::FOCUS_MODE_INFINITY
@@ -326,17 +337,61 @@ const char* CameraHandler::antibandingModesNames[ANDROID_CAMERA_ANTIBANDING_MODE
CameraHandler* CameraHandler::initCameraConnect(const CameraCallback& callback, int cameraId, void* userData, CameraParameters* prevCameraParameters)
{
LOGD("CameraHandler::initCameraConnect(%p, %d, %p, %p)", callback, cameraId, userData, prevCameraParameters);
typedef sp<Camera> (*Android22ConnectFuncType)();
typedef sp<Camera> (*Android23ConnectFuncType)(int);
typedef sp<Camera> (*Android3DConnectFuncType)(int, int);
enum {
CAMERA_SUPPORT_MODE_2D = 0x01, /* Camera Sensor supports 2D mode. */
CAMERA_SUPPORT_MODE_3D = 0x02, /* Camera Sensor supports 3D mode. */
CAMERA_SUPPORT_MODE_NONZSL = 0x04, /* Camera Sensor in NON-ZSL mode. */
CAMERA_SUPPORT_MODE_ZSL = 0x08 /* Camera Sensor supports ZSL mode. */
};
const char Android22ConnectName[] = "_ZN7android6Camera7connectEv";
const char Android23ConnectName[] = "_ZN7android6Camera7connectEi";
const char Android3DConnectName[] = "_ZN7android6Camera7connectEii";
LOGD("CameraHandler::initCameraConnect(%p, %d, %p, %p)", callback, cameraId, userData, prevCameraParameters);
sp<Camera> camera = 0;
void* CameraHALHandle = dlopen("libcamera_client.so", RTLD_LAZY);
if (!CameraHALHandle)
{
LOGE("Cannot link to \"libcamera_client.so\"");
return NULL;
}
// reset errors
dlerror();
#ifdef ANDROID_r2_2_0
camera = Camera::connect();
#else
/* This is 2.3 or higher. The connect method has a cameraId parameter */
camera = Camera::connect(cameraId);
#endif
if (Android22ConnectFuncType Android22Connect = (Android22ConnectFuncType)dlsym(CameraHALHandle, Android22ConnectName))
{
LOGD("Connecting to CameraService v 2.2");
camera = Android22Connect();
}
else if (Android23ConnectFuncType Android23Connect = (Android23ConnectFuncType)dlsym(CameraHALHandle, Android23ConnectName))
{
LOGD("Connecting to CameraService v 2.3");
camera = Android23Connect(cameraId);
}
else if (Android3DConnectFuncType Android3DConnect = (Android3DConnectFuncType)dlsym(CameraHALHandle, Android3DConnectName))
{
LOGD("Connecting to CameraService v 3D");
camera = Android3DConnect(cameraId, CAMERA_SUPPORT_MODE_2D);
}
else
{
dlclose(CameraHALHandle);
LOGE("Cannot connect to CameraService. Connect method was not found!");
return NULL;
}
dlclose(CameraHALHandle);
if ( 0 == camera.get() )
{
LOGE("initCameraConnect: Unable to connect to CameraService\n");
......
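The rewritten initCameraConnect() above replaces the compile-time #ifdef ANDROID_r2_2_0 selection with runtime probing: dlsym is tried against the mangled name of each known Camera::connect overload, so a single binary can serve several Android releases. A self-contained sketch of the same probe-and-call pattern, using libm.so.6 and cos as stand-ins for libcamera_client.so and the connect symbols:

#include <dlfcn.h>
#include <cstdio>

int main()
{
    void* handle = dlopen("libm.so.6", RTLD_LAZY);
    if (!handle)
    {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }

    dlerror(); // clear any stale error state, as the camera code does before dlsym

    typedef double (*CosFunc)(double);
    if (CosFunc f = (CosFunc)dlsym(handle, "cos"))
        std::printf("cos(0) = %f\n", f(0.0));
    else
        std::fprintf(stderr, "symbol not found: %s\n", dlerror());

    dlclose(handle);
    return 0;
}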
@@ -75,7 +75,6 @@ void CV_ChessboardDetectorTimingTest::run( int start_from )
sprintf( filepath, "%scameracalibration/", ts->get_data_path().c_str() );
sprintf( filename, "%schessboard_timing_list.dat", filepath );
printf("Reading file %s\n", filename);
CvFileStorage* fs = cvOpenFileStorage( filename, 0, CV_STORAGE_READ );
CvFileNode* board_list = fs ? cvGetFileNodeByName( fs, 0, "boards" ) : 0;
......
@@ -16,13 +16,7 @@ else()
set(cuda_link_libs "")
endif()
set(OPENCV_VERSION_FILE "${opencv_core_BINARY_DIR}/version_string.inc")
add_custom_command(OUTPUT "${OPENCV_VERSION_FILE}"
COMMAND ${CMAKE_COMMAND} -E copy_if_different "${OPENCV_BUILD_INFO_FILE}" "${OPENCV_VERSION_FILE}"
MAIN_DEPENDENCY "${OPENCV_BUILD_INFO_FILE}"
COMMENT "")
ocv_glob_module_sources(SOURCES ${lib_cuda} ${cuda_objs} "${OPENCV_VERSION_FILE}")
ocv_glob_module_sources(SOURCES ${lib_cuda} ${cuda_objs} "${opencv_core_BINARY_DIR}/version_string.inc")
ocv_create_module(${cuda_link_libs})
ocv_add_precompiled_headers(${the_module})
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other GpuMaterials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_CORE_DevMem2D_HPP__
#define __OPENCV_CORE_DevMem2D_HPP__
#ifdef __cplusplus
#ifdef __CUDACC__
#define __CV_GPU_HOST_DEVICE__ __host__ __device__ __forceinline__
#else
#define __CV_GPU_HOST_DEVICE__
#endif
namespace cv
{
namespace gpu
{
// Simple lightweight structures that encapsulate information about an image on the device.
// They are intended to be passed to nvcc-compiled code, since GpuMat depends on headers that nvcc can't compile
template <bool expr> struct StaticAssert;
template <> struct StaticAssert<true> {static __CV_GPU_HOST_DEVICE__ void check(){}};
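// Only StaticAssert<true> is defined, so StaticAssert<cond>::check()
// compiles only when cond holds: a pre-C++11 substitute for static_assert.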
template<typename T> struct DevPtr
{
typedef T elem_type;
typedef int index_type;
enum { elem_size = sizeof(elem_type) };
T* data;
__CV_GPU_HOST_DEVICE__ DevPtr() : data(0) {}
__CV_GPU_HOST_DEVICE__ DevPtr(T* data_) : data(data_) {}
__CV_GPU_HOST_DEVICE__ size_t elemSize() const { return elem_size; }
__CV_GPU_HOST_DEVICE__ operator T*() { return data; }
__CV_GPU_HOST_DEVICE__ operator const T*() const { return data; }
};
template<typename T> struct PtrSz : public DevPtr<T>
{
__CV_GPU_HOST_DEVICE__ PtrSz() : size(0) {}
__CV_GPU_HOST_DEVICE__ PtrSz(T* data_, size_t size_) : DevPtr<T>(data_), size(size_) {}
size_t size;
};
template<typename T> struct PtrStep : public DevPtr<T>
{
__CV_GPU_HOST_DEVICE__ PtrStep() : step(0) {}
__CV_GPU_HOST_DEVICE__ PtrStep(T* data_, size_t step_) : DevPtr<T>(data_), step(step_) {}
/** \brief stride between two consecutive rows in bytes. The step is always stored in bytes! */
size_t step;
__CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return ( T*)( ( char*)DevPtr<T>::data + y * step); }
__CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return (const T*)( (const char*)DevPtr<T>::data + y * step); }
__CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
__CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
};
template <typename T> struct PtrStepSz : public PtrStep<T>
{
__CV_GPU_HOST_DEVICE__ PtrStepSz() : cols(0), rows(0) {}
__CV_GPU_HOST_DEVICE__ PtrStepSz(int rows_, int cols_, T* data_, size_t step_)
: PtrStep<T>(data_, step_), cols(cols_), rows(rows_) {}
int cols;
int rows;
};
template <typename T> struct DevMem2D_ : public PtrStepSz<T>
{
DevMem2D_() {}
DevMem2D_(int rows_, int cols_, T* data_, size_t step_) : PtrStepSz<T>(rows_, cols_, data_, step_) {}
template <typename U>
explicit DevMem2D_(const DevMem2D_<U>& d) : PtrStepSz<T>(d.rows, d.cols, (T*)d.data, d.step) {}
};
template<typename T> struct PtrElemStep_ : public PtrStep<T>
{
PtrElemStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step)
{
StaticAssert<256 % sizeof(T) == 0>::check();
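// requires sizeof(T) to divide 256, so that a byte step which is a
// multiple of 256 (as GpuMat allocations are assumed to be aligned)
// converts to an element step with no remainder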
PtrStep<T>::step /= PtrStep<T>::elem_size;
}
__CV_GPU_HOST_DEVICE__ T* ptr(int y = 0) { return PtrStep<T>::data + y * PtrStep<T>::step; }
__CV_GPU_HOST_DEVICE__ const T* ptr(int y = 0) const { return PtrStep<T>::data + y * PtrStep<T>::step; }
__CV_GPU_HOST_DEVICE__ T& operator ()(int y, int x) { return ptr(y)[x]; }
__CV_GPU_HOST_DEVICE__ const T& operator ()(int y, int x) const { return ptr(y)[x]; }
};
template<typename T> struct PtrStep_ : public PtrStep<T>
{
PtrStep_() {}
PtrStep_(const DevMem2D_<T>& mem) : PtrStep<T>(mem.data, mem.step) {}
};
typedef DevMem2D_<unsigned char> DevMem2Db;
typedef DevMem2Db DevMem2D;
typedef DevMem2D_<float> DevMem2Df;
typedef DevMem2D_<int> DevMem2Di;
typedef PtrStep<unsigned char> PtrStepb;
typedef PtrStep<float> PtrStepf;
typedef PtrStep<int> PtrStepi;
typedef PtrElemStep_<unsigned char> PtrElemStep;
typedef PtrElemStep_<float> PtrElemStepf;
typedef PtrElemStep_<int> PtrElemStepi;
}
}
#endif // __cplusplus
#endif /* __OPENCV_CORE_DevMem2D_HPP__ */
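A brief host-side usage sketch of the structures above; the include path is an assumption, and __CV_GPU_HOST_DEVICE__ expands to nothing outside the CUDA compiler, so this compiles as plain C++. Because PtrStep::step is measured in bytes, the same struct describes padded (pitched) buffers without special cases:

#include <cstddef>
#include <cstdio>
#include <vector>
#include "opencv2/core/devmem2d.hpp" // assumed install location of the header above

int main()
{
    const int rows = 2, cols = 3;
    const size_t step = 16; // padded row stride in bytes, larger than cols * sizeof(float)
    std::vector<unsigned char> buf(rows * step, 0);

    cv::gpu::PtrStepSz<float> img(rows, cols, (float*)buf.data(), step);
    img(1, 2) = 42.0f;      // resolves to *(float*)((char*)data + 1*step + 2*sizeof(float))
    std::printf("%f\n", img(1, 2));
    return 0;
}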
@@ -97,7 +97,7 @@ namespace cv { namespace gpu { namespace device
}
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
__device__ void loopUnrolledCached(int queryIdx, const DevMem2D_<T>& query,volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
@@ -253,7 +253,7 @@ namespace cv { namespace gpu { namespace device
// Match Unrolled
template <int BLOCK_SIZE, int MAX_DESC_LEN, typename Dist, typename T, typename Mask>
__device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
__device__ void loopUnrolled(int queryIdx, const DevMem2D_<T>& query,volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
@@ -409,7 +409,7 @@ namespace cv { namespace gpu { namespace device
// Match
template <int BLOCK_SIZE, typename Dist, typename T, typename Mask>
__device__ void loop(int queryIdx, const DevMem2D_<T>& query, int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
__device__ void loop(int queryIdx, const DevMem2D_<T>& query, volatile int imgIdx, const DevMem2D_<T>& train, const Mask& mask,
typename Dist::value_type* s_query, typename Dist::value_type* s_train,
float& bestDistance, int& bestTrainIdx, int& bestImgIdx)
{
......
@@ -126,18 +126,19 @@ void cv::gpu::PyrLKOpticalFlow::buildImagePyramid(const GpuMat& img0, vector<Gpu
namespace
{
void calcPatchSize(cv::Size winSize, int cn, dim3& block, dim3& patch)
void calcPatchSize(cv::Size winSize, int cn, dim3& block, dim3& patch, bool isDeviceArch11)
{
winSize.width *= cn;
if (winSize.width > 32 && winSize.width > 2 * winSize.height)
{
block.x = 32;
block.x = isDeviceArch11 ? 16 : 32;
block.y = 8;
}
else
{
block.x = block.y = 16;
block.x = 16;
block.y = isDeviceArch11 ? 8 : 16;
}
patch.x = (winSize.width + block.x - 1) / block.x;
@@ -166,7 +167,7 @@ void cv::gpu::PyrLKOpticalFlow::sparse(const GpuMat& prevImg, const GpuMat& next
const int cn = prevImg.channels();
dim3 block, patch;
calcPatchSize(winSize, cn, block, patch);
calcPatchSize(winSize, cn, block, patch, isDeviceArch11_);
CV_Assert(derivLambda >= 0);
CV_Assert(maxLevel >= 0 && winSize.width > 2 && winSize.height > 2);
......
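The change above shrinks the CUDA block for compute-capability-1.1 devices (which support fewer resident threads per block) and derives the per-thread patch by rounding the tracking window up to whole blocks. A standalone sketch of that computation, with a hypothetical Dim struct in place of dim3:

#include <cstdio>

struct Dim { unsigned x, y; };

static void calcPatchSize(int winW, int winH, int cn, Dim& block, Dim& patch, bool isDeviceArch11)
{
    winW *= cn;
    if (winW > 32 && winW > 2 * winH)
    {
        block.x = isDeviceArch11 ? 16 : 32;
        block.y = 8;
    }
    else
    {
        block.x = 16;
        block.y = isDeviceArch11 ? 8 : 16;
    }
    patch.x = (winW + block.x - 1) / block.x; // ceil(winW / block.x)
    patch.y = (winH + block.y - 1) / block.y;
}

int main()
{
    Dim block, patch;
    calcPatchSize(21, 21, 1, block, patch, /*isDeviceArch11=*/false);
    std::printf("block %ux%u, patch %ux%u\n", block.x, block.y, patch.x, patch.y);
    return 0;
}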
@@ -42,11 +42,7 @@
#include "precomp.hpp"
#ifdef HAVE_FFMPEG
#ifdef NEW_FFMPEG
#include "cap_ffmpeg_impl_v2.hpp"
#else
#include "cap_ffmpeg_impl.hpp"
#endif
#else
#include "cap_ffmpeg_api.hpp"
#endif
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "cap_ffmpeg_api.hpp"
#include <assert.h>
#include <algorithm>
#include <limits>
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( disable: 4244 4510 4512 4610 )
#endif
#ifdef __cplusplus
extern "C" {
#endif
#include "ffmpeg_codecs.hpp"
#include <libavutil/mathematics.h>
#ifdef WIN32
#define HAVE_FFMPEG_SWSCALE 1
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#else
// if the header path is not specified explicitly, let's deduce it
#if !defined HAVE_FFMPEG_AVCODEC_H && !defined HAVE_LIBAVCODEC_AVCODEC_H
#if defined(HAVE_GENTOO_FFMPEG)
#define HAVE_LIBAVCODEC_AVCODEC_H 1
#if defined(HAVE_FFMPEG_SWSCALE)
#define HAVE_LIBSWSCALE_SWSCALE_H 1
#endif
#elif defined HAVE_FFMPEG
#define HAVE_FFMPEG_AVCODEC_H 1
#if defined(HAVE_FFMPEG_SWSCALE)
#define HAVE_FFMPEG_SWSCALE_H 1
#endif
#endif
#endif
#if defined(HAVE_FFMPEG_AVCODEC_H)
#include <ffmpeg/avcodec.h>
#endif
#if defined(HAVE_FFMPEG_SWSCALE_H)
#include <ffmpeg/swscale.h>
#endif
#if defined(HAVE_LIBAVCODEC_AVCODEC_H)
#include <libavcodec/avcodec.h>
#endif
#if defined(HAVE_LIBSWSCALE_SWSCALE_H)
#include <libswscale/swscale.h>
#endif
#endif
#ifdef __cplusplus
}
#endif
#if defined _MSC_VER && _MSC_VER >= 1200
#pragma warning( default: 4244 4510 4512 4610 )
#endif
#ifdef NDEBUG
#define CV_WARN(message)
#else
#define CV_WARN(message) fprintf(stderr, "warning: %s (%s:%d)\n", message, __FILE__, __LINE__)
#endif
/* PIX_FMT_RGBA32 macro changed in newer ffmpeg versions */
#ifndef PIX_FMT_RGBA32
#define PIX_FMT_RGBA32 PIX_FMT_RGB32
#endif
#define CALC_FFMPEG_VERSION(a,b,c) ( a<<16 | b<<8 | c )
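/* e.g. CALC_FFMPEG_VERSION(53, 24, 2) = (53<<16) | (24<<8) | 2 = 3479554,
   the same layout as LIBAVFORMAT_BUILD, so the version guards below reduce
   to plain integer comparisons */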
#if defined WIN32 || defined _WIN32
#include <windows.h>
#elif defined __linux__ || defined __APPLE__
#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#endif
#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif
int get_number_of_cpus(void)
{
#if defined WIN32 || defined _WIN32
SYSTEM_INFO sysinfo;
GetSystemInfo( &sysinfo );
return (int)sysinfo.dwNumberOfProcessors;
#elif defined __linux__
return (int)sysconf( _SC_NPROCESSORS_ONLN );
#elif defined __APPLE__
int numCPU=0;
int mib[4];
size_t len = sizeof(numCPU);
/* set the mib for hw.ncpu */
mib[0] = CTL_HW;
mib[1] = HW_AVAILCPU; // alternatively, try HW_NCPU;
/* get the number of CPUs from the system */
sysctl(mib, 2, &numCPU, &len, NULL, 0);
if( numCPU < 1 )
{
mib[1] = HW_NCPU;
sysctl( mib, 2, &numCPU, &len, NULL, 0 );
if( numCPU < 1 )
numCPU = 1;
}
return (int)numCPU;
#else
return 1;
#endif
}
char * FOURCC2str( int fourcc )
{
char * mystr=(char*)malloc(5);
mystr[0]=(char)((fourcc )&255);
mystr[1]=(char)((fourcc>> 8)&255);
mystr[2]=(char)((fourcc>>16)&255);
mystr[3]=(char)((fourcc>>24)&255);
mystr[4]=0;
return mystr;
}
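/* note: the returned string is malloc()ed; callers are expected to free() it */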
struct Image_FFMPEG
{
unsigned char* data;
int step;
int width;
int height;
int cn;
};
inline void _opencv_ffmpeg_free(void** ptr)
{
if(*ptr) free(*ptr);
*ptr = 0;
}
struct CvCapture_FFMPEG
{
bool open( const char* filename );
void close();
double getProperty(int);
bool setProperty(int, double);
bool grabFrame();
bool retrieveFrame(int, unsigned char** data, int* step, int* width, int* height, int* cn);
void init();
bool reopen();
void seek(int64_t frame_number);
void seek(double sec);
bool slowSeek( int framenumber );
int64_t get_total_frames();
double get_duration_sec();
double get_fps();
int get_bitrate();
double r2d(AVRational r);
int64_t dts_to_frame_number(int64_t dts);
double dts_to_sec(int64_t dts);
AVFormatContext * ic;
AVCodec * avcodec;
int video_stream;
AVStream * video_st;
AVFrame * picture;
AVFrame rgb_picture;
int64_t picture_pts;
AVPacket packet;
Image_FFMPEG frame;
#if defined(HAVE_FFMPEG_SWSCALE)
struct SwsContext *img_convert_ctx;
#endif
int64_t frame_number;
double eps_zero;
/*
'filename' contains the filename of the video source,
'filename==NULL' indicates that ffmpeg's seek support works
for the particular file.
'filename!=NULL' indicates that the slow fallback function is used for seeking,
and so the filename is needed to reopen the file on backward seeking.
*/
char * filename;
};
void CvCapture_FFMPEG::init()
{
ic = 0;
video_stream = -1;
video_st = 0;
picture = 0;
picture_pts = 0;
memset( &rgb_picture, 0, sizeof(rgb_picture) );
memset( &frame, 0, sizeof(frame) );
filename = 0;
packet.data = NULL;
#if defined(HAVE_FFMPEG_SWSCALE)
img_convert_ctx = 0;
#endif
avcodec = 0;
frame_number = 0;
eps_zero = 0.000025;
}
void CvCapture_FFMPEG::close()
{
if( picture )
av_free(picture);
if( video_st )
{
#if LIBAVFORMAT_BUILD > 4628
avcodec_close( video_st->codec );
#else
avcodec_close( &(video_st->codec) );
#endif
video_st = NULL;
}
if( ic )
{
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 24, 2)
av_close_input_file(ic);
#else
avformat_close_input(&ic);
#endif
ic = NULL;
}
if( rgb_picture.data[0] )
{
free( rgb_picture.data[0] );
rgb_picture.data[0] = 0;
}
// free the last packet if it exists
if (packet.data) {
av_free_packet (&packet);
}
init();
}
/*
Used to reopen a video if the slower fallback function for seeking is used.
*/
bool CvCapture_FFMPEG::reopen()
{
/*if ( filename==NULL ) return false;
#if LIBAVFORMAT_BUILD > 4628
avcodec_close( video_st->codec );
#else
avcodec_close( &video_st->codec );
#endif
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 24, 2)
av_close_input_file(ic);
av_open_input_file(&ic, filename, )
#else
avformat_close_input(&ic);
avformat_open_input(&ic, filename, NULL, NULL);
#endif
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 3, 0)
avformat_find_stream_info(ic, NULL);
#else
av_find_stream_info(ic);
#endif
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext *enc = ic->streams[video_stream]->codec;
#else
AVCodecContext *enc = &ic->streams[video_stream]->codec;
#endif
#ifdef FF_API_THREAD_INIT
avcodec_thread_init(enc, std::min(get_number_of_cpus(), 16));
#endif
AVCodec *codec = avcodec_find_decoder(enc->codec_id);
#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
avcodec_open2(enc, codec, NULL);
#else
avcodec_open(enc, codec);
#endif
video_st = ic->streams[video_stream];
// reset framenumber to zero
frame_number = 0;
picture_pts=0;*/
return true;
}
#ifndef AVSEEK_FLAG_FRAME
#define AVSEEK_FLAG_FRAME 0
#endif
#ifndef AVSEEK_FLAG_ANY
#define AVSEEK_FLAG_ANY 1
#endif
bool CvCapture_FFMPEG::open( const char* _filename )
{
unsigned i;
bool valid = false;
close();
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 13, 0)
avformat_network_init();
#endif
/* register all codecs, demuxers and protocols */
av_register_all();
av_log_set_level(AV_LOG_ERROR);
int err = avformat_open_input(&ic, _filename, NULL, NULL);
if (err < 0) {
CV_WARN("Error opening file");
goto exit_func;
}
err =
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 3, 0)
avformat_find_stream_info(ic, NULL);
#else
av_find_stream_info(ic);
#endif
if (err < 0) {
CV_WARN("Could not find codec parameters");
goto exit_func;
}
for(i = 0; i < ic->nb_streams; i++)
{
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext *enc = ic->streams[i]->codec;
#else
AVCodecContext *enc = &ic->streams[i]->codec;
#endif
#ifdef FF_API_THREAD_INIT
avcodec_thread_init(enc, get_number_of_cpus());
#else
enc->thread_count = get_number_of_cpus();
#endif
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
#define AVMEDIA_TYPE_VIDEO CODEC_TYPE_VIDEO
#endif
if( AVMEDIA_TYPE_VIDEO == enc->codec_type && video_stream < 0) {
AVCodec *codec = avcodec_find_decoder(enc->codec_id);
if (!codec ||
#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
avcodec_open2(enc, codec, NULL)
#else
avcodec_open(enc, codec)
#endif
< 0) goto exit_func;
video_stream = i;
video_st = ic->streams[i];
picture = avcodec_alloc_frame();
rgb_picture.data[0] = (uint8_t*)malloc(
avpicture_get_size( PIX_FMT_BGR24,
enc->width, enc->height ));
avpicture_fill( (AVPicture*)&rgb_picture, rgb_picture.data[0],
PIX_FMT_BGR24, enc->width, enc->height );
frame.width = enc->width;
frame.height = enc->height;
frame.cn = 3;
frame.step = rgb_picture.linesize[0];
frame.data = rgb_picture.data[0];
break;
}
}
if(video_stream >= 0) valid = true;
// check whether the source is seekable via ffmpeg's seek function av_seek_frame(...)
/*err = av_seek_frame(ic, video_stream, 10, 0);
if (err < 0)
{
filename=(char*)malloc(strlen(_filename)+1);
strcpy(filename, _filename);
// reopen videofile to 'seek' back to first frame
reopen();
}
else
{
// seek seems to work, so we don't need the filename,
// but we still need to seek back to filestart
filename=NULL;
int64_t ts = video_st->first_dts;
int flags = AVSEEK_FLAG_FRAME | AVSEEK_FLAG_BACKWARD;
av_seek_frame(ic, video_stream, ts, flags);
}*/
exit_func:
if( !valid )
close();
return valid;
}
bool CvCapture_FFMPEG::grabFrame()
{
bool valid = false;
static bool bFirstTime = true;
int got_picture;
int count_errs = 0;
const int max_number_of_attempts = 64;
if (bFirstTime)
{
bFirstTime = false;
packet.data = NULL;
}
if( !ic || !video_st ) return false;
if (packet.data != NULL)
{
av_free_packet (&packet);
}
// get the next frame
while (!valid)
{
int ret = av_read_frame(ic, &packet);
if (ret == AVERROR(EAGAIN)) continue;
/* else if (ret < 0) break; */
if( packet.stream_index != video_stream )
{
av_free_packet (&packet);
count_errs++;
if (count_errs > max_number_of_attempts) break; else
continue;
}
// Decode video frame
avcodec_decode_video2(video_st->codec, picture, &got_picture, &packet);
// Did we get a video frame?
if(got_picture)
{
frame_number++;
picture_pts = packet.pts;
valid = true;
}
else
{
count_errs++;
if (count_errs > max_number_of_attempts)
break;
}
}
// return if we have a new picture or not
return valid;
}
bool CvCapture_FFMPEG::retrieveFrame(int, unsigned char** data, int* step, int* width, int* height, int* cn)
{
if( !video_st || !picture->data[0] )
return false;
avpicture_fill((AVPicture*)&rgb_picture, rgb_picture.data[0], PIX_FMT_RGB24, video_st->codec->width, video_st->codec->height);
frame.width = video_st->codec->width;
frame.height = video_st->codec->height;
img_convert_ctx = sws_getContext(
video_st->codec->width, video_st->codec->height,
video_st->codec->pix_fmt,
video_st->codec->width, video_st->codec->height,
PIX_FMT_BGR24,
SWS_BICUBIC,
NULL, NULL, NULL
);
img_convert_ctx = sws_getCachedContext(
img_convert_ctx,
video_st->codec->width, video_st->codec->height,
video_st->codec->pix_fmt,
video_st->codec->width, video_st->codec->height,
PIX_FMT_BGR24,
SWS_BICUBIC,
NULL, NULL, NULL
);
if (img_convert_ctx == NULL)
return false;//CV_Error(0, "Cannot initialize the conversion context!");
sws_scale(
img_convert_ctx,
picture->data,
picture->linesize,
0, video_st->codec->height,
rgb_picture.data,
rgb_picture.linesize
);
sws_freeContext(img_convert_ctx);
frame_number++;
*data = frame.data;
*step = frame.step;
*width = frame.width;
*height = frame.height;
*cn = frame.cn;
return true;
}
#if defined(__APPLE__)
#define AV_NOPTS_VALUE_ ((int64_t)0x8000000000000000LL)
#else
#define AV_NOPTS_VALUE_ ((int64_t)AV_NOPTS_VALUE)
#endif
double CvCapture_FFMPEG::getProperty( int property_id )
{
// if( !capture || !video_st || !picture->data[0] ) return 0;
if( !video_st ) return 0;
// double frameScale = av_q2d (video_st->time_base) * av_q2d (video_st->r_frame_rate);
//int64_t timestamp;
//timestamp = picture_pts;
switch( property_id )
{
case CV_FFMPEG_CAP_PROP_POS_MSEC:
return 1000.0*static_cast<int>(frame_number)/static_cast<int>(get_fps());
break;
case CV_FFMPEG_CAP_PROP_POS_FRAMES:
return (double)static_cast<int>(frame_number);
break;
case CV_FFMPEG_CAP_PROP_POS_AVI_RATIO:
return r2d(ic->streams[video_stream]->time_base);
break;
case CV_FFMPEG_CAP_PROP_FRAME_COUNT:
return (double)static_cast<int>(get_total_frames());
break;
case CV_FFMPEG_CAP_PROP_FRAME_WIDTH:
return (double)frame.width;
break;
case CV_FFMPEG_CAP_PROP_FRAME_HEIGHT:
return (double)frame.height;
break;
case CV_FFMPEG_CAP_PROP_FPS:
#if LIBAVCODEC_BUILD > 4753
return av_q2d (video_st->r_frame_rate);
#else
return (double)video_st->codec.frame_rate
/ (double)video_st->codec.frame_rate_base;
#endif
break;
case CV_FFMPEG_CAP_PROP_FOURCC:
#if LIBAVFORMAT_BUILD > 4628
return (double)video_st->codec->codec_tag;
#else
return (double)video_st->codec.codec_tag;
#endif
break;
}
return 0;
}
double CvCapture_FFMPEG::r2d(AVRational r)
{
if (r.num == 0 || r.den == 0)
{
return 0.0;
}
else
{
return static_cast<double>(r.num) / static_cast<double>(r.den);
}
}
double CvCapture_FFMPEG::get_duration_sec()
{
double sec = static_cast<double>(ic->duration) / static_cast<double>(AV_TIME_BASE);
if (sec < eps_zero)
{
sec = static_cast<double>(ic->streams[video_stream]->duration) * r2d(ic->streams[video_stream]->time_base);
}
if (sec < eps_zero)
{
sec = static_cast<double>(static_cast<int64_t>(ic->streams[video_stream]->duration)) * r2d(ic->streams[video_stream]->time_base);
}
return sec;
}
int CvCapture_FFMPEG::get_bitrate()
{
return ic->bit_rate;
}
double CvCapture_FFMPEG::get_fps()
{
double fps = r2d(ic->streams[video_stream]->r_frame_rate);
if (fps < eps_zero)
{
fps = r2d(ic->streams[video_stream]->avg_frame_rate);
}
if (fps < eps_zero)
{
fps = 1.0 / r2d(ic->streams[video_stream]->codec->time_base);
}
return fps;
}
int64_t CvCapture_FFMPEG::get_total_frames()
{
int64_t nbf = ic->streams[video_stream]->nb_frames;
if (nbf == 0)
{
nbf = (int64_t)floor(get_duration_sec() * get_fps() + 0.5);
}
return nbf;
}
int64_t CvCapture_FFMPEG::dts_to_frame_number(int64_t dts)
{
double sec = dts_to_sec(dts);
return static_cast<int64_t>(get_fps() * sec);
}
double CvCapture_FFMPEG::dts_to_sec(int64_t dts)
{
return static_cast<double>(dts - ic->streams[video_stream]->start_time) * r2d(ic->streams[video_stream]->time_base);
}
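/* seek(int64_t) below combines a coarse and a fine step: when the current
   decode position (estimated from cur_dts) is more than 16 frames from the
   target, av_seek_frame() jumps to the nearest preceding keyframe, and the
   grabFrame() loop then decodes forward until the target frame is reached.
   The "- 2" offset appears to add slack for streams whose leading packets
   carry unreliable timestamps. */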
void CvCapture_FFMPEG::seek(int64_t frame_number)
{
frame_number = std::min(frame_number, get_total_frames());
int64_t dts = dts_to_frame_number(ic->streams[video_stream]->cur_dts);
if (abs(dts - 2 - frame_number) > 16)
{
double sec = static_cast<double>(frame_number) / static_cast<double>(get_fps());
int64_t time_stamp = ic->streams[video_stream]->start_time;
double time_base = r2d(ic->streams[video_stream]->time_base);
time_stamp += static_cast<int64_t>(sec / time_base);
av_seek_frame(ic, video_stream, time_stamp, AVSEEK_FLAG_FRAME | AVSEEK_FLAG_BACKWARD);
}
while(dts - 2 < frame_number)
{
/* cv::Mat imag = read(); */
if (!grabFrame()) break;
dts = dts_to_frame_number(ic->streams[video_stream]->cur_dts);
}
}
void CvCapture_FFMPEG::seek(double sec)
{
seek(static_cast<int64_t>(sec * get_fps()));
}
// this is a VERY slow fallback function, ONLY used if ffmpeg's av_seek_frame delivers no correct result!
bool CvCapture_FFMPEG::slowSeek( int framenumber )
{
if ( framenumber>picture_pts )
{
while ( picture_pts<framenumber )
if ( !grabFrame() ) return false;
}
else if ( framenumber<picture_pts )
{
reopen();
while ( picture_pts<framenumber )
if ( !grabFrame() ) return false;
}
return true;
}
bool CvCapture_FFMPEG::setProperty( int property_id, double value )
{
if( !video_st ) return false;
switch( property_id )
{
case CV_FFMPEG_CAP_PROP_POS_MSEC:
case CV_FFMPEG_CAP_PROP_POS_FRAMES:
case CV_FFMPEG_CAP_PROP_POS_AVI_RATIO:
{
switch( property_id )
{
case CV_FFMPEG_CAP_PROP_POS_FRAMES:
seek((int64_t)value);
break;
case CV_FFMPEG_CAP_PROP_POS_MSEC:
seek(value/1000.0);
break;
case CV_FFMPEG_CAP_PROP_POS_AVI_RATIO:
seek((int64_t)(value*ic->duration));
break;
}
picture_pts=(int64_t)value;
}
break;
default:
return false;
}
return true;
}
///////////////// FFMPEG CvVideoWriter implementation //////////////////////////
struct CvVideoWriter_FFMPEG
{
bool open( const char* filename, int fourcc,
double fps, int width, int height, bool isColor );
void close();
bool writeFrame( const unsigned char* data, int step, int width, int height, int cn, int origin );
void init();
AVOutputFormat * fmt;
AVFormatContext * oc;
uint8_t * outbuf;
uint32_t outbuf_size;
FILE * outfile;
AVFrame * picture;
AVFrame * input_picture;
uint8_t * picbuf;
AVStream * video_st;
int input_pix_fmt;
Image_FFMPEG temp_image;
int frame_width, frame_height;
bool ok;
#if defined(HAVE_FFMPEG_SWSCALE)
struct SwsContext *img_convert_ctx;
#endif
};
static const char * icvFFMPEGErrStr(int err)
{
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
switch(err) {
case AVERROR_BSF_NOT_FOUND:
return "Bitstream filter not found";
case AVERROR_DECODER_NOT_FOUND:
return "Decoder not found";
case AVERROR_DEMUXER_NOT_FOUND:
return "Demuxer not found";
case AVERROR_ENCODER_NOT_FOUND:
return "Encoder not found";
case AVERROR_EOF:
return "End of file";
case AVERROR_EXIT:
return "Immediate exit was requested; the called function should not be restarted";
case AVERROR_FILTER_NOT_FOUND:
return "Filter not found";
case AVERROR_INVALIDDATA:
return "Invalid data found when processing input";
case AVERROR_MUXER_NOT_FOUND:
return "Muxer not found";
case AVERROR_OPTION_NOT_FOUND:
return "Option not found";
case AVERROR_PATCHWELCOME:
return "Not yet implemented in FFmpeg, patches welcome";
case AVERROR_PROTOCOL_NOT_FOUND:
return "Protocol not found";
case AVERROR_STREAM_NOT_FOUND:
return "Stream not found";
default:
break;
}
#else
switch(err) {
case AVERROR_NUMEXPECTED:
return "Incorrect filename syntax";
case AVERROR_INVALIDDATA:
return "Invalid data in header";
case AVERROR_NOFMT:
return "Unknown format";
case AVERROR_IO:
return "I/O error occurred";
case AVERROR_NOMEM:
return "Memory allocation error";
default:
break;
}
#endif
return "Unspecified error";
}
/* function internal to FFMPEG (libavformat/riff.c) to look up a codec id by fourcc tag */
extern "C" {
enum CodecID codec_get_bmp_id(unsigned int tag);
}
void CvVideoWriter_FFMPEG::init()
{
fmt = 0;
oc = 0;
outbuf = 0;
outbuf_size = 0;
outfile = 0;
picture = 0;
input_picture = 0;
picbuf = 0;
video_st = 0;
input_pix_fmt = 0;
memset(&temp_image, 0, sizeof(temp_image));
#if defined(HAVE_FFMPEG_SWSCALE)
img_convert_ctx = 0;
#endif
frame_width = frame_height = 0;
ok = false;
}
/**
* the following function is a modified version of code
* found in ffmpeg-0.4.9-pre1/output_example.c
*/
static AVFrame * icv_alloc_picture_FFMPEG(int pix_fmt, int width, int height, bool alloc)
{
AVFrame * picture;
uint8_t * picture_buf;
int size;
picture = avcodec_alloc_frame();
if (!picture)
return NULL;
size = avpicture_get_size( (PixelFormat) pix_fmt, width, height);
if(alloc){
picture_buf = (uint8_t *) malloc(size);
if (!picture_buf)
{
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf,
(PixelFormat) pix_fmt, width, height);
}
else {
}
return picture;
}
/* add a video output stream to the container */
static AVStream *icv_add_video_stream_FFMPEG(AVFormatContext *oc,
CodecID codec_id,
int w, int h, int bitrate,
double fps, int pixel_format)
{
AVCodecContext *c;
AVStream *st;
int frame_rate, frame_rate_base;
AVCodec *codec;
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 10, 0)
st = avformat_new_stream(oc, 0);
#else
st = av_new_stream(oc, 0);
#endif
if (!st) {
CV_WARN("Could not allocate stream");
return NULL;
}
#if LIBAVFORMAT_BUILD > 4628
c = st->codec;
#else
c = &(st->codec);
#endif
#if LIBAVFORMAT_BUILD > 4621
c->codec_id = av_guess_codec(oc->oformat, NULL, oc->filename, NULL, AVMEDIA_TYPE_VIDEO);
#else
c->codec_id = oc->oformat->video_codec;
#endif
if(codec_id != CODEC_ID_NONE){
c->codec_id = codec_id;
}
//if(codec_tag) c->codec_tag=codec_tag;
codec = avcodec_find_encoder(c->codec_id);
c->codec_type = AVMEDIA_TYPE_VIDEO;
/* put sample parameters */
unsigned long long lbit_rate = static_cast<unsigned long long>(bitrate);
lbit_rate += (bitrate / 4);
lbit_rate = std::min(lbit_rate, static_cast<unsigned long long>(std::numeric_limits<int>::max()));
c->bit_rate = bitrate;
// took advice from
// http://ffmpeg-users.933282.n4.nabble.com/warning-clipping-1-dct-coefficients-to-127-127-td934297.html
c->qmin = 3;
/* resolution must be a multiple of two */
c->width = w;
c->height = h;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
frame_rate=(int)(fps+0.5);
frame_rate_base=1;
while (fabs((double)frame_rate/frame_rate_base) - fps > 0.001){
frame_rate_base*=10;
frame_rate=(int)(fps*frame_rate_base + 0.5);
}
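/* e.g. fps = 29.97 resolves as 30/1 -> 300/10 -> 2997/100, stopping once
   the rational approximation is within 0.001 of the requested rate */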
#if LIBAVFORMAT_BUILD > 4752
c->time_base.den = frame_rate;
c->time_base.num = frame_rate_base;
/* adjust time base for supported framerates */
if(codec && codec->supported_framerates){
const AVRational *p= codec->supported_framerates;
AVRational req = {frame_rate, frame_rate_base};
const AVRational *best=NULL;
AVRational best_error= {INT_MAX, 1};
for(; p->den!=0; p++){
AVRational error= av_sub_q(req, *p);
if(error.num <0) error.num *= -1;
if(av_cmp_q(error, best_error) < 0){
best_error= error;
best= p;
}
}
c->time_base.den= best->num;
c->time_base.num= best->den;
}
#else
c->frame_rate = frame_rate;
c->frame_rate_base = frame_rate_base;
#endif
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = (PixelFormat) pixel_format;
if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
c->max_b_frames = 2;
}
if (c->codec_id == CODEC_ID_MPEG1VIDEO || c->codec_id == CODEC_ID_MSMPEG4V3){
/* needed to avoid using macroblocks in which some coeffs overflow;
this doesn't happen with normal video, it just happens here as the
motion of the chroma plane doesn't match the luma plane */
/* avoid FFMPEG warning 'clipping 1 dct coefficients...' */
c->mb_decision=2;
}
#if LIBAVCODEC_VERSION_INT>0x000409
// some formats want stream headers to be separate
if(oc->oformat->flags & AVFMT_GLOBALHEADER)
{
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
#endif
return st;
}
static const int OPENCV_NO_FRAMES_WRITTEN_CODE = 1000;
int icv_av_write_frame_FFMPEG( AVFormatContext * oc, AVStream * video_st, uint8_t * outbuf, uint32_t outbuf_size, AVFrame * picture )
{
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext * c = video_st->codec;
#else
AVCodecContext * c = &(video_st->codec);
#endif
int out_size;
int ret = 0;
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
future for that */
AVPacket pkt;
av_init_packet(&pkt);
#ifndef PKT_FLAG_KEY
#define PKT_FLAG_KEY AV_PKT_FLAG_KEY
#endif
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= (uint8_t *)picture;
pkt.size= sizeof(AVPicture);
ret = av_write_frame(oc, &pkt);
} else {
/* encode the image */
out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
/* if zero size, it means the image was buffered */
if (out_size > 0) {
AVPacket pkt;
av_init_packet(&pkt);
#if LIBAVFORMAT_BUILD > 4752
if(c->coded_frame->pts != (int64_t)AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
#else
pkt.pts = c->coded_frame->pts;
#endif
if(c->coded_frame->key_frame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= video_st->index;
pkt.data= outbuf;
pkt.size= out_size;
/* write the compressed frame in the media file */
ret = av_write_frame(oc, &pkt);
} else {
ret = OPENCV_NO_FRAMES_WRITTEN_CODE;
}
}
return ret;
}
/// write a frame with FFMPEG
bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int width, int height, int cn, int origin )
{
bool ret = false;
if( (width & -2) != frame_width || (height & -2) != frame_height || !data )
return false;
width = frame_width;
height = frame_height;
// typecast from opaque data type to implemented struct
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext *c = video_st->codec;
#else
AVCodecContext *c = &(video_st->codec);
#endif
#if LIBAVFORMAT_BUILD < 5231
// It is not needed in the latest versions of ffmpeg
if( c->codec_id == CODEC_ID_RAWVIDEO && origin != 1 )
{
if( !temp_image.data )
{
temp_image.step = (width*cn + 3) & -4;
temp_image.width = width;
temp_image.height = height;
temp_image.cn = cn;
temp_image.data = (unsigned char*)malloc(temp_image.step*temp_image.height);
}
for( int y = 0; y < height; y++ )
memcpy(temp_image.data + y*temp_image.step, data + (height-1-y)*step, width*cn);
data = temp_image.data;
step = temp_image.step;
}
#else
if( width*cn != step )
{
if( !temp_image.data )
{
temp_image.step = width*cn;
temp_image.width = width;
temp_image.height = height;
temp_image.cn = cn;
temp_image.data = (unsigned char*)malloc(temp_image.step*temp_image.height);
}
if (origin == 1)
for( int y = 0; y < height; y++ )
memcpy(temp_image.data + y*temp_image.step, data + (height-1-y)*step, temp_image.step);
else
for( int y = 0; y < height; y++ )
memcpy(temp_image.data + y*temp_image.step, data + y*step, temp_image.step);
data = temp_image.data;
step = temp_image.step;
}
#endif
// check parameters
if (input_pix_fmt == PIX_FMT_BGR24) {
if (cn != 3) {
return false;
}
}
else if (input_pix_fmt == PIX_FMT_GRAY8) {
if (cn != 1) {
return false;
}
}
else {
assert(false);
}
if ( c->pix_fmt != input_pix_fmt ) {
assert( input_picture );
// let input_picture point to the raw data buffer of 'image'
avpicture_fill((AVPicture *)input_picture, (uint8_t *) data,
(PixelFormat)input_pix_fmt, width, height);
#if !defined(HAVE_FFMPEG_SWSCALE)
// convert to the color format needed by the codec
if( img_convert((AVPicture *)picture, c->pix_fmt,
(AVPicture *)input_picture, (PixelFormat)input_pix_fmt,
width, height) < 0){
return false;
}
#else
img_convert_ctx = sws_getContext(width,
height,
(PixelFormat)input_pix_fmt,
c->width,
c->height,
c->pix_fmt,
SWS_BICUBIC,
NULL, NULL, NULL);
if ( sws_scale(img_convert_ctx, input_picture->data,
input_picture->linesize, 0,
height,
picture->data, picture->linesize) < 0 )
{
return false;
}
sws_freeContext(img_convert_ctx);
#endif
}
else{
avpicture_fill((AVPicture *)picture, (uint8_t *) data,
(PixelFormat)input_pix_fmt, width, height);
}
ret = icv_av_write_frame_FFMPEG( oc, video_st, outbuf, outbuf_size, picture) >= 0;
return ret;
}
/// close video output stream and free associated memory
void CvVideoWriter_FFMPEG::close()
{
unsigned i;
// nothing to do if already released
if ( !picture )
return;
/* no more frames to compress. The codec has a latency of a few
frames if using B frames, so we get the last frames by
passing the same picture again */
// TODO -- do we need to account for latency here?
/* write the trailer, if any */
if(ok && oc)
{
if (!(oc->oformat->flags & AVFMT_RAWPICTURE))
{
for(;;)
{
int ret = icv_av_write_frame_FFMPEG( oc, video_st, outbuf, outbuf_size, NULL);
if( ret == OPENCV_NO_FRAMES_WRITTEN_CODE || ret < 0 )
break;
}
}
av_write_trailer(oc);
}
// free pictures
#if LIBAVFORMAT_BUILD > 4628
if( video_st->codec->pix_fmt != input_pix_fmt){
#else
if( video_st->codec.pix_fmt != input_pix_fmt){
#endif
if(picture->data[0])
free(picture->data[0]);
picture->data[0] = 0;
}
av_free(picture);
if (input_picture) {
av_free(input_picture);
}
/* close codec */
#if LIBAVFORMAT_BUILD > 4628
avcodec_close(video_st->codec);
#else
avcodec_close(&(video_st->codec));
#endif
av_free(outbuf);
/* free the streams */
for(i = 0; i < oc->nb_streams; i++) {
av_freep(&oc->streams[i]->codec);
av_freep(&oc->streams[i]);
}
if (!(fmt->flags & AVFMT_NOFILE)) {
/* close the output file */
#if LIBAVCODEC_VERSION_INT < ((52<<16)+(123<<8)+0)
#if LIBAVCODEC_VERSION_INT >= ((51<<16)+(49<<8)+0)
url_fclose(oc->pb);
#else
url_fclose(&oc->pb);
#endif
#else
avio_close(oc->pb);
#endif
}
/* free the stream */
av_free(oc);
if( temp_image.data )
{
free(temp_image.data);
temp_image.data = 0;
}
init();
}
/// Create a video writer object that uses FFMPEG
bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
double fps, int width, int height, bool is_color )
{
CodecID codec_id = CODEC_ID_NONE;
int err, codec_pix_fmt;
double bitrate_scale = 1;
close();
// check arguments
if( !filename )
return false;
if(fps <= 0)
return false;
// we allow frames of odd width or height, but in this case we truncate
// the rightmost column/the bottom row. Probably, this should be handled more elegantly,
// but some internal functions inside FFMPEG swscale require even width/height.
width &= -2;
height &= -2;
if( width <= 0 || height <= 0 )
return false;
// tell FFMPEG to register codecs
av_register_all();
av_log_set_level(AV_LOG_ERROR);
/* auto detect the output format from the name and fourcc code. */
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
fmt = av_guess_format(NULL, filename, NULL);
#else
fmt = guess_format(NULL, filename, NULL);
#endif
if (!fmt)
return false;
/* determine optimal pixel format */
if (is_color) {
input_pix_fmt = PIX_FMT_BGR24;
}
else {
input_pix_fmt = PIX_FMT_GRAY8;
}
/* Lookup codec_id for given fourcc */
#if LIBAVCODEC_VERSION_INT<((51<<16)+(49<<8)+0)
if( (codec_id = codec_get_bmp_id( fourcc )) == CODEC_ID_NONE )
return false;
#else
const struct AVCodecTag * tags[] = { codec_bmp_tags, NULL};
if( (codec_id = av_codec_get_id(tags, fourcc)) == CODEC_ID_NONE )
return false;
#endif
// alloc memory for context
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
oc = avformat_alloc_context();
#else
oc = av_alloc_format_context();
#endif
assert (oc);
/* set file name */
oc->oformat = fmt;
snprintf(oc->filename, sizeof(oc->filename), "%s", filename);
/* set some options */
oc->max_delay = (int)(0.7*AV_TIME_BASE); /* This reduces buffer underrun warnings with MPEG */
// set a few optimal pixel formats for lossless codecs of interest..
switch (codec_id) {
#if LIBAVCODEC_VERSION_INT>((50<<16)+(1<<8)+0)
case CODEC_ID_JPEGLS:
// BGR24 or GRAY8 depending on is_color...
codec_pix_fmt = input_pix_fmt;
break;
#endif
case CODEC_ID_HUFFYUV:
codec_pix_fmt = PIX_FMT_YUV422P;
break;
case CODEC_ID_MJPEG:
case CODEC_ID_LJPEG:
codec_pix_fmt = PIX_FMT_YUVJ420P;
bitrate_scale = 3;
break;
case CODEC_ID_RAWVIDEO:
codec_pix_fmt = input_pix_fmt == PIX_FMT_GRAY8 ||
input_pix_fmt == PIX_FMT_GRAY16LE ||
input_pix_fmt == PIX_FMT_GRAY16BE ? input_pix_fmt : PIX_FMT_YUV420P;
break;
default:
// good for lossy formats, MPEG, etc.
codec_pix_fmt = PIX_FMT_YUV420P;
break;
}
double bitrate = MIN(bitrate_scale*fps*width*height, (double)INT_MAX/2);
// TODO -- safe to ignore output audio stream?
video_st = icv_add_video_stream_FFMPEG(oc, codec_id,
width, height, (int)(bitrate + 0.5),
fps, codec_pix_fmt);
/* set the output parameters (must be done even if no
parameters). */
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
if (av_set_parameters(oc, NULL) < 0) {
return false;
}
#endif
#if 0
#if FF_API_DUMP_FORMAT
dump_format(oc, 0, filename, 1);
#else
av_dump_format(oc, 0, filename, 1);
#endif
#endif
/* now that all the parameters are set, we can open the audio and
video codecs and allocate the necessary encode buffers */
if (!video_st){
return false;
}
AVCodec *codec;
AVCodecContext *c;
#if LIBAVFORMAT_BUILD > 4628
c = (video_st->codec);
#else
c = &(video_st->codec);
#endif
c->codec_tag = fourcc;
/* find the video encoder */
codec = avcodec_find_encoder(c->codec_id);
if (!codec) {
fprintf(stderr, "Could not find encoder for codec id %d: %s", c->codec_id, icvFFMPEGErrStr(
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
AVERROR_ENCODER_NOT_FOUND
#else
-1
#endif
));
return false;
}
unsigned long long lbit_rate = static_cast<unsigned long long>(c->bit_rate);
lbit_rate += (bitrate / 4);
lbit_rate = std::min(lbit_rate, static_cast<unsigned long long>(std::numeric_limits<int>::max()));
c->bit_rate_tolerance = static_cast<int>(lbit_rate);
/* open the codec */
if ((err=
#if LIBAVCODEC_VERSION_INT >= ((53<<16)+(8<<8)+0)
avcodec_open2(c, codec, NULL)
#else
avcodec_open(c, codec)
#endif
) < 0) {
fprintf(stderr, "Could not open codec '%s': %s", codec->name, icvFFMPEGErrStr(err));
return false;
}
outbuf = NULL;
if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
/* allocate output buffer */
/* assume we will never get codec output with more than 4 bytes per pixel... */
outbuf_size = width*height*4;
outbuf = (uint8_t *) av_malloc(outbuf_size);
}
bool need_color_convert;
need_color_convert = (c->pix_fmt != input_pix_fmt);
/* allocate the encoded raw picture */
picture = icv_alloc_picture_FFMPEG(c->pix_fmt, c->width, c->height, need_color_convert);
if (!picture) {
return false;
}
/* if the output format is not our input format, then a temporary
picture of the input format is needed too. It is then converted
to the required output format */
input_picture = NULL;
if ( need_color_convert ) {
input_picture = icv_alloc_picture_FFMPEG(input_pix_fmt, c->width, c->height, false);
if (!input_picture) {
return false;
}
}
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
if (url_fopen(&oc->pb, filename, URL_WRONLY) < 0)
#else
if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0)
#endif
{
return false;
}
}
/* write the stream header, if any */
err=avformat_write_header(oc, NULL);
if(err < 0)
{
close();
remove(filename);
return false;
}
frame_width = width;
frame_height = height;
ok = true;
return true;
}
CvCapture_FFMPEG* cvCreateFileCapture_FFMPEG( const char* filename )
{
CvCapture_FFMPEG* capture = (CvCapture_FFMPEG*)malloc(sizeof(*capture));
capture->init();
if( capture->open( filename ))
return capture;
capture->close();
free(capture);
return 0;
}
void cvReleaseCapture_FFMPEG(CvCapture_FFMPEG** capture)
{
if( capture && *capture )
{
(*capture)->close();
free(*capture);
*capture = 0;
}
}
int cvSetCaptureProperty_FFMPEG(CvCapture_FFMPEG* capture, int prop_id, double value)
{
return capture->setProperty(prop_id, value);
}
double cvGetCaptureProperty_FFMPEG(CvCapture_FFMPEG* capture, int prop_id)
{
return capture->getProperty(prop_id);
}
int cvGrabFrame_FFMPEG(CvCapture_FFMPEG* capture)
{
return capture->grabFrame();
}
int cvRetrieveFrame_FFMPEG(CvCapture_FFMPEG* capture, unsigned char** data, int* step, int* width, int* height, int* cn)
{
return capture->retrieveFrame(0, data, step, width, height, cn);
}
CvVideoWriter_FFMPEG* cvCreateVideoWriter_FFMPEG( const char* filename, int fourcc, double fps,
int width, int height, int isColor )
{
CvVideoWriter_FFMPEG* writer = (CvVideoWriter_FFMPEG*)malloc(sizeof(*writer));
writer->init();
if( writer->open( filename, fourcc, fps, width, height, isColor != 0 ))
return writer;
writer->close();
free(writer);
return 0;
}
void cvReleaseVideoWriter_FFMPEG( CvVideoWriter_FFMPEG** writer )
{
if( writer && *writer )
{
(*writer)->close();
free(*writer);
*writer = 0;
}
}
int cvWriteFrame_FFMPEG( CvVideoWriter_FFMPEG* writer,
const unsigned char* data, int step,
int width, int height, int cn, int origin)
{
return writer->writeFrame(data, step, width, height, cn, origin);
}
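A minimal usage sketch of the plain-C entry points above; input.avi is a hypothetical file name, and the declarations above are assumed visible to the caller:

#include <cstdio>

int main()
{
    CvCapture_FFMPEG* cap = cvCreateFileCapture_FFMPEG("input.avi");
    if (!cap)
    {
        std::fprintf(stderr, "open failed\n");
        return 1;
    }

    unsigned char* data; int step, width, height, cn;
    while (cvGrabFrame_FFMPEG(cap) &&
           cvRetrieveFrame_FFMPEG(cap, &data, &step, &width, &height, &cn))
    {
        std::printf("frame %dx%d, %d channel(s)\n", width, height, cn);
    }

    cvReleaseCapture_FFMPEG(&cap);
    return 0;
}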
/*
* For CUDA encoder
*/
struct OutputMediaStream_FFMPEG
{
bool open(const char* fileName, int width, int height, double fps);
void close();
void write(unsigned char* data, int size, int keyFrame);
// add a video output stream to the container
static AVStream* addVideoStream(AVFormatContext *oc, CodecID codec_id, int w, int h, int bitrate, double fps, PixelFormat pixel_format);
AVOutputFormat* fmt_;
AVFormatContext* oc_;
AVStream* video_st_;
};
void OutputMediaStream_FFMPEG::close()
{
// no more frames to compress. The codec has a latency of a few
// frames if using B frames, so we get the last frames by
// passing the same picture again
// TODO -- do we need to account for latency here?
if (oc_)
{
// write the trailer, if any
av_write_trailer(oc_);
// free the streams
for (unsigned int i = 0; i < oc_->nb_streams; ++i)
{
av_freep(&oc_->streams[i]->codec);
av_freep(&oc_->streams[i]);
}
if (!(fmt_->flags & AVFMT_NOFILE) && oc_->pb)
{
// close the output file
#if LIBAVCODEC_VERSION_INT < ((52<<16)+(123<<8)+0)
#if LIBAVCODEC_VERSION_INT >= ((51<<16)+(49<<8)+0)
url_fclose(oc_->pb);
#else
url_fclose(&oc_->pb);
#endif
#else
avio_close(oc_->pb);
#endif
}
// free the stream
av_free(oc_);
}
}
AVStream* OutputMediaStream_FFMPEG::addVideoStream(AVFormatContext *oc, CodecID codec_id, int w, int h, int bitrate, double fps, PixelFormat pixel_format)
{
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 10, 0)
AVStream* st = avformat_new_stream(oc, 0);
#else
AVStream* st = av_new_stream(oc, 0);
#endif
if (!st)
return 0;
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext* c = st->codec;
#else
AVCodecContext* c = &(st->codec);
#endif
c->codec_id = codec_id;
c->codec_type = AVMEDIA_TYPE_VIDEO;
// put sample parameters
unsigned long long lbit_rate = static_cast<unsigned long long>(bitrate);
lbit_rate += (bitrate / 4);
lbit_rate = std::min(lbit_rate, static_cast<unsigned long long>(std::numeric_limits<int>::max()));
c->bit_rate = static_cast<int>(lbit_rate); // padded by 25% and clamped to INT_MAX
// took advice from
// http://ffmpeg-users.933282.n4.nabble.com/warning-clipping-1-dct-coefficients-to-127-127-td934297.html
c->qmin = 3;
// resolution must be a multiple of two
c->width = w;
c->height = h;
AVCodec* codec = avcodec_find_encoder(c->codec_id);
// time base: this is the fundamental unit of time (in seconds) in terms
// of which frame timestamps are represented. for fixed-fps content,
// timebase should be 1/framerate and timestamp increments should be
// identically 1
int frame_rate = static_cast<int>(fps+0.5);
int frame_rate_base = 1;
while (fabs(static_cast<double>(frame_rate)/frame_rate_base - fps) > 0.001)
{
frame_rate_base *= 10;
frame_rate = static_cast<int>(fps*frame_rate_base + 0.5);
}
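// e.g. fps = 29.97 yields frame_rate = 2997, frame_rate_base = 100,
// i.e. a time base of 100/2997 seconds per tick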
c->time_base.den = frame_rate;
c->time_base.num = frame_rate_base;
#if LIBAVFORMAT_BUILD > 4752
// adjust time base for supported framerates
if (codec && codec->supported_framerates)
{
AVRational req = {frame_rate, frame_rate_base};
const AVRational* best = NULL;
AVRational best_error = {INT_MAX, 1};
for (const AVRational* p = codec->supported_framerates; p->den!=0; ++p)
{
AVRational error = av_sub_q(req, *p);
if (error.num < 0)
error.num *= -1;
if (av_cmp_q(error, best_error) < 0)
{
best_error= error;
best= p;
}
}
if (best)
{
    // note the den/num swap: the time base is the reciprocal of the frame rate
    c->time_base.den = best->num;
    c->time_base.num = best->den;
}
}
#endif
c->gop_size = 12; // emit one intra frame every twelve frames at most
c->pix_fmt = pixel_format;
if (c->codec_id == CODEC_ID_MPEG2VIDEO)
c->max_b_frames = 2;
if (c->codec_id == CODEC_ID_MPEG1VIDEO || c->codec_id == CODEC_ID_MSMPEG4V3)
{
// needed to avoid using macroblocks in which some coefficients overflow;
// this doesn't happen with normal video, it just happens here as the
// motion of the chroma plane doesn't match the luma plane
// (avoids the FFMPEG warning 'clipping 1 dct coefficients...')
c->mb_decision = 2;
}
#if LIBAVCODEC_VERSION_INT > 0x000409
// some formats want stream headers to be separate
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
{
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
}
#endif
return st;
}
bool OutputMediaStream_FFMPEG::open(const char* fileName, int width, int height, double fps)
{
fmt_ = 0;
oc_ = 0;
video_st_ = 0;
// tell FFMPEG to register codecs
av_register_all();
av_log_set_level(AV_LOG_ERROR);
// auto detect the output format from the name and fourcc code
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
fmt_ = av_guess_format(NULL, fileName, NULL);
#else
fmt_ = guess_format(NULL, fileName, NULL);
#endif
if (!fmt_)
return false;
CodecID codec_id = CODEC_ID_H264;
// alloc memory for context
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 2, 0)
oc_ = avformat_alloc_context();
#else
oc_ = av_alloc_format_context();
#endif
if (!oc_)
return false;
// set some options
oc_->oformat = fmt_;
snprintf(oc_->filename, sizeof(oc_->filename), "%s", fileName);
oc_->max_delay = (int)(0.7 * AV_TIME_BASE); // This reduces buffer underrun warnings with MPEG
// set a few optimal pixel formats for lossless codecs of interest..
PixelFormat codec_pix_fmt = PIX_FMT_YUV420P;
int bitrate_scale = 64;
// TODO -- safe to ignore output audio stream?
video_st_ = addVideoStream(oc_, codec_id, width, height, width * height * bitrate_scale, fps, codec_pix_fmt);
if (!video_st_)
return false;
// set the output parameters (must be done even if no parameters)
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
if (av_set_parameters(oc_, NULL) < 0)
return false;
#endif
// now that all the parameters are set, we can open the audio and
// video codecs and allocate the necessary encode buffers
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext* c = (video_st_->codec);
#else
AVCodecContext* c = &(video_st_->codec);
#endif
c->codec_tag = MKTAG('H', '2', '6', '4');
c->bit_rate_tolerance = c->bit_rate;
// open the output file, if needed
if (!(fmt_->flags & AVFMT_NOFILE))
{
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
int err = url_fopen(&oc_->pb, fileName, URL_WRONLY);
#else
int err = avio_open(&oc_->pb, fileName, AVIO_FLAG_WRITE);
#endif
if (err != 0)
return false;
}
// write the stream header, if any
#if LIBAVFORMAT_BUILD < CALC_FFMPEG_VERSION(53, 2, 0)
av_write_header(oc_);
#else
avformat_write_header(oc_, NULL);
#endif
return true;
}
void OutputMediaStream_FFMPEG::write(unsigned char* data, int size, int keyFrame)
{
// if zero size, it means the image was buffered
if (size > 0)
{
AVPacket pkt;
av_init_packet(&pkt);
if (keyFrame)
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index = video_st_->index;
pkt.data = data;
pkt.size = size;
// write the compressed frame in the media file
av_write_frame(oc_, &pkt);
}
}
struct OutputMediaStream_FFMPEG* create_OutputMediaStream_FFMPEG(const char* fileName, int width, int height, double fps)
{
OutputMediaStream_FFMPEG* stream = (OutputMediaStream_FFMPEG*) malloc(sizeof(OutputMediaStream_FFMPEG));
if (stream->open(fileName, width, height, fps))
return stream;
stream->close();
free(stream);
return 0;
}
void release_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream)
{
stream->close();
free(stream);
}
void write_OutputMediaStream_FFMPEG(struct OutputMediaStream_FFMPEG* stream, unsigned char* data, int size, int keyFrame)
{
stream->write(data, size, keyFrame);
}
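/* Usage sketch (illustrative only): the file name and resolution are
 * arbitrary, and encodedData/encodedSize/isKeyFrame stand in for the output
 * of an external (e.g. CUDA) H.264 encoder.
 *
 *   OutputMediaStream_FFMPEG* os =
 *       create_OutputMediaStream_FFMPEG("out.mp4", 1280, 720, 30.0);
 *   if (os)
 *   {
 *       // for each packet produced by the encoder:
 *       write_OutputMediaStream_FFMPEG(os, encodedData, encodedSize, isKeyFrame);
 *       // ...
 *       release_OutputMediaStream_FFMPEG(os); // writes the trailer, closes the file
 *   }
 */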
/*
* For CUDA decoder
*/
enum
{
VideoCodec_MPEG1 = 0,
VideoCodec_MPEG2,
VideoCodec_MPEG4,
VideoCodec_VC1,
VideoCodec_H264,
VideoCodec_JPEG,
VideoCodec_H264_SVC,
VideoCodec_H264_MVC,
// Uncompressed YUV
VideoCodec_YUV420 = (('I'<<24)|('Y'<<16)|('U'<<8)|('V')), // Y,U,V (4:2:0)
VideoCodec_YV12 = (('Y'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,V,U (4:2:0)
VideoCodec_NV12 = (('N'<<24)|('V'<<16)|('1'<<8)|('2')), // Y,UV (4:2:0)
VideoCodec_YUYV = (('Y'<<24)|('U'<<16)|('Y'<<8)|('V')), // YUYV/YUY2 (4:2:2)
VideoCodec_UYVY = (('U'<<24)|('Y'<<16)|('V'<<8)|('Y')), // UYVY (4:2:2)
};
enum
{
VideoChromaFormat_Monochrome = 0,
VideoChromaFormat_YUV420,
VideoChromaFormat_YUV422,
VideoChromaFormat_YUV444,
};
struct InputMediaStream_FFMPEG
{
public:
bool open(const char* fileName, int* codec, int* chroma_format, int* width, int* height);
void close();
bool read(unsigned char** data, int* size, int* endOfFile);
private:
InputMediaStream_FFMPEG(const InputMediaStream_FFMPEG&);
InputMediaStream_FFMPEG& operator =(const InputMediaStream_FFMPEG&);
AVFormatContext* ctx_;
int video_stream_id_;
AVPacket pkt_;
};
bool InputMediaStream_FFMPEG::open(const char* fileName, int* codec, int* chroma_format, int* width, int* height)
{
int err;
ctx_ = 0;
video_stream_id_ = -1;
memset(&pkt_, 0, sizeof(AVPacket));
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 13, 0)
avformat_network_init();
#endif
// register all codecs, demuxers and protocols
av_register_all();
av_log_set_level(AV_LOG_ERROR);
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 6, 0)
err = avformat_open_input(&ctx_, fileName, 0, 0);
#else
err = av_open_input_file(&ctx_, fileName, 0, 0, 0);
#endif
if (err < 0)
return false;
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 3, 0)
err = avformat_find_stream_info(ctx_, 0);
#else
err = av_find_stream_info(ctx_);
#endif
if (err < 0)
return false;
for (unsigned int i = 0; i < ctx_->nb_streams; ++i)
{
#if LIBAVFORMAT_BUILD > 4628
AVCodecContext *enc = ctx_->streams[i]->codec;
#else
AVCodecContext *enc = &ctx_->streams[i]->codec;
#endif
if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
{
video_stream_id_ = static_cast<int>(i);
switch (enc->codec_id)
{
case CODEC_ID_MPEG1VIDEO:
*codec = ::VideoCodec_MPEG1;
break;
case CODEC_ID_MPEG2VIDEO:
*codec = ::VideoCodec_MPEG2;
break;
case CODEC_ID_MPEG4:
*codec = ::VideoCodec_MPEG4;
break;
case CODEC_ID_VC1:
*codec = ::VideoCodec_VC1;
break;
case CODEC_ID_H264:
*codec = ::VideoCodec_H264;
break;
default:
return false;
};
switch (enc->pix_fmt)
{
case PIX_FMT_YUV420P:
*chroma_format = ::VideoChromaFormat_YUV420;
break;
case PIX_FMT_YUV422P:
*chroma_format = ::VideoChromaFormat_YUV422;
break;
case PIX_FMT_YUV444P:
*chroma_format = ::VideoChromaFormat_YUV444;
break;
default:
return false;
}
*width = enc->coded_width;
*height = enc->coded_height;
break;
}
}
if (video_stream_id_ < 0)
return false;
av_init_packet(&pkt_);
return true;
}
void InputMediaStream_FFMPEG::close()
{
if (ctx_)
{
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(53, 24, 2)
avformat_close_input(&ctx_);
#else
av_close_input_file(ctx_);
#endif
}
// free the last packet if it exists
if (pkt_.data)
av_free_packet(&pkt_);
}
bool InputMediaStream_FFMPEG::read(unsigned char** data, int* size, int* endOfFile)
{
// free the last packet if it exists
if (pkt_.data)
av_free_packet(&pkt_);
// get the next frame
for (;;)
{
int ret = av_read_frame(ctx_, &pkt_);
if (ret == AVERROR(EAGAIN))
continue;
if (ret < 0)
{
if (ret == AVERROR_EOF)
*endOfFile = true;
return false;
}
if (pkt_.stream_index != video_stream_id_)
{
av_free_packet(&pkt_);
continue;
}
break;
}
*data = pkt_.data;
*size = pkt_.size;
*endOfFile = false;
return true;
}
InputMediaStream_FFMPEG* create_InputMediaStream_FFMPEG(const char* fileName, int* codec, int* chroma_format, int* width, int* height)
{
InputMediaStream_FFMPEG* stream = (InputMediaStream_FFMPEG*) malloc(sizeof(InputMediaStream_FFMPEG));
if (stream && stream->open(fileName, codec, chroma_format, width, height))
return stream;
stream->close();
free(stream);
return 0;
}
void release_InputMediaStream_FFMPEG(InputMediaStream_FFMPEG* stream)
{
stream->close();
free(stream);
}
int read_InputMediaStream_FFMPEG(InputMediaStream_FFMPEG* stream, unsigned char** data, int* size, int* endOfFile)
{
return stream->read(data, size, endOfFile);
}
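/* Usage sketch (illustrative only; the file name is an assumption):
 *
 *   int codec, chroma, w, h;
 *   InputMediaStream_FFMPEG* is =
 *       create_InputMediaStream_FFMPEG("in.mp4", &codec, &chroma, &w, &h);
 *   if (is)
 *   {
 *       unsigned char* data; int size, eof = 0;
 *       while (read_InputMediaStream_FFMPEG(is, &data, &size, &eof))
 *       {
 *           // feed the raw video packet (data, size) to the decoder
 *       }
 *       release_InputMediaStream_FFMPEG(is);
 *   }
 */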
Planar Subdivisions (C API)
============================
.. highlight:: c
CvSubdiv2D
----------
.. ocv:struct:: CvSubdiv2D
Planar subdivision.
::
#define CV_SUBDIV2D_FIELDS() \
CV_GRAPH_FIELDS() \
int quad_edges; \
int is_geometry_valid; \
CvSubdiv2DEdge recent_edge; \
CvPoint2D32f topleft; \
CvPoint2D32f bottomright;
typedef struct CvSubdiv2D
{
CV_SUBDIV2D_FIELDS()
}
CvSubdiv2D;
..
A planar subdivision is a subdivision of a plane into a set of
non-overlapping regions (facets) that cover the whole plane. The above
structure describes a subdivision built on a 2D point set, where the points
are linked together to form a planar graph. This graph, together with a few
edges connecting the exterior subdivision points (namely, the convex hull points)
with infinity, subdivides the plane into facets by its edges.
For every subdivision, there is a dual subdivision in which facets and
points (subdivision vertices) swap their roles. This means that a facet is
treated as a vertex (called a virtual point below) of the dual subdivision, and
the original subdivision vertices become facets. In the figure below, the
original subdivision is marked with solid lines and the dual subdivision
with dotted lines.
.. image:: pics/subdiv.png
OpenCV subdivides a plane into triangles using the Delaunay
algorithm. The subdivision is built iteratively, starting from a dummy
triangle that is guaranteed to include all the subdivision points. In this
case, the dual subdivision is a Voronoi diagram of the input 2D point set. The
subdivisions can be used for the piecewise 3D transformation of a plane,
morphing, fast location of points on the plane, building special graphs
(such as NNG, RNG), and so forth.
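For illustration, here is a minimal sketch that builds a small Delaunay
subdivision and computes its Voronoi dual (the point coordinates and the
bounding rectangle are arbitrary):

::

    CvRect rect = cvRect(0, 0, 600, 400);
    CvMemStorage* storage = cvCreateMemStorage(0);
    CvSubdiv2D* subdiv = cvCreateSubdivDelaunay2D(rect, storage);

    cvSubdivDelaunay2DInsert(subdiv, cvPoint2D32f(100, 50));
    cvSubdivDelaunay2DInsert(subdiv, cvPoint2D32f(200, 300));
    cvSubdivDelaunay2DInsert(subdiv, cvPoint2D32f(400, 150));

    cvCalcSubdivVoronoi2D(subdiv);   /* compute the virtual (Voronoi) points */

    cvReleaseMemStorage(&storage);   /* releases the subdivision as well */

..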
CvQuadEdge2D
------------
.. ocv:struct:: CvQuadEdge2D
Quad-edge of a planar subdivision.
::
/* one of the edges within a quad-edge; the lower 2 bits are the index (0..3)
and the upper bits are the quad-edge pointer */
typedef long CvSubdiv2DEdge;
/* quad-edge structure fields */
#define CV_QUADEDGE2D_FIELDS() \
int flags; \
struct CvSubdiv2DPoint* pt[4]; \
CvSubdiv2DEdge next[4];
typedef struct CvQuadEdge2D
{
CV_QUADEDGE2D_FIELDS()
}
CvQuadEdge2D;
..
A quad-edge is the basic element of a subdivision; it contains four edges (e, eRot, reversed e, and reversed eRot):
.. image:: pics/quadedge.png
CvSubdiv2DPoint
---------------
.. ocv:struct:: CvSubdiv2DPoint
Point of an original or dual subdivision.
::
#define CV_SUBDIV2D_POINT_FIELDS()\
int flags; \
CvSubdiv2DEdge first; \
CvPoint2D32f pt; \
int id;
#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
typedef struct CvSubdiv2DPoint
{
CV_SUBDIV2D_POINT_FIELDS()
}
CvSubdiv2DPoint;
..
* id
This integer can be used to index auxiliary data associated with each vertex of the planar subdivision.
CalcSubdivVoronoi2D
-------------------
Calculates the coordinates of the Voronoi diagram cells.
.. ocv:cfunction:: void cvCalcSubdivVoronoi2D( CvSubdiv2D* subdiv )
.. ocv:pyoldfunction:: cv.CalcSubdivVoronoi2D(subdiv)-> None
:param subdiv: Delaunay subdivision, in which all the points are already added.
The function calculates the coordinates
of virtual points. All virtual points corresponding to a vertex of the
original subdivision form (when connected together) a boundary of the Voronoi
cell at that point.
ClearSubdivVoronoi2D
--------------------
Removes all virtual points.
.. ocv:cfunction:: void cvClearSubdivVoronoi2D( CvSubdiv2D* subdiv )
.. ocv:pyoldfunction:: cv.ClearSubdivVoronoi2D(subdiv)-> None
:param subdiv: Delaunay subdivision.
The function removes all of the virtual points. It is called internally in
:ocv:cfunc:`CalcSubdivVoronoi2D` if the subdivision was modified after the
previous call to the function.
CreateSubdivDelaunay2D
----------------------
Creates an empty Delaunay triangulation.
.. ocv:cfunction:: CvSubdiv2D* cvCreateSubdivDelaunay2D( CvRect rect, CvMemStorage* storage )
.. ocv:pyoldfunction:: cv.CreateSubdivDelaunay2D(rect, storage)-> emptyDelaunayTriangulation
:param rect: Rectangle that includes all of the 2D points that are to be added to the subdivision.
:param storage: Container for the subdivision.
The function creates an empty Delaunay
subdivision to which 2D points can be added using the function
:ocv:cfunc:`SubdivDelaunay2DInsert`. All of the points to be added must be within
the specified rectangle, otherwise a runtime error is raised.
Note that the triangulation initially consists of a single large triangle that
covers the given rectangle, so all three vertices of this triangle lie outside
the rectangle ``rect``.
FindNearestPoint2D
------------------
Finds the subdivision vertex closest to the given point.
.. ocv:cfunction:: CvSubdiv2DPoint* cvFindNearestPoint2D( CvSubdiv2D* subdiv, CvPoint2D32f pt )
.. ocv:pyoldfunction:: cv.FindNearestPoint2D(subdiv, pt)-> point
:param subdiv: Delaunay or another subdivision.
:param pt: Input point.
The function locates the input point within the subdivision. Unlike
:ocv:cfunc:`Subdiv2DLocate`, it finds the subdivision vertex that is
closest to the input point. This is not necessarily one of the vertices
of the facet containing the input point, though that facet (located using
:ocv:cfunc:`Subdiv2DLocate`) is used as a starting
point. The function returns a pointer to the found subdivision vertex.
Subdiv2DEdgeDst
---------------
Returns the edge destination.
.. ocv:cfunction:: CvSubdiv2DPoint* cvSubdiv2DEdgeDst( CvSubdiv2DEdge edge )
.. ocv:pyoldfunction:: cv.Subdiv2DEdgeDst(edge)-> point
:param edge: Subdivision edge (not a quad-edge).
The function returns the edge destination. The
returned pointer may be NULL if the edge is from a dual subdivision and
the virtual point coordinates are not calculated yet. The virtual points
can be calculated using the function
:ocv:cfunc:`CalcSubdivVoronoi2D`.
Subdiv2DGetEdge
---------------
Returns one of the edges related to the given edge.
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DGetEdge( CvSubdiv2DEdge edge, CvNextEdgeType type )
.. ocv:pyoldfunction:: cv.Subdiv2DGetEdge(edge, type)-> CvSubdiv2DEdge
:param edge: Subdivision edge (not a quad-edge).
:param type: Parameter specifying which of the related edges to return. The following values are possible:
* **CV_NEXT_AROUND_ORG** next around the edge origin ( ``eOnext`` on the picture below if ``e`` is the input edge)
* **CV_NEXT_AROUND_DST** next around the edge vertex ( ``eDnext`` )
* **CV_PREV_AROUND_ORG** previous around the edge origin (reversed ``eRnext`` )
* **CV_PREV_AROUND_DST** previous around the edge destination (reversed ``eLnext`` )
* **CV_NEXT_AROUND_LEFT** next around the left facet ( ``eLnext`` )
* **CV_NEXT_AROUND_RIGHT** next around the right facet ( ``eRnext`` )
* **CV_PREV_AROUND_LEFT** previous around the left facet (reversed ``eOnext`` )
* **CV_PREV_AROUND_RIGHT** previous around the right facet (reversed ``eDnext`` )
.. image:: pics/quadedge.png
The function returns one of the edges related to the input edge.
Subdiv2DNextEdge
----------------
Returns the next edge around the edge origin.
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DNextEdge( CvSubdiv2DEdge edge )
.. ocv:pyoldfunction:: cv.Subdiv2DNextEdge(edge)-> CvSubdiv2DEdge
:param edge: Subdivision edge (not a quad-edge).
The function returns the next edge around the edge origin
(``eOnext`` on the picture above if ``e`` is the input edge).
Subdiv2DLocate
--------------
Returns the location of a point within a Delaunay triangulation.
.. ocv:cfunction:: CvSubdiv2DPointLocation cvSubdiv2DLocate( CvSubdiv2D* subdiv, CvPoint2D32f pt, CvSubdiv2DEdge* edge, CvSubdiv2DPoint** vertex=NULL )
.. ocv:pyoldfunction:: cv.Subdiv2DLocate(subdiv, pt) -> (loc, where)
:param subdiv: Delaunay or another subdivision.
:param pt: Point to locate.
:param edge: Output edge that the point belongs to or is located to the right of.
:param vertex: Optional output pointer to a subdivision vertex that the input point coincides with.
The function locates the input point within the subdivision. There are five cases:

*
    The point falls into some facet. The function returns
    ``CV_PTLOC_INSIDE`` and ``*edge`` will contain one of the edges of the facet.

*
    The point falls onto the edge. The function returns
    ``CV_PTLOC_ON_EDGE`` and ``*edge`` will contain this edge.

*
    The point coincides with one of the subdivision vertices. The function returns
    ``CV_PTLOC_VERTEX`` and ``*vertex`` will contain a pointer to the vertex.

*
    The point is outside the subdivision reference rectangle. The function returns
    ``CV_PTLOC_OUTSIDE_RECT`` and no pointers are filled.

*
    One of the input arguments is invalid. A runtime error is raised or, if the silent or "parent" error processing mode is selected,
    ``CV_PTLOC_ERROR`` is returned.
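For example, a minimal sketch of dispatching on the location result (assuming
``subdiv`` was built as shown earlier; the query point is arbitrary):

::

    CvSubdiv2DEdge edge = 0;
    CvSubdiv2DPoint* vertex = 0;
    CvSubdiv2DPointLocation loc =
        cvSubdiv2DLocate(subdiv, cvPoint2D32f(150, 100), &edge, &vertex);

    switch (loc)
    {
    case CV_PTLOC_INSIDE:       /* edge is one of the edges of the facet */
        break;
    case CV_PTLOC_ON_EDGE:      /* edge is the edge the point lies on */
        break;
    case CV_PTLOC_VERTEX:       /* vertex points to the coinciding vertex */
        break;
    case CV_PTLOC_OUTSIDE_RECT: /* the point is outside the reference rectangle */
        break;
    default:                    /* CV_PTLOC_ERROR */
        break;
    }

..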
Subdiv2DRotateEdge
------------------
Returns another edge of the same quad-edge.
.. ocv:cfunction:: CvSubdiv2DEdge cvSubdiv2DRotateEdge( CvSubdiv2DEdge edge, int rotate )
.. ocv:pyoldfunction:: cv.Subdiv2DRotateEdge(edge, rotate)-> CvSubdiv2DEdge
:param edge: Subdivision edge (not a quad-edge).
:param rotate: Parameter specifying which of the edges of the same quad-edge as the input one to return. The following values are possible:
* **0** the input edge ( ``e`` on the picture below if ``e`` is the input edge)
* **1** the rotated edge ( ``eRot`` )
* **2** the reversed edge (reversed ``e`` (in green))
* **3** the reversed rotated edge (reversed ``eRot`` (in green))
The function returns one of the edges of the same quad-edge as the input edge.
SubdivDelaunay2DInsert
----------------------
Inserts a single point into a Delaunay triangulation.
.. ocv:cfunction:: CvSubdiv2DPoint* cvSubdivDelaunay2DInsert( CvSubdiv2D* subdiv, CvPoint2D32f pt)
.. ocv:pyoldfunction:: cv.SubdivDelaunay2DInsert(subdiv, pt)-> point
:param subdiv: Delaunay subdivision created by the function :ocv:cfunc:`CreateSubdivDelaunay2D`.
:param pt: Inserted point.
The function inserts a single point into a subdivision and modifies the subdivision topology appropriately. If a point with the same coordinates exists already, no new point is added. The function returns a pointer to the allocated point. No virtual point coordinates are calculated at this stage.
......@@ -19,8 +19,6 @@ import org.opencv.features2d.KeyPoint;
import org.opencv.test.OpenCVTestCase;
import org.opencv.test.OpenCVTestRunner;
import android.util.Log;
public class BruteForceDescriptorMatcherTest extends OpenCVTestCase {
DescriptorMatcher matcher;
......@@ -173,8 +171,6 @@ public class BruteForceDescriptorMatcherTest extends OpenCVTestCase {
Mat query = getQueryDescriptors();
List<MatOfDMatch> matches = new ArrayList<MatOfDMatch>();
matcher.knnMatch(query, train, matches, k);
Log.d("knnMatch", "train = " + train);
Log.d("knnMatch", "query = " + query);
/*
Log.d("knnMatch", "train = " + train);
Log.d("knnMatch", "query = " + query);
......
#include "converters.h"
#ifdef DEBUG
#include <android/log.h>
#define MODULE_LOG_TAG "OpenCV.converters"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, MODULE_LOG_TAG, __VA_ARGS__))
#else //DEBUG
#define LOGD(...)
#endif //DEBUG
using namespace cv;
#define CHECK_MAT(cond) if(!(cond)){ LOGD("FAILED: " #cond); return; }
// vector_int
void Mat_to_vector_int(Mat& mat, vector<int>& v_int)
{
v_int.clear();
CHECK_MAT(mat.type()==CV_32SC1 && mat.cols==1);
v_int = (vector<int>) mat;
}
void vector_int_to_Mat(vector<int>& v_int, Mat& mat)
{
mat = Mat(v_int, true);
}
//vector_double
void Mat_to_vector_double(Mat& mat, vector<double>& v_double)
{
v_double.clear();
CHECK_MAT(mat.type()==CV_64FC1 && mat.cols==1);
v_double = (vector<double>) mat;
}
void vector_double_to_Mat(vector<double>& v_double, Mat& mat)
{
mat = Mat(v_double, true);
}
// vector_float
void Mat_to_vector_float(Mat& mat, vector<float>& v_float)
{
v_float.clear();
CHECK_MAT(mat.type()==CV_32FC1 && mat.cols==1);
v_float = (vector<float>) mat;
}
void vector_float_to_Mat(vector<float>& v_float, Mat& mat)
{
mat = Mat(v_float, true);
}
//vector_uchar
void Mat_to_vector_uchar(Mat& mat, vector<uchar>& v_uchar)
{
v_uchar.clear();
CHECK_MAT(mat.type()==CV_8UC1 && mat.cols==1);
v_uchar = (vector<uchar>) mat;
}
void vector_uchar_to_Mat(vector<uchar>& v_uchar, Mat& mat)
{
mat = Mat(v_uchar, true);
}
void Mat_to_vector_char(Mat& mat, vector<char>& v_char)
{
v_char.clear();
CHECK_MAT(mat.type()==CV_8SC1 && mat.cols==1);
v_char = (vector<char>) mat;
}
void vector_char_to_Mat(vector<char>& v_char, Mat& mat)
{
mat = Mat(v_char, true);
}
//vector_Rect
void Mat_to_vector_Rect(Mat& mat, vector<Rect>& v_rect)
{
v_rect.clear();
CHECK_MAT(mat.type()==CV_32SC4 && mat.cols==1);
v_rect = (vector<Rect>) mat;
}
void vector_Rect_to_Mat(vector<Rect>& v_rect, Mat& mat)
{
mat = Mat(v_rect, true);
}
//vector_Point
void Mat_to_vector_Point(Mat& mat, vector<Point>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_32SC2 && mat.cols==1);
v_point = (vector<Point>) mat;
}
//vector_Point2f
void Mat_to_vector_Point2f(Mat& mat, vector<Point2f>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_32FC2 && mat.cols==1);
v_point = (vector<Point2f>) mat;
}
//vector_Point2d
void Mat_to_vector_Point2d(Mat& mat, vector<Point2d>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_64FC2 && mat.cols==1);
v_point = (vector<Point2d>) mat;
}
//vector_Point3i
void Mat_to_vector_Point3i(Mat& mat, vector<Point3i>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_32SC3 && mat.cols==1);
v_point = (vector<Point3i>) mat;
}
//vector_Point3f
void Mat_to_vector_Point3f(Mat& mat, vector<Point3f>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_32FC3 && mat.cols==1);
v_point = (vector<Point3f>) mat;
}
//vector_Point3d
void Mat_to_vector_Point3d(Mat& mat, vector<Point3d>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_64FC3 && mat.cols==1);
v_point = (vector<Point3d>) mat;
}
void vector_Point_to_Mat(vector<Point>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point2f_to_Mat(vector<Point2f>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point2d_to_Mat(vector<Point2d>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point3i_to_Mat(vector<Point3i>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point3f_to_Mat(vector<Point3f>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point3d_to_Mat(vector<Point3d>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
#ifdef HAVE_OPENCV_FEATURES2D
//vector_KeyPoint
void Mat_to_vector_KeyPoint(Mat& mat, vector<KeyPoint>& v_kp)
{
v_kp.clear();
CHECK_MAT(mat.type()==CV_32FC(7) && mat.cols==1);
for(int i=0; i<mat.rows; i++)
{
Vec<float, 7> v = mat.at< Vec<float, 7> >(i, 0);
KeyPoint kp(v[0], v[1], v[2], v[3], v[4], (int)v[5], (int)v[6]);
v_kp.push_back(kp);
}
return;
}
void vector_KeyPoint_to_Mat(vector<KeyPoint>& v_kp, Mat& mat)
{
int count = v_kp.size();
mat.create(count, 1, CV_32FC(7));
for(int i=0; i<count; i++)
{
KeyPoint kp = v_kp[i];
mat.at< Vec<float, 7> >(i, 0) = Vec<float, 7>(kp.pt.x, kp.pt.y, kp.size, kp.angle, kp.response, kp.octave, kp.class_id);
}
}
#endif
//vector_Mat
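// Note: the Java wrappers marshal a vector<Mat> as an Nx1 CV_32SC2 Mat whose
// two 32-bit channels hold the high and low halves of each native Mat*
// address (vector_Mat_to_Mat below performs the reverse packing).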
void Mat_to_vector_Mat(cv::Mat& mat, std::vector<cv::Mat>& v_mat)
{
v_mat.clear();
if(mat.type() == CV_32SC2 && mat.cols == 1)
{
v_mat.reserve(mat.rows);
for(int i=0; i<mat.rows; i++)
{
Vec<int, 2> a = mat.at< Vec<int, 2> >(i, 0);
long long addr = (((long long)a[0])<<32) | a[1];
Mat& m = *( (Mat*) addr );
v_mat.push_back(m);
}
} else {
LOGD("Mat_to_vector_Mat() FAILED: mat.type() == CV_32SC2 && mat.cols == 1");
}
}
void vector_Mat_to_Mat(std::vector<cv::Mat>& v_mat, cv::Mat& mat)
{
int count = v_mat.size();
mat.create(count, 1, CV_32SC2);
for(int i=0; i<count; i++)
{
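// heap-allocate a new Mat header; ownership is assumed to pass to the
// Java side, which is expected to release it eventually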
long long addr = (long long) new Mat(v_mat[i]);
mat.at< Vec<int, 2> >(i, 0) = Vec<int, 2>(addr>>32, addr&0xffffffff);
}
}
#ifdef HAVE_OPENCV_FEATURES2D
//vector_DMatch
void Mat_to_vector_DMatch(Mat& mat, vector<DMatch>& v_dm)
{
v_dm.clear();
CHECK_MAT(mat.type()==CV_32FC4 && mat.cols==1);
for(int i=0; i<mat.rows; i++)
{
Vec<float, 4> v = mat.at< Vec<float, 4> >(i, 0);
DMatch dm((int)v[0], (int)v[1], (int)v[2], v[3]);
v_dm.push_back(dm);
}
return;
}
void vector_DMatch_to_Mat(vector<DMatch>& v_dm, Mat& mat)
{
int count = v_dm.size();
mat.create(count, 1, CV_32FC4);
for(int i=0; i<count; i++)
{
DMatch dm = v_dm[i];
mat.at< Vec<float, 4> >(i, 0) = Vec<float, 4>(dm.queryIdx, dm.trainIdx, dm.imgIdx, dm.distance);
}
}
#endif
void Mat_to_vector_vector_Point(Mat& mat, vector< vector< Point > >& vv_pt)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<Point> vpt;
Mat_to_vector_Point(vm[i], vpt);
vv_pt.push_back(vpt);
}
}
void Mat_to_vector_vector_Point2f(Mat& mat, vector< vector< Point2f > >& vv_pt)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<Point2f> vpt;
Mat_to_vector_Point2f(vm[i], vpt);
vv_pt.push_back(vpt);
}
}
void Mat_to_vector_vector_Point3f(Mat& mat, vector< vector< Point3f > >& vv_pt)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<Point3f> vpt;
Mat_to_vector_Point3f(vm[i], vpt);
vv_pt.push_back(vpt);
}
}
#ifdef HAVE_OPENCV_FEATURES2D
void Mat_to_vector_vector_KeyPoint(Mat& mat, vector< vector< KeyPoint > >& vv_kp)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<KeyPoint> vkp;
Mat_to_vector_KeyPoint(vm[i], vkp);
vv_kp.push_back(vkp);
}
}
void vector_vector_KeyPoint_to_Mat(vector< vector< KeyPoint > >& vv_kp, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_kp.size() );
for(size_t i=0; i<vv_kp.size(); i++)
{
Mat m;
vector_KeyPoint_to_Mat(vv_kp[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void Mat_to_vector_vector_DMatch(Mat& mat, vector< vector< DMatch > >& vv_dm)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<DMatch> vdm;
Mat_to_vector_DMatch(vm[i], vdm);
vv_dm.push_back(vdm);
}
}
void vector_vector_DMatch_to_Mat(vector< vector< DMatch > >& vv_dm, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_dm.size() );
for(size_t i=0; i<vv_dm.size(); i++)
{
Mat m;
vector_DMatch_to_Mat(vv_dm[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
#endif
void Mat_to_vector_vector_char(Mat& mat, vector< vector< char > >& vv_ch)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<char> vch;
Mat_to_vector_char(vm[i], vch);
vv_ch.push_back(vch);
}
}
void vector_vector_char_to_Mat(vector< vector< char > >& vv_ch, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_ch.size() );
for(size_t i=0; i<vv_ch.size(); i++)
{
Mat m;
vector_char_to_Mat(vv_ch[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void vector_vector_Point_to_Mat(vector< vector< Point > >& vv_pt, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_pt.size() );
for(size_t i=0; i<vv_pt.size(); i++)
{
Mat m;
vector_Point_to_Mat(vv_pt[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void vector_vector_Point2f_to_Mat(vector< vector< Point2f > >& vv_pt, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_pt.size() );
for(size_t i=0; i<vv_pt.size(); i++)
{
Mat m;
vector_Point2f_to_Mat(vv_pt[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void vector_vector_Point3f_to_Mat(vector< vector< Point3f > >& vv_pt, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_pt.size() );
for(size_t i=0; i<vv_pt.size(); i++)
{
Mat m;
vector_Point3f_to_Mat(vv_pt[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void vector_Vec4i_to_Mat(vector<Vec4i>& v_vec, Mat& mat)
{
mat = Mat(v_vec, true);
}
void vector_Vec4f_to_Mat(vector<Vec4f>& v_vec, Mat& mat)
{
mat = Mat(v_vec, true);
}
void vector_Vec6f_to_Mat(vector<Vec6f>& v_vec, Mat& mat)
{
mat = Mat(v_vec, true);
}
#include "converters.h"
#ifdef DEBUG
#include <android/log.h>
#define MODULE_LOG_TAG "OpenCV.converters"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, MODULE_LOG_TAG, __VA_ARGS__))
#else //DEBUG
#define LOGD(...)
#endif //DEBUG
using namespace cv;
#define CHECK_MAT(cond) if(!(cond)){ LOGD("FAILED: " #cond); return; }
// vector_int
void Mat_to_vector_int(Mat& mat, vector<int>& v_int)
{
v_int.clear();
CHECK_MAT(mat.type()==CV_32SC1 && mat.cols==1);
v_int = (vector<int>) mat;
}
void vector_int_to_Mat(vector<int>& v_int, Mat& mat)
{
mat = Mat(v_int, true);
}
//vector_double
void Mat_to_vector_double(Mat& mat, vector<double>& v_double)
{
v_double.clear();
CHECK_MAT(mat.type()==CV_64FC1 && mat.cols==1);
v_double = (vector<double>) mat;
}
void vector_double_to_Mat(vector<double>& v_double, Mat& mat)
{
mat = Mat(v_double, true);
}
// vector_float
void Mat_to_vector_float(Mat& mat, vector<float>& v_float)
{
v_float.clear();
CHECK_MAT(mat.type()==CV_32FC1 && mat.cols==1);
v_float = (vector<float>) mat;
}
void vector_float_to_Mat(vector<float>& v_float, Mat& mat)
{
mat = Mat(v_float, true);
}
//vector_uchar
void Mat_to_vector_uchar(Mat& mat, vector<uchar>& v_uchar)
{
v_uchar.clear();
CHECK_MAT(mat.type()==CV_8UC1 && mat.cols==1);
v_uchar = (vector<uchar>) mat;
}
void vector_uchar_to_Mat(vector<uchar>& v_uchar, Mat& mat)
{
mat = Mat(v_uchar, true);
}
void Mat_to_vector_char(Mat& mat, vector<char>& v_char)
{
v_char.clear();
CHECK_MAT(mat.type()==CV_8SC1 && mat.cols==1);
v_char = (vector<char>) mat;
}
void vector_char_to_Mat(vector<char>& v_char, Mat& mat)
{
mat = Mat(v_char, true);
}
//vector_Rect
void Mat_to_vector_Rect(Mat& mat, vector<Rect>& v_rect)
{
v_rect.clear();
CHECK_MAT(mat.type()==CV_32SC4 && mat.cols==1);
v_rect = (vector<Rect>) mat;
}
void vector_Rect_to_Mat(vector<Rect>& v_rect, Mat& mat)
{
mat = Mat(v_rect, true);
}
//vector_Point
void Mat_to_vector_Point(Mat& mat, vector<Point>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_32SC2 && mat.cols==1);
v_point = (vector<Point>) mat;
}
//vector_Point2f
void Mat_to_vector_Point2f(Mat& mat, vector<Point2f>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_32FC2 && mat.cols==1);
v_point = (vector<Point2f>) mat;
}
//vector_Point2d
void Mat_to_vector_Point2d(Mat& mat, vector<Point2d>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_64FC2 && mat.cols==1);
v_point = (vector<Point2d>) mat;
}
//vector_Point3i
void Mat_to_vector_Point3i(Mat& mat, vector<Point3i>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_32SC3 && mat.cols==1);
v_point = (vector<Point3i>) mat;
}
//vector_Point3f
void Mat_to_vector_Point3f(Mat& mat, vector<Point3f>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_32FC3 && mat.cols==1);
v_point = (vector<Point3f>) mat;
}
//vector_Point3d
void Mat_to_vector_Point3d(Mat& mat, vector<Point3d>& v_point)
{
v_point.clear();
CHECK_MAT(mat.type()==CV_64FC3 && mat.cols==1);
v_point = (vector<Point3d>) mat;
}
void vector_Point_to_Mat(vector<Point>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point2f_to_Mat(vector<Point2f>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point2d_to_Mat(vector<Point2d>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point3i_to_Mat(vector<Point3i>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point3f_to_Mat(vector<Point3f>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
void vector_Point3d_to_Mat(vector<Point3d>& v_point, Mat& mat)
{
mat = Mat(v_point, true);
}
#ifdef HAVE_OPENCV_FEATURES2D
//vector_KeyPoint
void Mat_to_vector_KeyPoint(Mat& mat, vector<KeyPoint>& v_kp)
{
v_kp.clear();
CHECK_MAT(mat.type()==CV_32FC(7) && mat.cols==1);
for(int i=0; i<mat.rows; i++)
{
Vec<float, 7> v = mat.at< Vec<float, 7> >(i, 0);
KeyPoint kp(v[0], v[1], v[2], v[3], v[4], (int)v[5], (int)v[6]);
v_kp.push_back(kp);
}
return;
}
void vector_KeyPoint_to_Mat(vector<KeyPoint>& v_kp, Mat& mat)
{
int count = v_kp.size();
mat.create(count, 1, CV_32FC(7));
for(int i=0; i<count; i++)
{
KeyPoint kp = v_kp[i];
mat.at< Vec<float, 7> >(i, 0) = Vec<float, 7>(kp.pt.x, kp.pt.y, kp.size, kp.angle, kp.response, kp.octave, kp.class_id);
}
}
#endif
//vector_Mat
void Mat_to_vector_Mat(cv::Mat& mat, std::vector<cv::Mat>& v_mat)
{
v_mat.clear();
if(mat.type() == CV_32SC2 && mat.cols == 1)
{
v_mat.reserve(mat.rows);
for(int i=0; i<mat.rows; i++)
{
Vec<int, 2> a = mat.at< Vec<int, 2> >(i, 0);
long long addr = (((long long)a[0])<<32) | a[1];
Mat& m = *( (Mat*) addr );
v_mat.push_back(m);
}
} else {
LOGD("Mat_to_vector_Mat() FAILED: mat.type() == CV_32SC2 && mat.cols == 1");
}
}
void vector_Mat_to_Mat(std::vector<cv::Mat>& v_mat, cv::Mat& mat)
{
int count = v_mat.size();
mat.create(count, 1, CV_32SC2);
for(int i=0; i<count; i++)
{
long long addr = (long long) new Mat(v_mat[i]);
mat.at< Vec<int, 2> >(i, 0) = Vec<int, 2>(addr>>32, addr&0xffffffff);
}
}
#ifdef HAVE_OPENCV_FEATURES2D
//vector_DMatch
void Mat_to_vector_DMatch(Mat& mat, vector<DMatch>& v_dm)
{
v_dm.clear();
CHECK_MAT(mat.type()==CV_32FC4 && mat.cols==1);
for(int i=0; i<mat.rows; i++)
{
Vec<float, 4> v = mat.at< Vec<float, 4> >(i, 0);
DMatch dm((int)v[0], (int)v[1], (int)v[2], v[3]);
v_dm.push_back(dm);
}
return;
}
void vector_DMatch_to_Mat(vector<DMatch>& v_dm, Mat& mat)
{
int count = v_dm.size();
mat.create(count, 1, CV_32FC4);
for(int i=0; i<count; i++)
{
DMatch dm = v_dm[i];
mat.at< Vec<float, 4> >(i, 0) = Vec<float, 4>(dm.queryIdx, dm.trainIdx, dm.imgIdx, dm.distance);
}
}
#endif
void Mat_to_vector_vector_Point(Mat& mat, vector< vector< Point > >& vv_pt)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<Point> vpt;
Mat_to_vector_Point(vm[i], vpt);
vv_pt.push_back(vpt);
}
}
void Mat_to_vector_vector_Point2f(Mat& mat, vector< vector< Point2f > >& vv_pt)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<Point2f> vpt;
Mat_to_vector_Point2f(vm[i], vpt);
vv_pt.push_back(vpt);
}
}
void Mat_to_vector_vector_Point3f(Mat& mat, vector< vector< Point3f > >& vv_pt)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<Point3f> vpt;
Mat_to_vector_Point3f(vm[i], vpt);
vv_pt.push_back(vpt);
}
}
#ifdef HAVE_OPENCV_FEATURES2D
void Mat_to_vector_vector_KeyPoint(Mat& mat, vector< vector< KeyPoint > >& vv_kp)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<KeyPoint> vkp;
Mat_to_vector_KeyPoint(vm[i], vkp);
vv_kp.push_back(vkp);
}
}
void vector_vector_KeyPoint_to_Mat(vector< vector< KeyPoint > >& vv_kp, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_kp.size() );
for(size_t i=0; i<vv_kp.size(); i++)
{
Mat m;
vector_KeyPoint_to_Mat(vv_kp[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void Mat_to_vector_vector_DMatch(Mat& mat, vector< vector< DMatch > >& vv_dm)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<DMatch> vdm;
Mat_to_vector_DMatch(vm[i], vdm);
vv_dm.push_back(vdm);
}
}
void vector_vector_DMatch_to_Mat(vector< vector< DMatch > >& vv_dm, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_dm.size() );
for(size_t i=0; i<vv_dm.size(); i++)
{
Mat m;
vector_DMatch_to_Mat(vv_dm[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
#endif
void Mat_to_vector_vector_char(Mat& mat, vector< vector< char > >& vv_ch)
{
vector<Mat> vm;
vm.reserve( mat.rows );
Mat_to_vector_Mat(mat, vm);
for(size_t i=0; i<vm.size(); i++)
{
vector<char> vch;
Mat_to_vector_char(vm[i], vch);
vv_ch.push_back(vch);
}
}
void vector_vector_char_to_Mat(vector< vector< char > >& vv_ch, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_ch.size() );
for(size_t i=0; i<vv_ch.size(); i++)
{
Mat m;
vector_char_to_Mat(vv_ch[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void vector_vector_Point_to_Mat(vector< vector< Point > >& vv_pt, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_pt.size() );
for(size_t i=0; i<vv_pt.size(); i++)
{
Mat m;
vector_Point_to_Mat(vv_pt[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void vector_vector_Point2f_to_Mat(vector< vector< Point2f > >& vv_pt, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_pt.size() );
for(size_t i=0; i<vv_pt.size(); i++)
{
Mat m;
vector_Point2f_to_Mat(vv_pt[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void vector_vector_Point3f_to_Mat(vector< vector< Point3f > >& vv_pt, Mat& mat)
{
vector<Mat> vm;
vm.reserve( vv_pt.size() );
for(size_t i=0; i<vv_pt.size(); i++)
{
Mat m;
vector_Point3f_to_Mat(vv_pt[i], m);
vm.push_back(m);
}
vector_Mat_to_Mat(vm, mat);
}
void vector_Vec4i_to_Mat(vector<Vec4i>& v_vec, Mat& mat)
{
mat = Mat(v_vec, true);
}
void vector_Vec4f_to_Mat(vector<Vec4f>& v_vec, Mat& mat)
{
mat = Mat(v_vec, true);
}
void vector_Vec6f_to_Mat(vector<Vec6f>& v_vec, Mat& mat)
{
mat = Mat(v_vec, true);
}
......@@ -68,7 +68,6 @@ void vector_vector_Point_to_Mat(std::vector< std::vector< cv::Point > >& vv_pt,
void Mat_to_vector_vector_Point2f(cv::Mat& mat, std::vector< std::vector< cv::Point2f > >& vv_pt);
void vector_vector_Point2f_to_Mat(std::vector< std::vector< cv::Point2f > >& vv_pt, cv::Mat& mat);
void vector_vector_Point_to_Mat(std::vector< std::vector< cv::Point > >& vv_pt, cv::Mat& mat);
void Mat_to_vector_vector_Point3f(cv::Mat& mat, std::vector< std::vector< cv::Point3f > >& vv_pt);
void vector_vector_Point3f_to_Mat(std::vector< std::vector< cv::Point3f > >& vv_pt, cv::Mat& mat);
......@@ -4,38 +4,34 @@ import java.util.Arrays;
import java.util.List;
public class MatOfByte extends Mat {
// 8UC(x)
private static final int _depth = CvType.CV_8U;
private final int _channels;
public MatOfByte(int channels) {
super();
_channels = channels;
}
// 8UC(x)
private static final int _depth = CvType.CV_8U;
private static final int _channels = 1;
public MatOfByte() {
this(1);
super();
}
public MatOfByte(int channels, long addr) {
protected MatOfByte(long addr) {
super(addr);
_channels = channels;
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public MatOfByte(int channels, Mat m) {
super(m, Range.all());
_channels = channels;
public static MatOfByte fromNativeAddr(long addr) {
return new MatOfByte(addr);
}
public MatOfByte(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public MatOfByte(int channels, byte...a) {
public MatOfByte(byte...a) {
super();
_channels = channels;
fromArray(a);
}
......@@ -51,9 +47,11 @@ public class MatOfByte extends Mat {
alloc(num);
put(0, 0, a); //TODO: check ret val!
}
public byte[] toArray() {
int num = (int) total();
int num = checkVector(_channels, _depth);
if(num < 0)
throw new RuntimeException("Native Mat has unexpected type or size: " + toString());
byte[] a = new byte[num * _channels];
if(num == 0)
return a;
......@@ -62,20 +60,20 @@ public class MatOfByte extends Mat {
}
public void fromList(List<Byte> lb) {
if(lb==null || lb.size()==0)
return;
Byte ab[] = lb.toArray(null);
byte a[] = new byte[ab.length];
for(int i=0; i<ab.length; i++)
a[i] = ab[i];
fromArray(a);
if(lb==null || lb.size()==0)
return;
Byte ab[] = lb.toArray(new Byte[0]);
byte a[] = new byte[ab.length];
for(int i=0; i<ab.length; i++)
a[i] = ab[i];
fromArray(a);
}
public List<Byte> toList() {
byte[] a = toArray();
Byte ab[] = new Byte[a.length];
for(int i=0; i<a.length; i++)
ab[i] = a[i];
return Arrays.asList(ab);
byte[] a = toArray();
Byte ab[] = new Byte[a.length];
for(int i=0; i<a.length; i++)
ab[i] = a[i];
return Arrays.asList(ab);
}
}
......@@ -6,23 +6,27 @@ import java.util.List;
import org.opencv.features2d.DMatch;
public class MatOfDMatch extends Mat {
// 32FC4
private static final int _depth = CvType.CV_32F;
private static final int _channels = 4;
// 32FC4
private static final int _depth = CvType.CV_32F;
private static final int _channels = 4;
public MatOfDMatch() {
super();
}
public MatOfDMatch(long addr) {
protected MatOfDMatch(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public static MatOfDMatch fromNativeAddr(long addr) {
return new MatOfDMatch(addr);
}
public MatOfDMatch(Mat m) {
super(m, Range.all());
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
......@@ -32,7 +36,7 @@ public class MatOfDMatch extends Mat {
super();
fromArray(ap);
}
public void alloc(int elemNumber) {
if(elemNumber>0)
super.create(elemNumber, 1, CvType.makeType(_depth, _channels));
......@@ -68,12 +72,12 @@ public class MatOfDMatch extends Mat {
}
public void fromList(List<DMatch> ldm) {
DMatch adm[] = ldm.toArray(null);
fromArray(adm);
DMatch adm[] = ldm.toArray(new DMatch[0]);
fromArray(adm);
}
public List<DMatch> toList() {
DMatch[] adm = toArray();
return Arrays.asList(adm);
DMatch[] adm = toArray();
return Arrays.asList(adm);
}
}
......@@ -4,38 +4,34 @@ import java.util.Arrays;
import java.util.List;
public class MatOfDouble extends Mat {
// 64FC(x)
private static final int _depth = CvType.CV_64F;
private final int _channels;
public MatOfDouble(int channels) {
super();
_channels = channels;
}
// 64FC(x)
private static final int _depth = CvType.CV_64F;
private static final int _channels = 1;
public MatOfDouble() {
this(1);
super();
}
public MatOfDouble(int channels, long addr) {
protected MatOfDouble(long addr) {
super(addr);
_channels = channels;
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public MatOfDouble(int channels, Mat m) {
super(m, Range.all());
_channels = channels;
public static MatOfDouble fromNativeAddr(long addr) {
return new MatOfDouble(addr);
}
public MatOfDouble(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public MatOfDouble(int channels, double...a) {
public MatOfDouble(double...a) {
super();
_channels = channels;
fromArray(a);
}
......@@ -51,9 +47,11 @@ public class MatOfDouble extends Mat {
alloc(num);
put(0, 0, a); //TODO: check ret val!
}
public double[] toArray() {
int num = (int) total();
int num = checkVector(_channels, _depth);
if(num < 0)
throw new RuntimeException("Native Mat has unexpected type or size: " + toString());
double[] a = new double[num * _channels];
if(num == 0)
return a;
......@@ -62,20 +60,20 @@ public class MatOfDouble extends Mat {
}
public void fromList(List<Double> lb) {
if(lb==null || lb.size()==0)
return;
Double ab[] = lb.toArray(null);
double a[] = new double[ab.length];
for(int i=0; i<ab.length; i++)
a[i] = ab[i];
fromArray(a);
if(lb==null || lb.size()==0)
return;
Double ab[] = lb.toArray(new Double[0]);
double a[] = new double[ab.length];
for(int i=0; i<ab.length; i++)
a[i] = ab[i];
fromArray(a);
}
public List<Double> toList() {
double[] a = toArray();
Double ab[] = new Double[a.length];
for(int i=0; i<a.length; i++)
ab[i] = a[i];
return Arrays.asList(ab);
double[] a = toArray();
Double ab[] = new Double[a.length];
for(int i=0; i<a.length; i++)
ab[i] = a[i];
return Arrays.asList(ab);
}
}
......@@ -4,38 +4,34 @@ import java.util.Arrays;
import java.util.List;
public class MatOfFloat extends Mat {
// 32FC(x)
private static final int _depth = CvType.CV_32F;
private final int _channels;
public MatOfFloat(int channels) {
super();
_channels = channels;
}
// 32FC1
private static final int _depth = CvType.CV_32F;
private static final int _channels = 1;
public MatOfFloat() {
this(1);
super();
}
public MatOfFloat(int channels, long addr) {
protected MatOfFloat(long addr) {
super(addr);
_channels = channels;
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public static MatOfFloat fromNativeAddr(long addr) {
return new MatOfFloat(addr);
}
public MatOfFloat(int channels, Mat m) {
super(m, Range.all());
_channels = channels;
public MatOfFloat(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public MatOfFloat(int channels, float...a) {
public MatOfFloat(float...a) {
super();
_channels = channels;
fromArray(a);
}
......@@ -51,9 +47,11 @@ public class MatOfFloat extends Mat {
alloc(num);
put(0, 0, a); //TODO: check ret val!
}
public float[] toArray() {
int num = (int) total();
int num = checkVector(_channels, _depth);
if(num < 0)
throw new RuntimeException("Native Mat has unexpected type or size: " + toString());
float[] a = new float[num * _channels];
if(num == 0)
return a;
......@@ -62,20 +60,20 @@ public class MatOfFloat extends Mat {
}
public void fromList(List<Float> lb) {
if(lb==null || lb.size()==0)
return;
Float ab[] = lb.toArray(null);
float a[] = new float[ab.length];
for(int i=0; i<ab.length; i++)
a[i] = ab[i];
fromArray(a);
if(lb==null || lb.size()==0)
return;
Float ab[] = lb.toArray(new Float[0]);
float a[] = new float[ab.length];
for(int i=0; i<ab.length; i++)
a[i] = ab[i];
fromArray(a);
}
public List<Float> toList() {
float[] a = toArray();
Float ab[] = new Float[a.length];
for(int i=0; i<a.length; i++)
ab[i] = a[i];
return Arrays.asList(ab);
float[] a = toArray();
Float ab[] = new Float[a.length];
for(int i=0; i<a.length; i++)
ab[i] = a[i];
return Arrays.asList(ab);
}
}
......@@ -5,38 +5,34 @@ import java.util.List;
public class MatOfInt extends Mat {
// 32SC(x)
private static final int _depth = CvType.CV_32S;
private final int _channels;
public MatOfInt(int channels) {
super();
_channels = channels;
}
// 32SC1
private static final int _depth = CvType.CV_32S;
private static final int _channels = 1;
public MatOfInt() {
this(1);
super();
}
public MatOfInt(int channels, long addr) {
protected MatOfInt(long addr) {
super(addr);
_channels = channels;
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public MatOfInt(int channels, Mat m) {
super(m, Range.all());
_channels = channels;
public static MatOfInt fromNativeAddr(long addr) {
return new MatOfInt(addr);
}
public MatOfInt(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public MatOfInt(int channels, int...a) {
public MatOfInt(int...a) {
super();
_channels = channels;
fromArray(a);
}
......@@ -52,9 +48,11 @@ public class MatOfInt extends Mat {
alloc(num);
put(0, 0, a); //TODO: check ret val!
}
public int[] toArray() {
int num = (int) total();
int num = checkVector(_channels, _depth);
if(num < 0)
throw new RuntimeException("Native Mat has unexpected type or size: " + toString());
int[] a = new int[num * _channels];
if(num == 0)
return a;
......@@ -63,20 +61,20 @@ public class MatOfInt extends Mat {
}
public void fromList(List<Integer> lb) {
if(lb==null || lb.size()==0)
return;
Integer ab[] = lb.toArray(null);
int a[] = new int[ab.length];
for(int i=0; i<ab.length; i++)
a[i] = ab[i];
fromArray(a);
if(lb==null || lb.size()==0)
return;
Integer ab[] = lb.toArray(new Integer[0]);
int a[] = new int[ab.length];
for(int i=0; i<ab.length; i++)
a[i] = ab[i];
fromArray(a);
}
public List<Integer> toList() {
int[] a = toArray();
Integer ab[] = new Integer[a.length];
for(int i=0; i<a.length; i++)
ab[i] = a[i];
return Arrays.asList(ab);
int[] a = toArray();
Integer ab[] = new Integer[a.length];
for(int i=0; i<a.length; i++)
ab[i] = a[i];
return Arrays.asList(ab);
}
}
......@@ -6,23 +6,27 @@ import java.util.List;
import org.opencv.features2d.KeyPoint;
public class MatOfKeyPoint extends Mat {
// 32FC7
private static final int _depth = CvType.CV_32F;
private static final int _channels = 7;
// 32FC7
private static final int _depth = CvType.CV_32F;
private static final int _channels = 7;
public MatOfKeyPoint() {
super();
}
public MatOfKeyPoint(long addr) {
protected MatOfKeyPoint(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public static MatOfKeyPoint fromNativeAddr(long addr) {
return new MatOfKeyPoint(addr);
}
public MatOfKeyPoint(Mat m) {
super(m, Range.all());
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
......@@ -32,7 +36,7 @@ public class MatOfKeyPoint extends Mat {
super();
fromArray(a);
}
public void alloc(int elemNumber) {
if(elemNumber>0)
super.create(elemNumber, 1, CvType.makeType(_depth, _channels));
......@@ -71,12 +75,12 @@ public class MatOfKeyPoint extends Mat {
}
public void fromList(List<KeyPoint> lkp) {
KeyPoint akp[] = lkp.toArray(null);
fromArray(akp);
KeyPoint akp[] = lkp.toArray(new KeyPoint[0]);
fromArray(akp);
}
public List<KeyPoint> toList() {
KeyPoint[] akp = toArray();
return Arrays.asList(akp);
KeyPoint[] akp = toArray();
return Arrays.asList(akp);
}
}
......@@ -4,23 +4,27 @@ import java.util.Arrays;
import java.util.List;
public class MatOfPoint extends Mat {
// 32SC2
private static final int _depth = CvType.CV_32S;
private static final int _channels = 2;
// 32SC2
private static final int _depth = CvType.CV_32S;
private static final int _channels = 2;
public MatOfPoint() {
super();
}
public MatOfPoint(long addr) {
protected MatOfPoint(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public static MatOfPoint fromNativeAddr(long addr) {
return new MatOfPoint(addr);
}
public MatOfPoint(Mat m) {
super(m, Range.all());
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
......@@ -30,7 +34,7 @@ public class MatOfPoint extends Mat {
super();
fromArray(a);
}
public void alloc(int elemNumber) {
if(elemNumber>0)
super.create(elemNumber, 1, CvType.makeType(_depth, _channels));
......@@ -49,7 +53,7 @@ public class MatOfPoint extends Mat {
}
put(0, 0, buff); //TODO: check ret val!
}
public Point[] toArray() {
int num = (int) total();
Point[] ap = new Point[num];
......@@ -63,12 +67,12 @@ public class MatOfPoint extends Mat {
}
public void fromList(List<Point> lp) {
Point ap[] = lp.toArray(null);
fromArray(ap);
Point ap[] = lp.toArray(new Point[0]);
fromArray(ap);
}
public List<Point> toList() {
Point[] ap = toArray();
return Arrays.asList(ap);
Point[] ap = toArray();
return Arrays.asList(ap);
}
}
......@@ -4,23 +4,27 @@ import java.util.Arrays;
import java.util.List;
public class MatOfPoint2f extends Mat {
// 32FC2
private static final int _depth = CvType.CV_32F;
private static final int _channels = 2;
// 32FC2
private static final int _depth = CvType.CV_32F;
private static final int _channels = 2;
public MatOfPoint2f() {
super();
}
public MatOfPoint2f(long addr) {
protected MatOfPoint2f(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public static MatOfPoint2f fromNativeAddr(long addr) {
return new MatOfPoint2f(addr);
}
public MatOfPoint2f(Mat m) {
super(m, Range.all());
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
......@@ -30,7 +34,7 @@ public class MatOfPoint2f extends Mat {
super();
fromArray(a);
}
public void alloc(int elemNumber) {
if(elemNumber>0)
super.create(elemNumber, 1, CvType.makeType(_depth, _channels));
......@@ -49,7 +53,7 @@ public class MatOfPoint2f extends Mat {
}
put(0, 0, buff); //TODO: check ret val!
}
public Point[] toArray() {
int num = (int) total();
Point[] ap = new Point[num];
......@@ -63,12 +67,12 @@ public class MatOfPoint2f extends Mat {
}
public void fromList(List<Point> lp) {
Point ap[] = lp.toArray(null);
fromArray(ap);
Point ap[] = lp.toArray(new Point[0]);
fromArray(ap);
}
public List<Point> toList() {
Point[] ap = toArray();
return Arrays.asList(ap);
Point[] ap = toArray();
return Arrays.asList(ap);
}
}
......@@ -4,23 +4,27 @@ import java.util.Arrays;
import java.util.List;
public class MatOfPoint3 extends Mat {
// 32SC3
private static final int _depth = CvType.CV_32S;
private static final int _channels = 3;
// 32SC3
private static final int _depth = CvType.CV_32S;
private static final int _channels = 3;
public MatOfPoint3() {
super();
}
public MatOfPoint3(long addr) {
protected MatOfPoint3(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public static MatOfPoint3 fromNativeAddr(long addr) {
return new MatOfPoint3(addr);
}
public MatOfPoint3(Mat m) {
super(m, Range.all());
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
......@@ -30,7 +34,7 @@ public class MatOfPoint3 extends Mat {
super();
fromArray(a);
}
public void alloc(int elemNumber) {
if(elemNumber>0)
super.create(elemNumber, 1, CvType.makeType(_depth, _channels));
......@@ -50,7 +54,7 @@ public class MatOfPoint3 extends Mat {
}
put(0, 0, buff); //TODO: check ret val!
}
public Point3[] toArray() {
int num = (int) total();
Point3[] ap = new Point3[num];
......@@ -64,12 +68,12 @@ public class MatOfPoint3 extends Mat {
}
public void fromList(List<Point3> lp) {
Point3 ap[] = lp.toArray(null);
fromArray(ap);
Point3 ap[] = lp.toArray(new Point3[0]);
fromArray(ap);
}
public List<Point3> toList() {
Point3[] ap = toArray();
return Arrays.asList(ap);
Point3[] ap = toArray();
return Arrays.asList(ap);
}
}
......@@ -4,23 +4,27 @@ import java.util.Arrays;
import java.util.List;
public class MatOfPoint3f extends Mat {
// 32FC3
private static final int _depth = CvType.CV_32F;
private static final int _channels = 3;
// 32FC3
private static final int _depth = CvType.CV_32F;
private static final int _channels = 3;
public MatOfPoint3f() {
super();
}
public MatOfPoint3f(long addr) {
protected MatOfPoint3f(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public static MatOfPoint3f fromNativeAddr(long addr) {
return new MatOfPoint3f(addr);
}
public MatOfPoint3f(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
......@@ -30,7 +34,7 @@ public class MatOfPoint3f extends Mat {
super();
fromArray(a);
}
public void alloc(int elemNumber) {
if(elemNumber>0)
super.create(elemNumber, 1, CvType.makeType(_depth, _channels));
......@@ -50,7 +54,7 @@ public class MatOfPoint3f extends Mat {
}
put(0, 0, buff); //TODO: check ret val!
}
public Point3[] toArray() {
int num = (int) total();
Point3[] ap = new Point3[num];
......@@ -64,12 +68,12 @@ public class MatOfPoint3f extends Mat {
}
public void fromList(List<Point3> lp) {
Point3 ap[] = lp.toArray(null);
fromArray(ap);
Point3 ap[] = lp.toArray(new Point3[0]);
fromArray(ap);
}
public List<Point3> toList() {
Point3[] ap = toArray();
return Arrays.asList(ap);
}
}
......@@ -5,23 +5,27 @@ import java.util.List;
public class MatOfRect extends Mat {
// 32SC4
private static final int _depth = CvType.CV_32S;
private static final int _channels = 4;
public MatOfRect() {
super();
}
public MatOfRect(long addr) {
protected MatOfRect(long addr) {
super(addr);
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
}
public static MatOfRect fromNativeAddr(long addr) {
return new MatOfRect(addr);
}
public MatOfRect(Mat m) {
super(m, Range.all());
if(checkVector(_channels, _depth) < 0 )
throw new IllegalArgumentException("Incompatible Mat");
//FIXME: do we need release() here?
......@@ -31,7 +35,7 @@ public class MatOfRect extends Mat {
super();
fromArray(a);
}
public void alloc(int elemNumber) {
if(elemNumber>0)
super.create(elemNumber, 1, CvType.makeType(_depth, _channels));
......@@ -44,7 +48,7 @@ public class MatOfRect extends Mat {
alloc(num);
int buff[] = new int[num * _channels];
for(int i=0; i<num; i++) {
Rect r = a[i];
buff[_channels*i+0] = (int) r.x;
buff[_channels*i+1] = (int) r.y;
buff[_channels*i+2] = (int) r.width;
......@@ -52,7 +56,7 @@ public class MatOfRect extends Mat {
}
put(0, 0, buff); //TODO: check ret val!
}
public Rect[] toArray() {
int num = (int) total();
......@@ -66,12 +70,12 @@ public class MatOfRect extends Mat {
return a;
}
public void fromList(List<Rect> lr) {
Rect ap[] = lr.toArray(null);
fromArray(ap);
Rect ap[] = lr.toArray(new Rect[0]);
fromArray(ap);
}
public List<Rect> toList() {
Rect[] ar = toArray();
return Arrays.asList(ar);
}
}
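All four wrapper classes guard their Mat-accepting constructors the same way: checkVector(_channels, _depth) returns the element count when the Mat is a compatible vector of the expected type and -1 otherwise, which the constructors convert into an IllegalArgumentException. A sketch of the check in isolation, assuming a loaded native library (the class name is mine; the printed values reflect my understanding of checkVector's contract):

```java
import org.opencv.core.CvType;
import org.opencv.core.Mat;

public class CheckVectorDemo {
    public static void main(String[] args) {
        System.loadLibrary("opencv_java"); // library name varies by build

        // A 5x1 Mat of 32SC4 is the layout MatOfRect expects, so the
        // check reports the number of 4-channel elements.
        Mat ok = new Mat(5, 1, CvType.CV_32SC4);
        System.out.println(ok.checkVector(4, CvType.CV_32S));  // 5

        // A 3-channel Mat is rejected with -1, which MatOfRect(Mat)
        // turns into an IllegalArgumentException.
        Mat bad = new Mat(5, 1, CvType.CV_32SC3);
        System.out.println(bad.checkVector(4, CvType.CV_32S)); // -1
    }
}
```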
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
using namespace cv;
CvEMParams::CvEMParams() : nclusters(10), cov_mat_type(CvEM::COV_MAT_DIAGONAL),
start_step(CvEM::START_AUTO_STEP), probs(0), weights(0), means(0), covs(0)
{
term_crit=cvTermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, FLT_EPSILON );
}
CvEMParams::CvEMParams( int _nclusters, int _cov_mat_type, int _start_step,
CvTermCriteria _term_crit, const CvMat* _probs,
const CvMat* _weights, const CvMat* _means, const CvMat** _covs ) :
nclusters(_nclusters), cov_mat_type(_cov_mat_type), start_step(_start_step),
probs(_probs), weights(_weights), means(_means), covs(_covs), term_crit(_term_crit)
{}
CvEM::CvEM() : logLikelihood(DBL_MAX)
......@@ -61,7 +61,7 @@ CvEM::CvEM() : logLikelihood(DBL_MAX)
}
CvEM::CvEM( const CvMat* samples, const CvMat* sample_idx,
CvEMParams params, CvMat* labels ) : logLikelihood(DBL_MAX)
{
train(samples, sample_idx, params, labels);
}
......@@ -142,7 +142,7 @@ void init_params(const CvEMParams& src,
prbs = src.probs;
weights = src.weights;
means = src.means;
if(src.covs)
{
covsHdrs.resize(src.nclusters);
......@@ -152,7 +152,7 @@ void init_params(const CvEMParams& src,
}
bool CvEM::train( const CvMat* _samples, const CvMat* _sample_idx,
CvEMParams _params, CvMat* _labels )
{
CV_Assert(_sample_idx == 0);
Mat samples = cvarrToMat(_samples), labels0, labels;
......@@ -161,7 +161,7 @@ bool CvEM::train( const CvMat* _samples, const CvMat* _sample_idx,
bool isOk = train(samples, Mat(), _params, _labels ? &labels : 0);
CV_Assert( labels0.data == labels.data );
return isOk;
}
......@@ -204,7 +204,7 @@ bool CvEM::train( const Mat& _samples, const Mat& _sample_idx,
Mat prbs, weights, means, logLikelihoods;
std::vector<Mat> covsHdrs;
init_params(_params, prbs, weights, means, covsHdrs);
emObj = EM(_params.nclusters, _params.cov_mat_type, _params.term_crit);
bool isOk = false;
if( _params.start_step == EM::START_AUTO_STEP )
......@@ -224,7 +224,7 @@ bool CvEM::train( const Mat& _samples, const Mat& _sample_idx,
logLikelihood = sum(logLikelihoods).val[0];
set_mat_hdrs();
}
return isOk;
}
......
......@@ -45,45 +45,45 @@ using namespace std;
using namespace cv;
static
void defaultDistribs( Mat& means, vector<Mat>& covs )
void defaultDistribs( Mat& means, vector<Mat>& covs, int type=CV_32FC1 )
{
float mp0[] = {0.0f, 0.0f}, cp0[] = {0.67f, 0.0f, 0.0f, 0.67f};
float mp1[] = {5.0f, 0.0f}, cp1[] = {1.0f, 0.0f, 0.0f, 1.0f};
float mp2[] = {1.0f, 5.0f}, cp2[] = {1.0f, 0.0f, 0.0f, 1.0f};
means.create(3, 2, CV_32FC1);
means.create(3, 2, type);
Mat m0( 1, 2, CV_32FC1, mp0 ), c0( 2, 2, CV_32FC1, cp0 );
Mat m1( 1, 2, CV_32FC1, mp1 ), c1( 2, 2, CV_32FC1, cp1 );
Mat m2( 1, 2, CV_32FC1, mp2 ), c2( 2, 2, CV_32FC1, cp2 );
means.resize(3), covs.resize(3);
Mat mr0 = means.row(0);
m0.copyTo(mr0);
c0.copyTo(covs[0]);
m0.convertTo(mr0, type);
c0.convertTo(covs[0], type);
Mat mr1 = means.row(1);
m1.copyTo(mr1);
c1.copyTo(covs[1]);
m1.convertTo(mr1, type);
c1.convertTo(covs[1], type);
Mat mr2 = means.row(2);
m2.copyTo(mr2);
c2.copyTo(covs[2]);
m2.convertTo(mr2, type);
c2.convertTo(covs[2], type);
}
// generate points sets by normal distributions
static
void generateData( Mat& data, Mat& labels, const vector<int>& sizes, const Mat& _means, const vector<Mat>& covs, int labelType )
void generateData( Mat& data, Mat& labels, const vector<int>& sizes, const Mat& _means, const vector<Mat>& covs, int dataType, int labelType )
{
vector<int>::const_iterator sit = sizes.begin();
int total = 0;
for( ; sit != sizes.end(); ++sit )
total += *sit;
assert( _means.rows == (int)sizes.size() && covs.size() == sizes.size() );
assert( !data.empty() && data.rows == total );
assert( data.type() == CV_32FC1 );
CV_Assert( _means.rows == (int)sizes.size() && covs.size() == sizes.size() );
CV_Assert( !data.empty() && data.rows == total );
CV_Assert( data.type() == dataType );
labels.create( data.rows, 1, labelType );
randn( data, Scalar::all(0.0), Scalar::all(1.0) );
randn( data, Scalar::all(-1.0), Scalar::all(1.0) );
vector<Mat> means(sizes.size());
for(int i = 0; i < _means.rows; i++)
means[i] = _means.row(i);
......@@ -98,8 +98,8 @@ void generateData( Mat& data, Mat& labels, const vector<int>& sizes, const Mat&
assert( cit->rows == data.cols && cit->cols == data.cols );
for( int i = bi; i < ei; i++, p++ )
{
Mat r(1, data.cols, CV_32FC1, data.ptr<float>(i));
r = r * (*cit) + *mit;
Mat r = data.row(i);
r = r * (*cit) + *mit;
if( labelType == CV_32FC1 )
labels.at<float>(p, 0) = (float)l;
else if( labelType == CV_32SC1 )
......@@ -129,7 +129,7 @@ int maxIdx( const vector<int>& count )
}
static
bool getLabelsMap( const Mat& labels, const vector<int>& sizes, vector<int>& labelsMap )
bool getLabelsMap( const Mat& labels, const vector<int>& sizes, vector<int>& labelsMap, bool checkClusterUniq=true )
{
size_t total = 0, nclusters = sizes.size();
for(size_t i = 0; i < sizes.size(); i++)
......@@ -158,21 +158,25 @@ bool getLabelsMap( const Mat& labels, const vector<int>& sizes, vector<int>& lab
startIndex += sizes[clusterIndex];
int cls = maxIdx( count );
CV_Assert( !buzy[cls] );
CV_Assert( !checkClusterUniq || !buzy[cls] );
labelsMap[clusterIndex] = cls;
buzy[cls] = true;
}
for(size_t i = 0; i < buzy.size(); i++)
if(!buzy[i])
return false;
if(checkClusterUniq)
{
for(size_t i = 0; i < buzy.size(); i++)
if(!buzy[i])
return false;
}
return true;
}
static
bool calcErr( const Mat& labels, const Mat& origLabels, const vector<int>& sizes, float& err, bool labelsEquivalent = true )
bool calcErr( const Mat& labels, const Mat& origLabels, const vector<int>& sizes, float& err, bool labelsEquivalent, bool checkClusterUniq )
{
err = 0;
CV_Assert( !labels.empty() && !origLabels.empty() );
......@@ -186,7 +190,7 @@ bool calcErr( const Mat& labels, const Mat& origLabels, const vector<int>& sizes
bool isFlt = labels.type() == CV_32FC1;
if( !labelsEquivalent )
{
if( !getLabelsMap( labels, sizes, labelsMap ) )
if( !getLabelsMap( labels, sizes, labelsMap, checkClusterUniq ) )
return false;
for( int i = 0; i < labels.rows; i++ )
......@@ -234,7 +238,7 @@ int CV_CvEMTest::runCase( int caseIndex, const CvEMParams& params,
em.train( trainData, Mat(), params, &labels );
// check train error
if( !calcErr( labels, trainLabels, sizes, err , false ) )
if( !calcErr( labels, trainLabels, sizes, err , false, false ) )
{
ts->printf( cvtest::TS::LOG, "Case index %i : Bad output labels.\n", caseIndex );
code = cvtest::TS::FAIL_INVALID_OUTPUT;
......@@ -252,7 +256,7 @@ int CV_CvEMTest::runCase( int caseIndex, const CvEMParams& params,
Mat sample = testData.row(i);
labels.at<int>(i,0) = (int)em.predict( sample, 0 );
}
if( !calcErr( labels, testLabels, sizes, err, false ) )
if( !calcErr( labels, testLabels, sizes, err, false, false ) )
{
ts->printf( cvtest::TS::LOG, "Case index %i : Bad output labels.\n", caseIndex );
code = cvtest::TS::FAIL_INVALID_OUTPUT;
......@@ -279,11 +283,11 @@ void CV_CvEMTest::run( int /*start_from*/ )
// train data
Mat trainData( pointsCount, 2, CV_32FC1 ), trainLabels;
vector<int> sizes( sizesArr, sizesArr + sizeof(sizesArr) / sizeof(sizesArr[0]) );
generateData( trainData, trainLabels, sizes, means, covs, CV_32SC1 );
generateData( trainData, trainLabels, sizes, means, covs, CV_32FC1, CV_32SC1 );
// test data
Mat testData( pointsCount, 2, CV_32FC1 ), testLabels;
generateData( testData, testLabels, sizes, means, covs, CV_32SC1 );
generateData( testData, testLabels, sizes, means, covs, CV_32FC1, CV_32SC1 );
CvEMParams params;
params.nclusters = 3;
......@@ -440,5 +444,5 @@ protected:
}
};
TEST(ML_CvEM, accuracy) { CV_CvEMTest test; test.safe_run(); }
TEST(ML_CvEM, save_load) { CV_CvEMTest_SaveLoad test; test.safe_run(); }
TEST(Legacy_CvEM, accuracy) { CV_CvEMTest test; test.safe_run(); }
TEST(Legacy_CvEM, save_load) { CV_CvEMTest_SaveLoad test; test.safe_run(); }