Unverified Commit ebc80156 authored by Alexander Alekhin's avatar Alexander Alekhin Committed by GitHub

Merge pull request #13074 from alalek:cleanup_videoio

parents 35c7cab7 5869415a
...@@ -228,7 +228,6 @@ OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT) ...@@ -228,7 +228,6 @@ OCV_OPTION(WITH_CUFFT "Include NVidia Cuda Fast Fourier Transform (FFT)
OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BLAS) library support" ON IF (NOT IOS AND NOT WINRT) ) OCV_OPTION(WITH_CUBLAS "Include NVidia Cuda Basic Linear Algebra Subprograms (BLAS) library support" ON IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" ON IF (NOT IOS AND NOT APPLE) ) OCV_OPTION(WITH_NVCUVID "Include NVidia Video Decoding library support" ON IF (NOT IOS AND NOT APPLE) )
OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" (NOT CV_DISABLE_OPTIMIZATION) IF (NOT WINRT AND NOT CMAKE_CROSSCOMPILING) ) OCV_OPTION(WITH_EIGEN "Include Eigen2/Eigen3 support" (NOT CV_DISABLE_OPTIMIZATION) IF (NOT WINRT AND NOT CMAKE_CROSSCOMPILING) )
OCV_OPTION(WITH_VFW "Include Video for Windows support" ON IF WIN32 )
OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) ) OCV_OPTION(WITH_FFMPEG "Include FFMPEG support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) ) OCV_OPTION(WITH_GSTREAMER "Include Gstreamer support" ON IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_GSTREAMER_0_10 "Enable Gstreamer 0.10 support (instead of 1.x)" OFF ) OCV_OPTION(WITH_GSTREAMER_0_10 "Enable Gstreamer 0.10 support (instead of 1.x)" OFF )
...@@ -253,14 +252,11 @@ OCV_OPTION(WITH_GIGEAPI "Include Smartek GigE support" OFF ...@@ -253,14 +252,11 @@ OCV_OPTION(WITH_GIGEAPI "Include Smartek GigE support" OFF
OCV_OPTION(WITH_ARAVIS "Include Aravis GigE support" OFF IF (NOT ANDROID AND NOT IOS AND NOT WINRT AND NOT WIN32) ) OCV_OPTION(WITH_ARAVIS "Include Aravis GigE support" OFF IF (NOT ANDROID AND NOT IOS AND NOT WINRT AND NOT WIN32) )
OCV_OPTION(WITH_QT "Build with Qt Backend support" OFF IF (NOT ANDROID AND NOT IOS AND NOT WINRT) ) OCV_OPTION(WITH_QT "Build with Qt Backend support" OFF IF (NOT ANDROID AND NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_WIN32UI "Build with Win32 UI Backend support" ON IF WIN32 AND NOT WINRT) OCV_OPTION(WITH_WIN32UI "Build with Win32 UI Backend support" ON IF WIN32 AND NOT WINRT)
OCV_OPTION(WITH_QUICKTIME "Use QuickTime for Video I/O (OBSOLETE)" OFF IF APPLE )
OCV_OPTION(WITH_QTKIT "Use QTKit Video I/O backend" OFF IF APPLE )
OCV_OPTION(WITH_TBB "Include Intel TBB support" OFF IF (NOT IOS AND NOT WINRT) ) OCV_OPTION(WITH_TBB "Include Intel TBB support" OFF IF (NOT IOS AND NOT WINRT) )
OCV_OPTION(WITH_HPX "Include Ste||ar Group HPX support" OFF) OCV_OPTION(WITH_HPX "Include Ste||ar Group HPX support" OFF)
OCV_OPTION(WITH_OPENMP "Include OpenMP support" OFF) OCV_OPTION(WITH_OPENMP "Include OpenMP support" OFF)
OCV_OPTION(WITH_PTHREADS_PF "Use pthreads-based parallel_for" ON IF (NOT WIN32 OR MINGW) ) OCV_OPTION(WITH_PTHREADS_PF "Use pthreads-based parallel_for" ON IF (NOT WIN32 OR MINGW) )
OCV_OPTION(WITH_TIFF "Include TIFF support" ON IF (NOT IOS) ) OCV_OPTION(WITH_TIFF "Include TIFF support" ON IF (NOT IOS) )
OCV_OPTION(WITH_UNICAP "Include Unicap support (GPL)" OFF IF (UNIX AND NOT APPLE AND NOT ANDROID) )
OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON IF (UNIX AND NOT ANDROID AND NOT APPLE) ) OCV_OPTION(WITH_V4L "Include Video 4 Linux support" ON IF (UNIX AND NOT ANDROID AND NOT APPLE) )
OCV_OPTION(WITH_LIBV4L "Use libv4l for Video 4 Linux support" OFF IF (UNIX AND NOT ANDROID AND NOT APPLE) ) OCV_OPTION(WITH_LIBV4L "Use libv4l for Video 4 Linux support" OFF IF (UNIX AND NOT ANDROID AND NOT APPLE) )
OCV_OPTION(WITH_DSHOW "Build VideoIO with DirectShow support" ON IF (WIN32 AND NOT ARM AND NOT WINRT) ) OCV_OPTION(WITH_DSHOW "Build VideoIO with DirectShow support" ON IF (WIN32 AND NOT ARM AND NOT WINRT) )
...@@ -1245,15 +1241,9 @@ endif() ...@@ -1245,15 +1241,9 @@ endif()
status("") status("")
status(" Video I/O:") status(" Video I/O:")
if(WITH_VFW OR HAVE_VFW) if(WITH_1394 OR HAVE_DC1394_2)
status(" Video for Windows:" HAVE_VFW THEN YES ELSE NO)
endif()
if(WITH_1394 OR HAVE_DC1394)
if (HAVE_DC1394_2) if (HAVE_DC1394_2)
status(" DC1394:" "YES (ver ${ALIASOF_libdc1394-2_VERSION})") status(" DC1394:" "YES (ver ${ALIASOF_libdc1394-2_VERSION})")
elseif (HAVE_DC1394)
status(" DC1394:" "YES (ver ${ALIASOF_libdc1394_VERSION})")
else() else()
status(" DC1394:" "NO") status(" DC1394:" "NO")
endif() endif()
...@@ -1311,14 +1301,6 @@ if(APPLE) ...@@ -1311,14 +1301,6 @@ if(APPLE)
if(WITH_QUICKTIME OR HAVE_QUICKTIME) if(WITH_QUICKTIME OR HAVE_QUICKTIME)
status(" QuickTime:" HAVE_QUICKTIME THEN YES ELSE NO) status(" QuickTime:" HAVE_QUICKTIME THEN YES ELSE NO)
endif() endif()
if(WITH_QTKIT OR HAVE_QTKIT)
status(" QTKit:" HAVE_QTKIT THEN "YES (deprecated)" ELSE NO)
endif()
endif()
if(WITH_UNICAP OR HAVE_UNICAP)
status(" UniCap:" HAVE_UNICAP THEN "YES (ver ${ALIASOF_libunicap_VERSION})" ELSE NO)
status(" UniCap ucil:" HAVE_UNICAP_UCIL THEN "YES (ver ${ALIASOF_libucil_VERSION})" ELSE NO)
endif() endif()
if(WITH_V4L OR WITH_LIBV4L OR HAVE_LIBV4L OR HAVE_CAMV4L OR HAVE_CAMV4L2 OR HAVE_VIDEOIO) if(WITH_V4L OR WITH_LIBV4L OR HAVE_LIBV4L OR HAVE_CAMV4L OR HAVE_CAMV4L2 OR HAVE_VIDEOIO)
......
...@@ -2,14 +2,6 @@ ...@@ -2,14 +2,6 @@
# Detect 3rd-party video IO libraries # Detect 3rd-party video IO libraries
# ---------------------------------------------------------------------------- # ----------------------------------------------------------------------------
ocv_clear_vars(HAVE_VFW)
if(WITH_VFW)
try_compile(HAVE_VFW
"${OpenCV_BINARY_DIR}"
"${OpenCV_SOURCE_DIR}/cmake/checks/vfwtest.cpp"
CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=vfw32")
endif(WITH_VFW)
# --- GStreamer --- # --- GStreamer ---
ocv_clear_vars(HAVE_GSTREAMER) ocv_clear_vars(HAVE_GSTREAMER)
# try to find gstreamer 1.x first if 0.10 was not requested # try to find gstreamer 1.x first if 0.10 was not requested
...@@ -62,16 +54,6 @@ if(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10) ...@@ -62,16 +54,6 @@ if(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10)
endif() endif()
endif(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10) endif(WITH_GSTREAMER AND NOT HAVE_GSTREAMER OR WITH_GSTREAMER_0_10)
# --- unicap ---
ocv_clear_vars(HAVE_UNICAP)
if(WITH_UNICAP)
CHECK_MODULE(libunicap HAVE_UNICAP_ VIDEOIO)
CHECK_MODULE(libucil HAVE_UNICAP_UCIL VIDEOIO)
if(HAVE_UNICAP_ AND HAVE_UNICAP_UCIL)
set(HAVE_UNICAP TRUE)
endif()
endif(WITH_UNICAP)
# --- PvApi --- # --- PvApi ---
ocv_clear_vars(HAVE_PVAPI) ocv_clear_vars(HAVE_PVAPI)
if(WITH_PVAPI) if(WITH_PVAPI)
...@@ -286,12 +268,8 @@ endif(WITH_MSMF) ...@@ -286,12 +268,8 @@ endif(WITH_MSMF)
# --- Extra HighGUI and VideoIO libs on Windows --- # --- Extra HighGUI and VideoIO libs on Windows ---
if(WIN32) if(WIN32)
list(APPEND HIGHGUI_LIBRARIES comctl32 gdi32 ole32 setupapi ws2_32) list(APPEND HIGHGUI_LIBRARIES comctl32 gdi32 ole32 setupapi ws2_32)
if(HAVE_VFW)
list(APPEND VIDEOIO_LIBRARIES vfw32)
endif()
if(MINGW64) if(MINGW64)
list(APPEND VIDEOIO_LIBRARIES avifil32 avicap32 winmm msvfw32) list(APPEND VIDEOIO_LIBRARIES avifil32 avicap32 winmm msvfw32)
list(REMOVE_ITEM VIDEOIO_LIBRARIES vfw32)
elseif(MINGW) elseif(MINGW)
list(APPEND VIDEOIO_LIBRARIES winmm) list(APPEND VIDEOIO_LIBRARIES winmm)
endif() endif()
...@@ -301,13 +279,6 @@ if(APPLE) ...@@ -301,13 +279,6 @@ if(APPLE)
if(WITH_AVFOUNDATION) if(WITH_AVFOUNDATION)
set(HAVE_AVFOUNDATION YES) set(HAVE_AVFOUNDATION YES)
endif() endif()
if(NOT IOS)
if(WITH_QUICKTIME)
set(HAVE_QUICKTIME YES)
elseif(WITH_QTKIT)
set(HAVE_QTKIT YES)
endif()
endif()
endif(APPLE) endif(APPLE)
# --- Intel librealsense --- # --- Intel librealsense ---
......
// Minimal compile/link probe used by CMake (try_compile) to detect
// Video for Windows support: referencing AVIFileInit/AVIFileExit
// forces a successful link against vfw32.
#include <windows.h>
#include <vfw.h>

int main()
{
    AVIFileInit();
    AVIFileExit();
    return 0;
}
...@@ -163,12 +163,6 @@ ...@@ -163,12 +163,6 @@
/* Qt OpenGL support */ /* Qt OpenGL support */
#cmakedefine HAVE_QT_OPENGL #cmakedefine HAVE_QT_OPENGL
/* QuickTime video libraries */
#cmakedefine HAVE_QUICKTIME
/* QTKit video libraries */
#cmakedefine HAVE_QTKIT
/* Intel Threading Building Blocks */ /* Intel Threading Building Blocks */
#cmakedefine HAVE_TBB #cmakedefine HAVE_TBB
...@@ -178,12 +172,6 @@ ...@@ -178,12 +172,6 @@
/* TIFF codec */ /* TIFF codec */
#cmakedefine HAVE_TIFF #cmakedefine HAVE_TIFF
/* Unicap video capture library */
#cmakedefine HAVE_UNICAP
/* Video for Windows support */
#cmakedefine HAVE_VFW
/* V4L2 capturing support in videoio.h */ /* V4L2 capturing support in videoio.h */
#cmakedefine HAVE_VIDEOIO #cmakedefine HAVE_VIDEOIO
...@@ -223,8 +211,6 @@ ...@@ -223,8 +211,6 @@
#if defined(HAVE_XINE) || \ #if defined(HAVE_XINE) || \
defined(HAVE_GSTREAMER) || \ defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_AVFOUNDATION) || \ defined(HAVE_AVFOUNDATION) || \
/*defined(HAVE_OPENNI) || too specialized */ \ /*defined(HAVE_OPENNI) || too specialized */ \
defined(HAVE_FFMPEG) || \ defined(HAVE_FFMPEG) || \
...@@ -234,8 +220,6 @@ ...@@ -234,8 +220,6 @@
#if /*defined(HAVE_XINE) || */\ #if /*defined(HAVE_XINE) || */\
defined(HAVE_GSTREAMER) || \ defined(HAVE_GSTREAMER) || \
defined(HAVE_QUICKTIME) || \
defined(HAVE_QTKIT) || \
defined(HAVE_AVFOUNDATION) || \ defined(HAVE_AVFOUNDATION) || \
defined(HAVE_FFMPEG) || \ defined(HAVE_FFMPEG) || \
defined(HAVE_MSMF) defined(HAVE_MSMF)
......
...@@ -73,10 +73,6 @@ if(HAVE_MFX) ...@@ -73,10 +73,6 @@ if(HAVE_MFX)
list(APPEND VIDEOIO_LIBRARIES mfx) list(APPEND VIDEOIO_LIBRARIES mfx)
endif() endif()
if(WIN32 AND NOT ARM)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_cmu.cpp)
endif()
if (WIN32 AND HAVE_DSHOW) if (WIN32 AND HAVE_DSHOW)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dshow.cpp) list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dshow.cpp)
list(APPEND videoio_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dshow.hpp) list(APPEND videoio_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dshow.hpp)
...@@ -93,10 +89,6 @@ if (WIN32 AND HAVE_MSMF) ...@@ -93,10 +89,6 @@ if (WIN32 AND HAVE_MSMF)
endif() endif()
endif() endif()
if (WIN32 AND HAVE_VFW)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_vfw.cpp)
endif()
if(HAVE_XINE) if(HAVE_XINE)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_xine.cpp) list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_xine.cpp)
endif(HAVE_XINE) endif(HAVE_XINE)
...@@ -105,10 +97,6 @@ if(HAVE_DC1394_2) ...@@ -105,10 +97,6 @@ if(HAVE_DC1394_2)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dc1394_v2.cpp) list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dc1394_v2.cpp)
endif(HAVE_DC1394_2) endif(HAVE_DC1394_2)
if(HAVE_DC1394)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_dc1394.cpp)
endif(HAVE_DC1394)
if(HAVE_GSTREAMER) if(HAVE_GSTREAMER)
IF(WIN32) IF(WIN32)
INCLUDE_DIRECTORIES(${GSTREAMER_INCLUDE_DIR}) INCLUDE_DIRECTORIES(${GSTREAMER_INCLUDE_DIR})
...@@ -117,10 +105,6 @@ if(HAVE_GSTREAMER) ...@@ -117,10 +105,6 @@ if(HAVE_GSTREAMER)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_gstreamer.cpp) list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_gstreamer.cpp)
endif(HAVE_GSTREAMER) endif(HAVE_GSTREAMER)
if(HAVE_UNICAP)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_unicap.cpp)
endif(HAVE_UNICAP)
if(HAVE_LIBV4L) if(HAVE_LIBV4L)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_libv4l.cpp) list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_libv4l.cpp)
elseif(HAVE_CAMV4L2 OR HAVE_VIDEOIO) elseif(HAVE_CAMV4L2 OR HAVE_VIDEOIO)
...@@ -206,14 +190,6 @@ if(HAVE_AVFOUNDATION) ...@@ -206,14 +190,6 @@ if(HAVE_AVFOUNDATION)
endif() endif()
endif() endif()
if(HAVE_QUICKTIME)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_qt.cpp)
list(APPEND VIDEOIO_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
elseif(HAVE_QTKIT)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_qtkit.mm)
list(APPEND VIDEOIO_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
endif()
if(HAVE_INTELPERC) if(HAVE_INTELPERC)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_intelperc.cpp) list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_intelperc.cpp)
ocv_include_directories(${INTELPERC_INCLUDE_DIR}) ocv_include_directories(${INTELPERC_INCLUDE_DIR})
......
...@@ -15,7 +15,7 @@ I/O APIs used as backend. ...@@ -15,7 +15,7 @@ I/O APIs used as backend.
![Video I/O with OpenCV](pics/videoio_overview.svg) ![Video I/O with OpenCV](pics/videoio_overview.svg)
Some backends such as (DSHOW) Direct Show, Video For Windows (VFW), Microsoft Media Foundation (MSMF), Some backends such as (DSHOW) Direct Show, Microsoft Media Foundation (MSMF),
Video 4 Linux (V4L), etc... are interfaces to the video I/O library provided by the operating system. Video 4 Linux (V4L), etc... are interfaces to the video I/O library provided by the operating system.
Some others backends like OpenNI2 for Kinect, Intel Perceptual Computing SDK, GStreamer, Some others backends like OpenNI2 for Kinect, Intel Perceptual Computing SDK, GStreamer,
...@@ -62,11 +62,11 @@ cap.open(filename, cv::CAP_DSHOW); ...@@ -62,11 +62,11 @@ cap.open(filename, cv::CAP_DSHOW);
Backends are available only if they have been built with your OpenCV binaries. Backends are available only if they have been built with your OpenCV binaries.
Check in `opencv2/cvconfig.h` to know which APIs are currently available Check in `opencv2/cvconfig.h` to know which APIs are currently available
(e.g. `HAVE_MSMF, HAVE_VFW, HAVE_LIBV4L`, etc...). (e.g. `HAVE_MSMF, HAVE_V4L2`, etc...).
To enable/disable APIs, you have to: To enable/disable APIs, you have to:
1. re-configure OpenCV using appropriates CMake switches 1. re-configure OpenCV using appropriates CMake switches
(e.g. `-DWITH_MSMF=ON -DWITH_VFW=ON ... `) or checking related switch in cmake-gui (e.g. `-DWITH_MSMF=ON -DWITH_DSHOW=ON ... `) or checking related switch in cmake-gui
2. rebuild OpenCV itself 2. rebuild OpenCV itself
#### Use 3rd party drivers or cameras #### Use 3rd party drivers or cameras
......
...@@ -88,16 +88,16 @@ See @ref videoio_overview for more information. ...@@ -88,16 +88,16 @@ See @ref videoio_overview for more information.
*/ */
enum VideoCaptureAPIs { enum VideoCaptureAPIs {
CAP_ANY = 0, //!< Auto detect == 0 CAP_ANY = 0, //!< Auto detect == 0
CAP_VFW = 200, //!< Video For Windows (platform native) CAP_VFW = 200, //!< Video For Windows (obsolete, removed)
CAP_V4L = 200, //!< V4L/V4L2 capturing support via libv4l CAP_V4L = 200, //!< V4L/V4L2 capturing support via libv4l
CAP_V4L2 = CAP_V4L, //!< Same as CAP_V4L CAP_V4L2 = CAP_V4L, //!< Same as CAP_V4L
CAP_FIREWIRE = 300, //!< IEEE 1394 drivers CAP_FIREWIRE = 300, //!< IEEE 1394 drivers
CAP_FIREWARE = CAP_FIREWIRE, //!< Same as CAP_FIREWIRE CAP_FIREWARE = CAP_FIREWIRE, //!< Same value as CAP_FIREWIRE
CAP_IEEE1394 = CAP_FIREWIRE, //!< Same as CAP_FIREWIRE CAP_IEEE1394 = CAP_FIREWIRE, //!< Same value as CAP_FIREWIRE
CAP_DC1394 = CAP_FIREWIRE, //!< Same as CAP_FIREWIRE CAP_DC1394 = CAP_FIREWIRE, //!< Same value as CAP_FIREWIRE
CAP_CMU1394 = CAP_FIREWIRE, //!< Same as CAP_FIREWIRE CAP_CMU1394 = CAP_FIREWIRE, //!< Same value as CAP_FIREWIRE
CAP_QT = 500, //!< QuickTime CAP_QT = 500, //!< QuickTime (obsolete, removed)
CAP_UNICAP = 600, //!< Unicap drivers CAP_UNICAP = 600, //!< Unicap drivers (obsolete, removed)
CAP_DSHOW = 700, //!< DirectShow (via videoInput) CAP_DSHOW = 700, //!< DirectShow (via videoInput)
CAP_PVAPI = 800, //!< PvAPI, Prosilica GigE SDK CAP_PVAPI = 800, //!< PvAPI, Prosilica GigE SDK
CAP_OPENNI = 900, //!< OpenNI (for Kinect) CAP_OPENNI = 900, //!< OpenNI (for Kinect)
...@@ -815,8 +815,8 @@ public: ...@@ -815,8 +815,8 @@ public:
The constructors/functions initialize video writers. The constructors/functions initialize video writers.
- On Linux FFMPEG is used to write videos; - On Linux FFMPEG is used to write videos;
- On Windows FFMPEG or VFW is used; - On Windows FFMPEG or MSMF or DSHOW is used;
- On MacOSX QTKit is used. - On MacOSX AVFoundation is used.
*/ */
CV_WRAP VideoWriter(); CV_WRAP VideoWriter();
......
...@@ -29,7 +29,7 @@ PERF_TEST_P(VideoWriter_Writing, WriteFrame, ...@@ -29,7 +29,7 @@ PERF_TEST_P(VideoWriter_Writing, WriteFrame,
const string filename = getDataPath(get<0>(GetParam())); const string filename = getDataPath(get<0>(GetParam()));
const bool isColor = get<1>(GetParam()); const bool isColor = get<1>(GetParam());
Mat image = imread(filename, isColor ? IMREAD_COLOR : IMREAD_GRAYSCALE ); Mat image = imread(filename, isColor ? IMREAD_COLOR : IMREAD_GRAYSCALE );
#if defined(HAVE_MSMF) && !defined(HAVE_VFW) && !defined(HAVE_FFMPEG) // VFW has greater priority #if defined(HAVE_MSMF) && !defined(HAVE_FFMPEG)
const string outfile = cv::tempfile(".wmv"); const string outfile = cv::tempfile(".wmv");
const int fourcc = VideoWriter::fourcc('W', 'M', 'V', '3'); const int fourcc = VideoWriter::fourcc('W', 'M', 'V', '3');
#else #else
......
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#ifdef _WIN32
/****************** Capturing video from camera via CMU lib *******************/
#ifdef HAVE_CMU1394
// This firewire capability added by Philip Gruebele (pgruebele@cox.net).
// For this to work you need to install the CMU firewire DCAM drivers,
// located at http://www-2.cs.cmu.edu/~iwan/1394/.
#include "1394camera.h"
// VideoCapture backend for IEEE 1394 (FireWire) cameras through the CMU
// 1394 Digital Camera driver (Windows only). The driver camera objects
// live in the file-level CMU_theCamera array; `index` is this capture's
// slot in that array, with -1 meaning "not opened".
class CvCaptureCAM_CMU : public CvCapture
{
public:
    CvCaptureCAM_CMU()
    {
        index = -1;   // not attached to any camera yet
        image = 0;    // frame buffer allocated lazily in open()
    }

    virtual ~CvCaptureCAM_CMU()
    {
        close();
    }

    virtual bool open(int cameraId);
    virtual void close();
    virtual double getProperty(int) const CV_OVERRIDE;
    virtual bool setProperty(int, double) CV_OVERRIDE;
    virtual bool grabFrame() CV_OVERRIDE;
    virtual IplImage* retrieveFrame(int) CV_OVERRIDE;

protected:
    // Accessor for this capture's driver camera object (0 if closed).
    C1394Camera* camera();
    CvSize getSize();
    int getDepth();
    int getNChannels();

    bool setVideoSize(int, int);
    bool setMode(int mode);
    bool setFrameRate(int rate);
    bool setFormat(int format);

    int fps;     // frame-rate index, 0-5 (see CV_CAP_IEEE1394_FPS_*)
    int mode;    // video mode index, 0-7
    int format;  // video format index, 0-2 (7 = partial-image format?)
    int index;   // slot in CMU_theCamera, or -1 when closed
    IplImage* image;  // BGR frame buffer returned by retrieveFrame()
};
// CMU 1394 camera stuff.
// This firewire capability added by Philip Gruebele (pgruebele@cox.net)
// and modified by Roman Stanchak (rstanchak@yahoo.com).
// For this to work you need to install the CMU firewire DCAM drivers,
// located at http://www-2.cs.cmu.edu/~iwan/1394/.
// Shared module state: a single array of driver camera objects is
// allocated lazily on the first open() and reference-counted via
// CMU_numActiveCameras (freed again in close() when it drops to zero).
#define CMU_MAX_CAMERAS 20
int CMU_numCameras = 0;
int CMU_numActiveCameras = 0;
bool CMU_useCameraFlags[CMU_MAX_CAMERAS];  // per-slot "in use" flags
C1394Camera *CMU_theCamera = 0;

// Symbolic indices for frame rate, color coding and frame size used by
// the lookup tables below.
#define CV_CAP_IEEE1394_FPS_1_875 0
#define CV_CAP_IEEE1394_FPS_3_75 1
#define CV_CAP_IEEE1394_FPS_7_5 2
#define CV_CAP_IEEE1394_FPS_15 3
#define CV_CAP_IEEE1394_FPS_30 4
#define CV_CAP_IEEE1394_FPS_60 5

// index by size, color
#define CV_CAP_IEEE1394_COLOR_MONO 0
#define CV_CAP_IEEE1394_COLOR_MONO16 1
#define CV_CAP_IEEE1394_COLOR_YUV444 2
#define CV_CAP_IEEE1394_COLOR_YUV422 3
#define CV_CAP_IEEE1394_COLOR_YUV411 4
#define CV_CAP_IEEE1394_COLOR_RGB 5

#define CV_CAP_IEEE1394_SIZE_160X120 0
#define CV_CAP_IEEE1394_SIZE_320X240 1
#define CV_CAP_IEEE1394_SIZE_640X480 2
#define CV_CAP_IEEE1394_SIZE_800X600 3
#define CV_CAP_IEEE1394_SIZE_1024X768 4
#define CV_CAP_IEEE1394_SIZE_1280X960 5
#define CV_CAP_IEEE1394_SIZE_1600X1200 6

// [size][color] -> IEEE 1394 video format index, -1 = unsupported combination
//    1  16 444 422 411 RGB
static char CV_CAP_IEEE1394_FORMAT[7][6] =
{
    {-1, -1,  0, -1, -1, -1}, // 160x120
    {-1, -1, -1,  0, -1, -1}, // 320x240
    { 0,  0, -1,  0,  0,  0}, // 640x480
    { 1,  1, -1,  1, -1,  1}, // 800x600
    { 1,  1, -1,  1, -1,  1}, // 1024x768
    { 2,  2, -1,  2, -1,  2}, // 1280x960
    { 2,  2, -1,  2, -1,  2}  // 1600x1200
};

// [size][color] -> IEEE 1394 video mode index, -1 = unsupported combination
static char CV_CAP_IEEE1394_MODE[7][6] =
{
    {-1, -1,  0, -1, -1, -1}, // 160x120
    {-1, -1, -1,  1, -1, -1}, // 320x240
    { 5,  6, -1,  3,  2,  4}, // 640x480
    { 2,  6, -1,  0, -1,  1}, // 800x600
    { 5,  7, -1,  3, -1,  4}, // 1024x768
    { 2,  6, -1,  0, -1,  1}, // 1280x960
    { 5,  7, -1,  3, -1,  4}  // 1600x1200
};

// [format][mode] -> color coding (formats >= 2 share row 1)
static char CV_CAP_IEEE1394_COLOR[2][8] =
{
    {
        CV_CAP_IEEE1394_COLOR_YUV444,
        CV_CAP_IEEE1394_COLOR_YUV422,
        CV_CAP_IEEE1394_COLOR_YUV411,
        CV_CAP_IEEE1394_COLOR_YUV422,
        CV_CAP_IEEE1394_COLOR_RGB,
        CV_CAP_IEEE1394_COLOR_MONO,
        CV_CAP_IEEE1394_COLOR_MONO16
    },
    {
        CV_CAP_IEEE1394_COLOR_YUV422,
        CV_CAP_IEEE1394_COLOR_RGB,
        CV_CAP_IEEE1394_COLOR_MONO,
        CV_CAP_IEEE1394_COLOR_YUV422,
        CV_CAP_IEEE1394_COLOR_RGB,
        CV_CAP_IEEE1394_COLOR_MONO,
        CV_CAP_IEEE1394_COLOR_MONO16,
        CV_CAP_IEEE1394_COLOR_MONO16
    }
};
// convert frame rate to suitable enum
/*static int icvFrameRateToIndex_CMU(double framerate){
if(framerate > 30) return CV_CAP_IEEE1394_FPS_60;
else if(framerate > 15) return CV_CAP_IEEE1394_FPS_30;
else if(framerate > 7.5) return CV_CAP_IEEE1394_FPS_15;
else if(framerate > 3.75) return CV_CAP_IEEE1394_FPS_7_5;
else if(framerate > 1.875) return CV_CAP_IEEE1394_FPS_3_75;
return CV_CAP_IEEE1394_FPS_1_875;
}*/
#if _MSC_VER >= 1200
#pragma comment(lib,"1394camera.lib")
#endif
// Return this capture's driver camera object, or 0 when the shared
// camera array is not allocated or this capture is not opened.
C1394Camera* CvCaptureCAM_CMU::camera()
{
    if (CMU_theCamera && index >= 0)
        return &CMU_theCamera[index];
    return 0;
}
// Query the driver for the current frame dimensions and return them as
// a CvSize.
CvSize CvCaptureCAM_CMU::getSize()
{
    unsigned long w = 0, h = 0;
    camera()->GetVideoFrameDimensions(&w, &h);
    return cvSize((int)w, (int)h);
}
// return the opencv depth flag corresponding to the camera format
int CvCaptureCAM_CMU::getDepth()
{
C1394Camera* cmucam = camera();
int format = cmucam->GetVideoFormat();
int mode = cmucam->GetVideoMode();
// TODO
if( format==7 ) {
assert(0);
return 1;
}
// irrelvant to depth
if( format > 1 )
format = 1;
if( CV_CAP_IEEE1394_COLOR[format][mode]==CV_CAP_IEEE1394_COLOR_MONO16 )
return IPL_DEPTH_16S;
return IPL_DEPTH_8U;
}
// return the number of channels for camera
int CvCaptureCAM_CMU::getNChannels()
{
C1394Camera* cmucam = camera();
int format = cmucam->GetVideoFormat();
int mode = cmucam->GetVideoMode();
if( format==7 ){
assert(0);
return 1;
}
// irrelvant to nchannels
if( format > 1 )
format = 1;
switch(CV_CAP_IEEE1394_COLOR[format][mode]){
case CV_CAP_IEEE1394_COLOR_RGB:
return 3;
case CV_CAP_IEEE1394_COLOR_MONO:
case CV_CAP_IEEE1394_COLOR_MONO16:
return 1;
case CV_CAP_IEEE1394_COLOR_YUV422:
case CV_CAP_IEEE1394_COLOR_YUV444:
case CV_CAP_IEEE1394_COLOR_YUV411:
return 3;
default:
;
}
return -1;
}
// Open CMU camera `_index` (-1 = first unused). On first call the whole
// shared camera array is allocated and every bus link is checked. The
// best available format is chosen by scanning frame rate, then color,
// then size, from best to worst. Returns true on success; on failure
// the capture stays closed. Errors inside the body are signalled by
// throwing ints (caught at the bottom) — values are just step markers.
bool CvCaptureCAM_CMU::open( int _index )
{
    close();

    // if first time, then allocate all available cameras
    if( CMU_numCameras == 0 )
    {
        CMU_numActiveCameras = 0;
        CMU_theCamera = new C1394Camera[CMU_MAX_CAMERAS];

        // create all cameras
        try
        {
            // create camera0
            if( CMU_theCamera[0].CheckLink() != CAM_SUCCESS )
                throw 1;

            // we have one pin per camera
            CMU_numCameras = CMU_theCamera[0].GetNumberCameras();

            // allocate remaining cameras
            for(int i = 1; i < CMU_numCameras && i<CMU_MAX_CAMERAS; i++ )
            {
                CMU_useCameraFlags[i] = false;
                if (CMU_theCamera[i].CheckLink() != CAM_SUCCESS)
                    throw 1;
            }
        }
        catch (...)
        {
            // free any allocated cameras
            // NOTE(review): CMU_theCamera is NOT deleted here — the array
            // leaks on this path; only the counter is reset.
            CMU_numCameras = 0;
            return false;
        }
    }

    try
    {
        CvSize size;

        // pick first unused camera
        if(_index==-1){
            for(int i = 0; i < CMU_numCameras; i++ )
            {
                if( !CMU_useCameraFlags[i] ){
                    _index = i;
                    break;
                }
            }
        }

        // no empty camera found
        if (_index==-1)
            throw 1;

        if (CMU_theCamera[_index].SelectCamera(_index) != CAM_SUCCESS)
            throw 2;

        if (CMU_theCamera[_index].InitCamera() != CAM_SUCCESS)
            throw 3;

        // set initial format -- try to pick best frame rate first, then color, then size
        bool found_format = false;
        for (int rate=5; rate>=0 && !found_format; rate--)
        {
            for (int color=CV_CAP_IEEE1394_COLOR_RGB; color>=0 && !found_format; color--)
            {
                for (int size=CV_CAP_IEEE1394_SIZE_1600X1200; size>=0 && !found_format; size--)
                {
                    int format = CV_CAP_IEEE1394_FORMAT[size][color];
                    int mode = CV_CAP_IEEE1394_MODE[size][color];
                    if (format!=-1 && mode!=-1 &&
                        CMU_theCamera[_index].HasVideoFrameRate(format,mode,rate))
                    {
                        CMU_theCamera[_index].SetVideoFormat(format);
                        CMU_theCamera[_index].SetVideoMode(mode);
                        CMU_theCamera[_index].SetVideoFrameRate(rate);
                        // only a successful acquisition start counts as "found"
                        found_format = (CMU_theCamera[_index].StartImageAcquisition() == CAM_SUCCESS);
                    }
                }
            }
        }

        // fall back to format 7 (partial image) as a last resort
        if(!found_format){
            CMU_theCamera[_index].SetVideoFormat(7);
            CMU_theCamera[_index].SetVideoMode(0);
            if(CMU_theCamera[_index].StartImageAcquisition() != CAM_SUCCESS){
                // no format found
                throw 9;
            }
        }

        index = _index;
        size = getSize();

        // allocate image frame (8-bit, 3-channel BGR buffer)
        image = cvCreateImage( size, 8, 3 );
        cvZero(image);

        // successfully activated camera
        CMU_numActiveCameras++;
        CMU_useCameraFlags[_index] = true;
    }
    catch ( int )
    {
        return false;
    }

    return true;
}
void CvCaptureCAM_CMU::close()
{
C1394Camera* cmucam = camera();
if( cmucam )
{
cvReleaseImage( &image );
cmucam->StopImageAcquisition();
CMU_useCameraFlags[index] = false;
index = -1;
if( --CMU_numActiveCameras == 0 )
{
delete[] CMU_theCamera;
CMU_theCamera = 0;
CMU_numCameras = 0;
}
}
}
bool CvCaptureCAM_CMU::grabFrame()
{
C1394Camera* cmucam = camera();
return cmucam ? cmucam->AcquireImage() == CAM_SUCCESS : false;
}
/*static void swapRedBlue(IplImage * im)
{
uchar * ptr = (uchar *) im->imageData;
uchar t;
for(int i=0; i<im->height; i++){
ptr = (uchar *) im->imageData+im->widthStep*i;
for(int j=0; j<im->width; j++){
t = ptr[0];
ptr[0] = ptr[2];
ptr[2] = t;
ptr+=3;
}
}
}*/
// Copy the grabbed frame into the internal buffer and return it.
// The CMU driver hands back RGB data; OpenCV convention is BGR, so the
// red and blue channels are swapped in place.
IplImage* CvCaptureCAM_CMU::retrieveFrame(int)
{
    C1394Camera* cam = camera();
    if (!cam)
        return 0;

    cam->getRGB((uchar*)image->imageData, image->imageSize);
    cvConvertImage(image, image, CV_CVTIMG_SWAP_RB);
    return image;
}
double CvCaptureCAM_CMU::getProperty( int property_id ) const
{
C1394Camera* cmucam = camera();
if( !cmucam )
return 0;
switch( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
return image->width;
case CV_CAP_PROP_FRAME_HEIGHT:
return image->height;
case CV_CAP_PROP_FPS:
return cmucam->GetVideoFrameRate();
case CV_CAP_PROP_MODE:
return cmucam->GetVideoMode();
case CV_CAP_PROP_FORMAT:
return cmucam->GetVideoFormat();
}
return 0;
}
// Direct resolution changes are not supported by this backend; callers
// select a size implicitly via mode/format instead. Always fails.
bool CvCaptureCAM_CMU::setVideoSize(int, int)
{
    return false;
}
bool CvCaptureCAM_CMU::setMode(int mode)
{
int format;
C1394Camera* cmucam = camera();
if( !cmucam )
return false;
format = cmucam->GetVideoFormat();
if( mode < 0 || mode > 7 || !cmucam->HasVideoMode(format, mode))
return false;
cmucam->StopImageAcquisition();
cmucam->SetVideoMode(mode);
cmucam->StartImageAcquisition();
return true;
}
bool CvCaptureCAM_CMU::setFrameRate(int rate)
{
int format, mode;
C1394Camera* cmucam = camera();
if( !cmucam )
return false;
mode = cmucam->GetVideoMode();
format = cmucam->GetVideoFormat();
if( rate < 0 || rate > 5 || !cmucam->HasVideoFrameRate(format, mode, rate) )
return false;
cmucam->StopImageAcquisition();
cmucam->SetVideoFrameRate(rate);
cmucam->StartImageAcquisition();
return true;
}
bool CvCaptureCAM_CMU::setFormat(int format)
{
C1394Camera* cmucam = camera();
if( !cmucam )
return false;
if( format < 0 || format > 2 || !cmucam->HasVideoFormat(format) )
return false;
cmucam->StopImageAcquisition();
cmucam->SetVideoFormat(format);
cmucam->StartImageAcquisition();
return true;
}
// Set a capture property. Width/height requests derive the other
// dimension assuming a 4:3 aspect ratio (and currently always fail,
// since setVideoSize is unsupported). After any change the frame buffer
// is reallocated if the camera's frame size no longer matches it.
// Returns whether the underlying driver change succeeded.
bool CvCaptureCAM_CMU::setProperty( int property_id, double value )
{
    bool retval = false;
    int ival = cvRound(value);
    C1394Camera* cmucam = camera();
    if( !cmucam )
        return false;
    switch (property_id) {
        case CV_CAP_PROP_FRAME_WIDTH:
        case CV_CAP_PROP_FRAME_HEIGHT:
        {
            int width, height;
            // derive the unspecified dimension assuming 4:3
            if (property_id == CV_CAP_PROP_FRAME_WIDTH)
            {
                width = ival;
                height = width*3/4;
            }
            else {
                height = ival;
                width = height*4/3;
            }
            retval = setVideoSize(width, height);
        }
        break;

        case CV_CAP_PROP_FPS:
            retval = setFrameRate(ival);
            break;

        case CV_CAP_PROP_MODE:
            retval = setMode(ival);
            break;

        case CV_CAP_PROP_FORMAT:
            retval = setFormat(ival);
            break;
    }

    // resize image if its not the right size anymore
    // (cvReleaseImage tolerates a null pointer)
    CvSize size = getSize();
    if( !image || image->width != size.width || image->height != size.height )
    {
        cvReleaseImage( &image );
        image = cvCreateImage( size, 8, 3 );
    }
    return retval;
}
// Factory: create a CMU 1394 capture for camera `index` (-1 = first
// unused). Returns 0 when the camera cannot be opened.
CvCapture * cvCreateCameraCapture_CMU (int index)
{
    CvCaptureCAM_CMU* cap = new CvCaptureCAM_CMU;
    if (!cap->open(index))
    {
        delete cap;
        return 0;
    }
    return cap;
}
#endif // CMU
#endif // _WIN32
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
// Original implementation by Mark Asbach
// Institute of Communications Engineering
// RWTH Aachen University
//
// For implementation details and background see:
// http://developer.apple.com/samplecode/qtframestepper.win/listing1.html
//
// Please note that timing will only be correct for videos that contain a visual track
// that has full length (compared to other tracks)
// standard includes
#include <cstdio>
#include <cassert>
// Mac OS includes
#include <Carbon/Carbon.h>
#include <CoreFoundation/CoreFoundation.h>
#include <QuickTime/QuickTime.h>
// Global state (did we call EnterMovies?)
static int did_enter_movies = 0;
// ----------------------------------------------------------------------------------------
#pragma mark Reading Video Files
/// Movie state structure for QuickTime movie-file playback.
/// Filled in by icvOpenFile_QT_Movie() and torn down by icvClose_QT_Movie().
typedef struct CvCapture_QT_Movie
{
Movie myMovie; // movie handle
GWorldPtr myGWorld; // we render into an offscreen GWorld
CvSize size; // dimensions of the movie
TimeValue movie_start_time; // movies can start at arbitrary times
long number_of_frames; // duration in frames
long next_frame_time; // movie time of the next video sample to display
long next_frame_number; // 0-based index of the next frame
IplImage * image_rgb; // will point to the PixMap of myGWorld
IplImage * image_bgr; // will be returned by icvRetrieveFrame_QT()
} CvCapture_QT_Movie;
static int icvOpenFile_QT_Movie (CvCapture_QT_Movie * capture, const char * filename);
static int icvClose_QT_Movie (CvCapture_QT_Movie * capture);
static double icvGetProperty_QT_Movie (CvCapture_QT_Movie * capture, int property_id);
static int icvSetProperty_QT_Movie (CvCapture_QT_Movie * capture, int property_id, double value);
static int icvGrabFrame_QT_Movie (CvCapture_QT_Movie * capture);
static const void * icvRetrieveFrame_QT_Movie (CvCapture_QT_Movie * capture, int);
/**
 * Allocate a movie-file capture state and open the given file.
 *
 * @param filename  path of the movie file; may be NULL (returns NULL then)
 * @return a heap-allocated capture state, or NULL on failure
 *
 * Fix: the function previously declared its own `static int did_enter_movies`,
 * which shadowed the file-scope flag used by icvCaptureFromCam_QT(); both
 * entry points are now guarded by the one shared flag, so EnterMovies() is
 * called exactly once per process regardless of which path runs first.
 */
static CvCapture_QT_Movie * icvCaptureFromFile_QT (const char * filename)
{
    // one-time initialization of QuickTime's Movie Toolbox (shared flag)
    if (! did_enter_movies)
    {
        EnterMovies();
        did_enter_movies = 1;
    }
    CvCapture_QT_Movie * capture = 0;
    if (filename)
    {
        capture = (CvCapture_QT_Movie *) cvAlloc (sizeof (*capture));
        memset (capture, 0, sizeof(*capture));
        if (!icvOpenFile_QT_Movie (capture, filename))
            cvFree( &capture );
    }
    return capture;
}
/**
 * convert full path to CFStringRef and open corresponding Movie. Then
 * step over 'interesting frame times' to count total number of frames
 * for video material with varying frame durations and create offscreen
 * GWorld for rendering the movie frames.
 *
 * @param capture   capture state to fill in (fields are reset here first)
 * @param filename  path of the movie file, interpreted as ISO Latin-1
 * @return 1 on success, 0 on failure
 *
 * Fix: the CFStringRef created below follows Core Foundation's "Create rule"
 * (caller owns it) and was never released, leaking one CFString per opened
 * movie; it is now released as soon as the data reference has been built.
 *
 * @author Mark Asbach <asbach@ient.rwth-aachen.de>
 * @date 2005-11-04
 */
static int icvOpenFile_QT_Movie (CvCapture_QT_Movie * capture, const char * filename)
{
    Rect myRect;
    short myResID = 0;
    Handle myDataRef = nil;
    OSType myDataRefType = 0;
    OSErr myErr = noErr;
    // no old errors please
    ClearMoviesStickyError ();
    // initialize pointers to zero
    capture->myMovie = 0;
    capture->myGWorld = nil;
    // initialize numbers with invalid values
    capture->next_frame_time = -1;
    capture->next_frame_number = -1;
    capture->number_of_frames = -1;
    capture->movie_start_time = -1;
    capture->size = cvSize (-1,-1);
    // we would use CFStringCreateWithFileSystemRepresentation (kCFAllocatorDefault, filename) on Mac OS X 10.4
    CFStringRef inPath = CFStringCreateWithCString (kCFAllocatorDefault, filename, kCFStringEncodingISOLatin1);
    OPENCV_ASSERT ((inPath != nil), "icvOpenFile_QT_Movie", "couldn't create CFString from a string");
    // create the data reference
    myErr = QTNewDataReferenceFromFullPathCFString (inPath, kQTPOSIXPathStyle, 0, & myDataRef, & myDataRefType);
    // we own inPath (Create rule) and no longer need it -- release it on
    // both the error and the success path (fixes a per-call leak)
    CFRelease (inPath);
    if (myErr != noErr)
    {
        fprintf (stderr, "Couldn't create QTNewDataReferenceFromFullPathCFString().\n");
        return 0;
    }
    // get the Movie
    myErr = NewMovieFromDataRef(& capture->myMovie, newMovieActive | newMovieAsyncOK /* | newMovieIdleImportOK */,
                                & myResID, myDataRef, myDataRefType);
    // dispose of the data reference handle - we no longer need it
    DisposeHandle (myDataRef);
    // if NewMovieFromDataRef failed, we already disposed the DataRef, so just return with an error
    if (myErr != noErr)
    {
        fprintf (stderr, "Couldn't create a NewMovieFromDataRef() - error is %d.\n", myErr);
        return 0;
    }
    // count the number of video 'frames' in the movie by stepping through all of the
    // video 'interesting times', or in other words, the places where the movie displays
    // a new video sample. The time between these interesting times is not necessarily constant.
    {
        OSType whichMediaType = VisualMediaCharacteristic;
        TimeValue theTime = -1;
        // find out movie start time
        GetMovieNextInterestingTime (capture->myMovie, short (nextTimeMediaSample + nextTimeEdgeOK),
                                     1, & whichMediaType, TimeValue (0), 0, & theTime, NULL);
        if (theTime == -1)
        {
            fprintf (stderr, "Couldn't inquire first frame time\n");
            return 0;
        }
        capture->movie_start_time = theTime;
        capture->next_frame_time = theTime;
        capture->next_frame_number = 0;
        // count all 'interesting times' of the movie
        capture->number_of_frames = 0;
        while (theTime >= 0)
        {
            GetMovieNextInterestingTime (capture->myMovie, short (nextTimeMediaSample),
                                         1, & whichMediaType, theTime, 0, & theTime, NULL);
            capture->number_of_frames++;
        }
    }
    // get the bounding rectangle of the movie
    GetMoviesError ();
    GetMovieBox (capture->myMovie, & myRect);
    capture->size = cvSize (myRect.right - myRect.left, myRect.bottom - myRect.top);
    // create gworld for decompressed image
    // (k24BGRPixelFormat unfortunately does not work here)
    myErr = QTNewGWorld (& capture->myGWorld, k32ARGBPixelFormat,
                         & myRect, nil, nil, 0);
    OPENCV_ASSERT (myErr == noErr, "icvOpenFile_QT_Movie", "couldn't create QTNewGWorld() for output image");
    SetMovieGWorld (capture->myMovie, capture->myGWorld, nil);
    // build IplImage header that will point to the PixMap of the Movie's GWorld later on
    capture->image_rgb = cvCreateImageHeader (capture->size, IPL_DEPTH_8U, 4);
    // create IplImage that hold correctly formatted result
    capture->image_bgr = cvCreateImage (capture->size, IPL_DEPTH_8U, 3);
    // okay, that's it - should we wait until the Movie is playable?
    return 1;
}
/**
 * dispose of the QuickTime Movie and free the associated image buffers
 *
 * @param capture  capture state to tear down (must not be NULL)
 * @return always 1
 *
 * @author Mark Asbach <asbach@ient.rwth-aachen.de>
 * @date 2005-11-04
 */
static int icvClose_QT_Movie (CvCapture_QT_Movie * capture)
{
    OPENCV_ASSERT (capture, "icvClose_QT_Movie", "'capture' is a NULL-pointer");
    // release rendering buffers and the movie itself -- but only when a
    // movie was actually opened into this capture state
    if (capture->myMovie != 0)
    {
        cvReleaseImage       (& capture->image_bgr);
        cvReleaseImageHeader (& capture->image_rgb);
        DisposeGWorld        (capture->myGWorld);
        DisposeMovie         (capture->myMovie);
    }
    return 1;
}
/**
 * get a capture property
 *
 * @param capture      capture state (must hold a valid, opened Movie)
 * @param property_id  one of the CV_CAP_PROP_* constants
 * @return the property value; CV_StsBadArg for unknown/unhandled properties
 *
 * Fix: the OPENCV_ERROR diagnostic in the default branch wrongly named
 * 'icvSetProperty_QT_Movie' (copy/paste error); it now reports this function.
 *
 * @author Mark Asbach <asbach@ient.rwth-aachen.de>
 * @date 2005-11-05
 */
static double icvGetProperty_QT_Movie (CvCapture_QT_Movie * capture, int property_id)
{
    OPENCV_ASSERT (capture, "icvGetProperty_QT_Movie", "'capture' is a NULL-pointer");
    OPENCV_ASSERT (capture->myMovie, "icvGetProperty_QT_Movie", "invalid Movie handle");
    OPENCV_ASSERT (capture->number_of_frames > 0, "icvGetProperty_QT_Movie", "movie has invalid number of frames");
    OPENCV_ASSERT (capture->movie_start_time >= 0, "icvGetProperty_QT_Movie", "movie has invalid start time");
    // inquire desired property
    switch (property_id)
    {
        case CV_CAP_PROP_POS_FRAMES:
            return (capture->next_frame_number);
        case CV_CAP_PROP_POS_MSEC:
        case CV_CAP_PROP_POS_AVI_RATIO:
        {
            // both are derived from the position relative to the movie start
            TimeValue position = capture->next_frame_time - capture->movie_start_time;
            if (property_id == CV_CAP_PROP_POS_MSEC)
            {
                TimeScale timescale = GetMovieTimeScale (capture->myMovie);
                return (static_cast<double> (position) * 1000.0 / timescale);
            }
            else
            {
                TimeValue duration = GetMovieDuration (capture->myMovie);
                return (static_cast<double> (position) / duration);
            }
        }
        break; // never reached
        case CV_CAP_PROP_FRAME_WIDTH:
            return static_cast<double> (capture->size.width);
        case CV_CAP_PROP_FRAME_HEIGHT:
            return static_cast<double> (capture->size.height);
        case CV_CAP_PROP_FPS:
        {
            // average rate: total frames over total duration in seconds
            TimeValue duration = GetMovieDuration (capture->myMovie);
            TimeScale timescale = GetMovieTimeScale (capture->myMovie);
            return (capture->number_of_frames / (static_cast<double> (duration) / timescale));
        }
        case CV_CAP_PROP_FRAME_COUNT:
            return static_cast<double> (capture->number_of_frames);
        case CV_CAP_PROP_FOURCC: // not implemented
        case CV_CAP_PROP_FORMAT: // not implemented
        case CV_CAP_PROP_MODE: // not implemented
        default:
            // unhandled or unknown capture property
            OPENCV_ERROR (CV_StsBadArg, "icvGetProperty_QT_Movie", "unknown or unhandled property_id");
            return CV_StsBadArg;
    }
    return 0;
}
/**
 * set a capture property. With movie files, it is only possible to set the
 * position (i.e. jump to a given time or frame number)
 *
 * @param capture      capture state (must hold a valid, opened Movie)
 * @param property_id  CV_CAP_PROP_POS_MSEC, _POS_AVI_RATIO or _POS_FRAMES
 * @param value        target position: milliseconds, ratio in [0..1], or frame index
 * @return 1 on success, 0 on failure
 *
 * @author Mark Asbach <asbach@ient.rwth-aachen.de>
 * @date 2005-11-05
 */
static int icvSetProperty_QT_Movie (CvCapture_QT_Movie * capture, int property_id, double value)
{
OPENCV_ASSERT (capture, "icvSetProperty_QT_Movie", "'capture' is a NULL-pointer");
OPENCV_ASSERT (capture->myMovie, "icvSetProperty_QT_Movie", "invalid Movie handle");
OPENCV_ASSERT (capture->number_of_frames > 0, "icvSetProperty_QT_Movie", "movie has invalid number of frames");
OPENCV_ASSERT (capture->movie_start_time >= 0, "icvSetProperty_QT_Movie", "movie has invalid start time");
// inquire desired property
//
// TODO: rework these three points to really work through 'interesting times'.
// with the current implementation, they result in wrong times or wrong frame numbers with content that
// features varying frame durations
switch (property_id)
{
case CV_CAP_PROP_POS_MSEC:
case CV_CAP_PROP_POS_AVI_RATIO:
{
TimeValue destination;
OSType myType = VisualMediaCharacteristic;
OSErr myErr = noErr;
// translate the requested position into an absolute movie time
if (property_id == CV_CAP_PROP_POS_MSEC)
{
TimeScale timescale = GetMovieTimeScale (capture->myMovie);
destination = static_cast<TimeValue> (value / 1000.0 * timescale + capture->movie_start_time);
}
else
{
TimeValue duration = GetMovieDuration (capture->myMovie);
destination = static_cast<TimeValue> (value * duration + capture->movie_start_time);
}
// really seek?
if (capture->next_frame_time == destination)
break;
// seek into which direction?
// step frame-by-frame through 'interesting times' so that
// next_frame_number stays in sync with next_frame_time
if (capture->next_frame_time < destination)
{
// seek forward until the next frame time reaches the destination
while (capture->next_frame_time < destination)
{
capture->next_frame_number++;
GetMovieNextInterestingTime (capture->myMovie, nextTimeStep, 1, & myType, capture->next_frame_time,
1, & capture->next_frame_time, NULL);
myErr = GetMoviesError();
if (myErr != noErr)
{
fprintf (stderr, "Couldn't go on to GetMovieNextInterestingTime() in icvGrabFrame_QT.\n");
return 0;
}
}
}
else
{
// seek backward (flags == -1 walks to the previous interesting time)
while (capture->next_frame_time > destination)
{
capture->next_frame_number--;
GetMovieNextInterestingTime (capture->myMovie, nextTimeStep, 1, & myType, capture->next_frame_time,
-1, & capture->next_frame_time, NULL);
myErr = GetMoviesError();
if (myErr != noErr)
{
fprintf (stderr, "Couldn't go back to GetMovieNextInterestingTime() in icvGrabFrame_QT.\n");
return 0;
}
}
}
}
break;
case CV_CAP_PROP_POS_FRAMES:
{
// step one frame at a time toward the requested frame index,
// updating next_frame_time along the way
TimeValue destination = static_cast<TimeValue> (value);
short direction = (destination > capture->next_frame_number) ? 1 : -1;
OSType myType = VisualMediaCharacteristic;
OSErr myErr = noErr;
while (destination != capture->next_frame_number)
{
capture->next_frame_number += direction;
GetMovieNextInterestingTime (capture->myMovie, nextTimeStep, 1, & myType, capture->next_frame_time,
direction, & capture->next_frame_time, NULL);
myErr = GetMoviesError();
if (myErr != noErr)
{
fprintf (stderr, "Couldn't step to desired frame number in icvGrabFrame_QT.\n");
return 0;
}
}
}
break;
default:
// unhandled or unknown capture property
OPENCV_ERROR (CV_StsBadArg, "icvSetProperty_QT_Movie", "unknown or unhandled property_id");
return 0;
}
// positive result means success
return 1;
}
/**
 * the original meaning of this method is to acquire raw frame data for the
 * next video frame but not decompress it. With the QuickTime video reader,
 * this is reduced to advancing the movie to the pre-computed next frame time.
 *
 * @param capture  capture state (must hold a valid, opened Movie)
 * @return 1 on success, 0 on failure
 *
 * @author Mark Asbach <asbach@ient.rwth-aachen.de>
 * @date 2005-11-06
 */
static int icvGrabFrame_QT_Movie (CvCapture_QT_Movie * capture)
{
    OPENCV_ASSERT (capture, "icvGrabFrame_QT_Movie", "'capture' is a NULL-pointer");
    OPENCV_ASSERT (capture->myMovie, "icvGrabFrame_QT_Movie", "invalid Movie handle");
    OSType mediaType = VisualMediaCharacteristic;
    // position the movie at the time of the next video sample
    SetMovieTimeValue (capture->myMovie, capture->next_frame_time);
    if (GetMoviesError () != noErr)
    {
        fprintf (stderr, "Couldn't SetMovieTimeValue() in icvGrabFrame_QT_Movie.\n");
        return 0;
    }
    // find out where we actually landed
    TimeValue currentTime = GetMovieTime (capture->myMovie, NULL);
    // advance the bookkeeping to the following frame
    capture->next_frame_number++;
    GetMovieNextInterestingTime (capture->myMovie, nextTimeStep, 1, & mediaType, currentTime, 1, & capture->next_frame_time, NULL);
    if (GetMoviesError () != noErr)
    {
        fprintf (stderr, "Couldn't GetMovieNextInterestingTime() in icvGrabFrame_QT_Movie.\n");
        return 0;
    }
    return 1;
}
/**
 * render the current frame into an image buffer and convert to OpenCV
 * IplImage buffer layout (BGR sampling)
 *
 * @param capture  capture state (Movie, GWorld and images must be set up)
 * @return pointer to the capture's BGR image, or 0 on failure; the same
 *         buffer is returned on every call
 *
 * @author Mark Asbach <asbach@ient.rwth-aachen.de>
 * @date 2005-11-06
 */
static const void * icvRetrieveFrame_QT_Movie (CvCapture_QT_Movie * capture, int)
{
    OPENCV_ASSERT (capture, "icvRetrieveFrame_QT_Movie", "'capture' is a NULL-pointer");
    OPENCV_ASSERT (capture->myMovie, "icvRetrieveFrame_QT_Movie", "invalid Movie handle");
    OPENCV_ASSERT (capture->image_rgb, "icvRetrieveFrame_QT_Movie", "invalid source image");
    OPENCV_ASSERT (capture->image_bgr, "icvRetrieveFrame_QT_Movie", "invalid destination image");
    // invalidate the movie's display state so the Movie Toolbox redraws it
    // on the next MoviesTask() call
    UpdateMovie (capture->myMovie);
    if (GetMoviesError () != noErr)
    {
        fprintf (stderr, "Couldn't UpdateMovie() in icvRetrieveFrame_QT_Movie().\n");
        return 0;
    }
    // service the active movie, i.e. redraw into the GWorld immediately
    MoviesTask (capture->myMovie, 0L);
    if (GetMoviesError () != noErr)
    {
        fprintf (stderr, "MoviesTask() didn't succeed in icvRetrieveFrame_QT_Movie().\n");
        return 0;
    }
    // cvCvtColor doesn't know ARGB (QuickTime's pixel format), so the RGB
    // header is pointed one byte past the PixMap base address.
    // ATTENTION: never access the last pixel's alpha entry -- it doesn't exist
    PixMapHandle pixmap = GetGWorldPixMap (capture->myGWorld);
    LockPixels (pixmap);
    cvSetData (capture->image_rgb, GetPixBaseAddr (pixmap) + 1, GetPixRowBytes (pixmap));
    // convert the GWorld's RGB data to BGR
    cvCvtColor (capture->image_rgb, capture->image_bgr, CV_RGBA2BGR);
    // allow QuickTime to access the buffer again
    UnlockPixels (pixmap);
    return capture->image_bgr;
}
// ----------------------------------------------------------------------------------------
#pragma mark -
#pragma mark Capturing from Video Cameras
#ifdef USE_VDIG_VERSION
/// Camera capture state for the video-digitizer (VDIG) implementation.
typedef struct CvCapture_QT_Cam_vdig
{
ComponentInstance grabber; // opened video digitizer component
short channel; // selected input channel of the digitizer
GWorldPtr myGWorld; // offscreen render target
PixMapHandle pixmap; // pixmap of the digitizer's play-through destination
CvSize size; // frame dimensions
long number_of_frames;
IplImage * image_rgb; // will point to the PixMap of myGWorld
IplImage * image_bgr; // will be returned by icvRetrieveFrame_QT()
} CvCapture_QT_Cam;
#else
/// Camera capture state for the Sequence Grabber implementation (default).
typedef struct CvCapture_QT_Cam_barg
{
SeqGrabComponent grabber; // opened sequence grabber component
SGChannel channel; // video channel of the grabber
GWorldPtr gworld; // offscreen GWorld the data proc decompresses into
Rect bounds; // channel bounds (the camera's actual resolution)
ImageSequence sequence; // decompression sequence, created lazily in the data proc
volatile bool got_frame; // set by icvDataProc_QT_Cam when a frame arrived
CvSize size; // frame dimensions
IplImage * image_rgb; // will point to the PixMap of myGWorld
IplImage * image_bgr; // will be returned by icvRetrieveFrame_QT()
} CvCapture_QT_Cam;
#endif
static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index);
static int icvClose_QT_Cam (CvCapture_QT_Cam * capture);
static double icvGetProperty_QT_Cam (CvCapture_QT_Cam * capture, int property_id);
static int icvSetProperty_QT_Cam (CvCapture_QT_Cam * capture, int property_id, double value);
static int icvGrabFrame_QT_Cam (CvCapture_QT_Cam * capture);
static const void * icvRetrieveFrame_QT_Cam (CvCapture_QT_Cam * capture, int);
/**
 * Initialize memory structure and call method to open camera
 *
 * @param index  camera index; must be >= 0 to attempt opening
 * @return a heap-allocated camera capture state, or NULL on failure
 *
 * @author Mark Asbach <asbach@ient.rwth-aachen.de>
 * @date 2006-01-29
 */
static CvCapture_QT_Cam * icvCaptureFromCam_QT (const int index)
{
    // one-time initialization of QuickTime's Movie Toolbox
    if (! did_enter_movies)
    {
        EnterMovies();
        did_enter_movies = 1;
    }
    CvCapture_QT_Cam * result = 0;
    if (index >= 0)
    {
        result = (CvCapture_QT_Cam *) cvAlloc (sizeof (*result));
        memset (result, 0, sizeof (*result));
        if (! icvOpenCamera_QT (result, index))
            cvFree (& result);
    }
    return result;
}
/// capture properties are currently unimplemented for the QuickTime camera
/// interface -- this stub aborts in debug builds and returns 0 otherwise
static double icvGetProperty_QT_Cam (CvCapture_QT_Cam * capture, int property_id)
{
    (void) capture;
    (void) property_id;
    assert (0); // not implemented
    return 0;
}
/// capture properties are currently unimplemented for the QuickTime camera
/// interface -- this stub aborts in debug builds and returns 0 otherwise
static int icvSetProperty_QT_Cam (CvCapture_QT_Cam * capture, int property_id, double value)
{
    (void) capture;
    (void) property_id;
    (void) value;
    assert (0); // not implemented
    return 0;
}
#ifdef USE_VDIG_VERSION
#pragma mark Capturing using VDIG
/**
 * Open a quicktime video grabber component. This could be an attached
 * IEEE1394 camera, a web cam, an iSight or digitizer card / video converter.
 *
 * Walks all video digitizer components, counting their inputs, until the
 * global input number 'index' is reached, then configures that digitizer
 * to render into an offscreen GWorld.
 *
 * @param capture  zero-initialized capture state to fill in
 * @param index    global input index across all video digitizer components
 * @return 1 on success, 0 on failure (failed asserts abort earlier)
 *
 * @author Mark Asbach <asbach@ient.rwth-aachen.de>
 * @date 2006-01-29
 */
static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
{
OPENCV_ASSERT (capture, "icvOpenCamera_QT", "'capture' is a NULL-pointer");
OPENCV_ASSERT (index >=0, "icvOpenCamera_QT", "camera index is negative");
ComponentDescription component_description;
Component component = 0;
int number_of_inputs = 0;
Rect myRect;
ComponentResult result = noErr;
// traverse all components and count video digitizer channels
component_description.componentType = videoDigitizerComponentType;
component_description.componentSubType = 0L;
component_description.componentManufacturer = 0L;
component_description.componentFlags = 0L;
component_description.componentFlagsMask = 0L;
do
{
// traverse component list
component = FindNextComponent (component, & component_description);
// found a component?
if (component)
{
// dump component name
#ifndef NDEBUG
ComponentDescription desc;
Handle nameHandle = NewHandleClear (200);
char nameBuffer [255];
result = GetComponentInfo (component, & desc, nameHandle, nil, nil);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't GetComponentInfo()");
OPENCV_ASSERT (*nameHandle, "icvOpenCamera_QT", "No name returned by GetComponentInfo()");
// NOTE(review): the handle holds a Pascal string -- first byte is the
// length -- so the length byte is used as the snprintf size; confirm
// this never exceeds nameBuffer's 255 bytes
snprintf (nameBuffer, (**nameHandle) + 1, "%s", (char *) (* nameHandle + 1));
printf ("- Videodevice: %s\n", nameBuffer);
DisposeHandle (nameHandle);
#endif
// open component to count number of inputs
capture->grabber = OpenComponent (component);
if (capture->grabber)
{
result = VDGetNumberOfInputs (capture->grabber, & capture->channel);
if (result != noErr)
fprintf (stderr, "Couldn't GetNumberOfInputs: %d\n", (int) result);
else
{
#ifndef NDEBUG
printf (" Number of inputs: %d\n", (int) capture->channel + 1);
#endif
// add to overall number of inputs
number_of_inputs += capture->channel + 1;
// did the user select an input that falls into this device's
// range of inputs? Then leave the loop
if (number_of_inputs > index)
{
// calculate relative channel index
capture->channel = index - number_of_inputs + capture->channel + 1;
OPENCV_ASSERT (capture->channel >= 0, "icvOpenCamera_QT", "negative channel number");
// dump channel name
#ifndef NDEBUG
char name[256];
Str255 nameBuffer;
result = VDGetInputName (capture->grabber, capture->channel, nameBuffer);
OPENCV_ASSERT (result == noErr, "ictOpenCamera_QT", "couldn't GetInputName()");
// Pascal string again: first byte is the length
snprintf (name, *nameBuffer, "%s", (char *) (nameBuffer + 1));
printf (" Choosing input %d - %s\n", (int) capture->channel, name);
#endif
// leave the loop
break;
}
}
// obviously no inputs of this device/component were needed
CloseComponent (capture->grabber);
}
}
}
while (component);
// did we find the desired input?
if (! component)
{
fprintf(stderr, "Not enough inputs available - can't choose input %d\n", index);
return 0;
}
// -- Okay now, we selected the digitizer input, lets set up digitizer destination --
ClearMoviesStickyError();
// Select the desired input
result = VDSetInput (capture->grabber, capture->channel);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't select video digitizer input");
// get the bounding rectangle of the video digitizer
result = VDGetActiveSrcRect (capture->grabber, capture->channel, & myRect);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create VDGetActiveSrcRect from digitizer");
// NOTE(review): the next line hardcodes 640x480 and discards the source
// rect just queried above -- looks like debug leftover; confirm whether
// the digitizer's reported rect should be used instead
myRect.right = 640; myRect.bottom = 480;
capture->size = cvSize (myRect.right - myRect.left, myRect.bottom - myRect.top);
printf ("Source rect is %d, %d -- %d, %d\n", (int) myRect.left, (int) myRect.top, (int) myRect.right, (int) myRect.bottom);
// create offscreen GWorld
result = QTNewGWorld (& capture->myGWorld, k32ARGBPixelFormat, & myRect, nil, nil, 0);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create QTNewGWorld() for output image");
// get pixmap
capture->pixmap = GetGWorldPixMap (capture->myGWorld);
result = GetMoviesError ();
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get pixmap");
// set digitizer rect
result = VDSetDigitizerRect (capture->grabber, & myRect);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create VDGetActiveSrcRect from digitizer");
// set destination of digitized input
result = VDSetPlayThruDestination (capture->grabber, capture->pixmap, & myRect, nil, nil);
printf ("QuickTime error: %d\n", (int) result);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video destination");
// get destination of digitized images
result = VDGetPlayThruDestination (capture->grabber, & capture->pixmap, nil, nil, nil);
printf ("QuickTime error: %d\n", (int) result);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get video destination");
OPENCV_ASSERT (capture->pixmap != nil, "icvOpenCamera_QT", "empty set video destination");
// get the bounding rectangle of the video digitizer
GetPixBounds (capture->pixmap, & myRect);
capture->size = cvSize (myRect.right - myRect.left, myRect.bottom - myRect.top);
// build IplImage header that will point to the PixMap of the Movie's GWorld later on
capture->image_rgb = cvCreateImageHeader (capture->size, IPL_DEPTH_8U, 4);
OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldn't create image header");
// create IplImage that hold correctly formatted result
capture->image_bgr = cvCreateImage (capture->size, IPL_DEPTH_8U, 3);
OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldn't create image");
// notify digitizer component, that we will be starting grabbing soon
result = VDCaptureStateChanging (capture->grabber, vdFlagCaptureIsForRecord | vdFlagCaptureStarting | vdFlagCaptureLowLatency);
OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set capture state");
// yeah, we did it
return 1;
}
/**
 * Stop the video digitizer and release all capture resources.
 *
 * @param capture  capture state to tear down (must not be NULL)
 * @return always 1
 *
 * Fix: the assert diagnostic wrongly named 'icvOpenCamera_QT'; it now
 * reports this function.
 */
static int icvClose_QT_Cam (CvCapture_QT_Cam * capture)
{
    OPENCV_ASSERT (capture, "icvClose_QT_Cam", "'capture' is a NULL-pointer");
    ComponentResult result = noErr;
    // notify digitizer component that we will be stopping grabbing soon
    result = VDCaptureStateChanging (capture->grabber, vdFlagCaptureStopping);
    OPENCV_ASSERT (result == noErr, "icvClose_QT_Cam", "couldn't set capture state");
    // release memory
    cvReleaseImage (& capture->image_bgr);
    cvReleaseImageHeader (& capture->image_rgb);
    DisposeGWorld (capture->myGWorld);
    CloseComponent (capture->grabber);
    // successful
    return 1;
}
/**
 * Request a single digitized frame from the video digitizer.
 *
 * @param capture  capture state (grabber must be opened)
 * @return 1 on success, 0 on failure
 */
static int icvGrabFrame_QT_Cam (CvCapture_QT_Cam * capture)
{
    OPENCV_ASSERT (capture, "icvGrabFrame_QT_Cam", "'capture' is a NULL-pointer");
    OPENCV_ASSERT (capture->grabber, "icvGrabFrame_QT_Cam", "'grabber' is a NULL-pointer");
    // ask the digitizer for exactly one frame
    if (VDGrabOneFrame (capture->grabber) != noErr)
    {
        fprintf (stderr, "VDGrabOneFrame failed\n");
        return 0;
    }
    return 1;
}
/**
 * Convert the most recently digitized frame to BGR and return it.
 *
 * @param capture  capture state (pixmap and images must be set up)
 * @return pointer to the capture's BGR image; the same buffer is
 *         returned on every call
 */
static const void * icvRetrieveFrame_QT_Cam (CvCapture_QT_Cam * capture, int)
{
    OPENCV_ASSERT (capture, "icvRetrieveFrame_QT_Cam", "'capture' is a NULL-pointer");
    // cvCvtColor has no ARGB mode (QuickTime's pixel format), so the RGB
    // header is pointed one byte past the PixMap base address.
    // ATTENTION: never access the last pixel's alpha entry -- it doesn't exist
    PixMapHandle pixmap = capture->pixmap;
    LockPixels (pixmap);
    cvSetData (capture->image_rgb, GetPixBaseAddr (pixmap) + 1, GetPixRowBytes (pixmap));
    // convert the GWorld's RGB data to BGR
    cvCvtColor (capture->image_rgb, capture->image_bgr, CV_RGBA2BGR);
    // allow QuickTime to access the buffer again
    UnlockPixels (pixmap);
    return capture->image_bgr;
}
#else
#pragma mark Capturing using Sequence Grabber
/**
 * Sequence grabber data proc: invoked by QuickTime for every captured
 * sample. Lazily creates a decompression sequence that renders into the
 * capture's GWorld, then decompresses the raw sample into it and flags
 * that a frame has arrived.
 *
 * @param channel   the SG channel that produced the sample
 * @param raw_data  pointer to the compressed sample data
 * @param len       length of the sample data in bytes
 * @param refCon    the CvCapture_QT_Cam* registered via SGSetDataProc
 * @return noErr on success, or the decompression error code
 */
static OSErr icvDataProc_QT_Cam (SGChannel channel, Ptr raw_data, long len, long *, long, TimeValue, short, long refCon)
{
CvCapture_QT_Cam * capture = (CvCapture_QT_Cam *) refCon;
CodecFlags ignore;
ComponentResult err = noErr;
// we need valid pointers
OPENCV_ASSERT (capture, "icvDataProc_QT_Cam", "'capture' is a NULL-pointer");
OPENCV_ASSERT (capture->gworld, "icvDataProc_QT_Cam", "'gworld' is a NULL-pointer");
OPENCV_ASSERT (raw_data, "icvDataProc_QT_Cam", "'raw_data' is a NULL-pointer");
// create a decompression sequence the first time this proc runs
if (capture->sequence == 0)
{
ImageDescriptionHandle description = (ImageDescriptionHandle) NewHandle(0);
// we need a decompression sequence that fits the raw data coming from the camera
err = SGGetChannelSampleDescription (channel, (Handle) description);
OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldn't get channel sample description");
//*************************************************************************************//
// scale the source sample onto the capture bounds; this fixed a bug when
// Quicktime is called twice to grab a frame (black band bug) - Yannick Verdie 2010
Rect sourceRect;
sourceRect.top = 0;
sourceRect.left = 0;
sourceRect.right = (**description).width;
sourceRect.bottom = (**description).height;
MatrixRecord scaleMatrix;
RectMatrix(&scaleMatrix,&sourceRect,&capture->bounds);
err = DecompressSequenceBegin (&capture->sequence, description, capture->gworld, 0,&capture->bounds,&scaleMatrix, srcCopy, NULL, 0, codecNormalQuality, bestSpeedCodec);
//**************************************************************************************//
OPENCV_ASSERT (err == noErr, "icvDataProc_QT_Cam", "couldn't begin decompression sequence");
DisposeHandle ((Handle) description);
}
// okay, we have a decompression sequence -> decompress!
err = DecompressSequenceFrameS (capture->sequence, raw_data, len, 0, &ignore, nil);
if (err != noErr)
{
fprintf (stderr, "icvDataProc_QT_Cam: couldn't decompress frame - %d\n", (int) err);
return err;
}
// check if we dropped a frame
/*#ifndef NDEBUG
if (capture->got_frame)
fprintf (stderr, "icvDataProc_QT_Cam: frame was dropped\n");
#endif*/
// everything worked as expected: signal the grab loop that a frame is ready
capture->got_frame = true;
return noErr;
}
/**
 * Open a camera through QuickTime's Sequence Grabber and prepare an
 * offscreen GWorld plus IplImage buffers for frame retrieval.
 *
 * The grabber is started once to learn the camera's *actual* resolution,
 * stopped, reconfigured with a correctly sized GWorld, and finally started
 * again with our data proc installed.
 *
 * @param capture  zero-initialized capture state to fill in
 * @param index    camera index (1-based among devices with flags == 0)
 * @return 1 on success (failed asserts abort earlier)
 *
 * Fixes: the result of the first QTNewGWorld() was ignored (a failed
 * allocation would only surface later inside the data proc); two assert
 * diagnostics carried wrong function names ("couldn't create image" for
 * OpenDefaultComponent and the 'icveClose_QT_Cam' copy/paste typo).
 */
static int icvOpenCamera_QT (CvCapture_QT_Cam * capture, const int index)
{
    OPENCV_ASSERT (capture, "icvOpenCamera_QT", "'capture' is a NULL-pointer");
    OPENCV_ASSERT (index >= 0, "icvOpenCamera_QT", "camera index is negative");
    PixMapHandle pixmap = nil;
    OSErr result = noErr;
    // open sequence grabber component
    capture->grabber = OpenDefaultComponent (SeqGrabComponentType, 0);
    OPENCV_ASSERT (capture->grabber, "icvOpenCamera_QT", "couldn't open sequence grabber component");
    // initialize sequence grabber component
    result = SGInitialize (capture->grabber);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't initialize sequence grabber");
    // we capture into memory only -- no movie file on disk
    result = SGSetDataRef (capture->grabber, 0, 0, seqGrabDontMakeMovie);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set data reference of sequence grabber");
    // set up video channel
    result = SGNewChannel (capture->grabber, VideoMediaType, & (capture->channel));
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create new video channel");
    // select the camera indicated by index (counting devices with flags == 0)
    SGDeviceList device_list = 0;
    result = SGGetChannelDeviceList (capture->channel, 0, & device_list);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't get channel device list");
    for (int i = 0, current_index = 1; i < (*device_list)->count; i++)
    {
        SGDeviceName device = (*device_list)->entry[i];
        if (device.flags == 0)
        {
            if (current_index == index)
            {
                result = SGSetChannelDevice (capture->channel, device.name);
                OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set the channel video device");
                break;
            }
            current_index++;
        }
    }
    result = SGDisposeDeviceList (capture->grabber, device_list);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't dispose the channel device list");
    // query natural camera resolution -- this will be wrong, but will be an upper
    // bound on the actual resolution -- the actual resolution is set below
    // after starting the frame grabber
    result = SGGetSrcVideoBounds (capture->channel, & (capture->bounds));
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video channel bounds");
    // create offscreen GWorld (previously unchecked -- a failure here would
    // only have crashed later inside the data proc)
    result = QTNewGWorld (& (capture->gworld), k32ARGBPixelFormat, & (capture->bounds), 0, 0, 0);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create offscreen GWorld");
    result = SGSetGWorld (capture->grabber, capture->gworld, 0);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set GWorld for sequence grabber");
    result = SGSetChannelBounds (capture->channel, & (capture->bounds));
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video channel bounds");
    result = SGSetChannelUsage (capture->channel, seqGrabRecord);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set channel usage");
    // start recording so we can size
    result = SGStartRecord (capture->grabber);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't start recording");
    // don't know *actual* resolution until now
    ImageDescriptionHandle imageDesc = (ImageDescriptionHandle)NewHandle(0);
    result = SGGetChannelSampleDescription(capture->channel, (Handle)imageDesc);
    OPENCV_ASSERT( result == noErr, "icvOpenCamera_QT", "couldn't get image size");
    capture->bounds.right = (**imageDesc).width;
    capture->bounds.bottom = (**imageDesc).height;
    DisposeHandle ((Handle) imageDesc);
    // stop grabber so that we can reset the parameters to the right size
    result = SGStop (capture->grabber);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't stop recording");
    // reset GWorld to correct image size
    GWorldPtr tmpgworld;
    result = QTNewGWorld( &tmpgworld, k32ARGBPixelFormat, &(capture->bounds), 0, 0, 0);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't create offscreen GWorld");
    result = SGSetGWorld( capture->grabber, tmpgworld, 0);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set GWorld for sequence grabber");
    DisposeGWorld( capture->gworld );
    capture->gworld = tmpgworld;
    result = SGSetChannelBounds (capture->channel, & (capture->bounds));
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set video channel bounds");
    // allocate images
    capture->size = cvSize (capture->bounds.right - capture->bounds.left, capture->bounds.bottom - capture->bounds.top);
    // build IplImage header that points to the PixMap of the Movie's GWorld.
    // unfortunately, cvCvtColor doesn't know ARGB, the QuickTime pixel format,
    // so we shift the base address by one byte.
    // ATTENTION: don't access the last pixel's alpha entry, it's inexistant
    capture->image_rgb = cvCreateImageHeader (capture->size, IPL_DEPTH_8U, 4);
    OPENCV_ASSERT (capture->image_rgb, "icvOpenCamera_QT", "couldn't create image header");
    pixmap = GetGWorldPixMap (capture->gworld);
    OPENCV_ASSERT (pixmap, "icvOpenCamera_QT", "didn't get GWorld PixMap handle");
    LockPixels (pixmap);
    cvSetData (capture->image_rgb, GetPixBaseAddr (pixmap) + 1, GetPixRowBytes (pixmap));
    // create IplImage that hold correctly formatted result
    capture->image_bgr = cvCreateImage (capture->size, IPL_DEPTH_8U, 3);
    OPENCV_ASSERT (capture->image_bgr, "icvOpenCamera_QT", "couldn't create image");
    // tell the sequence grabber to invoke our data proc
    result = SGSetDataProc (capture->grabber, NewSGDataUPP (icvDataProc_QT_Cam), (long) capture);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't set data proc");
    // start recording
    result = SGStartRecord (capture->grabber);
    OPENCV_ASSERT (result == noErr, "icvOpenCamera_QT", "couldn't start recording");
    return 1;
}
// Closes a QuickTime camera capture: stops the sequence grabber, releases
// the grabber component, ends the decompression sequence and frees the
// images and offscreen GWorld. Returns 1 on success (OPENCV_ASSERT handles
// error reporting/abort on failure).
// Fix: the assertion messages previously misspelled the function name as
// "icveClose_QT_Cam".
static int icvClose_QT_Cam (CvCapture_QT_Cam * capture)
{
    OPENCV_ASSERT (capture, "icvClose_QT_Cam", "'capture' is a NULL-pointer");

    OSErr result = noErr;

    // stop recording
    result = SGStop (capture->grabber);
    OPENCV_ASSERT (result == noErr, "icvClose_QT_Cam", "couldn't stop recording");

    // close sequence grabber component
    result = CloseComponent (capture->grabber);
    OPENCV_ASSERT (result == noErr, "icvClose_QT_Cam", "couldn't close sequence grabber component");

    // end decompression sequence
    CDSequenceEnd (capture->sequence);

    // free memory
    cvReleaseImage (& capture->image_bgr);
    cvReleaseImageHeader (& capture->image_rgb);
    DisposeGWorld (capture->gworld);

    // successful
    return 1;
}
// Asks the sequence grabber for idle time so it can capture and deliver one
// frame (the actual copy happens in the registered data proc).
// Returns 1 on success, 0 if SGIdle reported an error.
static int icvGrabFrame_QT_Cam (CvCapture_QT_Cam * capture)
{
    OPENCV_ASSERT (capture, "icvGrabFrame_QT_Cam", "'capture' is a NULL-pointer");
    OPENCV_ASSERT (capture->grabber, "icvGrabFrame_QT_Cam", "'grabber' is a NULL-pointer");

    // grab one frame
    const ComponentResult idle_result = SGIdle (capture->grabber);
    if (idle_result == noErr)
        return 1;  // successful

    fprintf (stderr, "SGIdle failed in icvGrabFrame_QT_Cam with error %d\n", (int) idle_result);
    return 0;
}
// Returns the most recently grabbed camera frame as a BGR IplImage.
// Spins on SGIdle() until the data proc signals a new frame via
// capture->got_frame, then converts the ARGB GWorld image (exposed through
// image_rgb) into image_bgr. The returned image is owned by the capture;
// callers must not release it. Returns 0 (NULL) on SGIdle failure.
static const void * icvRetrieveFrame_QT_Cam (CvCapture_QT_Cam * capture, int)
{
    OPENCV_ASSERT (capture, "icvRetrieveFrame_QT_Cam", "'capture' is a NULL-pointer");
    OPENCV_ASSERT (capture->image_rgb, "icvRetrieveFrame_QT_Cam", "invalid source image");
    OPENCV_ASSERT (capture->image_bgr, "icvRetrieveFrame_QT_Cam", "invalid destination image");

    OSErr myErr = noErr;

    // service active sequence grabbers (= redraw immediately)
    // NOTE(review): this loop has no timeout; if the grabber never delivers
    // a frame and SGIdle keeps returning noErr, it spins indefinitely.
    while (! capture->got_frame)
    {
        myErr = SGIdle (capture->grabber);
        if (myErr != noErr)
        {
            fprintf (stderr, "SGIdle() didn't succeed in icvRetrieveFrame_QT_Cam().\n");
            return 0;
        }
    }

    // convert ARGB of GWorld to BGR (image_rgb's data pointer is shifted by
    // one byte past the alpha channel, see icvOpenCamera_QT)
    cvCvtColor (capture->image_rgb, capture->image_bgr, CV_RGBA2BGR);

    // reset grabbing status
    capture->got_frame = false;

    // always return the same image pointer
    return capture->image_bgr;
}
#endif
// State for the QuickTime (ICM compression API) video writer.
typedef struct CvVideoWriter_QT {
    DataHandler data_handler;       // storage handler for the on-disk movie file
    Movie movie;                    // movie being written
    Track track;                    // the single video track of the movie
    Media video;                    // video media attached to the track
    ICMCompressionSessionRef compression_session_ref;  // session encoding the frames
    TimeValue duration_per_sample;  // per-frame duration in TIME_SCALE units
} CvVideoWriter_QT;
// Time scale (media time units per second) for the movie's video media.
static TimeScale const TIME_SCALE = 600;

// Invoked by the ICM compression session when an encoded frame is ready;
// appends it to the writer's video media.
static OSStatus icvEncodedFrameOutputCallback(
    void* writer,
    ICMCompressionSessionRef compression_session_ref,
    OSStatus error,
    ICMEncodedFrameRef encoded_frame_ref,
    void* reserved
);

// Invoked by the ICM source-tracking machinery; releases the source pixel
// buffer once the compression session is done with it.
static void icvSourceTrackingCallback(
    void *source_tracking_ref_con,
    ICMSourceTrackingFlags source_tracking_flags,
    void *source_frame_ref_con,
    void *reserved
);
// Encodes one BGR IplImage into the writer's compression session.
// The image is converted to an RGB CVPixelBuffer, which is handed to the
// session; the buffer is released later by icvSourceTrackingCallback.
// Returns 0 on success, -1 on failure (callers treat >= 0 as success).
// Fixes: the original ignored the CVPixelBufferCreate result (risking a NULL
// buffer dereference) and the encode status, and always returned 0.
static int icvWriteFrame_QT(
    CvVideoWriter_QT * video_writer,
    const IplImage * image
) {
    CVPixelBufferRef pixel_buffer_ref = NULL;
    CVReturn retval =
        CVPixelBufferCreate(
            kCFAllocatorDefault,
            image->width, image->height, k24RGBPixelFormat,
            NULL /* pixel_buffer_attributes */,
            &pixel_buffer_ref
        );
    if ( ( retval != kCVReturnSuccess ) || ( pixel_buffer_ref == NULL ) ) {
        return -1;
    }

    // convert BGR IPL image to RGB pixel buffer
    IplImage* image_rgb =
        cvCreateImageHeader(
            cvSize( image->width, image->height ),
            IPL_DEPTH_8U,
            3
        );

    retval = CVPixelBufferLockBaseAddress( pixel_buffer_ref, 0 );
    void* base_address = CVPixelBufferGetBaseAddress( pixel_buffer_ref );
    size_t bytes_per_row = CVPixelBufferGetBytesPerRow( pixel_buffer_ref );
    cvSetData( image_rgb, base_address, bytes_per_row );
    cvConvertImage( image, image_rgb, CV_CVTIMG_SWAP_RB );
    retval = CVPixelBufferUnlockBaseAddress( pixel_buffer_ref, 0 );
    cvReleaseImageHeader( &image_rgb );

    // ask the session to release the pixel buffer through our tracking
    // callback once it no longer needs it
    ICMSourceTrackingCallbackRecord source_tracking_callback_record;
    source_tracking_callback_record.sourceTrackingCallback =
        icvSourceTrackingCallback;
    source_tracking_callback_record.sourceTrackingRefCon = NULL;

    // NOTE(review): &pixel_buffer_ref is a stack address handed to the
    // callback; this relies on the tracking callback firing before this
    // frame's encode returns — TODO confirm against ICM semantics.
    OSStatus status =
        ICMCompressionSessionEncodeFrame(
            video_writer->compression_session_ref,
            pixel_buffer_ref,
            0,
            video_writer->duration_per_sample,
            kICMValidTime_DisplayDurationIsValid,
            NULL,
            &source_tracking_callback_record,
            static_cast<void*>( &pixel_buffer_ref )
        );
    if ( status != noErr ) {
        // NOTE(review): for a rejected frame the tracking callback may never
        // fire, so the pixel buffer can leak here.
        return -1;
    }

    return 0;
}
// Finalizes and destroys a writer created by icvCreateVideoWriter_QT:
// flushes outstanding frames, releases the compression session, inserts the
// accumulated media into the track, writes the movie to storage and frees
// the writer. Safe to call with NULL / already-released pointers.
// NOTE: teardown order matters — frames must be completed before the media
// edits end and the session is released.
static void icvReleaseVideoWriter_QT( CvVideoWriter_QT ** writer ) {
    if ( ( writer != NULL ) && ( *writer != NULL ) ) {
        CvVideoWriter_QT* video_writer = *writer;

        // force compression session to complete encoding of outstanding source
        // frames
        ICMCompressionSessionCompleteFrames(
            video_writer->compression_session_ref, 1, 0, 0
        );

        EndMediaEdits( video_writer->video );
        ICMCompressionSessionRelease( video_writer->compression_session_ref );

        // splice the whole recorded media into the track at movie time 0,
        // playing at normal (1:1) rate
        InsertMediaIntoTrack(
            video_writer->track,
            0,
            0,
            GetMediaDuration( video_writer->video ),
            FixRatio( 1, 1 )
        );

        UpdateMovieInStorage( video_writer->movie, video_writer->data_handler );
        CloseMovieStorage( video_writer->data_handler );

        /*
        // export to AVI
        Handle data_ref;
        OSType data_ref_type;
        QTNewDataReferenceFromFullPathCFString(
        CFSTR( "/Users/seibert/Desktop/test.avi" ), kQTPOSIXPathStyle, 0,
        &data_ref, &data_ref_type
        );
        ConvertMovieToDataRef( video_writer->movie, NULL, data_ref,
        data_ref_type, kQTFileTypeAVI, 'TVOD', 0, NULL );
        DisposeHandle( data_ref );
        */

        DisposeMovie( video_writer->movie );

        cvFree( writer );
    }
}
// ICM encoded-frame callback: appends the freshly encoded frame to the
// writer's video media. `writer` is the CvVideoWriter_QT registered as the
// callback refcon. Returns the resulting OSStatus.
// Fix: the original ignored the `error` parameter and attempted to append
// the frame even when the compression session had reported a failure.
static OSStatus icvEncodedFrameOutputCallback(
    void* writer,
    ICMCompressionSessionRef compression_session_ref,
    OSStatus error,
    ICMEncodedFrameRef encoded_frame_ref,
    void* reserved
) {
    (void)compression_session_ref;  // unused
    (void)reserved;                 // unused

    // propagate an upstream encode failure instead of appending a
    // possibly-invalid frame
    if ( error != noErr ) {
        return error;
    }

    CvVideoWriter_QT* video_writer = static_cast<CvVideoWriter_QT*>( writer );

    OSStatus err = AddMediaSampleFromEncodedFrame( video_writer->video,
        encoded_frame_ref, NULL );
    return err;
}
static void icvSourceTrackingCallback(
void *source_tracking_ref_con,
ICMSourceTrackingFlags source_tracking_flags,
void *source_frame_ref_con,
void *reserved
) {
if ( source_tracking_flags & kICMSourceTracking_ReleasedPixelBuffer ) {
CVPixelBufferRelease(
*static_cast<CVPixelBufferRef*>( source_frame_ref_con )
);
}
}
// Creates a QuickTime movie writer.
//
// Parameters:
//   filename   - output movie path (must not be NULL)
//   fourcc     - requested codec (currently unused: the codec is hard-wired
//                to kRawCodecType; see the commented-out fourcc check below)
//   fps        - frames per second, must be > 0; converted into a per-frame
//                duration in TIME_SCALE units
//   frame_size - frame dimensions, both must be > 0
//   is_color   - unused by this implementation
//
// Returns a heap-allocated CvVideoWriter_QT on success, NULL on failure.
// On failure every partially-created QuickTime object is torn down (unless
// error processing mode is CV_ErrModeLeaf, in which case CV_ERROR leaves).
static CvVideoWriter_QT* icvCreateVideoWriter_QT(
    const char * filename,
    int fourcc,
    double fps,
    CvSize frame_size,
    int is_color
) {
    CV_FUNCNAME( "icvCreateVideoWriter" );

    CvVideoWriter_QT* video_writer =
        static_cast<CvVideoWriter_QT*>( cvAlloc( sizeof( CvVideoWriter_QT ) ) );
    memset( video_writer, 0, sizeof( CvVideoWriter_QT ) );

    Handle data_ref = NULL;
    OSType data_ref_type;
    DataHandler data_handler = NULL;
    Movie movie = NULL;
    ICMCompressionSessionOptionsRef options_ref = NULL;
    ICMCompressionSessionRef compression_session_ref = NULL;
    CFStringRef out_path = nil;
    Track video_track = nil;
    Media video = nil;
    OSErr err = noErr;
    CodecType codecType = kRawCodecType;

    __BEGIN__

    // validate input arguments
    if ( filename == NULL ) {
        CV_ERROR( CV_StsBadArg, "Video file name must not be NULL" );
    }
    if ( fps <= 0.0 ) {
        CV_ERROR( CV_StsBadArg, "FPS must be larger than 0.0" );
    }
    if ( ( frame_size.width <= 0 ) || ( frame_size.height <= 0 ) ) {
        CV_ERROR( CV_StsBadArg,
            "Frame width and height must be larger than 0" );
    }

    // initialize QuickTime (once per process)
    if ( !did_enter_movies ) {
        err = EnterMovies();
        if ( err != noErr ) {
            CV_ERROR( CV_StsInternal, "Unable to initialize QuickTime" );
        }
        did_enter_movies = 1;
    }

    // convert the file name into a data reference
    out_path = CFStringCreateWithCString( kCFAllocatorDefault, filename, kCFStringEncodingISOLatin1 );
    CV_ASSERT( out_path != nil );
    err = QTNewDataReferenceFromFullPathCFString( out_path, kQTPOSIXPathStyle,
        0, &data_ref, &data_ref_type );
    CFRelease( out_path );
    if ( err != noErr ) {
        CV_ERROR( CV_StsInternal,
            "Cannot create data reference from file name" );
    }

    // create a new movie on disk ('TVOD' = QuickTime Player creator code)
    err = CreateMovieStorage( data_ref, data_ref_type, 'TVOD',
        smCurrentScript, newMovieActive, &data_handler, &movie );

    if ( err != noErr ) {
        CV_ERROR( CV_StsInternal, "Cannot create movie storage" );
    }

    // create a track with video
    video_track = NewMovieTrack (movie,
        FixRatio( frame_size.width, 1 ),
        FixRatio( frame_size.height, 1 ),
        kNoVolume);
    err = GetMoviesError();
    if ( err != noErr ) {
        CV_ERROR( CV_StsInternal, "Cannot create video track" );
    }
    video = NewTrackMedia( video_track, VideoMediaType, TIME_SCALE, nil, 0 );
    err = GetMoviesError();
    if ( err != noErr ) {
        CV_ERROR( CV_StsInternal, "Cannot create video media" );
    }

    /*if( fourcc == CV_FOURCC( 'D', 'I', 'B', ' ' ))
        codecType = kRawCodecType;*/

    // start a compression session
    err = ICMCompressionSessionOptionsCreate( kCFAllocatorDefault,
        &options_ref );
    if ( err != noErr ) {
        CV_ERROR( CV_StsInternal, "Cannot create compression session options" );
    }
    err = ICMCompressionSessionOptionsSetAllowTemporalCompression( options_ref,
        true );
    if ( err != noErr) {
        CV_ERROR( CV_StsInternal, "Cannot enable temporal compression" );
    }
    err = ICMCompressionSessionOptionsSetAllowFrameReordering( options_ref,
        true );
    if ( err != noErr) {
        CV_ERROR( CV_StsInternal, "Cannot enable frame reordering" );
    }

    // route encoded frames to icvEncodedFrameOutputCallback with this writer
    // as refcon
    ICMEncodedFrameOutputRecord encoded_frame_output_record;
    encoded_frame_output_record.encodedFrameOutputCallback =
        icvEncodedFrameOutputCallback;
    encoded_frame_output_record.encodedFrameOutputRefCon =
        static_cast<void*>( video_writer );
    encoded_frame_output_record.frameDataAllocator = NULL;

    err = ICMCompressionSessionCreate( kCFAllocatorDefault, frame_size.width,
        frame_size.height, codecType, TIME_SCALE, options_ref,
        NULL /*source_pixel_buffer_attributes*/, &encoded_frame_output_record,
        &compression_session_ref );
    ICMCompressionSessionOptionsRelease( options_ref );
    if ( err != noErr ) {
        CV_ERROR( CV_StsInternal, "Cannot create compression session" );
    }

    err = BeginMediaEdits( video );
    if ( err != noErr ) {
        CV_ERROR( CV_StsInternal, "Cannot begin media edits" );
    }

    // fill in the video writer structure
    video_writer->data_handler = data_handler;
    video_writer->movie = movie;
    video_writer->track = video_track;
    video_writer->video = video;
    video_writer->compression_session_ref = compression_session_ref;
    video_writer->duration_per_sample =
        static_cast<TimeValue>( static_cast<double>( TIME_SCALE ) / fps );

    __END__

    // clean up in case of error (unless error processing mode is
    // CV_ErrModeLeaf)
    if ( err != noErr ) {
        if ( options_ref != NULL ) {
            ICMCompressionSessionOptionsRelease( options_ref );
        }
        if ( compression_session_ref != NULL ) {
            ICMCompressionSessionRelease( compression_session_ref );
        }
        if ( data_handler != NULL ) {
            CloseMovieStorage( data_handler );
        }
        if ( movie != NULL ) {
            DisposeMovie( movie );
        }
        if ( data_ref != NULL ) {
            DeleteMovieStorage( data_ref, data_ref_type );
            DisposeHandle( data_ref );
        }
        cvFree( reinterpret_cast<void**>( &video_writer ) );
        video_writer = NULL;
    }

    return video_writer;
}
/**
*
* Wrappers for the new C++ CvCapture & CvVideoWriter structures
*
*/
// C++ CvCapture wrapper around the C-style QuickTime movie capture
// (CvCapture_QT_Movie). Owns captureQT and releases it in close().
class CvCapture_QT_Movie_CPP : public CvCapture
{
public:
    CvCapture_QT_Movie_CPP() { captureQT = 0; }
    virtual ~CvCapture_QT_Movie_CPP() { close(); }

    // open/close are this wrapper's own API (not CvCapture overrides)
    virtual bool open( const char* filename );
    virtual void close();

    virtual double getProperty(int) const CV_OVERRIDE;
    virtual bool setProperty(int, double) CV_OVERRIDE;
    virtual bool grabFrame() CV_OVERRIDE;
    virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
    virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_QT; }
protected:
    CvCapture_QT_Movie* captureQT;  // underlying C capture, NULL when closed
};
bool CvCapture_QT_Movie_CPP::open( const char* filename )
{
close();
captureQT = icvCaptureFromFile_QT( filename );
return captureQT != 0;
}
// Releases the underlying capture (if any) and resets the pointer.
void CvCapture_QT_Movie_CPP::close()
{
    if( !captureQT )
        return;
    icvClose_QT_Movie( captureQT );
    cvFree( &captureQT );  // also sets captureQT to NULL
}
// Advances to the next movie frame; false when no capture is open.
bool CvCapture_QT_Movie_CPP::grabFrame()
{
    if( !captureQT )
        return false;
    return icvGrabFrame_QT_Movie( captureQT ) != 0;
}
// Returns the last grabbed frame (owned by the capture), or NULL.
IplImage* CvCapture_QT_Movie_CPP::retrieveFrame(int)
{
    if( !captureQT )
        return 0;
    return (IplImage*)icvRetrieveFrame_QT_Movie( captureQT, 0 );
}
// Forwards a CV_CAP_PROP_* query to the C implementation; 0 when closed.
double CvCapture_QT_Movie_CPP::getProperty( int propId ) const
{
    if( !captureQT )
        return 0;
    return icvGetProperty_QT_Movie( captureQT, propId );
}
// Forwards a CV_CAP_PROP_* update to the C implementation; false when closed.
bool CvCapture_QT_Movie_CPP::setProperty( int propId, double value )
{
    if( !captureQT )
        return false;
    return icvSetProperty_QT_Movie( captureQT, propId, value ) != 0;
}
// Factory: creates a QuickTime movie capture for `filename`,
// or returns 0 if the file cannot be opened.
CvCapture* cvCreateFileCapture_QT( const char* filename )
{
    CvCapture_QT_Movie_CPP* capture = new CvCapture_QT_Movie_CPP;
    if( !capture->open( filename ))
    {
        delete capture;
        return 0;
    }
    return capture;
}
/////////////////////////////////////
class CvCapture_QT_Cam_CPP : public CvCapture
{
public:
CvCapture_QT_Cam_CPP() { captureQT = 0; }
virtual ~CvCapture_QT_Cam_CPP() { close(); }
virtual bool open( int index );
virtual void close();
virtual double getProperty(int) const;
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_QT; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected:
CvCapture_QT_Cam* captureQT;
};
bool CvCapture_QT_Cam_CPP::open( int index )
{
close();
captureQT = icvCaptureFromCam_QT( index );
return captureQT != 0;
}
// Releases the underlying camera capture (if any) and resets the pointer.
void CvCapture_QT_Cam_CPP::close()
{
    if( !captureQT )
        return;
    icvClose_QT_Cam( captureQT );
    cvFree( &captureQT );  // also sets captureQT to NULL
}
// Grabs the next camera frame; false when no capture is open.
bool CvCapture_QT_Cam_CPP::grabFrame()
{
    if( !captureQT )
        return false;
    return icvGrabFrame_QT_Cam( captureQT ) != 0;
}
// Returns the last grabbed camera frame (owned by the capture), or NULL.
IplImage* CvCapture_QT_Cam_CPP::retrieveFrame(int)
{
    if( !captureQT )
        return 0;
    return (IplImage*)icvRetrieveFrame_QT_Cam( captureQT, 0 );
}
// Forwards a CV_CAP_PROP_* query to the C implementation; 0 when closed.
double CvCapture_QT_Cam_CPP::getProperty( int propId ) const
{
    if( !captureQT )
        return 0;
    return icvGetProperty_QT_Cam( captureQT, propId );
}
// Forwards a CV_CAP_PROP_* update to the C implementation; false when closed.
bool CvCapture_QT_Cam_CPP::setProperty( int propId, double value )
{
    if( !captureQT )
        return false;
    return icvSetProperty_QT_Cam( captureQT, propId, value ) != 0;
}
// Factory: creates a QuickTime camera capture for camera `index`,
// or returns 0 if the camera cannot be opened.
CvCapture* cvCreateCameraCapture_QT( int index )
{
    CvCapture_QT_Cam_CPP* capture = new CvCapture_QT_Cam_CPP;
    if( !capture->open( index ))
    {
        delete capture;
        return 0;
    }
    return capture;
}
/////////////////////////////////
// C++ CvVideoWriter wrapper around the C-style QuickTime writer
// (CvVideoWriter_QT). Owns writerQT and finalizes it in close().
class CvVideoWriter_QT_CPP : public CvVideoWriter
{
public:
    CvVideoWriter_QT_CPP() { writerQT = 0; }
    virtual ~CvVideoWriter_QT_CPP() { close(); }

    virtual bool open( const char* filename, int fourcc,
                       double fps, CvSize frameSize, bool isColor );
    virtual void close();
    virtual bool writeFrame( const IplImage* );
    int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_QT; }
protected:
    CvVideoWriter_QT* writerQT;  // underlying C writer, NULL when closed
};
bool CvVideoWriter_QT_CPP::open( const char* filename, int fourcc,
double fps, CvSize frameSize, bool isColor )
{
close();
writerQT = icvCreateVideoWriter_QT( filename, fourcc, fps, frameSize, isColor );
return writerQT != 0;
}
// Finalizes and releases the underlying writer (if any).
void CvVideoWriter_QT_CPP::close()
{
    if( !writerQT )
        return;
    icvReleaseVideoWriter_QT( &writerQT );  // also NULLs writerQT
    writerQT = 0;
}
// Encodes one frame; false when the writer is closed, the image is NULL,
// or the underlying write fails.
bool CvVideoWriter_QT_CPP::writeFrame( const IplImage* image )
{
    const bool ready = ( writerQT != 0 ) && ( image != 0 );
    return ready && ( icvWriteFrame_QT( writerQT, image ) >= 0 );
}
// Factory: creates a QuickTime video writer, or returns 0 on failure.
CvVideoWriter* cvCreateVideoWriter_QT( const char* filename, int fourcc,
                                       double fps, CvSize frameSize, int isColor )
{
    CvVideoWriter_QT_CPP* writer = new CvVideoWriter_QT_CPP;
    if( !writer->open( filename, fourcc, fps, frameSize, isColor != 0 ))
    {
        delete writer;
        return 0;
    }
    return writer;
}
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the contributor be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*////////////////////////////////////////////////////////////////////////////////////////
#include "precomp.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#import <QTKit/QTKit.h>
/********************** Declaration of class headers ************************/
/*****************************************************************************
*
* CaptureDelegate Declaration.
*
* CaptureDelegate is notified on a separate thread by the OS whenever there
* is a new frame. When "updateImage" is called from the main thread, it
* copies this new frame into an IplImage, but only if this frame has not
* been copied before. When "getOutput" is called from the main thread,
* it gives the last copied IplImage.
*
*****************************************************************************/
#ifndef QTKIT_VERSION_7_6_3
#define QTKIT_VERSION_7_6_3 70603
#define QTKIT_VERSION_7_0 70000
#endif
#ifndef QTKIT_VERSION_MAX_ALLOWED
#define QTKIT_VERSION_MAX_ALLOWED QTKIT_VERSION_7_0
#endif
#define DISABLE_AUTO_RESTART 999
// QTKit capture delegate: receives frames from the OS on a background
// thread and hands the latest one to the main thread on demand.
@interface CaptureDelegate : NSObject
{
    int newFrame;                        // 1 when a frame arrived since the last updateImage
    CVImageBufferRef mCurrentImageBuffer; // most recent frame from the OS (retained)
    char* imagedata;                     // raw BGRA copy of the current frame
    IplImage* image;                     // header wrapping imagedata (4 channels)
    char* bgr_imagedata;                 // BGR-converted pixel data
    IplImage* bgr_image;                 // header wrapping bgr_imagedata (3 channels)
    size_t currSize;                     // allocated size of the two data buffers
}

// Delegate callback: a new decompressed video frame is available.
- (void)captureOutput:(QTCaptureOutput *)captureOutput
didOutputVideoFrame:(CVImageBufferRef)videoFrame
withSampleBuffer:(QTSampleBuffer *)sampleBuffer
fromConnection:(QTCaptureConnection *)connection;

// Delegate callback: a frame was dropped by the capture pipeline.
- (void)captureOutput:(QTCaptureOutput *)captureOutput
didDropVideoFrameWithSampleBuffer:(QTSampleBuffer *)sampleBuffer
fromConnection:(QTCaptureConnection *)connection;

// Copies the newest frame into the IplImages; returns 0 if nothing new.
- (int)updateImage;
// Returns the last BGR image produced by updateImage (delegate-owned).
- (IplImage*)getOutput;
// Dummy timer callback used to keep the run loop alive (see grabFrame).
- (void)doFireTimer:(NSTimer *)timer;

@end
/*****************************************************************************
*
* CvCaptureCAM Declaration.
*
* CvCaptureCAM is the instantiation of a capture source for cameras.
*
*****************************************************************************/
// QTKit-based camera capture: wires a QTCaptureSession to a CaptureDelegate
// that buffers the latest frame.
class CvCaptureCAM : public CvCapture {
public:
    CvCaptureCAM(int cameraNum = -1) ;   // -1 selects the default video device
    ~CvCaptureCAM();
    virtual bool grabFrame();
    virtual IplImage* retrieveFrame(int);
    virtual double getProperty(int property_id) const;
    virtual bool setProperty(int property_id, double value);
    virtual int didStart();              // 1 iff the device initialized OK
private:
    QTCaptureSession *mCaptureSession;                          // owns the capture graph
    QTCaptureDeviceInput *mCaptureDeviceInput;                  // camera input
    QTCaptureDecompressedVideoOutput *mCaptureDecompressedVideoOutput; // BGRA frame output
    CaptureDelegate* capture;                                   // receives the frames

    int startCaptureDevice(int cameraNum);
    void stopCaptureDevice();
    void setWidthHeight();
    bool grabFrame(double timeOut);      // waits up to timeOut seconds for a frame

    int camNum;            // requested camera index
    int width;             // requested frame width (0 = device default)
    int height;            // requested frame height (0 = device default)
    int settingWidth;      // 1 while a width set is pending (see setProperty)
    int settingHeight;     // 1 while a height set is pending
    int started;           // result of startCaptureDevice
    int disableAutoRestart; // set via the DISABLE_AUTO_RESTART property
};
/*****************************************************************************
*
* CvCaptureFile Declaration.
*
* CvCaptureFile is the instantiation of a capture source for video files.
*
*****************************************************************************/
// QTKit-based video-file capture built on QTMovie.
class CvCaptureFile : public CvCapture {
public:
    CvCaptureFile(const char* filename) ;
    ~CvCaptureFile();
    virtual bool grabFrame();
    virtual IplImage* retrieveFrame(int);
    virtual double getProperty(int property_id) const;
    virtual bool setProperty(int property_id, double value);
    virtual int didStart();              // 1 iff the movie opened OK
private:
    QTMovie *mCaptureSession;  // the opened movie (retained)

    char* imagedata;           // raw frame pixel data
    IplImage* image;           // header wrapping imagedata
    char* bgr_imagedata;       // BGR-converted pixel data
    IplImage* bgr_image;       // header wrapping bgr_imagedata
    size_t currSize;           // allocated size of the data buffers

    //IplImage* retrieveFrameBitmap();
    IplImage* retrieveFramePixelBuffer();
    double getFPS();

    int movieWidth;        // natural frame width
    int movieHeight;       // natural frame height
    double movieFPS;       // nominal frame rate
    double currentFPS;     // effective rate used for stepping
    double movieDuration;  // duration in milliseconds
    int changedPos;        // set when the position was changed via setProperty

    int started;           // 1 once the movie opened successfully
    QTTime endOfMovie;     // timestamp of the movie's end
};
/*****************************************************************************
 *
 * CvVideoWriter_QT Declaration.
 *
 * CvVideoWriter_QT is the instantiation of a video output stream.
 *
 *****************************************************************************/
// QTKit-based video writer built on QTMovie frame appending.
class CvVideoWriter_QT : public CvVideoWriter{
public:
    CvVideoWriter_QT(const char* filename, int fourcc,
                     double fps, CvSize frame_size,
                     int is_color=1);
    ~CvVideoWriter_QT();
    bool writeFrame(const IplImage* image);
    int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_QT; }
private:
    IplImage* argbimage;  // scratch ARGB frame handed to QTKit
    QTMovie* mMovie;      // movie being written
    NSString* path;       // output file path
    NSString* codec;      // QTKit codec identifier derived from fourcc
    double movieFPS;      // frames per second
    CvSize movieSize;     // frame dimensions
    int movieColor;       // is_color flag as passed to the constructor
};
/****************** Implementation of interface functions ********************/
// Factory: opens `filename` with the QTKit file capture, NULL on failure.
CvCapture* cvCreateFileCapture_QT(const char* filename) {
    CvCaptureFile *capture = new CvCaptureFile(filename);
    if (!capture->didStart()) {
        delete capture;
        return NULL;
    }
    return capture;
}
// Factory: opens camera `index` with the QTKit camera capture.
// Returns NULL when the device failed to start (cvReleaseCapture clears
// the pointer it is given).
CvCapture* cvCreateCameraCapture_QT(int index ) {
    CvCaptureCAM* cam = new CvCaptureCAM(index);
    CvCapture* result = cam;
    if (!cam->didStart())
        cvReleaseCapture(&result);  // deletes the capture and NULLs result
    return result;
}
// Factory: creates a QTKit video writer.
// NOTE(review): unlike the capture factories there is no failure check here;
// the constructor is assumed to always yield a usable writer.
CvVideoWriter* cvCreateVideoWriter_QT(const char* filename, int fourcc,
                                      double fps, CvSize frame_size,
                                      int is_color) {
    return new CvVideoWriter_QT(filename, fourcc, fps, frame_size, is_color);
}
/********************** Implementation of Classes ****************************/
/*****************************************************************************
*
* CvCaptureCAM Implementation.
*
* CvCaptureCAM is the instantiation of a capture source for cameras.
*
*****************************************************************************/
// Initializes all members and immediately tries to start the capture
// device; `started` records whether that succeeded (queried via didStart()).
CvCaptureCAM::CvCaptureCAM(int cameraNum) {
    mCaptureSession = nil;
    mCaptureDeviceInput = nil;
    mCaptureDecompressedVideoOutput = nil;
    capture = nil;

    width = 0;
    height = 0;
    settingWidth = 0;
    settingHeight = 0;
    disableAutoRestart = 0;

    camNum = cameraNum;

    if (!startCaptureDevice(camNum)) {
        std::cout << "Warning, camera failed to properly initialize!" << std::endl;
        started = 0;
    } else {
        started = 1;
    }
}
// Stops the session and releases all QTKit objects (see stopCaptureDevice).
CvCaptureCAM::~CvCaptureCAM() {
    stopCaptureDevice();
}
// Returns 1 iff the constructor managed to start the capture device.
int CvCaptureCAM::didStart() {
    return started;
}
// Grabs a frame with a default timeout of 5 seconds.
bool CvCaptureCAM::grabFrame() {
    return grabFrame(5);
}
// Pumps the current run loop until the delegate reports a new frame or
// `timeOut` seconds (in ~5ms slices) have elapsed.
// Returns true iff a frame arrived within the timeout.
bool CvCaptureCAM::grabFrame(double timeOut) {

    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    double sleepTime = 0.005;  // per-iteration wait, seconds
    double total = 0;          // accumulated wait time

    // If the capture is launched in a separate thread, then
    // [NSRunLoop currentRunLoop] is not the same as in the main thread, and has no timer.
    //see https://developer.apple.com/library/mac/#documentation/Cocoa/Reference/Foundation/Classes/nsrunloop_Class/Reference/Reference.html
    // "If no input sources or timers are attached to the run loop, this
    // method exits immediately"
    // using usleep() is not a good alternative, because it may block the GUI.
    // Create a dummy timer so that runUntilDate does not exit immediately:
    [NSTimer scheduledTimerWithTimeInterval:100 target:capture selector:@selector(doFireTimer:) userInfo:nil repeats:YES];
    while (![capture updateImage] && (total += sleepTime)<=timeOut) {
        [[NSRunLoop currentRunLoop] runUntilDate:[NSDate dateWithTimeIntervalSinceNow:sleepTime]];
    }

    [localpool drain];

    return total <= timeOut;
}
// Returns the delegate's last converted BGR frame (delegate-owned).
IplImage* CvCaptureCAM::retrieveFrame(int) {
    return [capture getOutput];
}
// Stops the session and releases all QTKit objects created by
// startCaptureDevice. The release order is deliberate: stop first, close
// the device, then drop the session/input/output/delegate references.
void CvCaptureCAM::stopCaptureDevice() {
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];

    [mCaptureSession stopRunning];

    QTCaptureDevice *device = [mCaptureDeviceInput device];
    if ([device isOpen])  [device close];

    [mCaptureSession release];
    [mCaptureDeviceInput release];

    // redirect the delegate to itself before releasing our CaptureDelegate,
    // so no callback lands on a freed object
    [mCaptureDecompressedVideoOutput setDelegate:mCaptureDecompressedVideoOutput];
    [mCaptureDecompressedVideoOutput release];
    [capture release];
    [localpool drain];
}
// Opens camera `cameraNum` (or the default video device when negative),
// connects it to a QTCaptureSession whose decompressed-video output delivers
// BGRA pixel buffers to our CaptureDelegate, and starts the session.
// Returns 1 on success, 0 on failure.
// Fix: the device array was explicitly retained and never released, leaking
// it on every call; it is autoreleased and only used before the local pool
// drains, so the retain is dropped.
int CvCaptureCAM::startCaptureDevice(int cameraNum) {
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];

    capture = [[CaptureDelegate alloc] init];

    QTCaptureDevice *device;
    // autoreleased array of video + muxed devices; valid until the pool drains
    NSArray* devices = [[QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo]
            arrayByAddingObjectsFromArray:[QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeMuxed]];

    if ([devices count] == 0) {
        std::cout << "QTKit didn't find any attached Video Input Devices!" << std::endl;
        [localpool drain];
        return 0;
    }

    if (cameraNum >= 0) {
        NSUInteger nCameras = [devices count];
        if( (NSUInteger)cameraNum >= nCameras ) {
            [localpool drain];
            return 0;
        }
        device = [devices objectAtIndex:cameraNum] ;
    } else {
        device = [QTCaptureDevice defaultInputDeviceWithMediaType:QTMediaTypeVideo] ;
    }
    int success;
    NSError* error;

    if (device) {
        success = [device open:&error];
        if (!success) {
            std::cout << "QTKit failed to open a Video Capture Device" << std::endl;
            [localpool drain];
            return 0;
        }

        mCaptureDeviceInput = [[QTCaptureDeviceInput alloc] initWithDevice:device] ;
        mCaptureSession = [[QTCaptureSession alloc] init] ;

        success = [mCaptureSession addInput:mCaptureDeviceInput error:&error];

        if (!success) {
            std::cout << "QTKit failed to start capture session with opened Capture Device" << std::endl;
            [localpool drain];
            return 0;
        }

        mCaptureDecompressedVideoOutput = [[QTCaptureDecompressedVideoOutput alloc] init];
        [mCaptureDecompressedVideoOutput setDelegate:capture];
        NSDictionary *pixelBufferOptions ;
        if (width > 0 && height > 0) {
            // caller requested an explicit size
            pixelBufferOptions = [NSDictionary dictionaryWithObjectsAndKeys:
                [NSNumber numberWithDouble:1.0*width], (id)kCVPixelBufferWidthKey,
                [NSNumber numberWithDouble:1.0*height], (id)kCVPixelBufferHeightKey,
                //[NSNumber numberWithUnsignedInt:k32BGRAPixelFormat], (id)kCVPixelBufferPixelFormatTypeKey,
                [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA],
                (id)kCVPixelBufferPixelFormatTypeKey,
                nil];
        } else {
            // device-native size, BGRA pixels
            pixelBufferOptions = [NSDictionary dictionaryWithObjectsAndKeys:
                [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA],
                (id)kCVPixelBufferPixelFormatTypeKey,
                nil];
        }
        [mCaptureDecompressedVideoOutput setPixelBufferAttributes:pixelBufferOptions];

#if QTKIT_VERSION_MAX_ALLOWED >= QTKIT_VERSION_7_6_3
        [mCaptureDecompressedVideoOutput setAutomaticallyDropsLateVideoFrames:YES];
#endif

        success = [mCaptureSession addOutput:mCaptureDecompressedVideoOutput error:&error];
        if (!success) {
            std::cout << "QTKit failed to add Output to Capture Session" << std::endl;
            [localpool drain];
            return 0;
        }

        [mCaptureSession startRunning];

        // prime the pipeline: wait for the first frame
        grabFrame(60);

        [localpool drain];
        return 1;
    }

    [localpool drain];
    return 0;
}
// Reconfigures the decompressed-video output to the currently requested
// width/height (BGRA), restarting the session and waiting for one frame so
// subsequent retrieves see the new size.
void CvCaptureCAM::setWidthHeight() {
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    [mCaptureSession stopRunning];

    NSDictionary* pixelBufferOptions = [NSDictionary dictionaryWithObjectsAndKeys:
        [NSNumber numberWithDouble:1.0*width], (id)kCVPixelBufferWidthKey,
        [NSNumber numberWithDouble:1.0*height], (id)kCVPixelBufferHeightKey,
        [NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA],
        (id)kCVPixelBufferPixelFormatTypeKey,
        nil];

    [mCaptureDecompressedVideoOutput setPixelBufferAttributes:pixelBufferOptions];

    [mCaptureSession startRunning];

    // wait for the first frame at the new size
    grabFrame(60);
    [localpool drain];
}
// Reports frame width/height from the device's format description (the
// clean-aperture display size of the first connection); all other property
// ids return 0. Values are truncated to int before being returned.
double CvCaptureCAM::getProperty(int property_id) const{
    int retval;
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];

    NSArray* connections = [mCaptureDeviceInput connections];
    QTFormatDescription* format = [[connections objectAtIndex:0] formatDescription];
    NSSize s1 = [[format attributeForKey:QTFormatDescriptionVideoCleanApertureDisplaySizeAttribute] sizeValue];

    switch (property_id) {
        case CV_CAP_PROP_FRAME_WIDTH:
            retval = s1.width;
            break;
        case CV_CAP_PROP_FRAME_HEIGHT:
            retval = s1.height;
            break;
        default:
            retval = 0;
            break;
    }

    [localpool drain];
    return retval;
}
// Records a requested frame width/height; once BOTH have been provided the
// device is reconfigured via setWidthHeight(). DISABLE_AUTO_RESTART stores
// its value directly. Returns false for unknown property ids.
bool CvCaptureCAM::setProperty(int property_id, double value) {
    switch (property_id) {
        case CV_CAP_PROP_FRAME_WIDTH:
            width = value;
            settingWidth = 1;
            break;
        case CV_CAP_PROP_FRAME_HEIGHT:
            height = value;
            settingHeight = 1;
            break;
        case DISABLE_AUTO_RESTART:
            disableAutoRestart = value;
            return 1;
        default:
            return false;
    }
    // apply the new size only when both dimensions are pending
    if (settingWidth && settingHeight) {
        setWidthHeight();
        settingWidth = 0;
        settingHeight = 0;
    }
    return true;
}
/*****************************************************************************
*
* CaptureDelegate Implementation.
*
* CaptureDelegate is notified on a separate thread by the OS whenever there
* is a new frame. When "updateImage" is called from the main thread, it
* copies this new frame into an IplImage, but only if this frame has not
* been copied before. When "getOutput" is called from the main thread,
* it gives the last copied IplImage.
*
*****************************************************************************/
@implementation CaptureDelegate
// Designated initializer: zero all frame state; buffers are allocated
// lazily in updateImage once the first frame's size is known.
- (id)init {
    self = [super init];
    if (self) {
        newFrame = 0;
        imagedata = NULL;
        bgr_imagedata = NULL;
        currSize = 0;
        image = NULL;
        bgr_image = NULL;
    }
    return self;
}
// Frees the pixel buffers and IplImage headers allocated by updateImage.
-(void)dealloc {
    if (imagedata != NULL) free(imagedata);
    if (bgr_imagedata != NULL) free(bgr_imagedata);
    cvReleaseImage(&image);
    cvReleaseImage(&bgr_image);
    [super dealloc];
}
// Called by QTKit on a background thread for every new frame: swap the
// incoming buffer into mCurrentImageBuffer under @synchronized, mark that a
// new frame exists, and release the previously held buffer outside the lock.
- (void)captureOutput:(QTCaptureOutput *)captureOutput
didOutputVideoFrame:(CVImageBufferRef)videoFrame
withSampleBuffer:(QTSampleBuffer *)sampleBuffer
fromConnection:(QTCaptureConnection *)connection {
    (void)captureOutput;
    (void)sampleBuffer;
    (void)connection;

    // retain before publishing so the buffer outlives this callback
    CVBufferRetain(videoFrame);
    CVImageBufferRef imageBufferToRelease  = mCurrentImageBuffer;

    @synchronized (self) {
        mCurrentImageBuffer = videoFrame;
        newFrame = 1;
    }

    CVBufferRelease(imageBufferToRelease);
}
// Called by QTKit when the pipeline drops a frame; only logs the event.
- (void)captureOutput:(QTCaptureOutput *)captureOutput
didDropVideoFrameWithSampleBuffer:(QTSampleBuffer *)sampleBuffer
fromConnection:(QTCaptureConnection *)connection {
    // unused delegate arguments (signature is fixed by QTKit)
    (void)captureOutput;
    (void)sampleBuffer;
    (void)connection;
    std::cout << "Camera dropped frame!" << std::endl;
}
// Returns the last BGR image produced by updateImage (delegate-owned;
// NULL until the first frame was processed).
-(IplImage*) getOutput {
    return bgr_image;
}
// Copies the newest frame (if any) into the cached IplImages and converts
// BGRA -> BGR. Returns 0 when no new frame has arrived since the last call,
// 1 after a successful update. Buffers are (re)allocated whenever the
// frame's rowBytes*height footprint changes.
-(int) updateImage {
    if (newFrame==0) return 0;
    CVPixelBufferRef pixels;

    // take our own reference to the current buffer under the lock, so the
    // background thread can keep swapping in new frames
    @synchronized (self){
        pixels = CVBufferRetain(mCurrentImageBuffer);
        newFrame = 0;
    }

    CVPixelBufferLockBaseAddress(pixels, 0);
    uint32_t* baseaddress = (uint32_t*)CVPixelBufferGetBaseAddress(pixels);

    size_t width = CVPixelBufferGetWidth(pixels);
    size_t height = CVPixelBufferGetHeight(pixels);
    size_t rowBytes = CVPixelBufferGetBytesPerRow(pixels);

    if (rowBytes != 0) {

        // grow/shrink the scratch buffers if the frame footprint changed
        if (currSize != rowBytes*height*sizeof(char)) {
            currSize = rowBytes*height*sizeof(char);
            if (imagedata != NULL) free(imagedata);
            if (bgr_imagedata != NULL) free(bgr_imagedata);
            imagedata = (char*)malloc(currSize);
            bgr_imagedata = (char*)malloc(currSize);
        }

        memcpy(imagedata, baseaddress, currSize);

        // (re)point the 4-channel header at the BGRA copy
        if (image == NULL) {
            image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 4);
        }
        image->width = (int)width;
        image->height = (int)height;
        image->nChannels = 4;
        image->depth = IPL_DEPTH_8U;
        image->widthStep = (int)rowBytes;
        image->imageData = imagedata;
        image->imageSize = (int)currSize;

        // (re)point the 3-channel header at the BGR output buffer
        // NOTE(review): widthStep is set to the 4-channel rowBytes here —
        // works because the buffer is large enough, but looks off by 1/4.
        if (bgr_image == NULL) {
            bgr_image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 3);
        }
        bgr_image->width = (int)width;
        bgr_image->height = (int)height;
        bgr_image->nChannels = 3;
        bgr_image->depth = IPL_DEPTH_8U;
        bgr_image->widthStep = (int)rowBytes;
        bgr_image->imageData = bgr_imagedata;
        bgr_image->imageSize = (int)currSize;

        cvCvtColor(image, bgr_image, CV_BGRA2BGR);

    }

    CVPixelBufferUnlockBaseAddress(pixels, 0);
    CVBufferRelease(pixels);

    return 1;
}
// Intentionally empty timer callback; it exists only so the run loop in
// CvCaptureCAM::grabFrame has an attached timer and does not exit at once.
- (void)doFireTimer:(NSTimer *)timer {
    (void)timer;
    // dummy
}
@end
/*****************************************************************************
*
* CvCaptureFile Implementation.
*
* CvCaptureFile is the instantiation of a capture source for video files.
*
*****************************************************************************/
// Opens a movie file for frame-by-frame reading via QTKit.
// On failure the object is still constructed, but didStart() reports 0.
CvCaptureFile::CvCaptureFile(const char* filename) {
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    mCaptureSession = nil;
    image = NULL;
    bgr_image = NULL;
    imagedata = NULL;
    bgr_imagedata = NULL;
    currSize = 0;
    movieWidth = 0;
    movieHeight = 0;
    movieFPS = 0;
    currentFPS = 0;
    movieDuration = 0;
    changedPos = 0;
    started = 0;
    // Fix: initialize the out-parameter; QTMovie is not guaranteed to write
    // it on success, and the original read of an uninitialized pointer is UB.
    NSError* error = nil;
    mCaptureSession = [[QTMovie movieWithFile:[NSString stringWithCString:filename
                                               encoding:NSASCIIStringEncoding]
                                        error:&error] retain];
    // Fix: bail out *before* configuring the movie object. The original sent
    // setAttribute: first and only then checked for nil (harmless in ObjC
    // because messages to nil are ignored, but misleading and fragile).
    if (mCaptureSession == nil) {
        std::cout << "WARNING: Couldn't read movie file " << filename << std::endl;
        [localpool drain];
        started = 0;
        return;
    }
    [mCaptureSession setAttribute:[NSNumber numberWithBool:YES]
                           forKey:QTMovieLoopsAttribute];
    // Remember the end-of-movie timestamp so grabFrame() can detect EOF.
    [mCaptureSession gotoEnd];
    endOfMovie = [mCaptureSession currentTime];
    [mCaptureSession gotoBeginning];
    NSSize size = [[mCaptureSession attributeForKey:QTMovieNaturalSizeAttribute] sizeValue];
    movieWidth = size.width;
    movieHeight = size.height;
    movieFPS = getFPS();
    currentFPS = movieFPS;
    QTTime t;
    [[mCaptureSession attributeForKey:QTMovieDurationAttribute] getValue:&t];
    movieDuration = (t.timeValue *1000.0 / t.timeScale);
    started = 1;
    [localpool drain];
}
// Releases the conversion buffers, the image headers, and the movie session.
CvCaptureFile::~CvCaptureFile() {
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    if (imagedata != NULL) free(imagedata);
    if (bgr_imagedata != NULL) free(bgr_imagedata);
    // NOTE(review): image/bgr_image are headers whose imageData points at the
    // buffers freed above; cvReleaseImage may try to release that data again.
    // Confirm whether cvReleaseImageHeader would be the safer call here.
    cvReleaseImage(&image);
    cvReleaseImage(&bgr_image);
    [mCaptureSession release];
    [localpool drain];
}
// Reports whether the constructor successfully opened the movie (1) or not (0).
int CvCaptureFile::didStart() {
    return started;
}
// Advances the movie one frame and updates the instantaneous FPS estimate
// from the timestamp delta. Returns false once the end of the movie is hit.
bool CvCaptureFile::grabFrame() {
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    double t1 = getProperty(CV_CAP_PROP_POS_MSEC);
    QTTime curTime;
    curTime = [mCaptureSession currentTime];
    bool isEnd=(QTTimeCompare(curTime,endOfMovie) == NSOrderedSame);
    [mCaptureSession stepForward];
    double t2 = getProperty(CV_CAP_PROP_POS_MSEC);
    // After an explicit seek the t2-t1 delta is meaningless, so fall back to
    // the nominal movie FPS instead.
    if (t2>t1 && !changedPos) {
        currentFPS = 1000.0/(t2-t1);
    } else {
        currentFPS = movieFPS;
    }
    changedPos = 0;
    [localpool drain];
    return !isEnd;
}
// Decodes the frame at the current movie position into a CVPixelBuffer,
// converts ARGB -> BGRA in place, then to the 3-channel BGR image that is
// returned. The returned image is owned by this object.
IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    NSDictionary *attributes = [NSDictionary dictionaryWithObjectsAndKeys:
                                QTMovieFrameImageTypeCVPixelBufferRef, QTMovieFrameImageType,
#ifdef MAC_OS_X_VERSION_10_6
                                // Session mode is only available on 10.6+.
                                [NSNumber numberWithBool:YES], QTMovieFrameImageSessionMode,
#endif
                                nil];
    // NOTE(review): frameImageAtTime: can return nil on error (error:nil is
    // passed); the nil case is not handled before CVBufferRetain - confirm.
    CVPixelBufferRef frame = (CVPixelBufferRef)[mCaptureSession frameImageAtTime:[mCaptureSession currentTime]
                                                                  withAttributes:attributes
                                                                           error:nil];
    CVPixelBufferRef pixels = CVBufferRetain(frame);
    CVPixelBufferLockBaseAddress(pixels, 0);
    uint32_t* baseaddress = (uint32_t*)CVPixelBufferGetBaseAddress(pixels);
    size_t width = CVPixelBufferGetWidth(pixels);
    size_t height = CVPixelBufferGetHeight(pixels);
    size_t rowBytes = CVPixelBufferGetBytesPerRow(pixels);
    if (rowBytes != 0) {
        // (Re)allocate the staging buffers only when the frame size changes.
        if (currSize != rowBytes*height*sizeof(char)) {
            currSize = rowBytes*height*sizeof(char);
            if (imagedata != NULL) free(imagedata);
            if (bgr_imagedata != NULL) free(bgr_imagedata);
            imagedata = (char*)malloc(currSize);
            bgr_imagedata = (char*)malloc(currSize);
        }
        memcpy(imagedata, baseaddress, currSize);
        //ARGB -> BGRA
        for (unsigned int i = 0; i < currSize; i+=4) {
            char temp = imagedata[i];
            imagedata[i] = imagedata[i+3];
            imagedata[i+3] = temp;
            temp = imagedata[i+1];
            imagedata[i+1] = imagedata[i+2];
            imagedata[i+2] = temp;
        }
        if (image == NULL) {
            image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 4);
        }
        image->width = (int)width;
        image->height = (int)height;
        image->nChannels = 4;
        image->depth = IPL_DEPTH_8U;
        image->widthStep = (int)rowBytes;
        image->imageData = imagedata;
        image->imageSize = (int)currSize;
        if (bgr_image == NULL) {
            bgr_image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 3);
        }
        bgr_image->width = (int)width;
        bgr_image->height = (int)height;
        bgr_image->nChannels = 3;
        bgr_image->depth = IPL_DEPTH_8U;
        bgr_image->widthStep = (int)rowBytes;
        bgr_image->imageData = bgr_imagedata;
        bgr_image->imageSize = (int)currSize;
        cvCvtColor(image, bgr_image,CV_BGRA2BGR);
    }
    CVPixelBufferUnlockBaseAddress(pixels, 0);
    CVBufferRelease(pixels);
    [localpool drain];
    return bgr_image;
}
// CvCapture interface entry point; the stream index argument is ignored.
IplImage* CvCaptureFile::retrieveFrame(int) {
    return retrieveFramePixelBuffer();
}
// Estimates the movie frame rate by stepping one frame and measuring the
// timestamp delta (stepping backward first when already at position 0).
// The movie position is restored before returning.
double CvCaptureFile::getFPS() {
    if (mCaptureSession == nil) return 0;
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    double now = getProperty(CV_CAP_PROP_POS_MSEC);
    double retval = 0;
    if (now == 0) {
        [mCaptureSession stepForward];
        double t2 = getProperty(CV_CAP_PROP_POS_MSEC);
        [mCaptureSession stepBackward];
        // NOTE(review): divides by zero if the step did not change the
        // timestamp (e.g. single-frame movie) - confirm upstream handling.
        retval = 1000.0 / (t2-now);
    } else {
        [mCaptureSession stepBackward];
        double t2 = getProperty(CV_CAP_PROP_POS_MSEC);
        [mCaptureSession stepForward];
        retval = 1000.0 / (now-t2);
    }
    [localpool drain];
    return retval;
}
// Queries playback state. Unsupported properties (including FOURCC) yield 0.
double CvCaptureFile::getProperty(int property_id) const{
    if (mCaptureSession == nil) return 0;
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    double retval;
    QTTime t;
    //cerr << "get_prop"<<std::endl;
    switch (property_id) {
        case CV_CAP_PROP_POS_MSEC:
            // Current position converted from QTTime units to milliseconds.
            [[mCaptureSession attributeForKey:QTMovieCurrentTimeAttribute] getValue:&t];
            retval = t.timeValue * 1000.0 / t.timeScale;
            break;
        case CV_CAP_PROP_POS_FRAMES:
            // Derived from the millisecond position and the nominal FPS.
            retval = movieFPS * getProperty(CV_CAP_PROP_POS_MSEC) / 1000;
            break;
        case CV_CAP_PROP_POS_AVI_RATIO:
            retval = (getProperty(CV_CAP_PROP_POS_MSEC)) / (movieDuration );
            break;
        case CV_CAP_PROP_FRAME_WIDTH:
            retval = movieWidth;
            break;
        case CV_CAP_PROP_FRAME_HEIGHT:
            retval = movieHeight;
            break;
        case CV_CAP_PROP_FPS:
            retval = currentFPS;
            break;
        case CV_CAP_PROP_FRAME_COUNT:
            // movieDuration is in ms, hence the /1000.
            retval = movieDuration*movieFPS/1000;
            break;
        case CV_CAP_PROP_FOURCC:
        default:
            retval = 0;
    }
    [localpool drain];
    return retval;
}
// Seeks within the movie. Only the position properties are writable; frame
// and ratio positions are converted to milliseconds and delegated to the
// CV_CAP_PROP_POS_MSEC case. Returns true on success.
bool CvCaptureFile::setProperty(int property_id, double value) {
    if (mCaptureSession == nil) return false;
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    bool retval = false;
    QTTime t;
    double ms;
    switch (property_id) {
        case CV_CAP_PROP_POS_MSEC:
            [[mCaptureSession attributeForKey:QTMovieCurrentTimeAttribute] getValue:&t];
            t.timeValue = value * t.timeScale / 1000;
            [mCaptureSession setCurrentTime:t];
            // Tells grabFrame() that the next timestamp delta is invalid.
            changedPos = 1;
            retval = true;
            break;
        case CV_CAP_PROP_POS_FRAMES:
            ms = (value*1000.0 -5)/ currentFPS;
            retval = setProperty(CV_CAP_PROP_POS_MSEC, ms);
            break;
        case CV_CAP_PROP_POS_AVI_RATIO:
            ms = value * movieDuration;
            retval = setProperty(CV_CAP_PROP_POS_MSEC, ms);
            break;
        case CV_CAP_PROP_FRAME_WIDTH:
            //retval = movieWidth;
            break;
        case CV_CAP_PROP_FRAME_HEIGHT:
            //retval = movieHeight;
            break;
        case CV_CAP_PROP_FPS:
            //etval = currentFPS;
            break;
        case CV_CAP_PROP_FRAME_COUNT:
        {
            // NOTE(review): this branch reads the video track's sample count
            // and stores it into the bool return value - it acts like a
            // getter and returns "true" iff the count is non-zero. Confirm
            // whether this is intended for a setter.
            NSArray *videoTracks = [mCaptureSession tracksOfMediaType:QTMediaTypeVideo];
            if ([videoTracks count] > 0) {
                QTMedia *media = [[videoTracks objectAtIndex:0] media];
                retval = [[media attributeForKey:QTMediaSampleCountAttribute] longValue];
            } else {
                retval = 0;
            }
        }
            break;
        case CV_CAP_PROP_FOURCC:
        default:
            retval = false;
    }
    [localpool drain];
    return retval;
}
/*****************************************************************************
*
* CvVideoWriter Implementation.
*
* CvVideoWriter is the instantiation of a video output class
*
*****************************************************************************/
// Creates a QTKit movie writer. Any existing file at `filename` is removed,
// the FourCC is decoded into the codec string, and an editable empty movie
// container is created. On failure mMovie stays nil and writeFrame is a no-op.
CvVideoWriter_QT::CvVideoWriter_QT(const char* filename, int fourcc,
                                   double fps, CvSize frame_size,
                                   int is_color) {
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    movieFPS = fps;
    movieSize = frame_size;
    movieColor = is_color;
    mMovie = nil;
    path = [[[NSString stringWithCString:filename encoding:NSASCIIStringEncoding] stringByExpandingTildeInPath] retain];
    // Staging image reused by writeFrame for the BGR->ARGB conversion.
    argbimage = cvCreateImage(movieSize, IPL_DEPTH_8U, 4);
    // Unpack the little-endian FourCC integer into a 4-char codec string.
    char cc[5];
    cc[0] = fourcc & 255;
    cc[1] = (fourcc >> 8) & 255;
    cc[2] = (fourcc >> 16) & 255;
    cc[3] = (fourcc >> 24) & 255;
    cc[4] = 0;
    // Round-trip check: warn if the code cannot be reconstructed losslessly.
    int cc2 = CV_FOURCC(cc[0], cc[1], cc[2], cc[3]);
    if (cc2!=fourcc) {
        std::cout << "WARNING: Didn't properly encode FourCC. Expected " << fourcc
                  << " but got " << cc2 << "." << std::endl;
    }
    codec = [[NSString stringWithCString:cc encoding:NSASCIIStringEncoding] retain];
    NSError *error = nil;
    if (!mMovie) {
        NSFileManager* files = [NSFileManager defaultManager];
        if ([files fileExistsAtPath:path]) {
            if (![files removeItemAtPath:path error:nil]) {
                std::cout << "WARNING: Failed to remove existing file " << [path cStringUsingEncoding:NSASCIIStringEncoding] << std::endl;
            }
        }
        mMovie = [[QTMovie alloc] initToWritableFile:path error:&error];
        if (!mMovie) {
            std::cout << "WARNING: Could not create empty movie file container." << std::endl;
            [localpool drain];
            return;
        }
    }
    [mMovie setAttribute:[NSNumber numberWithBool:YES] forKey:QTMovieEditableAttribute];
    [localpool drain];
}
// Releases the staging image and the retained ObjC objects.
CvVideoWriter_QT::~CvVideoWriter_QT() {
    cvReleaseImage(&argbimage);
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    [mMovie release];
    [path release];
    [codec release];
    [localpool drain];
}
// Appends one BGR frame to the movie: converts BGR -> ARGB (alpha forced to
// 255), wraps the pixels in an NSImage and adds it with the configured codec.
// Returns true on success, false if the movie file could not be updated.
bool CvVideoWriter_QT::writeFrame(const IplImage* image) {
    NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
    cvCvtColor(image, argbimage, CV_BGR2BGRA);
    unsigned char* imagedata_ = (unsigned char*)argbimage->imageData;
    //BGRA --> ARGB
    for (int j = 0; j < argbimage->height; j++) {
        int rowstart = argbimage->widthStep * j;
        for (int i = rowstart; i < rowstart+argbimage->widthStep; i+=4) {
            unsigned char temp = imagedata_[i];
            imagedata_[i] = 255;        // opaque alpha in the A slot
            imagedata_[i+3] = temp;
            temp = imagedata_[i+2];
            imagedata_[i+2] = imagedata_[i+1];
            imagedata_[i+1] = temp;
        }
    }
    // Wrap the raw pixel buffer (no copy) for QTKit consumption.
    NSBitmapImageRep* imageRep = [[NSBitmapImageRep alloc] initWithBitmapDataPlanes:&imagedata_
                                                                         pixelsWide:movieSize.width
                                                                         pixelsHigh:movieSize.height
                                                                      bitsPerSample:8
                                                                    samplesPerPixel:4
                                                                           hasAlpha:YES
                                                                           isPlanar:NO
                                                                     colorSpaceName:NSDeviceRGBColorSpace
                                                                       bitmapFormat:NSAlphaFirstBitmapFormat
                                                                        bytesPerRow:argbimage->widthStep
                                                                       bitsPerPixel:32] ;
    NSImage* nsimage = [[NSImage alloc] init];
    [nsimage addRepresentation:imageRep];
    /*
     codecLosslessQuality          = 0x00000400,
     codecMaxQuality               = 0x000003FF,
     codecMinQuality               = 0x00000000,
     codecLowQuality               = 0x00000100,
     codecNormalQuality            = 0x00000200,
     codecHighQuality              = 0x00000300
     */
    // One frame lasts 100 ticks on a 100*fps timescale, i.e. 1/fps seconds.
    [mMovie addImage:nsimage forDuration:QTMakeTime(100,100*movieFPS) withAttributes:[NSDictionary dictionaryWithObjectsAndKeys:
                                                                                     codec, QTAddImageCodecType,
                                                                                     //[NSNumber numberWithInt:codecLowQuality], QTAddImageCodecQuality,
                                                                                     [NSNumber numberWithInt:100*movieFPS], QTTrackTimeScaleAttribute,nil]];
    // Fix: the original always returned 1 (true) even when flushing the movie
    // file failed; propagate the failure to the caller instead.
    bool success = true;
    if (![mMovie updateMovieFile]) {
        std::cout << "Didn't successfully update movie file." << std::endl;
        success = false;
    }
    [imageRep release];
    [nsimage release];
    [localpool drain];
    return success;
}
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2008, Xavier Delacour, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
// 2008-04-27 Xavier Delacour <xavier.delacour@gmail.com>
#include "precomp.hpp"
#include <unistd.h>
#include <unicap.h>
extern "C" {
#include <ucil.h>
}
#ifdef NDEBUG
#define CV_WARN(message)
#else
#define CV_WARN(message) fprintf(stderr, "warning: %s (%s:%d)\n", message, __FILE__, __LINE__)
#endif
// Camera capture implementation on top of the unicap/ucil libraries.
// Lifecycle: open() stores the desired parameters and calls initDevice();
// setProperty() may re-initialize the device when size/format change.
struct CvCapture_Unicap : public CvCapture
{
    CvCapture_Unicap() { init(); }
    virtual ~CvCapture_Unicap() { close(); }

    virtual bool open( int index );
    virtual void close();

    virtual double getProperty(int) const CV_OVERRIDE;
    virtual bool setProperty(int, double) CV_OVERRIDE;
    virtual bool grabFrame() CV_OVERRIDE;
    virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
    virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_UNICAP; }

    bool shutdownDevice();
    bool initDevice();

    // Resets every member to a safe default state.
    void init()
    {
        device_initialized = false;
        // Fix: desired_device was never initialized here, leaving it
        // indeterminate until open() assigned it.
        desired_device = 0;
        desired_format = 0;
        desired_size = cvSize(0,0);
        convert_rgb = false;
        handle = 0;
        memset( &device, 0, sizeof(device) );
        memset( &format_spec, 0, sizeof(format_spec) );
        memset( &format, 0, sizeof(format) );
        memset( &raw_buffer, 0, sizeof(raw_buffer) );
        memset( &buffer, 0, sizeof(buffer) );
        raw_frame = frame = 0;
    }

    bool device_initialized;       // true between successful initDevice() and shutdownDevice()
    int desired_device;            // enumeration index chosen via open()
    int desired_format;            // format enumeration index
    CvSize desired_size;           // requested capture size
    bool convert_rgb;              // convert to BGR in retrieveFrame() when true

    unicap_handle_t handle;        // open device handle
    unicap_device_t device;        // device description
    unicap_format_t format_spec;   // wildcard used when enumerating formats
    unicap_format_t format;        // negotiated capture format
    unicap_data_buffer_t raw_buffer; // driver-filled buffer (raw format)
    unicap_data_buffer_t buffer;   // BGR conversion target buffer

    IplImage *raw_frame;           // image backed by raw_buffer
    IplImage *frame;               // image backed by buffer (BGR)
};
// Stops capture, closes the device handle and releases the frame images.
// Uses the legacy CV_ERROR/__BEGIN__ macro flow: on error, control jumps to
// __END__ and `result` stays false.
bool CvCapture_Unicap::shutdownDevice() {
    bool result = false;
    CV_FUNCNAME("CvCapture_Unicap::shutdownDevice");
    __BEGIN__;

    if (!SUCCESS(unicap_stop_capture(handle)))
        CV_ERROR(CV_StsError, "unicap: failed to stop capture on device\n");

    if (!SUCCESS(unicap_close(handle)))
        CV_ERROR(CV_StsError, "unicap: failed to close the device\n");

    cvReleaseImage(&raw_frame);
    cvReleaseImage(&frame);

    device_initialized = false;

    result = true;
    __END__;
    return result;
}
// Opens the desired device, negotiates a capture format, allocates the raw
// and BGR conversion buffers, and starts streaming. Any failure tears down
// partially-initialized state and returns false (via the CV_ERROR macro flow).
bool CvCapture_Unicap::initDevice() {
    bool result = false;
    CV_FUNCNAME("CvCapture_Unicap::initDevice");
    __BEGIN__;

    // Re-initialization: shut down the previous session first.
    if (device_initialized && !shutdownDevice())
        return false;

    if(!SUCCESS(unicap_enumerate_devices(NULL, &device, desired_device)))
        CV_ERROR(CV_StsError, "unicap: failed to get info for device\n");

    if(!SUCCESS(unicap_open( &handle, &device)))
        CV_ERROR(CV_StsError, "unicap: failed to open device\n");

    unicap_void_format(&format_spec);

    if (!SUCCESS(unicap_enumerate_formats(handle, &format_spec, &format, desired_format))) {
        shutdownDevice();
        CV_ERROR(CV_StsError, "unicap: failed to get video format\n");
    }

    int i;
    if (format.sizes)
    {
        // Search (from the end) for an exact match of the desired size;
        // if none is found the loop ends at i == 0 and sizes[0] is used.
        for (i = format.size_count - 1; i > 0; i--)
            if (format.sizes[i].width == desired_size.width &&
                format.sizes[i].height == desired_size.height)
                break;
        format.size.width = format.sizes[i].width;
        format.size.height = format.sizes[i].height;
    }

    if (!SUCCESS(unicap_set_format(handle, &format))) {
        shutdownDevice();
        CV_ERROR(CV_StsError, "unicap: failed to set video format\n");
    }

    // Raw buffer: backed by raw_frame's pixel data, in the device's format.
    memset(&raw_buffer, 0x0, sizeof(unicap_data_buffer_t));
    raw_frame = cvCreateImage(cvSize(format.size.width,
                                     format.size.height),
                              8, format.bpp / 8);
    memcpy(&raw_buffer.format, &format, sizeof(raw_buffer.format));
    raw_buffer.data = (unsigned char*)raw_frame->imageData;
    raw_buffer.buffer_size = format.size.width *
        format.size.height * format.bpp / 8;

    // Conversion buffer: same geometry, but 24-bit BGR.
    memset(&buffer, 0x0, sizeof(unicap_data_buffer_t));
    memcpy(&buffer.format, &format, sizeof(buffer.format));

    buffer.format.fourcc = UCIL_FOURCC('B','G','R','3');
    buffer.format.bpp = 24;
    // * todo support greyscale output
    //    buffer.format.fourcc = UCIL_FOURCC('G','R','E','Y');
    //    buffer.format.bpp = 8;

    frame = cvCreateImage(cvSize(buffer.format.size.width,
                                 buffer.format.size.height),
                          8, buffer.format.bpp / 8);
    buffer.data = (unsigned char*)frame->imageData;
    buffer.buffer_size = buffer.format.size.width *
        buffer.format.size.height * buffer.format.bpp / 8;

    if(!SUCCESS(unicap_start_capture(handle))) {
        shutdownDevice();
        CV_ERROR(CV_StsError, "unicap: failed to start capture on device\n");
    }

    device_initialized = true;
    result = true;
    __END__;
    return result;
}
// Tears the device down, but only if a previous initDevice() succeeded.
void CvCapture_Unicap::close() {
    if (!device_initialized)
        return;
    shutdownDevice();
}
// Queues the raw buffer with the driver and waits for it to be filled,
// retrying up to 100 times (100 ms apart) before giving up.
// Returns true once a frame has landed in raw_buffer.
bool CvCapture_Unicap::grabFrame() {
    bool result = false;
    CV_FUNCNAME("CvCapture_Unicap::grabFrame");
    __BEGIN__;

    unicap_data_buffer_t *returned_buffer;

    int retry_count = 100;

    while (retry_count--) {
        if(!SUCCESS(unicap_queue_buffer(handle, &raw_buffer)))
            CV_ERROR(CV_StsError, "unicap: failed to queue a buffer on device\n");

        if(SUCCESS(unicap_wait_buffer(handle, &returned_buffer)))
        {
            result = true;
            EXIT;  // success: jump to __END__
        }

        CV_WARN("unicap: failed to wait for buffer on device\n");
        usleep(100 * 1000);  // back off 100 ms before retrying
    }

    __END__;
    return result;
}
// Returns the frame grabbed by grabFrame(): the raw driver-format image,
// or a BGR conversion of it when convert_rgb is enabled.
IplImage * CvCapture_Unicap::retrieveFrame(int) {
    if (!convert_rgb)
        return raw_frame;
    ucil_convert_buffer(&buffer, &raw_buffer);
    return frame;
}
// Reports the requested/configured capture parameters. Properties that the
// unicap backend does not track (positions, FPS, FOURCC, ...) return 0.
double CvCapture_Unicap::getProperty(int id) const
{
    switch (id) {
    case CV_CAP_PROP_POS_MSEC: break;
    case CV_CAP_PROP_POS_FRAMES: break;
    case CV_CAP_PROP_POS_AVI_RATIO: break;
    case CV_CAP_PROP_FRAME_WIDTH:
        return desired_size.width;
    case CV_CAP_PROP_FRAME_HEIGHT:
        return desired_size.height;
    case CV_CAP_PROP_FPS: break;
    case CV_CAP_PROP_FOURCC: break;
    case CV_CAP_PROP_FRAME_COUNT: break;
    case CV_CAP_PROP_FORMAT:
        return desired_format;
    case CV_CAP_PROP_MODE: break;
    case CV_CAP_PROP_BRIGHTNESS: break;
    case CV_CAP_PROP_CONTRAST: break;
    case CV_CAP_PROP_SATURATION: break;
    case CV_CAP_PROP_HUE: break;
    case CV_CAP_PROP_GAIN: break;
    case CV_CAP_PROP_CONVERT_RGB:
        return convert_rgb;
    }

    return 0;
}
// Updates capture parameters. Changing frame size or format triggers a
// device re-initialization; returns false if that re-init fails.
// Unsupported properties are silently accepted (legacy behavior).
bool CvCapture_Unicap::setProperty(int id, double value) {
    bool reinit = false;

    switch (id) {
    case CV_CAP_PROP_POS_MSEC: break;
    case CV_CAP_PROP_POS_FRAMES: break;
    case CV_CAP_PROP_POS_AVI_RATIO: break;
    case CV_CAP_PROP_FRAME_WIDTH:
        desired_size.width = (int)value;
        reinit = true;
        break;
    case CV_CAP_PROP_FRAME_HEIGHT:
        desired_size.height = (int)value;
        reinit = true;
        break;
    case CV_CAP_PROP_FPS: break;
    case CV_CAP_PROP_FOURCC: break;
    case CV_CAP_PROP_FRAME_COUNT: break;
    case CV_CAP_PROP_FORMAT:
        // Fix: the original stored the property id itself
        // (`desired_format = id;`, i.e. CV_CAP_PROP_FORMAT) instead of
        // the caller-supplied value.
        desired_format = (int)value;
        reinit = true;
        break;
    case CV_CAP_PROP_MODE: break;
    case CV_CAP_PROP_BRIGHTNESS: break;
    case CV_CAP_PROP_CONTRAST: break;
    case CV_CAP_PROP_SATURATION: break;
    case CV_CAP_PROP_HUE: break;
    case CV_CAP_PROP_GAIN: break;
    case CV_CAP_PROP_CONVERT_RGB:
        convert_rgb = value != 0;
        break;
    }

    if (reinit && !initDevice())
        return false;

    return true;
}
// Opens camera `index` (negative indices fall back to device 0) with the
// default 320x240 BGR configuration. Returns true if the device started.
bool CvCapture_Unicap::open(int index)
{
    close();
    device_initialized = false;

    desired_device = index < 0 ? 0 : index;
    desired_format = 0;
    desired_size = cvSize(320, 240);
    convert_rgb = true;

    return initDevice();
}
// Factory: creates and opens a unicap camera capture.
// Returns 0 (and frees the object) if the device cannot be opened.
CvCapture * cvCreateCameraCapture_Unicap(const int index)
{
    CvCapture_Unicap *capture = new CvCapture_Unicap;
    if (!capture->open(index))
    {
        delete capture;
        return 0;
    }
    return capture;
}
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// Intel License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of Intel Corporation may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <vfw.h>
#ifdef __GNUC__
#define WM_CAP_FIRSTA (WM_USER)
#define capSendMessage(hwnd,m,w,l) (IsWindow(hwnd)?SendMessage(hwnd,m,w,l):0)
#endif
#if defined _M_X64 && defined _MSC_VER
#pragma optimize("",off)
#pragma warning(disable: 4748)
#endif
/********************* Capturing video from AVI via VFW ************************/
// Builds a BITMAPINFOHEADER describing a single-plane DIB of the given
// geometry and bit depth; compression defaults to uncompressed RGB.
static BITMAPINFOHEADER icvBitmapHeader( int width, int height, int bpp, int compression = BI_RGB )
{
    BITMAPINFOHEADER header = BITMAPINFOHEADER();
    header.biSize = sizeof(header);
    header.biWidth = width;
    header.biHeight = height;
    header.biBitCount = (WORD)bpp;
    header.biCompression = compression;
    header.biPlanes = 1;
    return header;
}
// One-time, process-wide initialization of the AVIFile library.
// Safe to call repeatedly; only the first call does work.
static void icvInitCapture_VFW()
{
    static int isInitialized = 0;
    if( isInitialized )
        return;
    AVIFileInit();
    isInitialized = 1;
}
// Reads video frames from an AVI file through the legacy Video for Windows
// (AVIFile) API. COM is initialized per-instance for the AVIFile calls.
class CvCaptureAVI_VFW : public CvCapture
{
public:
    CvCaptureAVI_VFW()
    {
        CoInitialize(NULL);
        init();
    }

    virtual ~CvCaptureAVI_VFW()
    {
        close();
        CoUninitialize();
    }

    virtual bool open( const char* filename );
    virtual void close();

    virtual double getProperty(int) const CV_OVERRIDE;
    virtual bool setProperty(int, double) CV_OVERRIDE;
    virtual bool grabFrame() CV_OVERRIDE;
    virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
    virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_VFW; }

protected:
    void init();

    PAVIFILE            avifile;     // open AVI file handle
    PAVISTREAM          avistream;   // first video stream of the file
    PGETFRAME           getframe;    // decompressed-frame accessor
    AVISTREAMINFO       aviinfo;     // cached stream metadata
    BITMAPINFOHEADER  * bmih;        // last frame returned by AVIStreamGetFrame
    CvSlice             film_range;  // [start_index, end_index) of frames
    double              fps;         // frames per second from the stream info
    int                 pos;         // next frame index to grab
    IplImage*           frame;       // reusable output image (flipped copy)
    CvSize              size;        // frame size from the stream rectangle
};
// Resets every member to its closed/empty state.
void CvCaptureAVI_VFW::init()
{
    avifile = 0;
    avistream = 0;
    getframe = 0;
    memset( &aviinfo, 0, sizeof(aviinfo) );
    bmih = 0;
    film_range = cvSlice(0,0);
    fps = 0;
    pos = 0;
    frame = 0;
    size = cvSize(0,0);
}
// Releases all AVIFile resources (in reverse acquisition order) and the
// cached frame, then resets the members for a possible re-open.
void CvCaptureAVI_VFW::close()
{
    if( getframe )
        AVIStreamGetFrameClose( getframe );

    if( avistream )
        AVIStreamRelease( avistream );

    if( avifile )
        AVIFileRelease( avifile );

    if (frame)
        cvReleaseImage( &frame );

    init();
}
// Opens an AVI file for reading: grabs the first video stream, caches its
// metadata, and opens a decompressed-frame accessor (24-bit first, falling
// back to 8-bit palettized). Returns false and cleans up on any failure.
bool CvCaptureAVI_VFW::open( const char* filename )
{
    close();
    icvInitCapture_VFW();

    if( !filename )
        return false;

    HRESULT hr = AVIFileOpen( &avifile, filename, OF_READ, NULL );
    if( SUCCEEDED(hr))
    {
        hr = AVIFileGetStream( avifile, &avistream, streamtypeVIDEO, 0 );
        if( SUCCEEDED(hr))
        {
            hr = AVIStreamInfo( avistream, &aviinfo, sizeof(aviinfo));
            if( SUCCEEDED(hr))
            {
                size.width = aviinfo.rcFrame.right - aviinfo.rcFrame.left;
                size.height = aviinfo.rcFrame.bottom - aviinfo.rcFrame.top;
                BITMAPINFOHEADER bmihdr = icvBitmapHeader( size.width, size.height, 24 );

                film_range.start_index = (int)aviinfo.dwStart;
                film_range.end_index = film_range.start_index + (int)aviinfo.dwLength;
                fps = (double)aviinfo.dwRate/aviinfo.dwScale;
                pos = film_range.start_index;
                getframe = AVIStreamGetFrameOpen( avistream, &bmihdr );
                if( getframe != 0 )
                    return true;

                // Attempt to open as 8-bit AVI.
                bmihdr = icvBitmapHeader( size.width, size.height, 8);
                getframe = AVIStreamGetFrameOpen( avistream, &bmihdr );
                if( getframe != 0 )
                    return true;
            }
        }
    }

    close();
    return false;
}
// Decodes the next frame and advances the position counter. The returned
// bitmap header is cached in `bmih` for retrieveFrame().
bool CvCaptureAVI_VFW::grabFrame()
{
    if( avistream )
        bmih = (BITMAPINFOHEADER*)AVIStreamGetFrame( getframe, pos++ );
    return bmih != 0;
}
// Wraps the DIB returned by grabFrame() in an IplImage header and copies it
// (vertically flipped, since DIBs are bottom-up) into the reusable `frame`.
// Returns 0 if no frame has been grabbed.
IplImage* CvCaptureAVI_VFW::retrieveFrame(int)
{
    if( avistream && bmih )
    {
        bool isColor = bmih->biBitCount == 24;
        int nChannels = (isColor) ? 3 : 1;
        IplImage src;
        cvInitImageHeader( &src, cvSize( bmih->biWidth, bmih->biHeight ),
                           IPL_DEPTH_8U, nChannels, IPL_ORIGIN_BL, 4 );

        // Pixel data follows the header in memory.
        char* dataPtr = (char*)(bmih + 1);

        // Only account for the color map size if we are an 8-bit image and the color map is used
        if (!isColor)
        {
            static int RGBQUAD_SIZE_PER_BYTE = sizeof(RGBQUAD)/sizeof(BYTE);
            int offsetFromColormapToData = (int)bmih->biClrUsed*RGBQUAD_SIZE_PER_BYTE;
            dataPtr += offsetFromColormapToData;
        }

        cvSetData( &src, dataPtr, src.widthStep );

        // Reallocate the output image only when the geometry changes.
        if( !frame || frame->width != src.width || frame->height != src.height )
        {
            cvReleaseImage( &frame );
            frame = cvCreateImage( cvGetSize(&src), 8, nChannels );
        }

        cvFlip( &src, frame, 0 );
        return frame;
    }

    return 0;
}
// Queries playback state from the cached stream metadata.
// Unsupported properties return 0.
double CvCaptureAVI_VFW::getProperty( int property_id ) const
{
    switch( property_id )
    {
    case CV_CAP_PROP_POS_MSEC:
        return cvRound(pos*1000./fps);
    case CV_CAP_PROP_POS_FRAMES:
        return pos;
    case CV_CAP_PROP_POS_AVI_RATIO:
        // Epsilon avoids division by zero for an empty range.
        return (pos - film_range.start_index)/
               (film_range.end_index - film_range.start_index + 1e-10);
    case CV_CAP_PROP_FRAME_WIDTH:
        return size.width;
    case CV_CAP_PROP_FRAME_HEIGHT:
        return size.height;
    case CV_CAP_PROP_FPS:
        return fps;
    case CV_CAP_PROP_FOURCC:
        return aviinfo.fccHandler;
    case CV_CAP_PROP_FRAME_COUNT:
        return film_range.end_index - film_range.start_index;
    }
    return 0;
}
// Seeks within the AVI stream. Only the three position properties are
// writable; the target frame index is clamped into the stream's frame range.
// Returns false for every other property.
bool CvCaptureAVI_VFW::setProperty( int property_id, double value )
{
    int target;
    switch( property_id )
    {
    case CV_CAP_PROP_POS_MSEC:
        target = cvRound(value*fps*0.001);
        break;
    case CV_CAP_PROP_POS_AVI_RATIO:
        target = cvRound(value*(film_range.end_index -
                                film_range.start_index) +
                         film_range.start_index);
        break;
    case CV_CAP_PROP_POS_FRAMES:
        target = cvRound(value);
        break;
    default:
        return false;
    }

    // Clamp the requested position into the valid frame range.
    if( target < film_range.start_index )
        target = film_range.start_index;
    if( target > film_range.end_index )
        target = film_range.end_index;

    pos = target;
    return true;
}
// Factory: creates an AVI-file capture via VFW.
// Returns 0 (and frees the object) if the file cannot be opened.
CvCapture* cvCreateFileCapture_VFW (const char* filename)
{
    CvCaptureAVI_VFW* capture = new CvCaptureAVI_VFW;
    if( !capture->open(filename) )
    {
        delete capture;
        return 0;
    }
    return capture;
}
/********************* Capturing video from camera via VFW *********************/
class CvCaptureCAM_VFW : public CvCapture
{
public:
CvCaptureCAM_VFW()
{
CoInitialize(NULL);
init();
}
virtual ~CvCaptureCAM_VFW()
{
close();
CoUninitialize();
}
virtual bool open( int index );
virtual void close();
virtual double getProperty(int) const;
virtual bool setProperty(int, double);
virtual bool grabFrame();
virtual IplImage* retrieveFrame(int);
virtual int getCaptureDomain() { return CV_CAP_VFW; } // Return the type of the capture object: CV_CAP_VFW, etc...
protected:
void init();
void closeHIC();
static LRESULT PASCAL frameCallback( HWND hWnd, VIDEOHDR* hdr );
CAPDRIVERCAPS caps;
HWND capWnd;
VIDEOHDR* hdr;
DWORD fourcc;
int width, height;
int widthSet, heightSet;
HIC hic;
IplImage* frame;
};
// Resets every member to its closed/empty state.
void CvCaptureCAM_VFW::init()
{
    memset( &caps, 0, sizeof(caps) );
    capWnd = 0;
    hdr = 0;
    fourcc = 0;
    hic = 0;
    frame = 0;
    width = height = -1;
    widthSet = heightSet = 0;
}
// Finishes and closes the open decompressor session, if any.
void CvCaptureCAM_VFW::closeHIC()
{
    if( hic )
    {
        ICDecompressEnd( hic );
        ICClose( hic );
        hic = 0;
    }
}
// VFW frame callback: stashes the frame header pointer in the capture
// instance associated with the window (stored via capSetUserData).
LRESULT PASCAL CvCaptureCAM_VFW::frameCallback( HWND hWnd, VIDEOHDR* hdr )
{
    CvCaptureCAM_VFW* capture = 0;

    if (!hWnd) return FALSE;

    capture = (CvCaptureCAM_VFW*)capGetUserData(hWnd);
    if (!capture) return (LRESULT)FALSE;

    capture->hdr = hdr;

    return (LRESULT)TRUE;
}
// Initialize camera input
// Connects to camera `wIndex` (scanning indices wIndex..9 for the first
// driver that accepts a connection), sets up the capture window, installs
// the frame callback, and reads the initial frame geometry.
bool CvCaptureCAM_VFW::open( int wIndex )
{
    char szDeviceName[80];
    char szDeviceVersion[80];
    HWND hWndC = 0;

    close();

    // VFW supports at most 10 capture drivers (indices 0..9).
    if( (unsigned)wIndex >= 10 )
        wIndex = 0;

    for( ; wIndex < 10; wIndex++ )
    {
        if( capGetDriverDescription( wIndex, szDeviceName,
            sizeof (szDeviceName), szDeviceVersion,
            sizeof (szDeviceVersion)))
        {
            hWndC = capCreateCaptureWindow ( "My Own Capture Window",
                WS_POPUP | WS_CHILD, 0, 0, 320, 240, 0, 0);
            if( capDriverConnect (hWndC, wIndex))
                break;
            // This driver refused the connection; destroy and try the next.
            DestroyWindow( hWndC );
            hWndC = 0;
        }
    }

    if( hWndC )
    {
        capWnd = hWndC;
        hdr = 0;
        hic = 0;
        fourcc = (DWORD)-1;

        memset( &caps, 0, sizeof(caps));
        capDriverGetCaps( hWndC, &caps, sizeof(caps));
        CAPSTATUS status = {};
        capGetStatus(hWndC, &status, sizeof(status));
        // Resize the (hidden) window to the driver's native image size.
        ::SetWindowPos(hWndC, NULL, 0, 0, status.uiImageWidth, status.uiImageHeight, SWP_NOZORDER|SWP_NOMOVE);
        // Let frameCallback() find this instance from the window handle.
        capSetUserData( hWndC, (size_t)this );
        capSetCallbackOnFrame( hWndC, frameCallback );
        CAPTUREPARMS p;
        capCaptureGetSetup(hWndC,&p,sizeof(CAPTUREPARMS));
        p.dwRequestMicroSecPerFrame = 66667/2; // 30 FPS
        capCaptureSetSetup(hWndC,&p,sizeof(CAPTUREPARMS));
        //capPreview( hWndC, 1 );
        capPreviewScale(hWndC,FALSE);
        capPreviewRate(hWndC,1);

        // Get frame initial parameters.
        const DWORD size = capGetVideoFormatSize(capWnd);
        if( size > 0 )
        {
            unsigned char *pbi = new unsigned char[size];
            if( pbi )
            {
                if( capGetVideoFormat(capWnd, pbi, size) == size )
                {
                    BITMAPINFOHEADER& vfmt = ((BITMAPINFO*)pbi)->bmiHeader;
                    widthSet = vfmt.biWidth;
                    heightSet = vfmt.biHeight;
                    fourcc = vfmt.biCompression;
                }

                delete []pbi;
            }
        }
        // And alternative way in case of failure.
        if( widthSet == 0 || heightSet == 0 )
        {
            widthSet = status.uiImageWidth;
            heightSet = status.uiImageHeight;
        }
    }

    return capWnd != 0;
}
// Uninstalls the frame callback, disconnects from the driver, destroys the
// capture window and the decompressor, then resets all members.
void CvCaptureCAM_VFW::close()
{
    if( capWnd )
    {
        capSetCallbackOnFrame( capWnd, NULL );
        capDriverDisconnect( capWnd );
        DestroyWindow( capWnd );
        closeHIC();
    }
    cvReleaseImage( &frame );
    init();
}
// Requests a single frame from the driver without stopping the preview
// stream. The frame is delivered asynchronously via frameCallback().
bool CvCaptureCAM_VFW::grabFrame()
{
    if( !capWnd )
        return false;
    return capGrabFrameNoStop(capWnd) == TRUE;
}
// Converts the frame captured by grabFrame() into a BGR IplImage.
// Handles three source formats: NV12 (converted with cvCvtColor), other
// compressed formats (decompressed to 24-bit via an HIC session), and plain
// bottom-up RGB24 (just flipped). Returns 0 on any failure.
IplImage* CvCaptureCAM_VFW::retrieveFrame(int)
{
    BITMAPINFO vfmt;
    memset( &vfmt, 0, sizeof(vfmt));
    BITMAPINFOHEADER& vfmt0 = vfmt.bmiHeader;

    if( !capWnd )
        return 0;

    const DWORD sz = capGetVideoFormat( capWnd, &vfmt, sizeof(vfmt));
    const int prevWidth = frame ? frame->width : 0;
    const int prevHeight = frame ? frame->height : 0;

    if( !hdr || hdr->lpData == 0 || sz == 0 )
        return 0;

    // Reallocate the output image when the capture geometry changes.
    if( !frame || frame->width != vfmt0.biWidth || frame->height != vfmt0.biHeight )
    {
        cvReleaseImage( &frame );
        frame = cvCreateImage( cvSize( vfmt0.biWidth, vfmt0.biHeight ), 8, 3 );
    }

    if ( vfmt0.biCompression == MAKEFOURCC('N','V','1','2') )
    {
        // Frame is in YUV 4:2:0 NV12 format, convert to BGR color space
        // See https://msdn.microsoft.com/en-us/library/windows/desktop/dd206750(v=vs.85).aspx#nv12)
        IplImage src;
        cvInitImageHeader( &src, cvSize( vfmt0.biWidth, vfmt0.biHeight * 3 / 2 ), IPL_DEPTH_8U, 1, IPL_ORIGIN_BL, 4 );
        cvSetData( &src, hdr->lpData, src.widthStep );
        cvCvtColor( &src, frame, CV_YUV2BGR_NV12 );
    }
    else if( vfmt0.biCompression != BI_RGB ||
             vfmt0.biBitCount != 24 )
    {
        // Compressed format: decompress to 24-bit RGB via an HIC session,
        // re-opening it when the format or geometry changed.
        BITMAPINFOHEADER vfmt1 = icvBitmapHeader( vfmt0.biWidth, vfmt0.biHeight, 24 );

        if( hic == 0 || fourcc != vfmt0.biCompression ||
            prevWidth != vfmt0.biWidth || prevHeight != vfmt0.biHeight )
        {
            closeHIC();
            hic = ICOpen( MAKEFOURCC('V','I','D','C'),
                          vfmt0.biCompression, ICMODE_DECOMPRESS );
            if( hic )
            {
                if( ICDecompressBegin( hic, &vfmt0, &vfmt1 ) != ICERR_OK )
                {
                    closeHIC();
                    return 0;
                }
            }
        }

        if( !hic || ICDecompress( hic, 0, &vfmt0, hdr->lpData,
            &vfmt1, frame->imageData ) != ICERR_OK )
        {
            closeHIC();
            return 0;
        }

        // DIBs are bottom-up; flip to top-down.
        cvFlip( frame, frame, 0 );
    }
    else
    {
        // Uncompressed RGB24: wrap the buffer and flip into the output image.
        IplImage src;
        cvInitImageHeader( &src, cvSize(vfmt0.biWidth, vfmt0.biHeight),
            IPL_DEPTH_8U, 3, IPL_ORIGIN_BL, 4 );
        cvSetData( &src, hdr->lpData, src.widthStep );
        cvFlip( &src, frame, 0 );
    }

    return frame;
}
// Reads a capture property. Width/height/fourcc come from cached members;
// FPS is queried live from the capture driver. Unknown or unavailable
// properties yield 0.
double CvCaptureCAM_VFW::getProperty( int property_id ) const
{
    if( property_id == CV_CAP_PROP_FRAME_WIDTH )
        return widthSet;
    if( property_id == CV_CAP_PROP_FRAME_HEIGHT )
        return heightSet;
    if( property_id == CV_CAP_PROP_FOURCC )
        return fourcc;
    if( property_id == CV_CAP_PROP_FPS )
    {
        CAPTUREPARMS params = {};
        // Frame rate is stored by the driver as microseconds per frame.
        if( capCaptureGetSetup(capWnd, &params, sizeof(params)) )
            return 1e6 / params.dwRequestMicroSecPerFrame;
    }
    return 0;
}
// Sets a capture property.
//  - FRAME_WIDTH / FRAME_HEIGHT are latched into `width`/`height`; once both
//    are positive, the video format and capture window are resized.
//  - FPS is applied immediately through capCaptureSetSetup.
// Returns true on success (or when a size component was latched but its
// counterpart is still pending), false on failure / unsupported property.
//
// Fix vs. original: removed the dead `if( !pbi ) return false;` check after
// `new unsigned char[size]` — the throwing form of operator new[] never
// returns null (it throws std::bad_alloc), so that branch was unreachable.
bool CvCaptureCAM_VFW::setProperty(int property_id, double value)
{
    bool handledSize = false;
    switch( property_id )
    {
    case CV_CAP_PROP_FRAME_WIDTH:
        width = cvRound(value);
        handledSize = true;
        break;
    case CV_CAP_PROP_FRAME_HEIGHT:
        height = cvRound(value);
        handledSize = true;
        break;
    case CV_CAP_PROP_FOURCC:
        break;  // FOURCC is read-only for this backend
    case CV_CAP_PROP_FPS:
        if( value > 0 )
        {
            CAPTUREPARMS params;
            if( capCaptureGetSetup(capWnd, &params, sizeof(params)) )
            {
                // Driver expresses frame rate as microseconds per frame.
                params.dwRequestMicroSecPerFrame = cvRound(1e6/value);
                return capCaptureSetSetup(capWnd, &params, sizeof(params)) == TRUE;
            }
        }
        break;
    default:
        break;
    }
    if ( handledSize )
    {
        // If both width and height are set then change frame size.
        if( width > 0 && height > 0 )
        {
            const DWORD size = capGetVideoFormatSize(capWnd);
            if( size == 0 )
                return false;
            // new[] throws on allocation failure; no null check is needed.
            unsigned char *pbi = new unsigned char[size];
            if( capGetVideoFormat(capWnd, pbi, size) != size )
            {
                delete []pbi;
                return false;
            }
            BITMAPINFOHEADER& vfmt = ((BITMAPINFO*)pbi)->bmiHeader;
            bool success = true;
            if( width != vfmt.biWidth || height != vfmt.biHeight )
            {
                // Change frame size.
                vfmt.biWidth = width;
                vfmt.biHeight = height;
                // Rows are padded to DWORD (4-byte) boundaries in a DIB.
                vfmt.biSizeImage = height * ((width * vfmt.biBitCount + 31) / 32) * 4;
                vfmt.biCompression = BI_RGB;
                success = capSetVideoFormat(capWnd, pbi, size) == TRUE;
            }
            if( success )
            {
                // Adjust capture window size.
                CAPSTATUS status = {};
                capGetStatus(capWnd, &status, sizeof(status));
                ::SetWindowPos(capWnd, NULL, 0, 0, status.uiImageWidth, status.uiImageHeight, SWP_NOZORDER|SWP_NOMOVE);
                // Store frame size.
                widthSet = width;
                heightSet = height;
            }
            delete []pbi;
            // Reset the pending request regardless of outcome.
            width = height = -1;
            return success;
        }
        // Only one dimension set so far; wait for the other.
        return true;
    }
    return false;
}
// Factory: creates and opens a VFW camera capture for the given device
// index. Returns NULL (and frees the object) when opening fails.
CvCapture* cvCreateCameraCapture_VFW( int index )
{
    CvCaptureCAM_VFW* result = new CvCaptureCAM_VFW;
    if( !result->open( index ) )
    {
        delete result;
        return 0;
    }
    return result;
}
/*************************** writing AVIs ******************************/
// AVI video writer built on the legacy Video for Windows AVIFile API.
class CvVideoWriter_VFW : public CvVideoWriter
{
public:
    CvVideoWriter_VFW()
    {
        // AVIFile is COM-based; initialize COM for this thread first.
        CoInitialize(NULL);
        init();
    }
    virtual ~CvVideoWriter_VFW()
    {
        close();
        CoUninitialize();
    }
    // Opens `filename` for writing; stream creation is deferred until the
    // first frame when frameSize is not yet known (non-positive).
    virtual bool open( const char* filename, int fourcc,
        double fps, CvSize frameSize, bool isColor );
    virtual void close();
    virtual bool writeFrame( const IplImage* );
    int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_VFW; }
protected:
    void init();                                           // reset members to the closed state
    bool createStreams( CvSize frameSize, bool isColor );  // create AVI streams + codec
    PAVIFILE avifile;        // open AVI file handle (0 when closed)
    PAVISTREAM compressed;   // compressed stream that frames are written to
    PAVISTREAM uncompressed; // raw stream wrapped by `compressed`
    double fps;              // requested frame rate
    IplImage* tempFrame;     // scratch image for channel/origin conversion
    long pos;                // index of the next frame to write
    int fourcc;              // codec FOURCC (-1 = ask user, 0 = uncompressed)
};
// Resets every member to its "no file open" default.
void CvVideoWriter_VFW::init()
{
    avifile = 0;
    uncompressed = 0;
    compressed = 0;
    fps = 0;
    tempFrame = 0;
    pos = 0;
    fourcc = 0;
}
// Releases the AVI streams, then the file, then the scratch image,
// and finally resets all members via init(). Safe to call repeatedly.
void CvVideoWriter_VFW::close()
{
    if( uncompressed != 0 )
        AVIStreamRelease( uncompressed );
    if( compressed != 0 )
        AVIStreamRelease( compressed );
    if( avifile != 0 )
        AVIFileRelease( avifile );
    cvReleaseImage( &tempFrame );
    init();
}
// philipg. Made this code capable of writing 8bpp gray scale bitmaps
// BITMAPINFO variant with storage for a full 256-entry color table,
// required when describing 8bpp (palettized grayscale) frames.
struct BITMAPINFO_8Bit
{
    BITMAPINFOHEADER bmiHeader;
    RGBQUAD bmiColors[256];
};
// Opens (creating if needed) the output AVI file and stores the requested
// codec and frame rate. When a valid frame size is already known, the AVI
// streams are created immediately; otherwise creation is deferred to the
// first writeFrame() call. Returns false and cleans up on any failure.
bool CvVideoWriter_VFW::open( const char* filename, int _fourcc, double _fps, CvSize frameSize, bool isColor )
{
    close();
    icvInitCapture_VFW();
    if( AVIFileOpen( &avifile, filename, OF_CREATE | OF_WRITE, 0 ) != AVIERR_OK )
        return false;
    fourcc = _fourcc;
    fps = _fps;
    const bool sizeKnown = frameSize.width > 0 && frameSize.height > 0;
    if( sizeKnown && !createStreams( frameSize, isColor ) )
    {
        close();
        return false;
    }
    return true;
}
// Creates the uncompressed AVI stream and wraps it with a compressed stream
// using the configured FOURCC. With fourcc == -1 the standard codec-selection
// dialog (AVISaveOptions) is shown instead. Also allocates `tempFrame` used
// by writeFrame() for layout conversion. Returns true on success.
//
// Fix vs. original: removed the no-op self-assignments `fps = fps;` and
// `frameSize = frameSize;` in the success branch — both assigned a value to
// itself and had no effect.
bool CvVideoWriter_VFW::createStreams( CvSize frameSize, bool isColor )
{
    if( !avifile )
        return false;

    // Bitmap format: 24bpp for color, 8bpp with a grayscale palette otherwise.
    BITMAPINFO_8Bit bmih;
    bmih.bmiHeader = icvBitmapHeader( frameSize.width, frameSize.height, isColor ? 24 : 8 );
    for( int i = 0; i < 256; i++ )
    {
        bmih.bmiColors[i].rgbBlue = (BYTE)i;
        bmih.bmiColors[i].rgbGreen = (BYTE)i;
        bmih.bmiColors[i].rgbRed = (BYTE)i;
        bmih.bmiColors[i].rgbReserved = 0;
    }

    AVISTREAMINFO aviinfo;
    memset( &aviinfo, 0, sizeof(aviinfo));
    aviinfo.fccType = streamtypeVIDEO;
    aviinfo.fccHandler = 0;
    // use highest possible accuracy for dwRate/dwScale
    aviinfo.dwScale = (DWORD)((double)0x7FFFFFFF / fps);
    aviinfo.dwRate = cvRound(fps * aviinfo.dwScale);
    aviinfo.rcFrame.top = aviinfo.rcFrame.left = 0;
    aviinfo.rcFrame.right = frameSize.width;
    aviinfo.rcFrame.bottom = frameSize.height;

    if( AVIFileCreateStream( avifile, &uncompressed, &aviinfo ) == AVIERR_OK )
    {
        AVICOMPRESSOPTIONS copts, *pcopts = &copts;
        copts.fccType = streamtypeVIDEO;
        copts.fccHandler = fourcc != -1 ? fourcc : 0;
        copts.dwKeyFrameEvery = 1;
        copts.dwQuality = 10000;
        copts.dwBytesPerSecond = 0;
        copts.dwFlags = AVICOMPRESSF_VALID;
        copts.lpFormat = &bmih;
        // Grayscale output must ship the palette; color needs only the header.
        copts.cbFormat = (isColor ? sizeof(BITMAPINFOHEADER) : sizeof(bmih));
        copts.lpParms = 0;
        copts.cbParms = 0;
        copts.dwInterleaveEvery = 0;
        if( fourcc != -1 || AVISaveOptions( 0, 0, 1, &uncompressed, &pcopts ) == TRUE )
        {
            if( AVIMakeCompressedStream( &compressed, uncompressed, pcopts, 0 ) == AVIERR_OK &&
                AVIStreamSetFormat( compressed, 0, &bmih, sizeof(bmih)) == AVIERR_OK )
            {
                // Remember the codec actually chosen (may differ when the
                // user picked one via the dialog).
                fourcc = (int)copts.fccHandler;
                tempFrame = cvCreateImage( frameSize, 8, (isColor ? 3 : 1) );
                return true;
            }
        }
    }
    return false;
}
// Appends one frame to the AVI stream. Creates the streams lazily on the
// first call (when open() deferred creation), converts the input into
// `tempFrame` when its channel count, depth, origin, or row stride differ
// from what AVIStreamWrite expects, and writes the pixel data.
// Returns true when AVIStreamWrite reports success.
bool CvVideoWriter_VFW::writeFrame( const IplImage* image )
{
    bool result = false;
    CV_FUNCNAME( "CvVideoWriter_VFW::writeFrame" );
    __BEGIN__;
    if( !image )
        EXIT;
    // Lazy stream creation: frame size was unknown at open() time.
    if( !compressed && !createStreams( cvGetSize(image), image->nChannels > 1 ))
        EXIT;
    if( image->width != tempFrame->width || image->height != tempFrame->height )
        CV_ERROR( CV_StsUnmatchedSizes,
            "image size is different from the currently set frame size" );
    // Convert/flip into tempFrame when the input layout differs from the
    // stream format (channels, depth, top-down origin, or non-4-byte-aligned
    // row stride).
    if( image->nChannels != tempFrame->nChannels ||
        image->depth != tempFrame->depth ||
        image->origin == 0 ||
        image->widthStep != cvAlign(image->width*image->nChannels*((image->depth & 255)/8), 4))
    {
        cvConvertImage( image, tempFrame, image->origin == 0 ? CV_CVTIMG_FLIP : 0 );
        image = (const IplImage*)tempFrame;
    }
    // Every frame is written as a keyframe at the next stream position.
    result = AVIStreamWrite( compressed, pos++, 1, image->imageData,
        image->imageSize, AVIIF_KEYFRAME, 0, 0 ) == AVIERR_OK;
    __END__;
    return result;
}
// Factory: creates and opens a VFW AVI writer. Returns NULL (and frees
// the object) when the file cannot be opened.
CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
                                        double fps, CvSize frameSize, int isColor )
{
    CvVideoWriter_VFW* result = new CvVideoWriter_VFW;
    if( !result->open( filename, fourcc, fps, frameSize, isColor != 0 ) )
    {
        delete result;
        return 0;
    }
    return result;
}
...@@ -93,7 +93,7 @@ struct CvCapture ...@@ -93,7 +93,7 @@ struct CvCapture
virtual bool setProperty(int, double) { return 0; } virtual bool setProperty(int, double) { return 0; }
virtual bool grabFrame() { return true; } virtual bool grabFrame() { return true; }
virtual IplImage* retrieveFrame(int) { return 0; } virtual IplImage* retrieveFrame(int) { return 0; }
virtual int getCaptureDomain() { return cv::CAP_ANY; } // Return the type of the capture object: CAP_VFW, etc... virtual int getCaptureDomain() { return cv::CAP_ANY; } // Return the type of the capture object: CAP_DSHOW, etc...
}; };
/*************************** CvVideoWriter structure ****************************/ /*************************** CvVideoWriter structure ****************************/
...@@ -107,18 +107,12 @@ struct CvVideoWriter ...@@ -107,18 +107,12 @@ struct CvVideoWriter
CvCapture * cvCreateCameraCapture_V4L( int index ); CvCapture * cvCreateCameraCapture_V4L( int index );
CvCapture * cvCreateCameraCapture_V4L( const char* deviceName ); CvCapture * cvCreateCameraCapture_V4L( const char* deviceName );
CvCapture * cvCreateCameraCapture_DC1394( int index );
CvCapture * cvCreateCameraCapture_DC1394_2( int index ); CvCapture * cvCreateCameraCapture_DC1394_2( int index );
CvCapture* cvCreateCameraCapture_MIL( int index ); CvCapture* cvCreateCameraCapture_MIL( int index );
CvCapture* cvCreateCameraCapture_Giganetix( int index ); CvCapture* cvCreateCameraCapture_Giganetix( int index );
CvCapture * cvCreateCameraCapture_CMU( int index );
CvCapture* cvCreateFileCapture_Win32( const char* filename ); CvCapture* cvCreateFileCapture_Win32( const char* filename );
CvCapture* cvCreateCameraCapture_VFW( int index );
CvCapture* cvCreateFileCapture_VFW( const char* filename );
CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc, CvVideoWriter* cvCreateVideoWriter_Win32( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color ); double fps, CvSize frameSize, int is_color );
CvVideoWriter* cvCreateVideoWriter_VFW( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color );
CvCapture* cvCreateCameraCapture_DShow( int index ); CvCapture* cvCreateCameraCapture_DShow( int index );
CvCapture* cvCreateCameraCapture_OpenNI( int index ); CvCapture* cvCreateCameraCapture_OpenNI( int index );
CvCapture* cvCreateCameraCapture_OpenNI2( int index ); CvCapture* cvCreateCameraCapture_OpenNI2( int index );
...@@ -150,7 +144,6 @@ CvVideoWriter* cvCreateVideoWriter_AVFoundation( const char* filename, int fourc ...@@ -150,7 +144,6 @@ CvVideoWriter* cvCreateVideoWriter_AVFoundation( const char* filename, int fourc
double fps, CvSize frameSize, int is_color ); double fps, CvSize frameSize, int is_color );
CvCapture * cvCreateCameraCapture_Unicap (const int index);
CvCapture * cvCreateCameraCapture_PvAPI (const int index); CvCapture * cvCreateCameraCapture_PvAPI (const int index);
CvVideoWriter* cvCreateVideoWriter_GStreamer( const char* filename, int fourcc, CvVideoWriter* cvCreateVideoWriter_GStreamer( const char* filename, int fourcc,
double fps, CvSize frameSize, int is_color ); double fps, CvSize frameSize, int is_color );
...@@ -167,7 +160,7 @@ namespace cv ...@@ -167,7 +160,7 @@ namespace cv
virtual bool grabFrame() = 0; virtual bool grabFrame() = 0;
virtual bool retrieveFrame(int, OutputArray) = 0; virtual bool retrieveFrame(int, OutputArray) = 0;
virtual bool isOpened() const = 0; virtual bool isOpened() const = 0;
virtual int getCaptureDomain() { return CAP_ANY; } // Return the type of the capture object: CAP_VFW, etc... virtual int getCaptureDomain() { return CAP_ANY; } // Return the type of the capture object: CAP_DSHOW, etc...
}; };
class IVideoWriter class IVideoWriter
......
...@@ -44,10 +44,10 @@ namespace { ...@@ -44,10 +44,10 @@ namespace {
/** Ordering guidelines: /** Ordering guidelines:
- modern optimized, multi-platform libraries: ffmpeg, gstreamer, Media SDK - modern optimized, multi-platform libraries: ffmpeg, gstreamer, Media SDK
- platform specific universal SDK: WINRT, QTKIT/AVFOUNDATION, MSMF/VFW/DSHOW, V4L/V4L2 - platform specific universal SDK: WINRT, AVFOUNDATION, MSMF/DSHOW, V4L/V4L2
- RGB-D: OpenNI/OpenNI2, INTELPERC/REALSENSE - RGB-D: OpenNI/OpenNI2, INTELPERC/REALSENSE
- special OpenCV (file-based): "images", "mjpeg" - special OpenCV (file-based): "images", "mjpeg"
- special camera SDKs, including stereo: other special SDKs: FIREWIRE/1394, XIMEA/ARAVIS/GIGANETIX/PVAPI(GigE), UNICAP - special camera SDKs, including stereo: other special SDKs: FIREWIRE/1394, XIMEA/ARAVIS/GIGANETIX/PVAPI(GigE)
- other: XINE, gphoto2, etc - other: XINE, gphoto2, etc
*/ */
static const struct VideoBackendInfo builtin_backends[] = static const struct VideoBackendInfo builtin_backends[] =
...@@ -64,9 +64,6 @@ static const struct VideoBackendInfo builtin_backends[] = ...@@ -64,9 +64,6 @@ static const struct VideoBackendInfo builtin_backends[] =
// Apple platform // Apple platform
#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT)
DECLARE_BACKEND(CAP_QT, "QUICKTIME", MODE_CAPTURE_ALL | MODE_WRITER),
#endif
#ifdef HAVE_AVFOUNDATION #ifdef HAVE_AVFOUNDATION
DECLARE_BACKEND(CAP_AVFOUNDATION, "AVFOUNDATION", MODE_CAPTURE_ALL | MODE_WRITER), DECLARE_BACKEND(CAP_AVFOUNDATION, "AVFOUNDATION", MODE_CAPTURE_ALL | MODE_WRITER),
#endif #endif
...@@ -81,9 +78,6 @@ static const struct VideoBackendInfo builtin_backends[] = ...@@ -81,9 +78,6 @@ static const struct VideoBackendInfo builtin_backends[] =
#ifdef HAVE_DSHOW #ifdef HAVE_DSHOW
DECLARE_BACKEND(CAP_DSHOW, "DSHOW", MODE_CAPTURE_BY_INDEX), DECLARE_BACKEND(CAP_DSHOW, "DSHOW", MODE_CAPTURE_BY_INDEX),
#endif #endif
#ifdef HAVE_VFW
DECLARE_BACKEND(CAP_VFW, "VFW", MODE_CAPTURE_ALL | MODE_WRITER),
#endif
// Linux, some Unix // Linux, some Unix
#if defined HAVE_CAMV4L2 #if defined HAVE_CAMV4L2
...@@ -111,7 +105,7 @@ static const struct VideoBackendInfo builtin_backends[] = ...@@ -111,7 +105,7 @@ static const struct VideoBackendInfo builtin_backends[] =
DECLARE_BACKEND(CAP_OPENCV_MJPEG, "CV_MJPEG", MODE_CAPTURE_BY_FILENAME | MODE_WRITER), DECLARE_BACKEND(CAP_OPENCV_MJPEG, "CV_MJPEG", MODE_CAPTURE_BY_FILENAME | MODE_WRITER),
// special interfaces / stereo cameras / other SDKs // special interfaces / stereo cameras / other SDKs
#if defined(HAVE_DC1394_2) || defined(HAVE_DC1394) || defined(HAVE_CMU1394) #if defined(HAVE_DC1394_2)
DECLARE_BACKEND(CAP_FIREWIRE, "FIREWIRE", MODE_CAPTURE_BY_INDEX), DECLARE_BACKEND(CAP_FIREWIRE, "FIREWIRE", MODE_CAPTURE_BY_INDEX),
#endif #endif
// GigE // GigE
...@@ -127,9 +121,6 @@ static const struct VideoBackendInfo builtin_backends[] = ...@@ -127,9 +121,6 @@ static const struct VideoBackendInfo builtin_backends[] =
#ifdef HAVE_ARAVIS_API #ifdef HAVE_ARAVIS_API
DECLARE_BACKEND(CAP_ARAVIS, "ARAVIS", MODE_CAPTURE_BY_INDEX), DECLARE_BACKEND(CAP_ARAVIS, "ARAVIS", MODE_CAPTURE_BY_INDEX),
#endif #endif
#ifdef HAVE_UNICAP
DECLARE_BACKEND(CAP_UNICAP, "UNICAP", MODE_CAPTURE_BY_INDEX),
#endif
#ifdef HAVE_GPHOTO2 #ifdef HAVE_GPHOTO2
DECLARE_BACKEND(CAP_GPHOTO2, "GPHOTO2", MODE_CAPTURE_ALL), DECLARE_BACKEND(CAP_GPHOTO2, "GPHOTO2", MODE_CAPTURE_ALL),
...@@ -443,23 +434,9 @@ void VideoCapture_create(CvCapture*& capture, Ptr<IVideoCapture>& icap, VideoCap ...@@ -443,23 +434,9 @@ void VideoCapture_create(CvCapture*& capture, Ptr<IVideoCapture>& icap, VideoCap
TRY_OPEN(createGPhoto2Capture(index)); TRY_OPEN(createGPhoto2Capture(index));
break; break;
#endif #endif
case CAP_VFW: // or CAP_V4L or CAP_V4L2
#ifdef HAVE_VFW
TRY_OPEN_LEGACY(cvCreateCameraCapture_VFW(index))
#endif
#if defined HAVE_LIBV4L || defined HAVE_CAMV4L || defined HAVE_CAMV4L2 || defined HAVE_VIDEOIO
TRY_OPEN_LEGACY(cvCreateCameraCapture_V4L(index))
#endif
break;
case CAP_FIREWIRE: case CAP_FIREWIRE:
#ifdef HAVE_DC1394_2 #ifdef HAVE_DC1394_2
TRY_OPEN_LEGACY(cvCreateCameraCapture_DC1394_2(index)) TRY_OPEN_LEGACY(cvCreateCameraCapture_DC1394_2(index))
#endif
#ifdef HAVE_DC1394
TRY_OPEN_LEGACY(cvCreateCameraCapture_DC1394(index))
#endif
#ifdef HAVE_CMU1394
TRY_OPEN_LEGACY(cvCreateCameraCapture_CMU(index))
#endif #endif
break; // CAP_FIREWIRE break; // CAP_FIREWIRE
#ifdef HAVE_MIL #ifdef HAVE_MIL
...@@ -467,16 +444,6 @@ void VideoCapture_create(CvCapture*& capture, Ptr<IVideoCapture>& icap, VideoCap ...@@ -467,16 +444,6 @@ void VideoCapture_create(CvCapture*& capture, Ptr<IVideoCapture>& icap, VideoCap
TRY_OPEN_LEGACY(cvCreateCameraCapture_MIL(index)) TRY_OPEN_LEGACY(cvCreateCameraCapture_MIL(index))
break; break;
#endif #endif
#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT)
case CAP_QT:
TRY_OPEN_LEGACY(cvCreateCameraCapture_QT(index))
break;
#endif
#ifdef HAVE_UNICAP
case CAP_UNICAP:
TRY_OPEN_LEGACY(cvCreateCameraCapture_Unicap(index))
break;
#endif
#ifdef HAVE_PVAPI #ifdef HAVE_PVAPI
case CAP_PVAPI: case CAP_PVAPI:
TRY_OPEN_LEGACY(cvCreateCameraCapture_PvAPI(index)) TRY_OPEN_LEGACY(cvCreateCameraCapture_PvAPI(index))
...@@ -531,18 +498,6 @@ void VideoCapture_create(CvCapture*& capture, Ptr<IVideoCapture>& icap, VideoCap ...@@ -531,18 +498,6 @@ void VideoCapture_create(CvCapture*& capture, Ptr<IVideoCapture>& icap, VideoCap
break; break;
#endif #endif
#ifdef HAVE_VFW
case CAP_VFW:
TRY_OPEN_LEGACY(cvCreateFileCapture_VFW(filename.c_str()))
break;
#endif
#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT)
case CAP_QT:
TRY_OPEN_LEGACY(cvCreateFileCapture_QT(filename.c_str()))
break;
#endif
#ifdef HAVE_AVFOUNDATION #ifdef HAVE_AVFOUNDATION
case CAP_AVFOUNDATION: case CAP_AVFOUNDATION:
TRY_OPEN_LEGACY(cvCreateFileCapture_AVFoundation(filename.c_str())) TRY_OPEN_LEGACY(cvCreateFileCapture_AVFoundation(filename.c_str()))
...@@ -664,21 +619,11 @@ void VideoWriter_create(CvVideoWriter*& writer, Ptr<IVideoWriter>& iwriter, Vide ...@@ -664,21 +619,11 @@ void VideoWriter_create(CvVideoWriter*& writer, Ptr<IVideoWriter>& iwriter, Vide
CREATE_WRITER(VideoWriter_IntelMFX::create(filename, fourcc, fps, frameSize, isColor)); CREATE_WRITER(VideoWriter_IntelMFX::create(filename, fourcc, fps, frameSize, isColor));
break; break;
#endif #endif
#ifdef HAVE_VFW
case CAP_VFW:
CREATE_WRITER_LEGACY(cvCreateVideoWriter_VFW(filename.c_str(), fourcc, fps, cvSize(frameSize), isColor))
break;
#endif
#ifdef HAVE_AVFOUNDATION #ifdef HAVE_AVFOUNDATION
case CAP_AVFOUNDATION: case CAP_AVFOUNDATION:
CREATE_WRITER_LEGACY(cvCreateVideoWriter_AVFoundation(filename.c_str(), fourcc, fps, cvSize(frameSize), isColor)) CREATE_WRITER_LEGACY(cvCreateVideoWriter_AVFoundation(filename.c_str(), fourcc, fps, cvSize(frameSize), isColor))
break; break;
#endif #endif
#if defined(HAVE_QUICKTIME) || defined(HAVE_QTKIT)
case(CAP_QT):
CREATE_WRITER_LEGACY(cvCreateVideoWriter_QT(filename.c_str(), fourcc, fps, cvSize(frameSize), isColor))
break;
#endif
#ifdef HAVE_GSTREAMER #ifdef HAVE_GSTREAMER
case CAP_GSTREAMER: case CAP_GSTREAMER:
CREATE_WRITER_LEGACY(cvCreateVideoWriter_GStreamer (filename.c_str(), fourcc, fps, cvSize(frameSize), isColor)) CREATE_WRITER_LEGACY(cvCreateVideoWriter_GStreamer (filename.c_str(), fourcc, fps, cvSize(frameSize), isColor))
......
...@@ -316,11 +316,6 @@ static const VideoCaptureAPIs backend_params[] = { ...@@ -316,11 +316,6 @@ static const VideoCaptureAPIs backend_params[] = {
CAP_MSMF, CAP_MSMF,
#endif #endif
// TODO: Broken?
//#ifdef HAVE_VFW
// CAP_VFW,
//#endif
#ifdef HAVE_GSTREAMER #ifdef HAVE_GSTREAMER
CAP_GSTREAMER, CAP_GSTREAMER,
#endif #endif
...@@ -389,18 +384,6 @@ static Ext_Fourcc_PSNR synthetic_params[] = { ...@@ -389,18 +384,6 @@ static Ext_Fourcc_PSNR synthetic_params[] = {
makeParam("mov", "H264", 30.f, CAP_MSMF), makeParam("mov", "H264", 30.f, CAP_MSMF),
#endif #endif
// TODO: Broken?
//#ifdef HAVE_VFW
//#if !defined(_M_ARM)
// makeParam("wmv", "WMV1", 30.f, CAP_VFW),
// makeParam("wmv", "WMV2", 30.f, CAP_VFW),
//#endif
// makeParam("wmv", "WMV3", 30.f, CAP_VFW),
// makeParam("wmv", "WVC1", 30.f, CAP_VFW),
// makeParam("avi", "H264", 30.f, CAP_VFW),
// makeParam("avi", "MJPG", 30.f, CAP_VFW),
//#endif
#ifdef HAVE_QUICKTIME #ifdef HAVE_QUICKTIME
makeParam("mov", "mp4v", 30.f, CAP_QT), makeParam("mov", "mp4v", 30.f, CAP_QT),
makeParam("avi", "XVID", 30.f, CAP_QT), makeParam("avi", "XVID", 30.f, CAP_QT),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment