Commit 318cba4c authored by Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents cf932173 9d14c0b3
......@@ -143,6 +143,7 @@ CV_INTRIN_DEF_TYPE_TRAITS(double, int64, uint64, double, void, void, double, 2);
#ifndef CV_DOXYGEN
#ifndef CV_CPU_OPTIMIZATION_HAL_NAMESPACE
#ifdef CV_CPU_DISPATCH_MODE
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE __CV_CAT(hal_, CV_CPU_DISPATCH_MODE)
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace __CV_CAT(hal_, CV_CPU_DISPATCH_MODE) {
......@@ -152,6 +153,7 @@ CV_INTRIN_DEF_TYPE_TRAITS(double, int64, uint64, double, void, void, double, 2);
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace hal_baseline {
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END }
#endif
#endif // CV_CPU_OPTIMIZATION_HAL_NAMESPACE
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
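
Under dispatch mode the HAL namespace name is pasted from the dispatch target, otherwise everything falls into hal_baseline; the begin/end pair together with the "using namespace" in the next hunk is what brings the selected ISA's universal intrinsics into scope. A minimal sketch of the expansion, assuming a hypothetical dispatch target AVX2:

// With CV_CPU_DISPATCH_MODE defined as AVX2, the macros above expand to:
//   CV_CPU_OPTIMIZATION_HAL_NAMESPACE        -> hal_AVX2
//   CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN  -> namespace hal_AVX2 {
//   CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END    -> }
// Without a dispatch mode the same code compiles into cv::hal_baseline.
namespace cv {
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
// ISA-specific universal intrinsics are declared here
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
using namespace CV_CPU_OPTIMIZATION_HAL_NAMESPACE;
}
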
......@@ -168,29 +170,29 @@ using namespace CV_CPU_OPTIMIZATION_HAL_NAMESPACE;
# undef CV_MSA
#endif
#if CV_SSE2 || CV_NEON || CV_VSX || CV_MSA || CV_WASM_SIMD
#if (CV_SSE2 || CV_NEON || CV_VSX || CV_MSA || CV_WASM_SIMD) && !defined(CV_FORCE_SIMD128_CPP)
#define CV__SIMD_FORWARD 128
#include "opencv2/core/hal/intrin_forward.hpp"
#endif
#if CV_SSE2
#if CV_SSE2 && !defined(CV_FORCE_SIMD128_CPP)
#include "opencv2/core/hal/intrin_sse_em.hpp"
#include "opencv2/core/hal/intrin_sse.hpp"
#elif CV_NEON
#elif CV_NEON && !defined(CV_FORCE_SIMD128_CPP)
#include "opencv2/core/hal/intrin_neon.hpp"
#elif CV_VSX
#elif CV_VSX && !defined(CV_FORCE_SIMD128_CPP)
#include "opencv2/core/hal/intrin_vsx.hpp"
#elif CV_MSA
#elif CV_MSA && !defined(CV_FORCE_SIMD128_CPP)
#include "opencv2/core/hal/intrin_msa.hpp"
#elif CV_WASM_SIMD
#elif CV_WASM_SIMD && !defined(CV_FORCE_SIMD128_CPP)
#include "opencv2/core/hal/intrin_wasm.hpp"
#else
......
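
The new !defined(CV_FORCE_SIMD128_CPP) guards let a translation unit opt out of every platform-specific SIMD128 header and compile against the portable C++ v_reg fallback instead, which is what the emulator test added below relies on. A minimal standalone sketch of that usage (assuming only that the OpenCV headers are on the include path):

#include <cstdio>
#define CV_FORCE_SIMD128_CPP            // skip the SSE/NEON/VSX/MSA/WASM headers
#include "opencv2/core/hal/intrin.hpp"  // v_float32x4 now maps to v_reg<float, 4>

int main()
{
    cv::v_float32x4 a = cv::v_setall_f32(2.0f);
    cv::v_float32x4 b = cv::v_setall_f32(3.0f);
    // Every operation below goes through the scalar C++ emulation.
    std::printf("%.1f\n", cv::v_reduce_sum(a * b));  // prints 24.0
    return 0;
}
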
......@@ -365,6 +365,13 @@ template<typename _Tp, int n> struct v_reg
return c;
}
v_reg& operator=(const v_reg<_Tp, n> & r)
{
for( int i = 0; i < n; i++ )
s[i] = r.s[i];
return *this;
}
_Tp s[n];
//! @endcond
};
......@@ -623,7 +630,7 @@ template<typename _Tp, int n>
inline v_reg<typename V_TypeTraits<_Tp>::abs_type, n> v_popcount(const v_reg<_Tp, n>& a)
{
v_reg<typename V_TypeTraits<_Tp>::abs_type, n> b = v_reg<typename V_TypeTraits<_Tp>::abs_type, n>::zero();
for (int i = 0; i < (int)(n*sizeof(_Tp)); i++)
for (int i = 0; i < n*(int)sizeof(_Tp); i++)
b.s[i/sizeof(_Tp)] += popCountTable[v_reinterpret_as_u8(a).s[i]];
return b;
}
......
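
v_popcount walks the register byte by byte through popCountTable and accumulates the bit count into each lane's absolute-value type; the change only moves the int cast so the loop bound is computed in signed arithmetic instead of promoting n to size_t and casting the product back. A scalar sketch of the same per-lane computation (popcount8 is a stand-in for the table lookup, not an OpenCV function):

#include <cstdint>

// Stand-in for popCountTable[b]: number of set bits in one byte.
static inline uint8_t popcount8(uint8_t v)
{
    uint8_t c = 0;
    for (; v; v &= (uint8_t)(v - 1))  // clear the lowest set bit each pass
        ++c;
    return c;
}

// Scalar equivalent of v_popcount for a 4-lane uint32 "register".
void popcount_u32x4(const uint32_t a[4], uint32_t b[4])
{
    for (int lane = 0; lane < 4; lane++)
    {
        b[lane] = 0;
        const uint8_t* bytes = (const uint8_t*)&a[lane];
        for (int j = 0; j < (int)sizeof(uint32_t); j++)  // signed bound, as in the fix
            b[lane] += popcount8(bytes[j]);
    }
}
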
......@@ -9,7 +9,7 @@ typedef cuda::Event::CreateFlags Event_CreateFlags;
template<> struct pyopencvVecConverter<cuda::GpuMat>
{
static bool to(PyObject* obj, std::vector<cuda::GpuMat>& value, const ArgInfo info)
static bool to(PyObject* obj, std::vector<cuda::GpuMat>& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
......
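
The Python converter entry points in this commit change from "const char* name" (or a by-value ArgInfo) to "const ArgInfo& info", so argument metadata is passed by reference and the same signature serves both scalar arguments and the generated vector converters. A minimal sketch of a specialization written against the new convention (MyType and convert_somehow are placeholders, not OpenCV symbols):

// Sketch of a pyopencv_to specialization under the new const ArgInfo& convention.
// ArgInfo carries the parameter name used in error messages.
template<>
bool pyopencv_to(PyObject* src, MyType& dst, const ArgInfo& info)
{
    if (!src || src == Py_None)
        return true;                    // keep the default value
    if (!convert_somehow(src, dst))     // placeholder for the real conversion
    {
        failmsg("Expected MyType for argument '%s'", info.name);
        return false;
    }
    return true;
}
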
......@@ -4,6 +4,19 @@
#include "test_precomp.hpp"
#include "test_intrin128.simd.hpp"
// see "test_intrin_emulator.cpp"
// see "opencv2/core/private/cv_cpu_include_simd_declarations.hpp"
#define CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
#undef CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
#undef CV_CPU_OPTIMIZATION_NAMESPACE_END
#define CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN namespace opt_EMULATOR_CPP {
#define CV_CPU_OPTIMIZATION_NAMESPACE_END }
#include "test_intrin128.simd.hpp"
#undef CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
#undef CV_CPU_OPTIMIZATION_NAMESPACE_END
#undef CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
#include "test_intrin128.simd_declarations.hpp"
#undef CV_CPU_DISPATCH_MODES_ALL
......@@ -22,6 +35,8 @@
namespace opencv_test { namespace hal {
#define CV_CPU_CALL_CPP_EMULATOR_(fn, args) return (opt_EMULATOR_CPP::fn args)
#define CV_CPU_CALL_BASELINE_(fn, args) CV_CPU_CALL_BASELINE(fn, args)
#define DISPATCH_SIMD128(fn, cpu_opt) do { \
......@@ -53,6 +68,8 @@ TEST(hal_intrin ## simd_size, float64x2_ ## cpu_opt) { DISPATCH_SIMD ## simd_siz
namespace intrin128 {
DEFINE_SIMD_TESTS(128, CPP_EMULATOR)
DEFINE_SIMD_TESTS(128, BASELINE)
#if defined CV_CPU_DISPATCH_COMPILE_SSE2 || defined CV_CPU_BASELINE_COMPILE_SSE2
......
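
The emulator variant reuses the shared test bodies from test_intrin128.simd.hpp but routes them through CV_CPU_CALL_CPP_EMULATOR_, which simply calls the copy compiled inside opt_EMULATOR_CPP (the forced C++ fallback). A conceptual sketch of that routing, assuming the dispatcher pastes the cpu_opt token into a CV_CPU_CALL_<opt>_ invocation (the literal DISPATCH_SIMD128 body is elided above, and test_hal_intrin_uint8 stands for one of the shared test bodies):

// Conceptual sketch only; names mirror the macros defined in this test.
#define CALL_ON(fn, cpu_opt) CV_CPU_CALL_ ## cpu_opt ## _(fn, ())

void run_uint8_on_emulator()
{
    // Expands through CV_CPU_CALL_CPP_EMULATOR_(test_hal_intrin_uint8, ()),
    // i.e. return (opt_EMULATOR_CPP::test_hal_intrin_uint8());
    CALL_ON(test_hal_intrin_uint8, CPP_EMULATOR);
}
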
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
// see "opencv2/core/hal/intrin.hpp"
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE hal_EMULATOR_CPP
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN namespace hal_EMULATOR_CPP {
#define CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END }
// see "opencv2/core/private/cv_cpu_include_simd_declarations.hpp"
//#define CV_CPU_OPTIMIZATION_DECLARATIONS_ONLY
#define CV_FORCE_SIMD128_CPP
#undef CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
#undef CV_CPU_OPTIMIZATION_NAMESPACE_END
#define CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN namespace opt_EMULATOR_CPP {
#define CV_CPU_OPTIMIZATION_NAMESPACE_END }
#include "test_intrin128.simd.hpp"
#undef CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN
#undef CV_CPU_OPTIMIZATION_NAMESPACE_END
#undef CV_CPU_DISPATCH_MODE
#undef CV_FORCE_SIMD128_CPP
// tests implementation is in test_intrin_utils.hpp
......@@ -1383,4 +1383,16 @@ TEST(UMat, testTempObjects_Mat_issue_8693)
EXPECT_EQ(0, cvtest::norm(srcUMat.getMat(ACCESS_READ), srcMat, NORM_INF));
}
TEST(UMat, resize_Mat_issue_13577)
{
// save the current state
bool useOCL = cv::ocl::useOpenCL();
cv::ocl::setUseOpenCL(false);
UMat foo(10, 10, CV_32FC1);
cv::resize(foo, foo, cv::Size(), .5, .5);
cv::ocl::setUseOpenCL(useOCL); // restore state
}
} } // namespace opencv_test::ocl
......@@ -4,9 +4,9 @@ typedef std::vector<dnn::MatShape> vector_MatShape;
typedef std::vector<std::vector<dnn::MatShape> > vector_vector_MatShape;
template<>
bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const char *name)
bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const ArgInfo& info)
{
CV_UNUSED(name);
CV_UNUSED(info);
if (!o || o == Py_None)
return true; //Current state will be used
else if (PyLong_Check(o))
......@@ -36,12 +36,6 @@ bool pyopencv_to(PyObject *o, dnn::DictValue &dv, const char *name)
return false;
}
template<>
bool pyopencv_to(PyObject *o, std::vector<Mat> &blobs, const char *name) //required for Layer::blobs RW
{
return pyopencvVecConverter<Mat>::to(o, blobs, ArgInfo(name, false));
}
template<typename T>
PyObject* pyopencv_from(const dnn::DictValue &dv)
{
......
......@@ -610,7 +610,7 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
{
candidates.push_back(param_pluginPath);
}
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
if (device_name == "CPU" || device_name == "FPGA")
{
std::string suffixes[] = {"_avx2", "_sse4", ""};
......@@ -633,6 +633,7 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
#endif // _WIN32
}
}
#endif
bool found = false;
for (size_t i = 0; i != candidates.size(); ++i)
{
......
......@@ -15,9 +15,9 @@ PyObject* pyopencv_from(const cvflann_flann_distance_t& value)
}
template<>
bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name)
bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const ArgInfo& info)
{
CV_UNUSED(name);
CV_UNUSED(info);
bool ok = true;
PyObject* key = NULL;
PyObject* item = NULL;
......@@ -71,16 +71,16 @@ bool pyopencv_to(PyObject *o, cv::flann::IndexParams& p, const char *name)
}
template<>
bool pyopencv_to(PyObject* obj, cv::flann::SearchParams & value, const char * name)
bool pyopencv_to(PyObject* obj, cv::flann::SearchParams & value, const ArgInfo& info)
{
return pyopencv_to<cv::flann::IndexParams>(obj, value, name);
return pyopencv_to<cv::flann::IndexParams>(obj, value, info);
}
template<>
bool pyopencv_to(PyObject *o, cvflann::flann_distance_t& dist, const char *name)
bool pyopencv_to(PyObject *o, cvflann::flann_distance_t& dist, const ArgInfo& info)
{
int d = (int)dist;
bool ok = pyopencv_to(o, d, name);
bool ok = pyopencv_to(o, d, info);
dist = (cvflann::flann_distance_t)d;
return ok;
}
......
......@@ -3738,6 +3738,11 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize,
CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat() && _src.cols() > 10 && _src.rows() > 10,
ocl_resize(_src, _dst, dsize, inv_scale_x, inv_scale_y, interpolation))
// Fake reference to source. Resolves issue 13577 in case of src == dst.
UMat srcUMat;
if (_src.isUMat())
srcUMat = _src.getUMat();
Mat src = _src.getMat();
_dst.create(dsize, src.type());
Mat dst = _dst.getMat();
......
......@@ -59,7 +59,7 @@ QUnit.test('Detectors', function(assert) {
let akaze = new cv.AKAZE();
akaze.detect(image, kp);
assert.equal(kp.size(), 52, 'AKAZE');
assert.equal(kp.size(), 53, 'AKAZE');
});
QUnit.test('BFMatcher', function(assert) {
......
template<>
bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const char *name)
bool pyopencv_to(PyObject *obj, CvTermCriteria& dst, const ArgInfo& info)
{
CV_UNUSED(name);
CV_UNUSED(info);
if(!obj)
return true;
return PyArg_ParseTuple(obj, "iid", &dst.type, &dst.max_iter, &dst.epsilon) > 0;
}
template<>
bool pyopencv_to(PyObject* obj, CvSlice& r, const char* name)
bool pyopencv_to(PyObject* obj, CvSlice& r, const ArgInfo& info)
{
CV_UNUSED(name);
CV_UNUSED(info);
if(!obj || obj == Py_None)
return true;
if(PyObject_Size(obj) == 0)
......
......@@ -45,7 +45,7 @@ gen_template_func_body = Template("""$code_decl
gen_template_mappable = Template("""
{
${mappable} _src;
if (pyopencv_to(src, _src, name))
if (pyopencv_to(src, _src, info))
{
return cv_mappable_to(_src, dst);
}
......@@ -62,7 +62,7 @@ struct PyOpenCV_Converter< ${cname} >
{
return pyopencv_${name}_Instance(r);
}
static bool to(PyObject* src, ${cname}& dst, const char* name)
static bool to(PyObject* src, ${cname}& dst, const ArgInfo& info)
{
if(!src || src == Py_None)
return true;
......@@ -73,7 +73,7 @@ struct PyOpenCV_Converter< ${cname} >
return true;
}
${mappable_code}
failmsg("Expected ${cname} for argument '%s'", name);
failmsg("Expected ${cname} for argument '%s'", info.name);
return false;
}
};
......@@ -81,7 +81,7 @@ struct PyOpenCV_Converter< ${cname} >
""")
gen_template_map_type_cvt = Template("""
template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const char* name);
template<> bool pyopencv_to(PyObject* src, ${cname}& dst, const ArgInfo& info);
""")
......@@ -89,7 +89,7 @@ gen_template_set_prop_from_map = Template("""
if( PyMapping_HasKeyString(src, (char*)"$propname") )
{
tmp = PyMapping_GetItemString(src, (char*)"$propname");
ok = tmp && pyopencv_to(tmp, dst.$propname);
ok = tmp && pyopencv_to(tmp, dst.$propname, ArgInfo("$propname", false));
Py_DECREF(tmp);
if(!ok) return false;
}""")
......@@ -143,7 +143,7 @@ static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value
PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");
return -1;
}
return pyopencv_to(value, p->v${access}${member}) ? 0 : -1;
return pyopencv_to(value, p->v${access}${member}, ArgInfo("value", false)) ? 0 : -1;
}
""")
......@@ -161,7 +161,7 @@ static int pyopencv_${name}_set_${member}(pyopencv_${name}_t* p, PyObject *value
failmsgp("Incorrect type of object (must be '${name}' or its derivative)");
return -1;
}
return pyopencv_to(value, _self_${access}${member}) ? 0 : -1;
return pyopencv_to(value, _self_${access}${member}, ArgInfo("value", false)) ? 0 : -1;
}
""")
......@@ -238,10 +238,10 @@ class ClassInfo(object):
def gen_map_code(self, codegen):
all_classes = codegen.classes
code = "static bool pyopencv_to(PyObject* src, %s& dst, const char* name)\n{\n PyObject* tmp;\n bool ok;\n" % (self.cname)
code = "static bool pyopencv_to(PyObject* src, %s& dst, const ArgInfo& info)\n{\n PyObject* tmp;\n bool ok;\n" % (self.cname)
code += "".join([gen_template_set_prop_from_map.substitute(propname=p.name,proptype=p.tp) for p in self.props])
if self.base:
code += "\n return pyopencv_to(src, (%s&)dst, name);\n}\n" % all_classes[self.base].cname
code += "\n return pyopencv_to(src, (%s&)dst, info);\n}\n" % all_classes[self.base].cname
else:
code += "\n return true;\n}\n"
return code
......
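
With the template changes above, the generated map-type converter now takes const ArgInfo& info: properties are filled from the Python mapping with their own ArgInfo, and when the class has a base the trailing call forwards info to the base converter. A rough sketch of what gen_map_code would emit for a hypothetical class cv::Foo with a single int property bar and no base (names are illustrative only):

// Illustrative output only; the real code is produced by gen2.py at build time.
static bool pyopencv_to(PyObject* src, cv::Foo& dst, const ArgInfo& info)
{
    PyObject* tmp;
    bool ok;
    if( PyMapping_HasKeyString(src, (char*)"bar") )
    {
        tmp = PyMapping_GetItemString(src, (char*)"bar");
        ok = tmp && pyopencv_to(tmp, dst.bar, ArgInfo("bar", false));
        Py_DECREF(tmp);
        if(!ok) return false;
    }
    // With a base class, the generator instead ends with:
    //   return pyopencv_to(src, (Base&)dst, info);
    return true;
}
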
......@@ -101,13 +101,13 @@ static inline bool getUnicodeString(PyObject * obj, std::string &str)
#define CV_PY_TO_CLASS(TYPE) \
template<> \
bool pyopencv_to(PyObject* dst, TYPE& src, const char* name) \
bool pyopencv_to(PyObject* dst, TYPE& src, const ArgInfo& info) \
{ \
if (!dst || dst == Py_None) \
return true; \
Ptr<TYPE> ptr; \
\
if (!pyopencv_to(dst, ptr, name)) return false; \
if (!pyopencv_to(dst, ptr, info)) return false; \
src = *ptr; \
return true; \
}
......@@ -124,13 +124,13 @@ PyObject* pyopencv_from(const TYPE& src)
#define CV_PY_TO_CLASS_PTR(TYPE) \
template<> \
bool pyopencv_to(PyObject* dst, TYPE*& src, const char* name) \
bool pyopencv_to(PyObject* dst, TYPE*& src, const ArgInfo& info) \
{ \
if (!dst || dst == Py_None) \
return true; \
Ptr<TYPE> ptr; \
\
if (!pyopencv_to(dst, ptr, name)) return false; \
if (!pyopencv_to(dst, ptr, info)) return false; \
src = ptr; \
return true; \
}
......@@ -143,13 +143,13 @@ static PyObject* pyopencv_from(TYPE*& src)
#define CV_PY_TO_ENUM(TYPE) \
template<> \
bool pyopencv_to(PyObject* dst, TYPE& src, const char* name) \
bool pyopencv_to(PyObject* dst, TYPE& src, const ArgInfo& info) \
{ \
if (!dst || dst == Py_None) \
return true; \
int underlying = 0; \
\
if (!pyopencv_to(dst, underlying, name)) return false; \
if (!pyopencv_to(dst, underlying, info)) return false; \
src = static_cast<TYPE>(underlying); \
return true; \
}
......
......@@ -9,7 +9,7 @@ typedef std::vector<detail::CameraParams> vector_CameraParams;
template<> struct pyopencvVecConverter<detail::ImageFeatures>
{
static bool to(PyObject* obj, std::vector<detail::ImageFeatures>& value, const ArgInfo info)
static bool to(PyObject* obj, std::vector<detail::ImageFeatures>& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
......@@ -22,7 +22,7 @@ template<> struct pyopencvVecConverter<detail::ImageFeatures>
template<> struct pyopencvVecConverter<detail::MatchesInfo>
{
static bool to(PyObject* obj, std::vector<detail::MatchesInfo>& value, const ArgInfo info)
static bool to(PyObject* obj, std::vector<detail::MatchesInfo>& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
......@@ -35,7 +35,7 @@ template<> struct pyopencvVecConverter<detail::MatchesInfo>
template<> struct pyopencvVecConverter<detail::CameraParams>
{
static bool to(PyObject* obj, std::vector<detail::CameraParams>& value, const ArgInfo info)
static bool to(PyObject* obj, std::vector<detail::CameraParams>& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
......
......@@ -3,7 +3,7 @@ typedef std::vector<VideoCaptureAPIs> vector_VideoCaptureAPIs;
template<> struct pyopencvVecConverter<cv::VideoCaptureAPIs>
{
static bool to(PyObject* obj, std::vector<cv::VideoCaptureAPIs>& value, const ArgInfo info)
static bool to(PyObject* obj, std::vector<cv::VideoCaptureAPIs>& value, const ArgInfo& info)
{
return pyopencv_to_generic_vec(obj, value, info);
}
......@@ -15,9 +15,9 @@ template<> struct pyopencvVecConverter<cv::VideoCaptureAPIs>
};
template<>
bool pyopencv_to(PyObject *o, std::vector<cv::VideoCaptureAPIs>& apis, const char *name)
bool pyopencv_to(PyObject *o, std::vector<cv::VideoCaptureAPIs>& apis, const ArgInfo& info)
{
return pyopencvVecConverter<cv::VideoCaptureAPIs>::to(o, apis, ArgInfo(name, false));
return pyopencvVecConverter<cv::VideoCaptureAPIs>::to(o, apis, info);
}
#endif // HAVE_OPENCV_VIDEOIO
......@@ -1058,7 +1058,15 @@ bool CvCapture_FFMPEG::processRawPacket()
AVCodecID eVideoCodec = video_st->codec.codec_id;
#endif
const char* filterName = NULL;
if (eVideoCodec == CV_CODEC(CODEC_ID_H264) || eVideoCodec == CV_CODEC(CODEC_ID_H265))
if (eVideoCodec == CV_CODEC(CODEC_ID_H264)
#if LIBAVCODEC_VERSION_MICRO >= 100 \
&& LIBAVCODEC_BUILD >= CALC_FFMPEG_VERSION(57, 24, 102) // FFmpeg 3.0
|| eVideoCodec == CV_CODEC(CODEC_ID_H265)
#elif LIBAVCODEC_VERSION_MICRO < 100 \
&& LIBAVCODEC_BUILD >= CALC_FFMPEG_VERSION(55, 34, 1) // libav v10+
|| eVideoCodec == CV_CODEC(CODEC_ID_HEVC)
#endif
)
{
// check start code prefixed mode (as defined in the Annex B H.264 / H.265 specification)
if (packet.size >= 5
......@@ -1105,7 +1113,7 @@ bool CvCapture_FFMPEG::processRawPacket()
{
if (packet_filtered.data)
{
av_packet_unref(&packet_filtered);
_opencv_ffmpeg_av_packet_unref(&packet_filtered);
}
#if LIBAVFORMAT_BUILD >= CALC_FFMPEG_VERSION(58, 20, 100)
......
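
processRawPacket() only needs the *_mp4toannexb bitstream filter when the stream is not already start-code prefixed; the new condition applies the same check to H.265/HEVC packets, picking CODEC_ID_H265 or CODEC_ID_HEVC depending on whether FFmpeg or libav headers are in use. A minimal sketch of the Annex B start-code test it refers to (a NAL unit is prefixed with 00 00 01 or 00 00 00 01):

#include <cstddef>
#include <cstdint>

// Sketch: a raw H.264/H.265 packet is already Annex B start-code prefixed
// if it begins with 00 00 01 or 00 00 00 01; in that case no
// "*_mp4toannexb" bitstream filter is required.
static bool is_annexb_prefixed(const uint8_t* data, size_t size)
{
    if (size >= 4 && data[0] == 0 && data[1] == 0 && data[2] == 1)
        return true;
    if (size >= 5 && data[0] == 0 && data[1] == 0 && data[2] == 0 && data[3] == 1)
        return true;
    return false;
}
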
......@@ -114,17 +114,18 @@ TEST_P(videoio_container, read)
const string fileNameOut = tempfile(cv::format("test_container_stream.%s", ext_raw.c_str()).c_str());
// Write encoded video read using VideoContainer to tmp file
size_t totalBytes = 0;
{
VideoCapture container(findDataFile(fileName), api);
ASSERT_TRUE(container.isOpened());
if (!container.isOpened())
throw SkipTestException("Video stream is not supported");
if (!container.set(CAP_PROP_FORMAT, -1)) // turn off video decoder (extract stream)
throw SkipTestException("Fetching of RAW video streams is not supported");
ASSERT_EQ(-1.f, container.get(CAP_PROP_FORMAT)); // check
EXPECT_EQ(codec, fourccToString((int)container.get(CAP_PROP_FOURCC)));
EXPECT_EQ(pixelFormat, fourccToString((int)container.get(CAP_PROP_CODEC_PIXEL_FORMAT)));
std::ofstream file(fileNameOut, ios::out | ios::trunc | std::ios::binary);
size_t totalBytes = 0;
std::ofstream file(fileNameOut.c_str(), ios::out | ios::trunc | std::ios::binary);
Mat raw_data;
while (true)
{
......@@ -144,7 +145,7 @@ TEST_P(videoio_container, read)
ASSERT_GE(totalBytes, (size_t)65536) << "Encoded stream is too small";
}
std::cout << "Checking extracted video stream: " << fileNameOut << std::endl;
std::cout << "Checking extracted video stream: " << fileNameOut << " (size: " << totalBytes << " bytes)" << std::endl;
// Check decoded frames read from original media are equal to frames decoded from tmp file
{
......@@ -158,7 +159,7 @@ TEST_P(videoio_container, read)
{
nframes++;
ASSERT_TRUE(capActual.read(actual)) << nframes;
EXPECT_EQ(0, cvtest::norm(actual, reference, NORM_INF)) << nframes << " err=" << ++n_err;
EXPECT_EQ(0, cvtest::norm(actual, reference, NORM_INF)) << "frame=" << nframes << " err=" << ++n_err;
}
ASSERT_GT(nframes, 0);
}
......@@ -168,12 +169,13 @@ TEST_P(videoio_container, read)
const videoio_container_params_t videoio_container_params[] =
{
make_tuple(CAP_FFMPEG, "video/big_buck_bunny", "h264", "h264", "h264", "I420"),
make_tuple(CAP_FFMPEG, "video/big_buck_bunny", "h265", "h265", "hevc", "I420"),
//make_tuple(CAP_FFMPEG, "video/big_buck_bunny", "h264.mkv", "mkv.h264", "h264", "I420"),
//make_tuple(CAP_FFMPEG, "video/big_buck_bunny", "h265.mkv", "mkv.h265", "hevc", "I420"),
//make_tuple(CAP_FFMPEG, "video/big_buck_bunny", "h264.mp4", "mp4.avc1", "avc1", "I420"),
//make_tuple(CAP_FFMPEG, "video/big_buck_bunny", "h265.mp4", "mp4.hev1", "hev1", "I420"),
videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h264", "h264", "h264", "I420"),
videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h265", "h265", "hevc", "I420"),
videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "mjpg.avi", "mjpg", "MJPG", "I420"),
//videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h264.mkv", "mkv.h264", "h264", "I420"),
//videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h265.mkv", "mkv.h265", "hevc", "I420"),
//videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h264.mp4", "mp4.avc1", "avc1", "I420"),
//videoio_container_params_t(CAP_FFMPEG, "video/big_buck_bunny", "h265.mp4", "mp4.hev1", "hev1", "I420"),
};
INSTANTIATE_TEST_CASE_P(/**/, videoio_container, testing::ValuesIn(videoio_container_params));
......