Commit 6389627d authored by Alexander Alekhin

Merge pull request #1885 from vpisarev:shuffle_optflow_algos

parents f39cb5b0 dbfa4d72
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
namespace opencv_test { namespace {
#ifdef HAVE_OPENCL
void MakeArtificialExample(UMat &dst_frame1, UMat &dst_frame2);
typedef tuple<String, Size> DISParams;
typedef TestBaseWithParam<DISParams> DenseOpticalFlow_DIS;
OCL_PERF_TEST_P(DenseOpticalFlow_DIS, perf,
Combine(Values("PRESET_ULTRAFAST", "PRESET_FAST", "PRESET_MEDIUM"), Values(szVGA, sz720p, sz1080p)))
{
DISParams params = GetParam();
// use strings to print preset names in the perf test results:
String preset_string = get<0>(params);
int preset = DISOpticalFlow::PRESET_FAST;
if (preset_string == "PRESET_ULTRAFAST")
preset = DISOpticalFlow::PRESET_ULTRAFAST;
else if (preset_string == "PRESET_FAST")
preset = DISOpticalFlow::PRESET_FAST;
else if (preset_string == "PRESET_MEDIUM")
preset = DISOpticalFlow::PRESET_MEDIUM;
Size sz = get<1>(params);
UMat frame1(sz, CV_8U);
UMat frame2(sz, CV_8U);
UMat flow;
MakeArtificialExample(frame1, frame2);
Ptr<DenseOpticalFlow> algo = createOptFlow_DIS(preset);
OCL_TEST_CYCLE_N(10)
{
algo->calc(frame1, frame2, flow);
}
SANITY_CHECK_NOTHING();
}
void MakeArtificialExample(UMat &dst_frame1, UMat &dst_frame2)
{
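// Build a synthetic frame pair: upsample low-resolution random noise to the target size,
// then warp the second frame with a smooth random displacement field so that a
// non-trivial, smoothly varying optical flow exists between the two frames.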
int src_scale = 2;
int OF_scale = 6;
double sigma = dst_frame1.cols / 300;
UMat tmp(Size(dst_frame1.cols / (1 << src_scale), dst_frame1.rows / (1 << src_scale)), CV_8U);
randu(tmp, 0, 255);
resize(tmp, dst_frame1, dst_frame1.size(), 0.0, 0.0, INTER_LINEAR_EXACT);
resize(tmp, dst_frame2, dst_frame2.size(), 0.0, 0.0, INTER_LINEAR_EXACT);
Mat displacement_field(Size(dst_frame1.cols / (1 << OF_scale), dst_frame1.rows / (1 << OF_scale)),
CV_32FC2);
randn(displacement_field, 0.0, sigma);
resize(displacement_field, displacement_field, dst_frame2.size(), 0.0, 0.0, INTER_CUBIC);
for (int i = 0; i < displacement_field.rows; i++)
for (int j = 0; j < displacement_field.cols; j++)
displacement_field.at<Vec2f>(i, j) += Vec2f((float)j, (float)i);
remap(dst_frame2, dst_frame2, displacement_field, Mat(), INTER_LINEAR, BORDER_REPLICATE);
}
#endif // HAVE_OPENCL
}} // namespace
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2010-2012, Multicoreware, Inc., all rights reserved.
// Copyright (C) 2010-2012, Advanced Micro Devices, Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// @Authors
// Fangfang Bai, fangfang@multicorewareinc.com
// Jin Ma, jin@multicorewareinc.com
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors as is and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "../perf_precomp.hpp"
#include "opencv2/ts/ocl_perf.hpp"
#ifdef HAVE_OPENCL
namespace opencv_test {
namespace ocl {
///////////// OpticalFlow Dual TVL1 ////////////////////////
typedef tuple< tuple<int, double>, bool> OpticalFlowDualTVL1Params;
typedef TestBaseWithParam<OpticalFlowDualTVL1Params> OpticalFlowDualTVL1Fixture;
OCL_PERF_TEST_P(OpticalFlowDualTVL1Fixture, OpticalFlowDualTVL1,
::testing::Combine(
::testing::Values(make_tuple<int, double>(-1, 0.3),
make_tuple<int, double>(3, 0.5)),
::testing::Bool()
)
)
{
Mat frame0 = imread(getDataPath("cv/optflow/RubberWhale1.png"), IMREAD_GRAYSCALE);
ASSERT_FALSE(frame0.empty()) << "can't load RubberWhale1.png";
Mat frame1 = imread(getDataPath("cv/optflow/RubberWhale2.png"), IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty()) << "can't load RubberWhale2.png";
const Size srcSize = frame0.size();
const OpticalFlowDualTVL1Params params = GetParam();
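// unpack the test parameters: the inner tuple carries (median filter kernel size, pyramid scale step),
// the bool selects whether an initial flow estimate is passed to calc()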
const tuple<int, double> filteringScale = get<0>(params);
const int medianFiltering = get<0>(filteringScale);
const double scaleStep = get<1>(filteringScale);
const bool useInitFlow = get<1>(params);
double eps = 0.9;
UMat uFrame0; frame0.copyTo(uFrame0);
UMat uFrame1; frame1.copyTo(uFrame1);
UMat uFlow(srcSize, CV_32FC2);
declare.in(uFrame0, uFrame1, WARMUP_READ).out(uFlow, WARMUP_READ);
//create algorithm
Ptr<DualTVL1OpticalFlow> alg = createOptFlow_DualTVL1();
//set parameters
alg->setScaleStep(scaleStep);
alg->setMedianFiltering(medianFiltering);
if (useInitFlow)
{
//calculate initial flow as result of optical flow
alg->calc(uFrame0, uFrame1, uFlow);
}
//set flag to use initial flow
alg->setUseInitialFlow(useInitFlow);
OCL_TEST_CYCLE()
alg->calc(uFrame0, uFrame1, uFlow);
SANITY_CHECK(uFlow, eps, ERROR_RELATIVE);
}
} // namespace ocl
} // namespace opencv_test
#endif // HAVE_OPENCL
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "perf_precomp.hpp"
namespace opencv_test { namespace {
void MakeArtificialExample(Mat &dst_frame1, Mat &dst_frame2);
typedef tuple<String, Size> DISParams;
typedef TestBaseWithParam<DISParams> DenseOpticalFlow_DIS;
PERF_TEST_P(DenseOpticalFlow_DIS, perf,
Combine(Values("PRESET_ULTRAFAST", "PRESET_FAST", "PRESET_MEDIUM"), Values(szVGA, sz720p, sz1080p)))
{
DISParams params = GetParam();
// use strings to print preset names in the perf test results:
String preset_string = get<0>(params);
int preset = DISOpticalFlow::PRESET_FAST;
if (preset_string == "PRESET_ULTRAFAST")
preset = DISOpticalFlow::PRESET_ULTRAFAST;
else if (preset_string == "PRESET_FAST")
preset = DISOpticalFlow::PRESET_FAST;
else if (preset_string == "PRESET_MEDIUM")
preset = DISOpticalFlow::PRESET_MEDIUM;
Size sz = get<1>(params);
Mat frame1(sz, CV_8U);
Mat frame2(sz, CV_8U);
Mat flow;
MakeArtificialExample(frame1, frame2);
TEST_CYCLE_N(10)
{
Ptr<DenseOpticalFlow> algo = createOptFlow_DIS(preset);
algo->calc(frame1, frame2, flow);
}
SANITY_CHECK_NOTHING();
}
void MakeArtificialExample(Mat &dst_frame1, Mat &dst_frame2)
{
int src_scale = 2;
int OF_scale = 6;
double sigma = dst_frame1.cols / 300;
Mat tmp(Size(dst_frame1.cols / (1 << src_scale), dst_frame1.rows / (1 << src_scale)), CV_8U);
randu(tmp, 0, 255);
resize(tmp, dst_frame1, dst_frame1.size(), 0.0, 0.0, INTER_LINEAR_EXACT);
resize(tmp, dst_frame2, dst_frame2.size(), 0.0, 0.0, INTER_LINEAR_EXACT);
Mat displacement_field(Size(dst_frame1.cols / (1 << OF_scale), dst_frame1.rows / (1 << OF_scale)),
CV_32FC2);
randn(displacement_field, 0.0, sigma);
resize(displacement_field, displacement_field, dst_frame2.size(), 0.0, 0.0, INTER_CUBIC);
for (int i = 0; i < displacement_field.rows; i++)
for (int j = 0; j < displacement_field.cols; j++)
displacement_field.at<Vec2f>(i, j) += Vec2f((float)j, (float)i);
remap(dst_frame2, dst_frame2, displacement_field, Mat(), INTER_LINEAR, BORDER_REPLICATE);
}
}} // namespace
#include "perf_precomp.hpp"
namespace opencv_test { namespace {
using namespace perf;
typedef TestBaseWithParam< std::pair<string, string> > ImagePair;
std::pair<string, string> impair(const char* im1, const char* im2)
{
return std::make_pair(string(im1), string(im2));
}
PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1, testing::Values(impair("cv/optflow/RubberWhale1.png", "cv/optflow/RubberWhale2.png")))
{
declare.time(260);
Mat frame1 = imread(getDataPath(GetParam().first), IMREAD_GRAYSCALE);
Mat frame2 = imread(getDataPath(GetParam().second), IMREAD_GRAYSCALE);
ASSERT_FALSE(frame1.empty());
ASSERT_FALSE(frame2.empty());
Mat flow;
Ptr<DenseOpticalFlow> tvl1 = createOptFlow_DualTVL1();
TEST_CYCLE() tvl1->calc(frame1, frame2, flow);
SANITY_CHECK_NOTHING();
}
}} // namespace
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "perf_precomp.hpp"
namespace opencv_test { namespace {
typedef tuple<Size, int, int> VarRefParams;
typedef TestBaseWithParam<VarRefParams> DenseOpticalFlow_VariationalRefinement;
PERF_TEST_P(DenseOpticalFlow_VariationalRefinement, perf, Combine(Values(szQVGA, szVGA), Values(5, 10), Values(5, 10)))
{
VarRefParams params = GetParam();
Size sz = get<0>(params);
int sorIter = get<1>(params);
int fixedPointIter = get<2>(params);
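// sorIter and fixedPointIter set the number of inner SOR sweeps and outer
// fixed-point iterations of the variational solver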
Mat frame1(sz, CV_8U);
Mat frame2(sz, CV_8U);
Mat flow(sz, CV_32FC2);
randu(frame1, 0, 255);
randu(frame2, 0, 255);
flow.setTo(0.0f);
TEST_CYCLE_N(10)
{
Ptr<VariationalRefinement> var = createVariationalFlowRefinement();
var->setAlpha(20.0f);
var->setGamma(10.0f);
var->setDelta(5.0f);
var->setSorIterations(sorIter);
var->setFixedPointIterations(fixedPointIter);
var->calc(frame1, frame2, flow);
}
SANITY_CHECK_NOTHING();
}
}} // namespace
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/optflow.hpp"
using namespace std;
using namespace cv;
using namespace optflow;
static void help()
{
printf("Usage: dis_optflow <video_file>\n");
}
int main(int argc, char **argv)
{
VideoCapture cap;
if (argc < 2)
{
help();
exit(1);
}
cap.open(argv[1]);
if(!cap.isOpened())
{
printf("ERROR: Cannot open file %s\n", argv[1]);
return -1;
}
Mat prevgray, gray, rgb, frame;
Mat flow, flow_uv[2];
Mat mag, ang;
Mat hsv_split[3], hsv;
char ret;
namedWindow("flow", 1);
namedWindow("orig", 1);
Ptr<DenseOpticalFlow> algorithm = createOptFlow_DIS(DISOpticalFlow::PRESET_MEDIUM);
while(true)
{
cap >> frame;
if (frame.empty())
break;
cvtColor(frame, gray, COLOR_BGR2GRAY);
if (!prevgray.empty())
{
algorithm->calc(prevgray, gray, flow);
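// visualize the flow: hue encodes direction (angle in degrees), saturation the
// normalized magnitude, value is constant; the HSV image is then converted to BGR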
split(flow, flow_uv);
multiply(flow_uv[1], -1, flow_uv[1]);
cartToPolar(flow_uv[0], flow_uv[1], mag, ang, true);
normalize(mag, mag, 0, 1, NORM_MINMAX);
hsv_split[0] = ang;
hsv_split[1] = mag;
hsv_split[2] = Mat::ones(ang.size(), ang.type());
merge(hsv_split, 3, hsv);
cvtColor(hsv, rgb, COLOR_HSV2BGR);
imshow("flow", rgb);
imshow("orig", frame);
}
if ((ret = (char)waitKey(20)) > 0)
break;
std::swap(prevgray, gray);
}
return 0;
}
@@ -101,7 +101,7 @@ int main( int argc, const char **argv )
 Mat from = imread( fromPath );
 Mat to = imread( toPath );
-Mat gt = optflow::readOpticalFlow( gtPath );
+Mat gt = readOpticalFlow( gtPath );
 std::vector< std::pair< Point2i, Point2i > > corr;
 TickMeter meter;
@@ -269,11 +269,11 @@ int main( int argc, char** argv )
 algorithm = createOptFlow_PCAFlow();
 }
 else if ( method == "DISflow_ultrafast" )
-algorithm = createOptFlow_DIS(DISOpticalFlow::PRESET_ULTRAFAST);
+algorithm = DISOpticalFlow::create(DISOpticalFlow::PRESET_ULTRAFAST);
 else if (method == "DISflow_fast")
-algorithm = createOptFlow_DIS(DISOpticalFlow::PRESET_FAST);
+algorithm = DISOpticalFlow::create(DISOpticalFlow::PRESET_FAST);
 else if (method == "DISflow_medium")
-algorithm = createOptFlow_DIS(DISOpticalFlow::PRESET_MEDIUM);
+algorithm = DISOpticalFlow::create(DISOpticalFlow::PRESET_MEDIUM);
 else
 {
 printf("Wrong method!\n");
@@ -300,7 +300,7 @@ int main( int argc, char** argv )
 if ( !groundtruth_path.empty() )
 { // compare to ground truth
-ground_truth = optflow::readOpticalFlow(groundtruth_path);
+ground_truth = readOpticalFlow(groundtruth_path);
 if ( flow.size() != ground_truth.size() || flow.channels() != 2
 || ground_truth.channels() != 2 )
 {
@@ -135,7 +135,7 @@ int main( int argc, const char **argv )
 Mat i1 = imread( img1 );
 Mat i2 = imread( img2 );
-Mat gt = optflow::readOpticalFlow( groundtruth );
+Mat gt = readOpticalFlow( groundtruth );
 Mat i1g, i2g;
 cvtColor( i1, i1g, COLOR_BGR2GRAY );
#include <iostream>
#include <fstream>
#include <opencv2/core/utility.hpp>
#include "opencv2/video.hpp"
#include "opencv2/optflow.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace std;
using namespace optflow;
inline bool isFlowCorrect(Point2f u)
{
return !cvIsNaN(u.x) && !cvIsNaN(u.y) && fabs(u.x) < 1e9 && fabs(u.y) < 1e9;
}
static Vec3b computeColor(float fx, float fy)
{
static bool first = true;
// relative lengths of color transitions:
// these are chosen based on perceptual similarity
// (e.g. one can distinguish more shades between red and yellow
// than between yellow and green)
const int RY = 15;
const int YG = 6;
const int GC = 4;
const int CB = 11;
const int BM = 13;
const int MR = 6;
const int NCOLS = RY + YG + GC + CB + BM + MR;
static Vec3i colorWheel[NCOLS];
if (first)
{
int k = 0;
for (int i = 0; i < RY; ++i, ++k)
colorWheel[k] = Vec3i(255, 255 * i / RY, 0);
for (int i = 0; i < YG; ++i, ++k)
colorWheel[k] = Vec3i(255 - 255 * i / YG, 255, 0);
for (int i = 0; i < GC; ++i, ++k)
colorWheel[k] = Vec3i(0, 255, 255 * i / GC);
for (int i = 0; i < CB; ++i, ++k)
colorWheel[k] = Vec3i(0, 255 - 255 * i / CB, 255);
for (int i = 0; i < BM; ++i, ++k)
colorWheel[k] = Vec3i(255 * i / BM, 0, 255);
for (int i = 0; i < MR; ++i, ++k)
colorWheel[k] = Vec3i(255, 0, 255 - 255 * i / MR);
first = false;
}
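// map the flow vector to an angle on the color wheel and linearly interpolate
// between the two nearest wheel colors; saturation grows with the vector magnitude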
const float rad = sqrt(fx * fx + fy * fy);
const float a = atan2(-fy, -fx) / (float)CV_PI;
const float fk = (a + 1.0f) / 2.0f * (NCOLS - 1);
const int k0 = static_cast<int>(fk);
const int k1 = (k0 + 1) % NCOLS;
const float f = fk - k0;
Vec3b pix;
for (int b = 0; b < 3; b++)
{
const float col0 = colorWheel[k0][b] / 255.f;
const float col1 = colorWheel[k1][b] / 255.f;
float col = (1 - f) * col0 + f * col1;
if (rad <= 1)
col = 1 - rad * (1 - col); // increase saturation with radius
else
col *= .75; // out of range
pix[2 - b] = static_cast<uchar>(255.f * col);
}
return pix;
}
static void drawOpticalFlow(const Mat_<Point2f>& flow, Mat& dst, float maxmotion = -1)
{
dst.create(flow.size(), CV_8UC3);
dst.setTo(Scalar::all(0));
// determine motion range:
float maxrad = maxmotion;
if (maxmotion <= 0)
{
maxrad = 1;
for (int y = 0; y < flow.rows; ++y)
{
for (int x = 0; x < flow.cols; ++x)
{
Point2f u = flow(y, x);
if (!isFlowCorrect(u))
continue;
maxrad = max(maxrad, sqrt(u.x * u.x + u.y * u.y));
}
}
}
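// color every valid flow vector, normalizing by the largest magnitude found above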
for (int y = 0; y < flow.rows; ++y)
{
for (int x = 0; x < flow.cols; ++x)
{
Point2f u = flow(y, x);
if (isFlowCorrect(u))
dst.at<Vec3b>(y, x) = computeColor(u.x / maxrad, u.y / maxrad);
}
}
}
// binary file format for flow data specified here:
// http://vision.middlebury.edu/flow/data/
static void writeOpticalFlowToFile(const Mat_<Point2f>& flow, const string& fileName)
{
static const char FLO_TAG_STRING[] = "PIEH";
ofstream file(fileName.c_str(), ios_base::binary);
file << FLO_TAG_STRING;
file.write((const char*) &flow.cols, sizeof(int));
file.write((const char*) &flow.rows, sizeof(int));
for (int i = 0; i < flow.rows; ++i)
{
for (int j = 0; j < flow.cols; ++j)
{
const Point2f u = flow(i, j);
file.write((const char*) &u.x, sizeof(float));
file.write((const char*) &u.y, sizeof(float));
}
}
}
int main(int argc, const char* argv[])
{
cv::CommandLineParser parser(argc, argv, "{help h || show help message}"
"{ @frame0 | | frame 0}{ @frame1 | | frame 1}{ @output | | output flow}");
if (parser.has("help"))
{
parser.printMessage();
return 0;
}
string frame0_name = parser.get<string>("@frame0");
string frame1_name = parser.get<string>("@frame1");
string file = parser.get<string>("@output");
if (frame0_name.empty() || frame1_name.empty() || file.empty())
{
cerr << "Usage : " << argv[0] << " [<frame0>] [<frame1>] [<output_flow>]" << endl;
return -1;
}
Mat frame0 = imread(frame0_name, IMREAD_GRAYSCALE);
Mat frame1 = imread(frame1_name, IMREAD_GRAYSCALE);
if (frame0.empty())
{
cerr << "Can't open image [" << parser.get<string>("frame0") << "]" << endl;
return -1;
}
if (frame1.empty())
{
cerr << "Can't open image [" << parser.get<string>("frame1") << "]" << endl;
return -1;
}
if (frame1.size() != frame0.size())
{
cerr << "Images should be of equal sizes" << endl;
return -1;
}
Mat_<Point2f> flow;
Ptr<DualTVL1OpticalFlow> tvl1 = DualTVL1OpticalFlow::create();
const double start = (double)getTickCount();
tvl1->calc(frame0, frame1, flow);
const double timeSec = (getTickCount() - start) / getTickFrequency();
cout << "calcOpticalFlowDual_TVL1 : " << timeSec << " sec" << endl;
Mat out;
drawOpticalFlow(flow, out);
if (!file.empty())
writeOpticalFlowToFile(flow, file);
imshow("Flow", out);
waitKey();
return 0;
}
@@ -147,7 +147,7 @@ void OpticalFlowDeepFlow::calc( InputArray _I0, InputArray _I1, InputOutputArray
 for ( int level = levelCount - 1; level >= 0; --level )
 { //iterate through all levels, beginning with the most coarse
-Ptr<VariationalRefinement> var = createVariationalFlowRefinement();
+Ptr<VariationalRefinement> var = VariationalRefinement::create();
 var->setAlpha(4 * alpha);
 var->setDelta(delta / 3);
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <iostream>
#include <fstream>
namespace cv {
namespace optflow {
const float FLOW_TAG_FLOAT = 202021.25f;
const char *FLOW_TAG_STRING = "PIEH";
CV_EXPORTS_W Mat readOpticalFlow( const String& path )
{
// CV_Assert(sizeof(float) == 4);
//FIXME: ensure right sizes of int and float - here and in writeOpticalFlow()
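// .flo layout (Middlebury): float tag 202021.25 ("PIEH"), int32 width, int32 height,
// then interleaved float (u, v) pairs in row-major order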
Mat_<Point2f> flow;
std::ifstream file(path.c_str(), std::ios_base::binary);
if ( !file.good() )
return flow; // no file - return empty matrix
float tag;
file.read((char*) &tag, sizeof(float));
if ( tag != FLOW_TAG_FLOAT )
return flow;
int width, height;
file.read((char*) &width, 4);
file.read((char*) &height, 4);
flow.create(height, width);
for ( int i = 0; i < flow.rows; ++i )
{
for ( int j = 0; j < flow.cols; ++j )
{
Point2f u;
file.read((char*) &u.x, sizeof(float));
file.read((char*) &u.y, sizeof(float));
if ( !file.good() )
{
flow.release();
return flow;
}
flow(i, j) = u;
}
}
file.close();
return flow;
}
CV_EXPORTS_W bool writeOpticalFlow( const String& path, InputArray flow )
{
// CV_Assert(sizeof(float) == 4);
const int nChannels = 2;
Mat input = flow.getMat();
if ( input.channels() != nChannels || input.depth() != CV_32F || path.length() == 0 )
return false;
std::ofstream file(path.c_str(), std::ofstream::binary);
if ( !file.good() )
return false;
int nRows, nCols;
nRows = (int) input.size().height;
nCols = (int) input.size().width;
const int headerSize = 12;
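// 12-byte header: 4-byte tag followed by 4-byte width and 4-byte height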
char header[headerSize];
memcpy(header, FLOW_TAG_STRING, 4);
// size of ints is known - has been asserted in the current function
memcpy(header + 4, reinterpret_cast<const char*>(&nCols), sizeof(nCols));
memcpy(header + 8, reinterpret_cast<const char*>(&nRows), sizeof(nRows));
file.write(header, headerSize);
if ( !file.good() )
return false;
// if ( input.isContinuous() ) //matrix is continous - treat it as a single row
// {
// nCols *= nRows;
// nRows = 1;
// }
int row;
char* p;
for ( row = 0; row < nRows; row++ )
{
p = input.ptr<char>(row);
file.write(p, nCols * nChannels * sizeof(float));
if ( !file.good() )
return false;
}
file.close();
return true;
}
} // namespace optflow
} // namespace cv
@@ -6,6 +6,6 @@ set(the_description "Super Resolution")
 if(HAVE_CUDA)
 ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wshadow)
 endif()
-ocv_define_module(superres opencv_imgproc opencv_video
+ocv_define_module(superres opencv_imgproc opencv_video opencv_optflow
 OPTIONAL opencv_videoio opencv_cudaarithm opencv_cudafilters opencv_cudawarping opencv_cudaimgproc opencv_cudaoptflow opencv_cudacodec
 WRAP python)