Commit e67653ea authored by si40wiga, committed by Alexander Alekhin

Merge pull request #2296 from si40wiga:fsr-inpaint

* new algorithm Rapid Frequency Selective Reconstruction (FSR) added

* fix compiler warning

* applied changes suggested in alalek's review

* fix trailing whitespace

* xphoto: update inpaint() test

* fix pre-processing of error mask

* xphoto: move inpainting FSR algorithm into a separate file

* xphoto: cleanup inpaining documentation

* xphoto: inpainting fsr - avoid uninitialized values
parent 7d7312a9
@@ -29,3 +29,49 @@
publisher = {ACM},
url = {https://www.researchgate.net/profile/Julie_Dorsey/publication/220184746_Fast_Bilateral_Filtering_for_the_Display_of_High_-_dynamic_-_range_Images/links/54566b000cf26d5090a95f96/Fast-Bilateral-Filtering-for-the-Display-of-High-dynamic-range-Images.pdf}
}
@INPROCEEDINGS{GenserPCS2018,
author={N. {Genser} and J. {Seiler} and F. {Schilling} and A. {Kaup}},
booktitle={Proc. Picture Coding Symposium (PCS)},
title={Signal and Loss Geometry Aware Frequency Selective Extrapolation for Error Concealment},
year={2018},
pages={159-163},
keywords={extrapolation;image reconstruction;video coding;loss geometry aware frequency selective extrapolation;error concealment;complex models;moderate computational complexity;Full HD image;error pattern;adjacent samples;undistorted samples;reconstruction parameters;processing order;High Efficiency Video Coding;content based partitioning;signal characteristics;block based frequency selective extrapolation;Image reconstruction;Extrapolation;Geometry;Partitioning algorithms;Task analysis;Computational modeling;Standards},
doi={10.1109/PCS.2018.8456259},
month={June},
}
@ARTICLE{SeilerTIP2015,
author={J. {Seiler} and M. {Jonscher} and M. {Schöberl} and A. {Kaup}},
journal={IEEE Transactions on Image Processing},
title={Resampling Images to a Regular Grid From a Non-Regular Subset of Pixel Positions Using Frequency Selective Reconstruction},
year={2015},
volume={24},
number={11},
pages={4540-4555},
keywords={Fourier transforms;image reconstruction;resampling images;regular grid;nonregular subset;pixel positions;frequency selective reconstruction;displaying image signals;image signal reconstruction algorithm;Fourier domain;optical transfer function;visual quality;peak signal-to-noise ratio;Image reconstruction;Signal processing algorithms;Reconstruction algorithms;Signal processing;Spatial resolution;;Image reconstruction;non-regular sampling;interpolation},
doi={10.1109/TIP.2015.2463084},
month={Nov},
}
@INPROCEEDINGS{GroscheICIP2018,
author={S. {Grosche} and J. {Seiler} and A. {Kaup}},
booktitle={Proc. 25th IEEE International Conference on Image Processing (ICIP)},
title={Iterative Optimization of Quarter Sampling Masks for Non-Regular Sampling Sensors},
year={2018},
pages={26-30},
keywords={extrapolation;image enhancement;image reconstruction;image resolution;image sampling;image sensors;interpolation;iterative methods;optimisation;regression analysis;iterative optimization;nonregular sampling sensors;iterative algorithm;arbitrary quarter sampling mask;reconstruction algorithms;random quarter sampling mask;optimized mask;frequency selective extrapolation;steering kernel regression;nearest neighbor interpolation;linear interpolation;regular imaging sensor;reconstruction quality;noise figure 0.31 dB to 0.68 dB;Image resolution;Image reconstruction;Sensors;Optimization;Energy resolution;Reconstruction algorithms;Image sensors;Non-Regular Sampling;Image reconstruction},
doi={10.1109/ICIP.2018.8451658},
month={Oct},
}
@INPROCEEDINGS{GroscheIST2018,
author={S. {Grosche} and J. {Seiler} and A. {Kaup}},
booktitle={Proc. IEEE International Conference on Imaging Systems and Techniques (IST)},
title={Design Techniques for Incremental Non-Regular Image Sampling Patterns},
year={2018},
pages={1-6},
keywords={image reconstruction;image resolution;image sampling;design techniques;incremental nonregular image sampling patterns;image signals;regular two dimensional grid;nonregular sampling patterns;sampling positions;random patterns;regular patterns;arbitrary sampling densities;incremental sampling patterns;sampling density;Image reconstruction;Scanning electron microscopy;Probability distribution;Atomic force microscopy;Reconstruction algorithms;Measurement by laser beam;Image Reconstruction;non-Regular Sampling},
doi={10.1109/IST.2018.8577090},
month={Oct},
}
@@ -9,9 +9,14 @@
//
// License Agreement
// For Open Source Computer Vision Library
// (3-clause BSD License)
//
// Copyright (C) 2000-2019, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved.
// Copyright (C) 2015-2016, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
@@ -24,8 +29,9 @@
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the names of the copyright holders nor the names of the contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
@@ -58,24 +64,48 @@ namespace xphoto
//! @addtogroup xphoto
//! @{
//! @brief Various inpainting algorithms
//! @sa inpaint
enum InpaintTypes
{
/** This algorithm searches for dominant correspondences (transformations) of
image patches and tries to seamlessly fill in the area to be inpainted using these
transformations */
INPAINT_SHIFTMAP = 0,
/** Performs Frequency Selective Reconstruction (FSR).
One of the two quality profiles BEST and FAST can be chosen, depending on the time available for reconstruction.
See @cite GenserPCS2018 and @cite SeilerTIP2015 for details.
The algorithm may be utilized for the following areas of application:
1. %Error Concealment (Inpainting).
The sampling mask indicates the missing pixels of the distorted input
image to be reconstructed.
2. Non-Regular Sampling.
For more information on how to choose a good sampling mask, please review
@cite GroscheICIP2018 and @cite GroscheIST2018.
1-channel grayscale or 3-channel BGR images are accepted.
Conventional accepted ranges:
- 0-255 for CV_8U
- 0-65535 for CV_16U
- 0-1 for CV_32F/CV_64F.
*/
INPAINT_FSR_BEST = 1,
INPAINT_FSR_FAST = 2 //!< See #INPAINT_FSR_BEST
};
/** @brief The function implements different single-image inpainting algorithms.
See the original papers @cite He2012 (Shiftmap) or @cite GenserPCS2018 and @cite SeilerTIP2015 (FSR) for details.
@param src source image
- #INPAINT_SHIFTMAP: it could be of any type and any number of channels from 1 to 4. In case of
3- and 4-channel images the function expects them in CIELab colorspace or a similar one, where the first
color component shows intensity, while the second and third show colors. Nonetheless you can try any
colorspace.
- #INPAINT_FSR_BEST or #INPAINT_FSR_FAST: 1-channel grayscale or 3-channel BGR image.
@param mask mask (#CV_8UC1), where non-zero pixels indicate valid image area, while zero pixels
indicate area to be inpainted
@param dst destination image
@param algorithmType see xphoto::InpaintTypes
...
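For orientation, here is a minimal usage sketch of the API documented above; the file name, image paths, and chosen quality profile are illustrative assumptions, not part of this change:

// fsr_inpaint_example.cpp -- minimal sketch of calling the new FSR inpainting entry point.
// Assumes an 8-bit BGR input image and an 8-bit mask where zero marks the pixels to reconstruct.
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/xphoto.hpp>

int main()
{
    cv::Mat src = cv::imread("distorted.png", cv::IMREAD_COLOR);    // hypothetical input path
    cv::Mat mask = cv::imread("mask.png", cv::IMREAD_GRAYSCALE);    // non-zero = valid, zero = inpaint
    CV_Assert(!src.empty() && !mask.empty() && src.size() == mask.size());

    cv::Mat dst;
    cv::xphoto::inpaint(src, mask, dst, cv::xphoto::INPAINT_FSR_FAST); // or INPAINT_FSR_BEST
    cv::imwrite("reconstructed.png", dst);
    return 0;
}

INPAINT_FSR_FAST trades some reconstruction quality for speed, as described in the enum documentation above.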
@@ -9,11 +9,19 @@
//
// License Agreement
// For Open Source Computer Vision Library
// (3-clause BSD License)
//
// Copyright (C) 2000-2019, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
// Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
// Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved.
// Copyright (C) 2015-2016, Itseez Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
@@ -21,8 +29,9 @@
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * Neither the names of the copyright holders nor the names of the contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
@@ -46,6 +55,8 @@
#include <fstream>
#include <time.h>
#include <functional>
#include <string>
#include <tuple>
#include "opencv2/xphoto.hpp"
@@ -62,6 +73,8 @@
#include "annf.hpp"
#include "advanced_types.hpp"
#include "inpainting_fsr.impl.hpp"
namespace cv
{
namespace xphoto
@@ -298,17 +311,9 @@ namespace xphoto
}
}
static
void inpaint_shiftmap(const Mat &src, const Mat &mask, Mat &dst, const int algorithmType)
{
switch ( src.type() )
{
case CV_8SC1:
@@ -399,8 +404,25 @@ namespace xphoto
CV_Error_( CV_StsNotImplemented,
("Unsupported source image format (=%d)",
src.type()) );
}
}
void inpaint(const Mat &src, const Mat &mask, Mat &dst, const int algorithmType)
{
CV_Assert(!src.empty());
CV_Assert(!mask.empty());
CV_CheckTypeEQ(mask.type(), CV_8UC1, "");
CV_Assert(src.rows == mask.rows && src.cols == mask.cols);
switch (algorithmType)
{
case xphoto::INPAINT_SHIFTMAP:
return inpaint_shiftmap(src, mask, dst, algorithmType);
case xphoto::INPAINT_FSR_BEST:
case xphoto::INPAINT_FSR_FAST:
return inpaint_fsr(src, mask, dst, algorithmType);
}
CV_Error_(Error::StsNotImplemented, ("Unsupported inpainting algorithm type (=%d)", algorithmType));
}
}} // namespace
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// This is not a standalone header, see inpainting.cpp
namespace cv
{
namespace xphoto
{
struct fsr_parameters
{
// default variables
int block_size = 16;
double conc_weighting = 0.5;
double rhos[4] = { 0.80, 0.70, 0.66, 0.64 };
double threshold_stddev_Y[3] = { 0.014, 0.030, 0.090 };
double threshold_stddev_Cx[3] = { 0.006, 0.010, 0.028 };
// quality profile dependent variables
int block_size_min, fft_size, max_iter, min_iter, iter_const;
double orthogonality_correction;
fsr_parameters(const int quality)
{
if (quality == xphoto::INPAINT_FSR_BEST)
{
block_size_min = 2;
fft_size = 64;
max_iter = 400;
min_iter = 50;
iter_const = 2000;
orthogonality_correction = 0.2;
}
else if (quality == xphoto::INPAINT_FSR_FAST)
{
block_size_min = 4;
fft_size = 32;
max_iter = 100;
min_iter = 20;
iter_const = 1000;
orthogonality_correction = 0.5;
}
else
{
CV_Error(Error::StsBadArg, "Unknown quality level set, supported: FAST, BEST");
}
}
};
static void
icvSgnMat(const Mat& src, Mat& dst) {
dst = Mat::zeros(src.size(), CV_64F);
for (int y = 0; y < src.rows; ++y)
{
for (int x = 0; x < src.cols; ++x)
{
double curr_val = src.at<double>(y,x);
if (curr_val > 0)
{
dst.at<double>(y,x) = 1;
}
else if (curr_val)
{
dst.at<double>(y,x) = -1;
}
}
}
}
static double
icvStandardDeviation(const Mat& distorted_block_2d, const Mat& error_mask_2d) {
if (countNonZero(error_mask_2d) < 1)
{
return NAN; // block with no undistorted pixels shouldn't be chosen for processing (only if block_size_min is reached)
}
Scalar tmp_stddev, tmp_mean;
Mat mask8u;
error_mask_2d.convertTo(mask8u, CV_8U, 2.0);
meanStdDev(distorted_block_2d, tmp_mean, tmp_stddev, mask8u);
double sigma_n = tmp_stddev[0] / 255;
if (sigma_n < 0)
{
sigma_n = 0;
}
else if (sigma_n > 1)
{
sigma_n = 1;
}
return sigma_n;
}
static void
icvExtrapolateBlock(Mat& distorted_block, Mat& error_mask, fsr_parameters& fsr_params, double rho, double normedStdDev, Mat& extrapolated_block)
{
double fft_size = fsr_params.fft_size;
double orthogonality_correction = fsr_params.orthogonality_correction;
int M = distorted_block.rows;
int N = distorted_block.cols;
int fft_x_offset = cvFloor((fft_size - N) / 2);
int fft_y_offset = cvFloor((fft_size - M) / 2);
// weighting function
Mat w = Mat::zeros(fsr_params.fft_size, fsr_params.fft_size, CV_64F);
error_mask.copyTo(w(Range(fft_y_offset, fft_y_offset + M), Range(fft_x_offset, fft_x_offset + N)));
for (int u = 0; u < fft_size; ++u)
{
for (int v = 0; v < fft_size; ++v)
{
w.at<double>(u, v) *= std::pow(rho, std::sqrt(std::pow(u + 0.5 - (fft_y_offset + M / 2), 2) + std::pow(v + 0.5 - (fft_x_offset + N / 2), 2)));
}
}
Mat W;
dft(w, W, DFT_COMPLEX_OUTPUT);
Mat W_padded;
hconcat(W, W, W_padded);
vconcat(W_padded, W_padded, W_padded);
// frequency weighting
Mat frequency_weighting = Mat::ones(fsr_params.fft_size, fsr_params.fft_size / 2 + 1, CV_64F);
for (int y = 0; y < fft_size; ++y)
{
for (int x = 0; x < (fft_size / 2 + 1); ++x)
{
double y2 = fft_size / 2 - std::abs(y - fft_size / 2);
double x2 = fft_size / 2 - std::abs(x - fft_size / 2);
frequency_weighting.at<double>(y, x) = 1 - std::sqrt(x2*x2 + y2 * y2)*std::sqrt(2) / fft_size;
}
}
// pad image to fft window size
Mat f(Size(fsr_params.fft_size, fsr_params.fft_size), CV_64F, Scalar::all(0));
distorted_block.copyTo(f(Range(fft_y_offset, fft_y_offset + M), Range(fft_x_offset, fft_x_offset + N)));
// create initial model
Mat G = Mat::zeros(fsr_params.fft_size, fsr_params.fft_size, CV_64FC2); // complex
// calculate initial residual
Mat Rw_tmp, Rw;
dft(f.mul(w), Rw_tmp, DFT_COMPLEX_OUTPUT);
Rw = Rw_tmp(Range(0, fsr_params.fft_size), Range(0, fsr_params.fft_size / 2 + 1));
// estimate ideal number of iterations (GenserIWSSIP2017)
// calculate stddev if not available (e.g., for smallest block size)
if (normedStdDev == 0) {
normedStdDev = icvStandardDeviation(distorted_block, error_mask);
}
int num_iters = cvRound(fsr_params.iter_const * normedStdDev);
if (num_iters < fsr_params.min_iter) {
num_iters = fsr_params.min_iter;
}
else if (num_iters > fsr_params.max_iter) {
num_iters = fsr_params.max_iter;
}
int iter_counter = 0;
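// Each iteration below picks the DFT basis function with the largest weighted residual
// magnitude, adds it (together with its complex-conjugate counterpart, where one exists)
// to the model spectrum G, and subtracts its contribution from the weighted residual Rw.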
while (iter_counter < num_iters)
{ // Spectral Constrained FSE (GenserIWSSIP2018)
Mat projection_distances(Rw.size(), CV_64F);
Mat Rw_mag = Mat(Rw.size(), CV_64F);
std::vector<Mat> channels(2);
split(Rw, channels);
magnitude(channels[0], channels[1], Rw_mag);
projection_distances = Rw_mag.mul(frequency_weighting);
double minVal, maxVal;
int maxLocx = -1;
int maxLocy = -1;
minMaxLoc(projection_distances, &minVal, &maxVal);
for (int y = 0; y < projection_distances.rows; ++y)
{ // ensure that the first appearance of the maximum value is selected
for (int x = 0; x < projection_distances.cols; ++x)
{
if (std::abs(projection_distances.at<double>(y, x) - maxVal) < 0.001)
{
maxLocy = y;
maxLocx = x;
break;
}
}
if (maxLocy != -1)
{
break;
}
}
int bf2select = maxLocy + maxLocx * projection_distances.rows;
int v = static_cast<int>(std::max(0.0, std::floor(bf2select / fft_size)));
int u = static_cast<int>(std::max(0, bf2select % fsr_params.fft_size));
// exclude second half of first and middle col
if ((v == 0 && u > fft_size / 2) || (v == fft_size / 2 && u > fft_size / 2))
{
int u_prev = u;
u = fsr_params.fft_size - u;
Rw.at<std::complex<double> >(u, v) = std::conj(Rw.at<std::complex<double> >(u_prev, v));
}
// calculate complex conjugate solution
int u_cj = -1;
int v_cj = -1;
// fill first lower col (copy from first upper col)
if (u >= 1 && u < fft_size / 2 && v == 0)
{
u_cj = fsr_params.fft_size - u;
v_cj = v;
}
// fill middle lower col (copy from first middle col)
if (u >= 1 && u < fft_size / 2 && v == fft_size / 2)
{
u_cj = fsr_params.fft_size - u;
v_cj = v;
}
// fill first row right (copy from first row left)
if (u == 0 && v >= 1 && v < fft_size / 2)
{
u_cj = u;
v_cj = fsr_params.fft_size - v;
}
// fill middle row right (copy from middle row left)
if (u == fft_size / 2 && v >= 1 && v < fft_size / 2)
{
u_cj = u;
v_cj = fsr_params.fft_size - v;
}
// fill cell upper right (copy from lower cell left)
if (u >= fft_size / 2 + 1 && v >= 1 && v < fft_size / 2)
{
u_cj = fsr_params.fft_size - u;
v_cj = fsr_params.fft_size - v;
}
// fill cell lower right (copy from upper cell left)
if (u >= 1 && u < fft_size / 2 && v >= 1 && v < fft_size / 2)
{
u_cj = fsr_params.fft_size - u;
v_cj = fsr_params.fft_size - v;
}
/// add coef to model and update residual
if (u_cj != -1 && v_cj != -1)
{
std::complex< double> expansion_coefficient = orthogonality_correction * Rw.at< std::complex<double> >(u, v) / W.at<std::complex<double> >(0, 0);
G.at< std::complex<double> >(u, v) += fft_size * fft_size * expansion_coefficient;
G.at< std::complex<double> >(u_cj, v_cj) = std::conj(G.at< std::complex<double> >(u, v));
Mat expansion_mat(Rw.size(), CV_64FC2, Scalar(expansion_coefficient.real(), expansion_coefficient.imag()));
Mat W_tmp1 = W_padded(Range(fsr_params.fft_size - u, fsr_params.fft_size - u + Rw.rows), Range(fsr_params.fft_size - v, fsr_params.fft_size - v + Rw.cols));
Mat W_tmp2 = W_padded(Range(fsr_params.fft_size - u_cj, fsr_params.fft_size - u_cj + Rw.rows), Range(fsr_params.fft_size - v_cj, fsr_params.fft_size - v_cj + Rw.cols));
Mat res_1(W_tmp1.size(), W_tmp1.type());
mulSpectrums(expansion_mat, W_tmp1, res_1, 0);
expansion_mat.setTo(Scalar(expansion_coefficient.real(), -expansion_coefficient.imag()));
Mat res_2(W_tmp1.size(), W_tmp1.type());
mulSpectrums(expansion_mat, W_tmp2, res_2, 0);
Rw -= res_1 + res_2;
++iter_counter; // ... as two basis functions were added
}
else
{
std::complex<double> expansion_coefficient = orthogonality_correction * Rw.at< std::complex<double> >(u, v) / W.at< std::complex<double> >(0, 0);
G.at< std::complex<double> >(u, v) += fft_size * fft_size * expansion_coefficient;
Mat expansion_mat(Rw.size(), CV_64FC2, Scalar(expansion_coefficient.real(), expansion_coefficient.imag()));
Mat W_tmp = W_padded(Range(fsr_params.fft_size - u, fsr_params.fft_size - u + Rw.rows), Range(fsr_params.fft_size - v, fsr_params.fft_size - v + Rw.cols));
Mat res_tmp(W_tmp.size(), W_tmp.type());
mulSpectrums(expansion_mat, W_tmp, res_tmp, 0);
Rw -= res_tmp;
}
++iter_counter;
}
// get pixels from model
Mat g;
idft(G, g, DFT_SCALE);
// extract reconstructed pixels
Mat g_real(M, N, CV_64F);
for (int x = 0; x < M; ++x)
{
for (int y = 0; y < N; ++y)
{
g_real.at<double>(x, y) = g.at< std::complex<double> >(fft_y_offset + x, fft_x_offset + y).real();
}
}
g_real.copyTo(extrapolated_block);
Mat orig_samples;
error_mask.convertTo(orig_samples, CV_8U);
distorted_block.copyTo(extrapolated_block, orig_samples); // copy where orig_samples is nonzero
}
static void
icvGetTodoBlocks(Mat& sampled_img, Mat& sampling_mask, std::vector< std::tuple< int, int > >& set_todo, int block_size, int block_size_min, int border_width, double homo_threshold, Mat& set_process_this_block_size, std::vector< std::tuple< int, int > >& set_later, Mat& sigma_n_array)
{
std::vector< std::tuple< int, int > > set_now;
set_later.clear();
size_t list_length = set_todo.size();
int img_height = sampled_img.rows;
int img_width = sampled_img.cols;
Mat reconstructed_img;
sampled_img.copyTo(reconstructed_img);
// calculate block lists
for (size_t entry = 0; entry < list_length; ++entry)
{
int xblock_counter = std::get<0>(set_todo[entry]);
int yblock_counter = std::get<1>(set_todo[entry]);
int left_border = std::min(xblock_counter*block_size, border_width);
int top_border = std::min(yblock_counter*block_size, border_width);
int right_border = std::max(0, std::min(img_width - (xblock_counter + 1)*block_size, border_width));
int bottom_border = std::max(0, std::min(img_height - (yblock_counter + 1)*block_size, border_width));
// extract blocks from images
Mat distorted_block_2d = reconstructed_img(Range(yblock_counter*block_size - top_border, std::min(img_height, (yblock_counter*block_size + block_size + bottom_border))), Range(xblock_counter*block_size - left_border, std::min(img_width, (xblock_counter*block_size + block_size + right_border))));
Mat error_mask_2d = sampling_mask(Range(yblock_counter*block_size - top_border, std::min(img_height, (yblock_counter*block_size + block_size + bottom_border))), Range(xblock_counter*block_size - left_border, std::min(img_width, (xblock_counter*block_size + block_size + right_border))));
// determine normalized and weighted standard deviation
if (block_size > block_size_min)
{
double sigma_n = icvStandardDeviation(distorted_block_2d, error_mask_2d);
sigma_n_array.at<double>( yblock_counter, xblock_counter) = sigma_n;
// homogeneous case
if (sigma_n < homo_threshold)
{
set_now.emplace_back(xblock_counter, yblock_counter);
set_process_this_block_size.at<double>(yblock_counter, xblock_counter) = 255;
}
else
{
int yblock_counter_quadernary = yblock_counter * 2;
int xblock_counter_quadernary = xblock_counter * 2;
int yblock_offset = 0;
int xblock_offset = 0;
for (int quader_counter = 0; quader_counter < 4; ++quader_counter)
{
if (quader_counter == 0)
{
yblock_offset = 0;
xblock_offset = 0;
}
else if (quader_counter == 1)
{
yblock_offset = 0;
xblock_offset = 1;
}
else if (quader_counter == 2)
{
yblock_offset = 1;
xblock_offset = 0;
}
else if (quader_counter == 3)
{
yblock_offset = 1;
xblock_offset = 1;
}
set_later.emplace_back(xblock_counter_quadernary + xblock_offset, yblock_counter_quadernary + yblock_offset);
}
}
}
}
}
static void
icvDetermineProcessingOrder(
const Mat& _sampled_img, const Mat& _sampling_mask,
const int quality, const std::string& channel, Mat& reconstructed_img
)
{
fsr_parameters fsr_params(quality);
int block_size = fsr_params.block_size;
int block_size_max = fsr_params.block_size;
int block_size_min = fsr_params.block_size_min;
double conc_weighting = fsr_params.conc_weighting;
int fft_size = fsr_params.fft_size;
double rho = fsr_params.rhos[0];
Mat sampled_img, sampling_mask;
_sampled_img.convertTo(sampled_img, CV_64F);
reconstructed_img = sampled_img.clone();
_sampling_mask.convertTo(sampling_mask, CV_64F);
double threshold_stddev_LUT[3];
if (channel == "Y")
{
std::copy(fsr_params.threshold_stddev_Y, fsr_params.threshold_stddev_Y + 3, threshold_stddev_LUT);
}
else if (channel == "Cx")
{
std::copy(fsr_params.threshold_stddev_Cx, fsr_params.threshold_stddev_Cx + 3, threshold_stddev_LUT);
}
else
{
CV_Error(Error::StsBadArg, "channel type unsupported!");
}
double threshold_stddev = threshold_stddev_LUT[0];
std::vector< std::tuple< int, int > > set_later;
int img_height = sampled_img.rows;
int img_width = sampled_img.cols;
// initial scan of distorted blocks
std::vector< std::tuple< int, int > > set_todo;
int blocks_column = divUp(img_height, block_size);
int blocks_line = divUp(img_width, block_size);
for (int y = 0; y < blocks_column; ++y)
{
for (int x = 0; x < blocks_line; ++x)
{
Mat curr_block = sampling_mask(Range(y*block_size, std::min(img_height, (y + 1)*block_size)), Range(x*block_size, std::min(img_width, (x + 1)*block_size)));
double min_block, max_block;
minMaxLoc(curr_block, &min_block, &max_block);
if (min_block == 0)
{
set_todo.emplace_back(x, y);
}
}
}
// loop over all distorted blocks and extrapolate them depending on
// their block size
int border_width = 0;
while (block_size >= block_size_min)
{
int blocks_per_column = cvCeil(img_height / block_size);
int blocks_per_line = cvCeil(img_width / block_size);
Mat nen_array = Mat::zeros(blocks_per_column, blocks_per_line, CV_64F);
Mat proc_array = Mat::zeros(blocks_per_column, blocks_per_line, CV_64F);
Mat sigma_n_array = Mat::zeros(blocks_per_column, blocks_per_line, CV_64F);
Mat set_process_this_block_size = Mat::zeros(blocks_per_column, blocks_per_line, CV_64F);
if (block_size > block_size_min)
{
if (block_size < block_size_max)
{
set_todo = set_later;
}
border_width = cvFloor(fft_size - block_size) / 2;
icvGetTodoBlocks(sampled_img, sampling_mask, set_todo, block_size, block_size_min, border_width, threshold_stddev, set_process_this_block_size, set_later, sigma_n_array);
}
else
{
set_process_this_block_size.setTo(Scalar(255));
}
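// nen_array counts, for each block, how many of its eight neighboring blocks still contain
// distorted samples (positions outside the image are treated as unreconstructed, see the
// border handling below); blocks with the smallest count are extrapolated first, so that
// every block is reconstructed with as much reliable support as possible.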
// if block to be extrapolated, increase nen of neighboring pixels
for (int yblock_counter = 0; yblock_counter < blocks_per_column; ++yblock_counter)
{
for (int xblock_counter = 0; xblock_counter < blocks_per_line; ++xblock_counter)
{
Mat curr_block = sampling_mask(Range(yblock_counter*block_size, std::min(img_height, (yblock_counter + 1)*block_size)), Range(xblock_counter*block_size, std::min(img_width, (xblock_counter + 1)*block_size)));
double min_block, max_block;
minMaxLoc(curr_block, &min_block, &max_block);
if (min_block == 0)
{
if (yblock_counter > 0 && xblock_counter > 0)
{
nen_array.at<double>(yblock_counter - 1, xblock_counter - 1)++;
}
if (yblock_counter > 0)
{
nen_array.at<double>(yblock_counter - 1, xblock_counter)++;
}
if (yblock_counter > 0 && xblock_counter < (blocks_per_line - 1))
{
nen_array.at<double>(yblock_counter - 1, xblock_counter + 1)++;
}
if (xblock_counter > 0)
{
nen_array.at<double>(yblock_counter, xblock_counter - 1)++;
}
if (xblock_counter < (blocks_per_line - 1))
{
nen_array.at<double>(yblock_counter, xblock_counter + 1)++;
}
if (yblock_counter < (blocks_per_column - 1) && xblock_counter>0)
{
nen_array.at<double>(yblock_counter + 1, xblock_counter - 1)++;
}
if (yblock_counter < (blocks_per_column - 1))
{
nen_array.at<double>(yblock_counter + 1, xblock_counter)++;
}
if (yblock_counter < (blocks_per_column - 1) && xblock_counter < (blocks_per_line - 1))
{
nen_array.at<double>(yblock_counter + 1, xblock_counter + 1)++;
}
}
}
}
// determine if block itself has to be extrapolated
for (int yblock_counter = 0; yblock_counter < blocks_per_column; ++yblock_counter)
{
for (int xblock_counter = 0; xblock_counter < blocks_per_line; ++xblock_counter)
{
Mat curr_block = sampling_mask(Range(yblock_counter*block_size, std::min(img_height, (yblock_counter + 1)*block_size)), Range(xblock_counter*block_size, std::min(img_width, (xblock_counter + 1)*block_size)));
double min_block, max_block;
minMaxLoc(curr_block, &min_block, &max_block);
if (min_block != 0)
{
nen_array.at<double>(yblock_counter, xblock_counter) = -1;
}
else
{
// if border block, increase nen respectively
if (yblock_counter == 0 && xblock_counter == 0)
{
nen_array.at<double>(yblock_counter, xblock_counter) = nen_array.at<double>(yblock_counter, xblock_counter) + 5;
}
if (yblock_counter == 0 && xblock_counter == (blocks_per_line - 1))
{
nen_array.at<double>(yblock_counter, xblock_counter) = nen_array.at<double>(yblock_counter, xblock_counter) + 5;
}
if (yblock_counter == (blocks_per_column - 1) && xblock_counter == 0)
{
nen_array.at<double>(yblock_counter, xblock_counter) = nen_array.at<double>(yblock_counter, xblock_counter) + 5;
}
if (yblock_counter == (blocks_per_column - 1) && xblock_counter == (blocks_per_line - 1))
{
nen_array.at<double>(yblock_counter, xblock_counter) = nen_array.at<double>(yblock_counter, xblock_counter) + 5;
}
if (yblock_counter == 0 && xblock_counter != 0 && xblock_counter != (blocks_per_line - 1))
{
nen_array.at<double>(yblock_counter, xblock_counter) = nen_array.at<double>(yblock_counter, xblock_counter) + 3;
}
if (yblock_counter == (blocks_per_column - 1) && xblock_counter != 0 && xblock_counter != (blocks_per_line - 1))
{
nen_array.at<double>(yblock_counter, xblock_counter) = nen_array.at<double>(yblock_counter, xblock_counter) + 3;
}
if (yblock_counter != 0 && yblock_counter != (blocks_per_column - 1) && xblock_counter == 0)
{
nen_array.at<double>(yblock_counter, xblock_counter) = nen_array.at<double>(yblock_counter, xblock_counter) + 3;
}
if (yblock_counter != 0 && yblock_counter != (blocks_per_column - 1) && xblock_counter == (blocks_per_line - 1))
{
nen_array.at<double>(yblock_counter, xblock_counter) = nen_array.at<double>(yblock_counter, xblock_counter) + 3;
}
}
}
}
// if all blocks have 8 not extrapolated neighbors, penalize nen of blocks without any known samples by one
double min_nen_tmp, max_nen_tmp;
minMaxLoc(nen_array, &min_nen_tmp, &max_nen_tmp);
if (min_nen_tmp == 8) {
for (int yblock_counter = 0; yblock_counter < blocks_per_column; ++yblock_counter)
{
for (int xblock_counter = 0; xblock_counter < blocks_per_line; ++xblock_counter)
{
Mat curr_block = sampling_mask(Range(yblock_counter*block_size, std::min(img_height, (yblock_counter + 1)*block_size)), Range(xblock_counter*block_size, std::min(img_width, (xblock_counter + 1)*block_size)));
double min_block, max_block;
minMaxLoc(curr_block, &min_block, &max_block);
if (max_block == 0)
{
nen_array.at<double>(yblock_counter, xblock_counter)++;
}
}
}
}
// do actual processing per block
int all_blocks_finished = 0;
while (all_blocks_finished == 0) {
// clear proc_array
proc_array.setTo(Scalar(1));
// determine blocks to extrapolate
double min_nen = 99;
int bl_counter = 0;
// add all homogeneous blocks that shall be processed to list
// using same priority
// begins with highest priority or lowest nen array value
std::vector< std::tuple< int, int > > block_list;
for (int yblock_counter = 0; yblock_counter < blocks_per_column; ++yblock_counter)
{
for (int xblock_counter = 0; xblock_counter < blocks_per_line; ++xblock_counter)
{
// decision if block contains errors
double tmp_val = nen_array.at<double>(yblock_counter, xblock_counter);
if (tmp_val >= 0 && tmp_val < min_nen && set_process_this_block_size.at<double>(yblock_counter, xblock_counter) == 255) {
bl_counter = 0;
block_list.clear();
min_nen = tmp_val;
proc_array.setTo(Scalar(1));
}
if (tmp_val == min_nen && proc_array.at<double>(yblock_counter, xblock_counter) != 0 && set_process_this_block_size.at<double>(yblock_counter, xblock_counter) == 0) {
nen_array.at<double>(yblock_counter, xblock_counter) = -1;
}
if (tmp_val == min_nen && proc_array.at<double>(yblock_counter, xblock_counter) != 0 && set_process_this_block_size.at<double>(yblock_counter, xblock_counter) != 0) {
block_list.emplace_back(yblock_counter, xblock_counter);
bl_counter++;
// block neighboring blocks from processing
if (yblock_counter > 0 && xblock_counter > 0)
{
proc_array.at<double>(yblock_counter - 1, xblock_counter - 1) = 0;
}
if (yblock_counter > 0)
{
proc_array.at<double>(yblock_counter - 1, xblock_counter) = 0;
}
if (yblock_counter > 0 && xblock_counter > 0)
{
proc_array.at<double>(yblock_counter - 1, xblock_counter - 1) = 0;
}
if (yblock_counter > 0)
{
proc_array.at<double>(yblock_counter - 1, xblock_counter) = 0;
}
if (yblock_counter > 0 && xblock_counter < (blocks_per_line - 1))
{
proc_array.at<double>(yblock_counter - 1, xblock_counter + 1) = 0;
}
if (xblock_counter > 0)
{
proc_array.at<double>(yblock_counter, xblock_counter - 1) = 0;
}
if (xblock_counter < (blocks_per_line - 1))
{
proc_array.at<double>(yblock_counter, xblock_counter + 1) = 0;
}
if (yblock_counter < (blocks_per_column - 1) && xblock_counter > 0)
{
proc_array.at<double>(yblock_counter + 1, xblock_counter - 1) = 0;
}
if (yblock_counter < (blocks_per_column - 1))
{
proc_array.at<double>(yblock_counter + 1, xblock_counter) = 0;
}
if (yblock_counter < (blocks_per_column - 1) && xblock_counter < (blocks_per_line - 1))
{
proc_array.at<double>(yblock_counter + 1, xblock_counter + 1) = 0;
}
}
}
}
int max_bl_counter = bl_counter;
block_list.emplace_back(-1, -1);
if (bl_counter == 0)
{
all_blocks_finished = 1;
}
// blockwise extrapolation of all blocks that can be processed in parallel
for (bl_counter = 0; bl_counter < max_bl_counter; ++bl_counter)
{
int yblock_counter = std::get<0>(block_list[bl_counter]);
int xblock_counter = std::get<1>(block_list[bl_counter]);
// calculation of the extrapolation area's borders
int left_border = std::min(xblock_counter*block_size, border_width);
int top_border = std::min(yblock_counter*block_size, border_width);
int right_border = std::max(0, std::min(img_width - (xblock_counter + 1)*block_size, border_width));
int bottom_border = std::max(0, std::min(img_height - (yblock_counter + 1)*block_size, border_width));
// extract blocks from images
Mat distorted_block_2d = reconstructed_img(Range(yblock_counter*block_size - top_border, std::min(img_height, (yblock_counter*block_size + block_size + bottom_border))), Range(xblock_counter*block_size - left_border, std::min(img_width, (xblock_counter*block_size + block_size + right_border))));
Mat error_mask_2d = sampling_mask(Range(yblock_counter*block_size - top_border, std::min(img_height, (yblock_counter*block_size + block_size + bottom_border))), Range(xblock_counter*block_size - left_border, std::min(img_width, xblock_counter*block_size + block_size + right_border)));
// get actual stddev value as it is needed to estimate the
// best number of iterations
double sigma_n_a = sigma_n_array.at<double>(yblock_counter, xblock_counter);
// actual extrapolation
Mat extrapolated_block_2d;
icvExtrapolateBlock(distorted_block_2d, error_mask_2d, fsr_params, rho, sigma_n_a, extrapolated_block_2d);
// update image and mask
extrapolated_block_2d(Range(top_border, extrapolated_block_2d.rows - bottom_border), Range(left_border, extrapolated_block_2d.cols - right_border)).copyTo(reconstructed_img(Range(yblock_counter*block_size, std::min(img_height, (yblock_counter + 1)*block_size)), Range(xblock_counter*block_size, std::min(img_width, (xblock_counter + 1)*block_size))));
Mat signs;
icvSgnMat(error_mask_2d(Range(top_border, error_mask_2d.rows - bottom_border), Range(left_border, error_mask_2d.cols - right_border)), signs);
Mat tmp_mask = error_mask_2d(Range(top_border, error_mask_2d.rows - bottom_border), Range(left_border, error_mask_2d.cols - right_border)) + (1 - signs) *conc_weighting;
tmp_mask.copyTo(sampling_mask(Range(yblock_counter*block_size, std::min(img_height, (yblock_counter + 1)*block_size)), Range(xblock_counter*block_size, std::min(img_width, (xblock_counter + 1)*block_size))));
// update nen-array
nen_array.at<double>(yblock_counter, xblock_counter) = -1;
if (yblock_counter > 0 && xblock_counter > 0)
{
nen_array.at<double>(yblock_counter - 1, xblock_counter - 1)--;
}
if (yblock_counter > 0)
{
nen_array.at<double>(yblock_counter - 1, xblock_counter)--;
}
if (yblock_counter > 0 && xblock_counter < blocks_per_line - 1)
{
nen_array.at<double>(yblock_counter - 1, xblock_counter + 1)--;
}
if (xblock_counter > 0)
{
nen_array.at<double>(yblock_counter, xblock_counter - 1)--;
}
if (xblock_counter < blocks_per_line - 1)
{
nen_array.at<double>(yblock_counter, xblock_counter + 1)--;
}
if (yblock_counter < blocks_per_column - 1 && xblock_counter>0)
{
nen_array.at<double>(yblock_counter + 1, xblock_counter - 1)--;
}
if (yblock_counter < blocks_per_column - 1)
{
nen_array.at<double>(yblock_counter + 1, xblock_counter)--;
}
if (yblock_counter < blocks_per_column - 1 && xblock_counter < blocks_per_line - 1)
{
nen_array.at<double>(yblock_counter + 1, xblock_counter + 1)--;
}
}
}
// set parameters for next extrapolation tasks (higher texture)
block_size = block_size / 2;
border_width = (fft_size - block_size) / 2;
if (block_size == 8)
{
threshold_stddev = threshold_stddev_LUT[1];
rho = fsr_params.rhos[1];
}
if (block_size == 4)
{
threshold_stddev = threshold_stddev_LUT[2];
rho = fsr_params.rhos[2];
}
if (block_size == 2)
{
rho = fsr_params.rhos[3];
}
// terminate function - no heterogeneous blocks left
if (set_later.empty())
{
break;
}
}
}
static
void inpaint_fsr(const Mat &src, const Mat &mask, Mat &dst, const int algorithmType)
{
CV_Assert(algorithmType == xphoto::INPAINT_FSR_BEST || algorithmType == xphoto::INPAINT_FSR_FAST);
CV_Check(src.channels(), src.channels() == 1 || src.channels() == 3, "");
switch (src.type())
{
case CV_8UC1:
case CV_8UC3:
break;
case CV_16UC1:
case CV_16UC3:
{
double minRange, maxRange;
minMaxLoc(src, &minRange, &maxRange);
if (minRange < 0 || maxRange > 65535)
{
CV_Error(Error::StsUnsupportedFormat, "Unsupported source image format!");
break;
}
src.convertTo(src, CV_8U, 1/257.0);
break;
}
case CV_32FC1:
case CV_64FC1:
case CV_32FC3:
case CV_64FC3:
{
double minRange, maxRange;
minMaxLoc(src, &minRange, &maxRange);
if (minRange < -FLT_EPSILON || maxRange > (1.0 + FLT_EPSILON))
{
CV_Error(Error::StsUnsupportedFormat, "Unsupported source image format!");
break;
}
src.convertTo(src, CV_8U, 255.0);
break;
}
default:
CV_Error(Error::StsUnsupportedFormat, "Unsupported source image format!");
break;
}
dst.create(src.size(), src.type());
Mat mask_01;
threshold(mask, mask_01, 0.0, 1.0, THRESH_BINARY);
if (src.channels() == 1)
{ // grayscale image
Mat y_reconstructed;
icvDetermineProcessingOrder(src, mask_01, algorithmType, "Y", y_reconstructed);
y_reconstructed.convertTo(dst, CV_8U);
}
else if (src.channels() == 3)
{ // color (BGR) image
Mat ycrcb;
cvtColor(src, ycrcb, COLOR_BGR2YCrCb);
std::vector<Mat> channels(3);
split(ycrcb, channels);
Mat y = channels[0];
Mat cb = channels[2];
Mat cr = channels[1];
Mat y_reconstructed, cb_reconstructed, cr_reconstructed;
y = y.mul(mask_01);
cb = cb.mul(mask_01);
cr = cr.mul(mask_01);
icvDetermineProcessingOrder(y, mask_01, algorithmType, "Y", y_reconstructed);
icvDetermineProcessingOrder(cb, mask_01, algorithmType, "Cx", cb_reconstructed);
icvDetermineProcessingOrder(cr, mask_01, algorithmType, "Cx", cr_reconstructed);
Mat ycrcb_reconstructed;
y_reconstructed.convertTo(channels[0], CV_8U);
cr_reconstructed.convertTo(channels[1], CV_8U);
cb_reconstructed.convertTo(channels[2], CV_8U);
merge(channels, ycrcb_reconstructed);
cvtColor(ycrcb_reconstructed, dst, COLOR_YCrCb2BGR);
}
}
}} // namespace
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
using namespace xphoto;
static void test_inpainting(const Size inputSize, InpaintTypes mode, double expected_psnr, ImreadModes inputMode = IMREAD_COLOR)
{
string original_path = cvtest::findDataFile("cv/shared/lena.png");
string mask_path = cvtest::findDataFile("cv/inpaint/mask.png");
Mat original_ = imread(original_path, inputMode);
ASSERT_FALSE(original_.empty()) << "Could not load input image " << original_path;
Mat mask_ = imread(mask_path, IMREAD_GRAYSCALE);
ASSERT_FALSE(mask_.empty()) << "Could not load error mask " << mask_path;
Mat original, mask;
resize(original_, original, inputSize, 0.0, 0.0, INTER_AREA);
resize(mask_, mask, inputSize, 0.0, 0.0, INTER_NEAREST);
Mat mask_valid = (mask == 0);
Mat im_distorted(inputSize, original.type(), Scalar::all(0));
original.copyTo(im_distorted, mask_valid);
Mat reconstructed;
xphoto::inpaint(im_distorted, mask_valid, reconstructed, mode);
double adiff_psnr = cvtest::PSNR(original, reconstructed);
EXPECT_LE(expected_psnr, adiff_psnr);
#if 0
imshow("original", original);
imshow("im_distorted", im_distorted);
imshow("reconstructed", reconstructed);
std::cout << "adiff_psnr=" << adiff_psnr << std::endl;
waitKey();
#endif
}
TEST(xphoto_inpaint, smoke_FSR_FAST) // fast smoke test, input doesn't fit well for tested algorithm
{
test_inpainting(Size(128, 128), INPAINT_FSR_FAST, 30);
}
TEST(xphoto_inpaint, smoke_FSR_BEST) // fast smoke test, input doesn't fit well for tested algorithm
{
applyTestTag(CV_TEST_TAG_LONG);
test_inpainting(Size(128, 128), INPAINT_FSR_BEST, 30);
}
TEST(xphoto_inpaint, smoke_grayscale_FSR_FAST) // fast smoke test, input doesn't fit well for tested algorithm
{
test_inpainting(Size(128, 128), INPAINT_FSR_FAST, 30, IMREAD_GRAYSCALE);
}
TEST(xphoto_inpaint, smoke_grayscale_FSR_BEST) // fast smoke test, input doesn't fit well for tested algorithm
{
test_inpainting(Size(128, 128), INPAINT_FSR_BEST, 30, IMREAD_GRAYSCALE);
}
TEST(xphoto_inpaint, regression_FSR_FAST)
{
test_inpainting(Size(512, 512), INPAINT_FSR_FAST, 39.5);
}
TEST(xphoto_inpaint, regression_FSR_BEST)
{
applyTestTag(CV_TEST_TAG_VERYLONG); // add --test_tag_enable=verylong to run this test
test_inpainting(Size(512, 512), INPAINT_FSR_BEST, 39.6);
}
}} // namespace