Commit c9ace388 authored by Fedor Morozov, committed by Alexander Shishkov

Docs updated, added InputArray, fixes for makePtr,...

parent f99be6bd
@@ -39,4 +39,3 @@
 #7 & #8 & #9
 \end{bmatrix}
 }
@@ -110,7 +110,7 @@ bool HdrDecoder::checkSignature( const String& signature ) const
 ImageDecoder HdrDecoder::newDecoder() const
 {
-    return new HdrDecoder;
+    return makePtr<HdrDecoder>();
 }
 HdrEncoder::HdrEncoder()
@@ -154,7 +154,7 @@ bool HdrEncoder::write( const Mat& input_img, const std::vector<int>& params )
 ImageEncoder HdrEncoder::newEncoder() const
 {
-    return new HdrEncoder;
+    return makePtr<HdrEncoder>();
 }
 bool HdrEncoder::isFormatSupported( int depth ) const {
......
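The two hunks above swap bare `new` for cv::makePtr, which allocates the object and wraps it in a reference-counted cv::Ptr in one step. A minimal sketch of the pattern, not taken from this patch (MyDecoder is a placeholder type and the single-header include assumes the 3.0-style layout):

    #include <opencv2/core.hpp>

    struct MyDecoder {};   // placeholder type standing in for HdrDecoder etc.

    cv::Ptr<MyDecoder> newMyDecoder()
    {
        // Constructs MyDecoder and returns it already wrapped in a Ptr,
        // replacing the raw "return new MyDecoder;" form removed above.
        return cv::makePtr<MyDecoder>();
    }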
@@ -59,15 +59,10 @@ struct ImageCodecInitializer
 {
     ImageCodecInitializer()
     {
-<<<<<<< HEAD
-        decoders.push_back( new BmpDecoder );
-        encoders.push_back( new BmpEncoder );
-        decoders.push_back( new HdrDecoder );
-        encoders.push_back( new HdrEncoder );
-=======
         decoders.push_back( makePtr<BmpDecoder>() );
         encoders.push_back( makePtr<BmpEncoder>() );
->>>>>>> 99a43257d5912ff215016e1cf5f4e0c2a934b72f
+        decoders.push_back( makePtr<HdrDecoder>() );
+        encoders.push_back( makePtr<HdrEncoder>() );
     #ifdef HAVE_JPEG
         decoders.push_back( makePtr<JpegDecoder>() );
         encoders.push_back( makePtr<JpegEncoder>() );
......
@@ -83,7 +83,7 @@ enum rgbe_error_codes {
 };
 /* default error routine. change this to change error handling */
-static int rgbe_error(int rgbe_error_code, char *msg)
+static int rgbe_error(int rgbe_error_code, const char *msg)
 {
   switch (rgbe_error_code) {
   case rgbe_read_error:
@@ -119,7 +119,7 @@ float2rgbe(unsigned char rgbe[4], float red, float green, float blue)
     rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0;
   }
   else {
-    v = frexp(v,&e) * 256.0/v;
+    v = static_cast<float>(frexp(v,&e) * 256.0/v);
     rgbe[0] = (unsigned char) (red * v);
     rgbe[1] = (unsigned char) (green * v);
     rgbe[2] = (unsigned char) (blue * v);
@@ -136,7 +136,7 @@ rgbe2float(float *red, float *green, float *blue, unsigned char rgbe[4])
   float f;
   if (rgbe[3]) {   /*nonzero pixel*/
-    f = ldexp(1.0,rgbe[3]-(int)(128+8));
+    f = static_cast<float>(ldexp(1.0,rgbe[3]-(int)(128+8)));
     *red = rgbe[0] * f;
     *green = rgbe[1] * f;
     *blue = rgbe[2] * f;
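As a worked example of the shared-exponent decode in the hunk above (the pixel values are illustrative, not from the patch): a pixel stored as (R, G, B, E) = (128, 64, 32, 137) expands with f = ldexp(1.0, 137 - 136) = 2, giving (256, 128, 64) in linear float units.

    #include <cmath>
    #include <cstdio>

    int main()
    {
        unsigned char rgbe[4] = {128, 64, 32, 137};   // illustrative RGBE pixel
        // Same formula as rgbe2float above: scale = 2^(E - 128 - 8)
        float f = static_cast<float>(std::ldexp(1.0, rgbe[3] - (int)(128 + 8)));  // 2.0f
        std::printf("%f %f %f\n", rgbe[0] * f, rgbe[1] * f, rgbe[2] * f);          // 256 128 64
        return 0;
    }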
@@ -148,7 +148,7 @@ rgbe2float(float *red, float *green, float *blue, unsigned char rgbe[4])
 /* default minimal header. modify if you want more information in header */
 int RGBE_WriteHeader(FILE *fp, int width, int height, rgbe_header_info *info)
 {
-  char *programtype = "RGBE";
+  const char *programtype = "RGBE";
   if (info && (info->valid & RGBE_VALID_PROGRAMTYPE))
     programtype = info->programtype;
@@ -174,11 +174,9 @@ int RGBE_WriteHeader(FILE *fp, int width, int height, rgbe_header_info *info)
 int RGBE_ReadHeader(FILE *fp, int *width, int *height, rgbe_header_info *info)
 {
   char buf[128];
-  int found_format;
   float tempf;
   int i;
-  found_format = 0;
   if (info) {
     info->valid = 0;
     info->programtype[0] = 0;
@@ -287,7 +285,7 @@ static int RGBE_WriteBytes_RLE(FILE *fp, unsigned char *data, int numbytes)
     }
     /* if data before next big run is a short run then write it as such */
    if ((old_run_count > 1)&&(old_run_count == beg_run - cur)) {
-      buf[0] = 128 + old_run_count;   /*write short run*/
+      buf[0] = static_cast<unsigned char>(128 + old_run_count);   /*write short run*/
      buf[1] = data[cur];
      if (fwrite(buf,sizeof(buf[0])*2,1,fp) < 1)
        return rgbe_error(rgbe_write_error,NULL);
@@ -298,7 +296,7 @@ static int RGBE_WriteBytes_RLE(FILE *fp, unsigned char *data, int numbytes)
      nonrun_count = beg_run - cur;
      if (nonrun_count > 128)
        nonrun_count = 128;
-      buf[0] = nonrun_count;
+      buf[0] = static_cast<unsigned char>(nonrun_count);
      if (fwrite(buf,sizeof(buf[0]),1,fp) < 1)
        return rgbe_error(rgbe_write_error,NULL);
      if (fwrite(&data[cur],sizeof(data[0])*nonrun_count,1,fp) < 1)
@@ -307,7 +305,7 @@ static int RGBE_WriteBytes_RLE(FILE *fp, unsigned char *data, int numbytes)
    }
    /* write out next run if one was found */
    if (run_count >= MINRUNLENGTH) {
-      buf[0] = 128 + run_count;
+      buf[0] = static_cast<unsigned char>(128 + run_count);
      buf[1] = data[beg_run];
      if (fwrite(buf,sizeof(buf[0])*2,1,fp) < 1)
        return rgbe_error(rgbe_write_error,NULL);
@@ -335,7 +333,7 @@ int RGBE_WritePixels_RLE(FILE *fp, float *data, int scanline_width,
  while(num_scanlines-- > 0) {
    rgbe[0] = 2;
    rgbe[1] = 2;
-    rgbe[2] = scanline_width >> 8;
+    rgbe[2] = static_cast<unsigned char>(scanline_width >> 8);
    rgbe[3] = scanline_width & 0xFF;
    if (fwrite(rgbe, sizeof(rgbe), 1, fp) < 1) {
      free(buffer);
@@ -450,4 +448,3 @@ int RGBE_ReadPixels_RLE(FILE *fp, float *data, int scanline_width,
  free(scanline_buffer);
  return RGBE_RETURN_SUCCESS;
 }
@@ -87,6 +87,3 @@ int RGBE_ReadPixels_RLE(FILE *fp, float *data, int scanline_width,
  int num_scanlines);
 #endif/*_RGBE_HDR_H_*/
@@ -479,11 +479,7 @@ TEST(Highgui_WebP, encode_decode_lossless_webp)
 TEST(Highgui_WebP, encode_decode_lossy_webp)
 {
     cvtest::TS& ts = *cvtest::TS::ptr();
-<<<<<<< HEAD
-    string input = string(ts.get_data_path()) + "/../cv/shared/lena.png";
-=======
     std::string input = std::string(ts.get_data_path()) + "../cv/shared/lena.png";
->>>>>>> 99a43257d5912ff215016e1cf5f4e0c2a934b72f
     cv::Mat img = cv::imread(input);
     ASSERT_FALSE(img.empty());
......
@@ -46,7 +46,7 @@ createTonemapDrago
 ---------------------------
 Creates TonemapDrago object
-.. ocv:function:: Ptr<TonemapDrago> createTonemapDrago(float gamma = 1.0f, float bias = 0.85f)
+.. ocv:function:: Ptr<TonemapDrago> createTonemapDrago(float gamma = 1.0f, float saturation = 1.0f, float bias = 0.85f)
     :param gamma: gamma value for gamma correction. See :ocv:func:`createTonemap`
@@ -82,9 +82,9 @@ Creates TonemapDurand object
     :param sigma_color: bilateral filter sigma in coordinate space
-TonemapReinhardDevlin
+TonemapReinhard
 ---------------------------
-.. ocv:class:: TonemapReinhardDevlin : public Tonemap
+.. ocv:class:: TonemapReinhard : public Tonemap
 This is a global tonemapping operator that models human visual system.
@@ -92,11 +92,11 @@ Mapping function is controlled by adaptation parameter, that is computed using l
 For more information see [RD05]_.
-createTonemapReinhardDevlin
+createTonemapReinhard
 ---------------------------
-Creates TonemapReinhardDevlin object
+Creates TonemapReinhard object
-.. ocv:function:: Ptr<TonemapReinhardDevlin> createTonemapReinhardDevlin(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f)
+.. ocv:function:: Ptr<TonemapReinhard> createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f)
     :param gamma: gamma value for gamma correction. See :ocv:func:`createTonemap`
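A hedged usage sketch of the renamed tone mapper, not part of this patch (the file names are placeholders and the includes assume the 3.0-style single headers, with imread/imwrite still living in highgui at this point):

    #include <opencv2/photo.hpp>
    #include <opencv2/highgui.hpp>

    int main()
    {
        cv::Mat hdr = cv::imread("memorial.hdr", -1);   // 32-bit float HDR input (placeholder file)
        cv::Ptr<cv::TonemapReinhard> mapper =
            cv::createTonemapReinhard(1.0f, 0.0f, 1.0f, 0.0f);  // gamma, intensity, light_adapt, color_adapt
        cv::Mat ldr, ldr8;
        mapper->process(hdr, ldr);                      // tone-mapped result, roughly in [0, 1]
        ldr.convertTo(ldr8, CV_8UC3, 255);
        cv::imwrite("ldr.png", ldr8);
        return 0;
    }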
@@ -127,17 +127,17 @@ Creates TonemapMantiuk object
     :param saturation: saturation enhancement value. See :ocv:func:`createTonemapDrago`
-ExposureAlign
+AlignExposures
 ---------------------------
-.. ocv:class:: ExposureAlign : public Algorithm
+.. ocv:class:: AlignExposures : public Algorithm
 The base class for algorithms that align images of the same scene with different exposures
-ExposureAlign::process
+AlignExposures::process
 ---------------------------
 Aligns images
-.. ocv:function:: void ExposureAlign::process(InputArrayOfArrays src, OutputArrayOfArrays dst, const std::vector<float>& times, InputArray response)
+.. ocv:function:: void AlignExposures::process(InputArrayOfArrays src, std::vector<Mat>& dst, InputArray times, InputArray response)
     :param src: vector of input images
@@ -149,7 +149,7 @@ Aligns images
 AlignMTB
 ---------------------------
-.. ocv:class:: AlignMTB : public ExposureAlign
+.. ocv:class:: AlignMTB : public AlignExposures
 This algorithm converts images to median threshold bitmaps (1 for pixels brighter than median luminance and 0 otherwise) and than aligns the resulting bitmaps using bit operations.
@@ -163,7 +163,7 @@ AlignMTB::process
 ---------------------------
 Short version of process, that doesn't take extra arguments.
-.. ocv:function:: void AlignMTB::process(InputArrayOfArrays src, OutputArrayOfArrays dst)
+.. ocv:function:: void AlignMTB::process(InputArrayOfArrays src, std::vector<Mat>& dst)
     :param src: vector of input images
@@ -173,14 +173,12 @@ AlignMTB::calculateShift
 ---------------------------
 Calculates shift between two images, i. e. how to shift the second image to correspond it with the first.
-.. ocv:function:: void AlignMTB::calculateShift(InputArray img0, InputArray img1, Point& shift)
+.. ocv:function:: Point AlignMTB::calculateShift(InputArray img0, InputArray img1)
     :param img0: first image
     :param img1: second image
-    :param shift: calculated shift
 AlignMTB::shiftMat
 ---------------------------
 Helper function, that shift Mat filling new regions with zeros.
@@ -197,7 +195,7 @@ AlignMTB::computeBitmaps
 ---------------------------
 Computes median threshold and exclude bitmaps of given image.
-.. ocv:function:: void computeBitmaps(Mat& img, Mat& tb, Mat& eb)
+.. ocv:function:: void AlignMTB::computeBitmaps(InputArray img, OutputArray tb, OutputArray eb)
     :param img: input image
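A short sketch of the updated AlignMTB interface documented above (not from the patch; img0 and img1 stand for 8-bit grayscale images of the same scene loaded elsewhere):

    #include <opencv2/photo.hpp>

    cv::Point alignPair(const cv::Mat& img0, const cv::Mat& img1, cv::Mat& aligned)
    {
        cv::Ptr<cv::AlignMTB> align = cv::createAlignMTB(6, 4, true);  // max_bits, exclude_range, cut
        cv::Point shift = align->calculateShift(img0, img1);           // now returned, not an output parameter
        align->shiftMat(img1, aligned, shift);                         // new regions are filled with zeros
        return shift;
    }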
@@ -217,17 +215,17 @@ Creates AlignMTB object
     :param cut: if true cuts images, otherwise fills the new regions with zeros.
-ExposureCalibrate
+CalibrateCRF
 ---------------------------
-.. ocv:class:: ExposureCalibrate : public Algorithm
+.. ocv:class:: CalibrateCRF : public Algorithm
 The base class for camera response calibration algorithms.
-ExposureCalibrate::process
+CalibrateCRF::process
 ---------------------------
 Recovers inverse camera response.
-.. ocv:function:: void ExposureCalibrate::process(InputArrayOfArrays src, OutputArray dst, std::vector<float>& times)
+.. ocv:function:: void CalibrateCRF::process(InputArrayOfArrays src, OutputArray dst, InputArray times)
     :param src: vector of input images
@@ -237,7 +235,7 @@ Recovers inverse camera response.
 CalibrateDebevec
 ---------------------------
-.. ocv:class:: CalibrateDebevec : public ExposureCalibrate
+.. ocv:class:: CalibrateDebevec : public CalibrateCRF
 Inverse camera response function is extracted for each brightness value by minimizing an objective function as linear system.
 Objective function is constructed using pixel values on the same position in all images, extra term is added to make the result smoother.
@@ -258,7 +256,7 @@ Creates CalibrateDebevec object
 CalibrateRobertson
 ---------------------------
-.. ocv:class:: CalibrateRobertson : public ExposureCalibrate
+.. ocv:class:: CalibrateRobertson : public CalibrateCRF
 Inverse camera response function is extracted for each brightness value by minimizing an objective function as linear system.
 This algorithm uses all image pixels.
@@ -275,17 +273,17 @@ Creates CalibrateRobertson object
     :param threshold: target difference between results of two successive steps of the minimization.
-ExposureMerge
+MergeExposures
 ---------------------------
-.. ocv:class:: ExposureMerge : public Algorithm
+.. ocv:class:: MergeExposures : public Algorithm
 The base class algorithms that can merge exposure sequence to a single image.
-ExposureMerge::process
+MergeExposures::process
 ---------------------------
 Merges images.
-.. ocv:function:: void process(InputArrayOfArrays src, OutputArray dst, const std::vector<float>& times, InputArray response)
+.. ocv:function:: void MergeExposures::process(InputArrayOfArrays src, OutputArray dst, InputArray times, InputArray response)
     :param src: vector of input images
@@ -297,7 +295,7 @@ Merges images.
 MergeDebevec
 ---------------------------
-.. ocv:class:: MergeDebevec : public ExposureMerge
+.. ocv:class:: MergeDebevec : public MergeExposures
 The resulting HDR image is calculated as weighted average of the exposures considering exposure values and camera response.
@@ -311,7 +309,7 @@ Creates MergeDebevec object
 MergeMertens
 ---------------------------
-.. ocv:class:: MergeMertens : public ExposureMerge
+.. ocv:class:: MergeMertens : public MergeExposures
 Pixels are weighted using contrast, saturation and well-exposedness measures, than images are combined using laplacian pyramids.
@@ -345,7 +343,7 @@ Creates MergeMertens object
 MergeRobertson
 ---------------------------
-.. ocv:class:: MergeRobertson : public ExposureMerge
+.. ocv:class:: MergeRobertson : public MergeExposures
 The resulting HDR image is calculated as weighted average of the exposures considering exposure values and camera response.
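Putting the renamed calibration and merge classes together, a hedged end-to-end sketch (not from the patch); with the new signatures the exposure times can be passed directly as a std::vector<float>, which is accepted through InputArray:

    #include <opencv2/photo.hpp>
    #include <vector>

    // images: 8-bit exposures of the same scene, times: exposure times in seconds
    // (both assumed to be filled elsewhere).
    cv::Mat makeHdr(const std::vector<cv::Mat>& images, const std::vector<float>& times)
    {
        cv::Mat response;
        cv::Ptr<cv::CalibrateDebevec> calibrate = cv::createCalibrateDebevec();
        calibrate->process(images, response, times);   // recover the inverse camera response

        cv::Mat hdr;
        cv::Ptr<cv::MergeDebevec> merge = cv::createMergeDebevec();
        merge->process(images, hdr, times, response);  // weighted average in radiance space
        return hdr;
    }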
@@ -364,7 +362,7 @@ References
 .. [FL02] R. Fattal, D. Lischinski, M. Werman, "Gradient Domain High Dynamic Range Compression", Proceedings OF ACM SIGGRAPH, 2002, 249 - 256.
-.. [DD02] F. Durand and Julie Dorsey, "Fast Bilateral Filtering for the Display of High-Dynamic-Range Images", ACM Transactions on Graphics, 2002, 21, 3, 257 - –266.
+.. [DD02] F. Durand and Julie Dorsey, "Fast Bilateral Filtering for the Display of High-Dynamic-Range Images", ACM Transactions on Graphics, 2002, 21, 3, 257 - 266.
 .. [RD05] E. Reinhard, K. Devlin, "Dynamic Range Reduction Inspired by Photoreceptor Physiology", IEEE Transactions on Visualization and Computer Graphics, 2005, 11, 13 - 24.
......
@@ -132,7 +132,7 @@ createTonemapDurand(float gamma = 1.0f, float contrast = 4.0f, float saturation
 // "Dynamic Range Reduction Inspired by Photoreceptor Physiology", Reinhard, Devlin, 2005
-class CV_EXPORTS_W TonemapReinhardDevlin : public Tonemap
+class CV_EXPORTS_W TonemapReinhard : public Tonemap
 {
 public:
     CV_WRAP virtual float getIntensity() const = 0;
@@ -145,8 +145,8 @@ public:
     CV_WRAP virtual void setColorAdaptation(float color_adapt) = 0;
 };
-CV_EXPORTS_W Ptr<TonemapReinhardDevlin>
-createTonemapReinhardDevlin(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f);
+CV_EXPORTS_W Ptr<TonemapReinhard>
+createTonemapReinhard(float gamma = 1.0f, float intensity = 0.0f, float light_adapt = 1.0f, float color_adapt = 0.0f);
 // "Perceptual Framework for Contrast Processing of High Dynamic Range Images", Mantiuk et al., 2006
@@ -163,26 +163,26 @@ public:
 CV_EXPORTS_W Ptr<TonemapMantiuk>
 createTonemapMantiuk(float gamma = 1.0f, float scale = 0.7f, float saturation = 1.0f);
-class CV_EXPORTS_W ExposureAlign : public Algorithm
+class CV_EXPORTS_W AlignExposures : public Algorithm
 {
 public:
     CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
-                                 const std::vector<float>& times, InputArray response) = 0;
+                                 InputArray times, InputArray response) = 0;
 };
 // "Fast, Robust Image Registration for Compositing High Dynamic Range Photographs from Handheld Exposures", Ward, 2003
-class CV_EXPORTS_W AlignMTB : public ExposureAlign
+class CV_EXPORTS_W AlignMTB : public AlignExposures
 {
 public:
     CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst,
-                                 const std::vector<float>& times, InputArray response) = 0;
+                                 InputArray times, InputArray response) = 0;
     CV_WRAP virtual void process(InputArrayOfArrays src, std::vector<Mat>& dst) = 0;
-    CV_WRAP virtual void calculateShift(InputArray img0, InputArray img1, Point& shift) = 0;
+    CV_WRAP virtual Point calculateShift(InputArray img0, InputArray img1) = 0;
     CV_WRAP virtual void shiftMat(InputArray src, OutputArray dst, const Point shift) = 0;
-    CV_WRAP virtual void computeBitmaps(Mat& img, Mat& tb, Mat& eb) = 0;
+    CV_WRAP virtual void computeBitmaps(InputArray img, OutputArray tb, OutputArray eb) = 0;
     CV_WRAP virtual int getMaxBits() const = 0;
     CV_WRAP virtual void setMaxBits(int max_bits) = 0;
@@ -196,15 +196,15 @@ public:
 CV_EXPORTS_W Ptr<AlignMTB> createAlignMTB(int max_bits = 6, int exclude_range = 4, bool cut = true);
-class CV_EXPORTS_W ExposureCalibrate : public Algorithm
+class CV_EXPORTS_W CalibrateCRF : public Algorithm
 {
 public:
-    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, std::vector<float>& times) = 0;
+    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
 };
 // "Recovering High Dynamic Range Radiance Maps from Photographs", Debevec, Malik, 1997
-class CV_EXPORTS_W CalibrateDebevec : public ExposureCalibrate
+class CV_EXPORTS_W CalibrateDebevec : public CalibrateCRF
 {
 public:
     CV_WRAP virtual float getLambda() const = 0;
@@ -221,7 +221,7 @@ CV_EXPORTS_W Ptr<CalibrateDebevec> createCalibrateDebevec(int samples = 70, floa
 // "Dynamic range improvement through multiple exposures", Robertson et al., 1999
-class CV_EXPORTS_W CalibrateRobertson : public ExposureCalibrate
+class CV_EXPORTS_W CalibrateRobertson : public CalibrateCRF
 {
 public:
     CV_WRAP virtual int getMaxIter() const = 0;
@@ -235,32 +235,32 @@ public:
 CV_EXPORTS_W Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter = 30, float threshold = 0.01f);
-class CV_EXPORTS_W ExposureMerge : public Algorithm
+class CV_EXPORTS_W MergeExposures : public Algorithm
 {
 public:
     CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
-                                 const std::vector<float>& times, InputArray response) = 0;
+                                 InputArray times, InputArray response) = 0;
 };
 // "Recovering High Dynamic Range Radiance Maps from Photographs", Debevec, Malik, 1997
-class CV_EXPORTS_W MergeDebevec : public ExposureMerge
+class CV_EXPORTS_W MergeDebevec : public MergeExposures
 {
 public:
     CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
-                                 const std::vector<float>& times, InputArray response) = 0;
+                                 InputArray times, InputArray response) = 0;
-    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, const std::vector<float>& times) = 0;
+    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
 };
 CV_EXPORTS_W Ptr<MergeDebevec> createMergeDebevec();
 // "Exposure Fusion", Mertens et al., 2007
-class CV_EXPORTS_W MergeMertens : public ExposureMerge
+class CV_EXPORTS_W MergeMertens : public MergeExposures
 {
 public:
     CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
-                                 const std::vector<float>& times, InputArray response) = 0;
+                                 InputArray times, InputArray response) = 0;
     CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst) = 0;
     CV_WRAP virtual float getContrastWeight() const = 0;
@@ -278,12 +278,12 @@ createMergeMertens(float contrast_weight = 1.0f, float saturation_weight = 1.0f,
 // "Dynamic range improvement through multiple exposures", Robertson et al., 1999
-class CV_EXPORTS_W MergeRobertson : public ExposureMerge
+class CV_EXPORTS_W MergeRobertson : public MergeExposures
 {
 public:
     CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst,
-                                 const std::vector<float>& times, InputArray response) = 0;
+                                 InputArray times, InputArray response) = 0;
-    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, const std::vector<float>& times) = 0;
+    CV_WRAP virtual void process(InputArrayOfArrays src, OutputArray dst, InputArray times) = 0;
 };
 CV_EXPORTS_W Ptr<MergeRobertson> createMergeRobertson();
......
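For completeness, the Mertens fusion path exposed by this header needs neither times nor a response curve, so the short overload is enough. A sketch under that assumption (images is an 8-bit exposure sequence filled elsewhere; not part of the patch):

    #include <opencv2/photo.hpp>
    #include <vector>

    cv::Mat exposureFusion(const std::vector<cv::Mat>& images)
    {
        cv::Ptr<cv::MergeMertens> mertens = cv::createMergeMertens();  // default weights
        cv::Mat fusion;
        mertens->process(images, fusion);   // per-pixel weighted blend, roughly in [0, 1]
        return fusion;
    }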
@@ -50,16 +50,16 @@ namespace cv
 class AlignMTBImpl : public AlignMTB
 {
 public:
-    AlignMTBImpl(int max_bits, int exclude_range, bool cut) :
-        max_bits(max_bits),
-        exclude_range(exclude_range),
-        cut(cut),
-        name("AlignMTB")
+    AlignMTBImpl(int _max_bits, int _exclude_range, bool _cut) :
+        name("AlignMTB"),
+        max_bits(_max_bits),
+        exclude_range(_exclude_range),
+        cut(_cut)
     {
     }
     void process(InputArrayOfArrays src, std::vector<Mat>& dst,
-                 const std::vector<float>& times, InputArray response)
+                 InputArray, InputArray)
     {
         process(src, dst);
     }
@@ -85,8 +85,7 @@ public:
             }
             Mat gray;
             cvtColor(src[i], gray, COLOR_RGB2GRAY);
-            Point shift;
-            calculateShift(gray_base, gray, shift);
+            Point shift = calculateShift(gray_base, gray);
             shifts.push_back(shift);
             shiftMat(src[i], dst[i], shift);
         }
@@ -113,7 +112,7 @@ public:
         }
     }
-    void calculateShift(InputArray _img0, InputArray _img1, Point& shift)
+    Point calculateShift(InputArray _img0, InputArray _img1)
     {
         Mat img0 = _img0.getMat();
         Mat img1 = _img1.getMat();
@@ -128,7 +127,7 @@ public:
         buildPyr(img0, pyr0, maxlevel);
         buildPyr(img1, pyr1, maxlevel);
-        shift = Point(0, 0);
+        Point shift(0, 0);
         for(int level = maxlevel; level >= 0; level--) {
             shift *= 2;
@@ -156,6 +155,7 @@ public:
             }
             shift = new_shift;
         }
+        return shift;
     }
     void shiftMat(InputArray _src, OutputArray _dst, const Point shift)
@@ -197,11 +197,15 @@ public:
         max_bits = fn["max_bits"];
         exclude_range = fn["exclude_range"];
         int cut_val = fn["cut"];
-        cut = static_cast<bool>(cut_val);
+        cut = (cut_val != 0);
     }
-    void computeBitmaps(Mat& img, Mat& tb, Mat& eb)
+    void computeBitmaps(InputArray _img, OutputArray _tb, OutputArray _eb)
     {
+        Mat img = _img.getMat();
+        _tb.create(img.size(), CV_8U);
+        _eb.create(img.size(), CV_8U);
+        Mat tb = _tb.getMat(), eb = _eb.getMat();
         int median = getMedian(img);
         compare(img, median, tb, CMP_GT);
         compare(abs(img - median), exclude_range, eb, CMP_GT);
@@ -260,8 +264,7 @@ protected:
 Ptr<AlignMTB> createAlignMTB(int max_bits, int exclude_range, bool cut)
 {
-    return new AlignMTBImpl(max_bits, exclude_range, cut);
+    return makePtr<AlignMTBImpl>(max_bits, exclude_range, cut);
 }
} }
@@ -52,21 +52,22 @@ namespace cv
 class CalibrateDebevecImpl : public CalibrateDebevec
 {
 public:
-    CalibrateDebevecImpl(int samples, float lambda, bool random) :
-        samples(samples),
-        lambda(lambda),
+    CalibrateDebevecImpl(int _samples, float _lambda, bool _random) :
         name("CalibrateDebevec"),
-        w(tringleWeights()),
-        random(random)
+        samples(_samples),
+        lambda(_lambda),
+        random(_random),
+        w(tringleWeights())
     {
     }
-    void process(InputArrayOfArrays src, OutputArray dst, std::vector<float>& times)
+    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times)
     {
         std::vector<Mat> images;
         src.getMatVector(images);
+        Mat times = _times.getMat();
-        CV_Assert(images.size() == times.size());
+        CV_Assert(images.size() == times.total());
         checkImageDimensions(images);
         CV_Assert(images[0].depth() == CV_8U);
@@ -82,7 +83,7 @@ public:
                 sample_points.push_back(Point(rand() % images[0].cols, rand() % images[0].rows));
             }
         } else {
-            int x_points = sqrt(static_cast<double>(samples) * images[0].cols / images[0].rows);
+            int x_points = static_cast<int>(sqrt(static_cast<double>(samples) * images[0].cols / images[0].rows));
             int y_points = samples / x_points;
             int step_x = images[0].cols / x_points;
             int step_y = images[0].rows / y_points;
@@ -106,7 +107,7 @@ public:
                 int val = images[j].ptr()[3*(sample_points[i].y * images[j].cols + sample_points[j].x) + channel];
                 A.at<float>(eq, val) = w.at<float>(val);
                 A.at<float>(eq, LDR_SIZE + i) = -w.at<float>(val);
-                B.at<float>(eq, 0) = w.at<float>(val) * log(times[j]);
+                B.at<float>(eq, 0) = w.at<float>(val) * log(times.at<float>(j));
                 eq++;
             }
         }
@@ -151,7 +152,7 @@ public:
         samples = fn["samples"];
         lambda = fn["lambda"];
         int random_val = fn["random"];
-        random = static_cast<bool>(random_val);
+        random = (random_val != 0);
     }
 protected:
@@ -164,26 +165,27 @@ protected:
 Ptr<CalibrateDebevec> createCalibrateDebevec(int samples, float lambda, bool random)
 {
-    return new CalibrateDebevecImpl(samples, lambda, random);
+    return makePtr<CalibrateDebevecImpl>(samples, lambda, random);
 }
 class CalibrateRobertsonImpl : public CalibrateRobertson
 {
 public:
-    CalibrateRobertsonImpl(int max_iter, float threshold) :
-        max_iter(max_iter),
-        threshold(threshold),
+    CalibrateRobertsonImpl(int _max_iter, float _threshold) :
         name("CalibrateRobertson"),
+        max_iter(_max_iter),
+        threshold(_threshold),
         weight(RobertsonWeights())
     {
     }
-    void process(InputArrayOfArrays src, OutputArray dst, std::vector<float>& times)
+    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times)
     {
         std::vector<Mat> images;
         src.getMatVector(images);
+        Mat times = _times.getMat();
-        CV_Assert(images.size() == times.size());
+        CV_Assert(images.size() == times.total());
         checkImageDimensions(images);
         CV_Assert(images[0].depth() == CV_8U);
@@ -217,7 +219,7 @@ public:
         float* rad_ptr = radiance.ptr<float>();
         for(size_t pos = 0; pos < images[i].total(); pos++) {
             for(int c = 0; c < channels; c++, ptr++, rad_ptr++) {
-                new_response.at<Vec3f>(*ptr)[c] += times[i] * *rad_ptr;
+                new_response.at<Vec3f>(*ptr)[c] += times.at<float>(i) * *rad_ptr;
             }
         }
     }
@@ -228,7 +230,7 @@ public:
             new_response.at<Vec3f>(i)[c] /= middle;
         }
     }
-    float diff = sum(sum(abs(new_response - response)))[0] / channels;
+    float diff = static_cast<float>(sum(sum(abs(new_response - response)))[0] / channels);
     new_response.copyTo(response);
     if(diff < threshold) {
         break;
@@ -268,7 +270,7 @@ protected:
 Ptr<CalibrateRobertson> createCalibrateRobertson(int max_iter, float threshold)
 {
-    return new CalibrateRobertsonImpl(max_iter, threshold);
+    return makePtr<CalibrateRobertsonImpl>(max_iter, threshold);
 }
 }
@@ -97,7 +97,7 @@ Mat linearResponse(int channels)
 {
     Mat response = Mat(LDR_SIZE, 1, CV_MAKETYPE(CV_32F, channels));
     for(int i = 0; i < LDR_SIZE; i++) {
-        response.at<Vec3f>(i) = Vec3f::all(i);
+        response.at<Vec3f>(i) = Vec3f::all(static_cast<float>(i));
    }
    return response;
 }
......
@@ -56,12 +56,13 @@ public:
     {
     }
-    void process(InputArrayOfArrays src, OutputArray dst, const std::vector<float>& times, InputArray input_response)
+    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
     {
         std::vector<Mat> images;
         src.getMatVector(images);
+        Mat times = _times.getMat();
-        CV_Assert(images.size() == times.size());
+        CV_Assert(images.size() == times.total());
         checkImageDimensions(images);
         CV_Assert(images[0].depth() == CV_8U);
@@ -117,7 +118,7 @@ public:
         exp(result, result);
     }
-    void process(InputArrayOfArrays src, OutputArray dst, const std::vector<float>& times)
+    void process(InputArrayOfArrays src, OutputArray dst, InputArray times)
     {
         process(src, dst, times, Mat());
     }
@@ -129,21 +130,21 @@ protected:
 Ptr<MergeDebevec> createMergeDebevec()
 {
-    return new MergeDebevecImpl;
+    return makePtr<MergeDebevecImpl>();
 }
 class MergeMertensImpl : public MergeMertens
 {
 public:
-    MergeMertensImpl(float wcon, float wsat, float wexp) :
-        wcon(wcon),
-        wsat(wsat),
-        wexp(wexp),
-        name("MergeMertens")
+    MergeMertensImpl(float _wcon, float _wsat, float _wexp) :
+        name("MergeMertens"),
+        wcon(_wcon),
+        wsat(_wsat),
+        wexp(_wexp)
     {
     }
-    void process(InputArrayOfArrays src, OutputArrayOfArrays dst, const std::vector<float>& times, InputArray response)
+    void process(InputArrayOfArrays src, OutputArrayOfArrays dst, InputArray, InputArray)
     {
         process(src, dst);
     }
@@ -283,7 +284,7 @@ protected:
 Ptr<MergeMertens> createMergeMertens(float wcon, float wsat, float wexp)
 {
-    return new MergeMertensImpl(wcon, wsat, wexp);
+    return makePtr<MergeMertensImpl>(wcon, wsat, wexp);
 }
 class MergeRobertsonImpl : public MergeRobertson
@@ -295,12 +296,13 @@ public:
     {
     }
-    void process(InputArrayOfArrays src, OutputArray dst, const std::vector<float>& times, InputArray input_response)
+    void process(InputArrayOfArrays src, OutputArray dst, InputArray _times, InputArray input_response)
     {
         std::vector<Mat> images;
         src.getMatVector(images);
+        Mat times = _times.getMat();
-        CV_Assert(images.size() == times.size());
+        CV_Assert(images.size() == times.total());
         checkImageDimensions(images);
         CV_Assert(images[0].depth() == CV_8U);
@@ -312,7 +314,8 @@ public:
         Mat response = input_response.getMat();
         if(response.empty()) {
-            response = linearResponse(channels) / (LDR_SIZE / 2.0f);
+            float middle = LDR_SIZE / 2.0f;
+            response = linearResponse(channels) / middle;
         }
         CV_Assert(response.rows == LDR_SIZE && response.cols == 1 &&
                   response.channels() == channels);
@@ -324,13 +327,13 @@ public:
             LUT(images[i], weight, w);
             LUT(images[i], response, im);
-            result += times[i] * w.mul(im);
-            wsum += pow(times[i], 2) * w;
+            result += times.at<float>(i) * w.mul(im);
+            wsum += times.at<float>(i) * times.at<float>(i) * w;
         }
         result = result.mul(1 / wsum);
     }
-    void process(InputArrayOfArrays src, OutputArray dst, const std::vector<float>& times)
+    void process(InputArrayOfArrays src, OutputArray dst, InputArray times)
     {
         process(src, dst, times, Mat());
     }
@@ -342,7 +345,7 @@ protected:
 Ptr<MergeRobertson> createMergeRobertson()
 {
-    return new MergeRobertsonImpl;
+    return makePtr<MergeRobertsonImpl>();
 }
 }
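For reference, the accumulation in MergeRobertsonImpl above (result += t_i * w * f, wsum += t_i^2 * w, then result /= wsum) corresponds to the Robertson-style radiance estimate

    E(p) = \frac{\sum_i t_i \, w(Z_i(p)) \, f(Z_i(p))}{\sum_i t_i^2 \, w(Z_i(p))}

where Z_i(p) is the 8-bit value of pixel p in exposure i, t_i its exposure time, w the weighting LUT and f the inverse camera response (linear if none is supplied).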
@@ -50,7 +50,7 @@ namespace cv
 class TonemapImpl : public Tonemap
 {
 public:
-    TonemapImpl(float gamma) : gamma(gamma), name("Tonemap")
+    TonemapImpl(float _gamma) : name("Tonemap"), gamma(_gamma)
     {
     }
@@ -95,17 +95,17 @@ protected:
 Ptr<Tonemap> createTonemap(float gamma)
 {
-    return new TonemapImpl(gamma);
+    return makePtr<TonemapImpl>(gamma);
 }
 class TonemapDragoImpl : public TonemapDrago
 {
 public:
-    TonemapDragoImpl(float gamma, float saturation, float bias) :
-        gamma(gamma),
-        saturation(saturation),
-        bias(bias),
-        name("TonemapDrago")
+    TonemapDragoImpl(float _gamma, float _saturation, float _bias) :
+        name("TonemapDrago"),
+        gamma(_gamma),
+        saturation(_saturation),
+        bias(_bias)
     {
     }
@@ -177,19 +177,19 @@ protected:
 Ptr<TonemapDrago> createTonemapDrago(float gamma, float saturation, float bias)
 {
-    return new TonemapDragoImpl(gamma, saturation, bias);
+    return makePtr<TonemapDragoImpl>(gamma, saturation, bias);
 }
 class TonemapDurandImpl : public TonemapDurand
 {
 public:
-    TonemapDurandImpl(float gamma, float contrast, float saturation, float sigma_color, float sigma_space) :
-        gamma(gamma),
-        contrast(contrast),
-        saturation(saturation),
-        sigma_color(sigma_color),
-        sigma_space(sigma_space),
-        name("TonemapDurand")
+    TonemapDurandImpl(float _gamma, float _contrast, float _saturation, float _sigma_color, float _sigma_space) :
+        name("TonemapDurand"),
+        gamma(_gamma),
+        contrast(_contrast),
+        saturation(_saturation),
+        sigma_color(_sigma_color),
+        sigma_space(_sigma_space)
     {
     }
@@ -257,23 +257,23 @@ public:
 protected:
     String name;
-    float gamma, saturation, contrast, sigma_color, sigma_space;
+    float gamma, contrast, saturation, sigma_color, sigma_space;
 };
 Ptr<TonemapDurand> createTonemapDurand(float gamma, float contrast, float saturation, float sigma_color, float sigma_space)
 {
-    return new TonemapDurandImpl(gamma, contrast, saturation, sigma_color, sigma_space);
+    return makePtr<TonemapDurandImpl>(gamma, contrast, saturation, sigma_color, sigma_space);
 }
-class TonemapReinhardDevlinImpl : public TonemapReinhardDevlin
+class TonemapReinhardImpl : public TonemapReinhard
 {
 public:
-    TonemapReinhardDevlinImpl(float gamma, float intensity, float light_adapt, float color_adapt) :
-        gamma(gamma),
-        intensity(intensity),
-        light_adapt(light_adapt),
-        color_adapt(color_adapt),
-        name("TonemapReinhardDevlin")
+    TonemapReinhardImpl(float _gamma, float _intensity, float _light_adapt, float _color_adapt) :
+        name("TonemapReinhard"),
+        gamma(_gamma),
+        intensity(_intensity),
+        light_adapt(_light_adapt),
+        color_adapt(_color_adapt)
     {
     }
@@ -355,19 +355,19 @@ protected:
     float gamma, intensity, light_adapt, color_adapt;
 };
-Ptr<TonemapReinhardDevlin> createTonemapReinhardDevlin(float gamma, float contrast, float sigma_color, float sigma_space)
+Ptr<TonemapReinhard> createTonemapReinhard(float gamma, float contrast, float sigma_color, float sigma_space)
 {
-    return new TonemapReinhardDevlinImpl(gamma, contrast, sigma_color, sigma_space);
+    return makePtr<TonemapReinhardImpl>(gamma, contrast, sigma_color, sigma_space);
 }
 class TonemapMantiukImpl : public TonemapMantiuk
 {
 public:
-    TonemapMantiukImpl(float gamma, float scale, float saturation) :
-        gamma(gamma),
-        scale(scale),
-        saturation(saturation),
-        name("TonemapMantiuk")
+    TonemapMantiukImpl(float _gamma, float _scale, float _saturation) :
+        name("TonemapMantiuk"),
+        gamma(_gamma),
+        scale(_scale),
+        saturation(_saturation)
     {
     }
@@ -389,8 +389,8 @@ public:
         getContrast(log_img, x_contrast, y_contrast);
         for(size_t i = 0; i < x_contrast.size(); i++) {
-            mapContrast(x_contrast[i], scale);
-            mapContrast(y_contrast[i], scale);
+            mapContrast(x_contrast[i]);
+            mapContrast(y_contrast[i]);
         }
         Mat right(src.size(), CV_32F);
@@ -468,7 +468,7 @@ protected:
         dst = dst.mul(sign);
     }
-    void mapContrast(Mat& contrast, float scale)
+    void mapContrast(Mat& contrast)
     {
         const float response_power = 0.4185f;
         signedPow(contrast, response_power, contrast);
@@ -525,7 +525,7 @@ protected:
 Ptr<TonemapMantiuk> createTonemapMantiuk(float gamma, float scale, float saturation)
 {
-    return new TonemapMantiukImpl(gamma, scale, saturation);
+    return makePtr<TonemapMantiukImpl>(gamma, scale, saturation);
 }
 }
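A hedged call sketch for the tone mapping implementations touched above, not part of the patch (hdr stands for a 32-bit float image; the parameter values are illustrative, except the Mantiuk ones, which mirror the declared defaults):

    #include <opencv2/photo.hpp>

    void tonemapExamples(const cv::Mat& hdr, cv::Mat& ldr_durand, cv::Mat& ldr_mantiuk)
    {
        cv::Ptr<cv::TonemapDurand> durand =
            cv::createTonemapDurand(1.0f, 4.0f, 1.0f, 2.0f, 2.0f);  // gamma, contrast, saturation, sigma_color, sigma_space
        durand->process(hdr, ldr_durand);

        cv::Ptr<cv::TonemapMantiuk> mantiuk =
            cv::createTonemapMantiuk(1.0f, 0.7f, 1.0f);             // gamma, scale, saturation
        mantiuk->process(hdr, ldr_mantiuk);
    }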
@@ -115,9 +115,9 @@ TEST(Photo_Tonemap, regression)
     result.convertTo(result, CV_8UC3, 255);
     checkEqual(result, expected, 3);
-    Ptr<TonemapReinhardDevlin> reinhard_devlin = createTonemapReinhardDevlin(gamma);
-    reinhard_devlin->process(img, result);
-    loadImage(test_path + "reinharddevlin.png", expected);
+    Ptr<TonemapReinhard> reinhard = createTonemapReinhard(gamma);
+    reinhard->process(img, result);
+    loadImage(test_path + "reinhard.png", expected);
     result.convertTo(result, CV_8UC3, 255);
     checkEqual(result, expected, 3);
@@ -149,8 +149,7 @@ TEST(Photo_AlignMTB, regression)
         Point shift(rand() % max_shift, rand() % max_shift);
         Mat res;
         align->shiftMat(img, res, shift);
-        Point calc;
-        align->calculateShift(img, res, calc);
+        Point calc = align->calculateShift(img, res);
         errors += (calc != -shift);
     }
     ASSERT_TRUE(errors < 5) << errors << " errors";
......
@@ -137,14 +137,16 @@ typedef Ptr<StereoSGBM> Ptr_StereoSGBM;
 typedef Ptr<Tonemap> Ptr_Tonemap;
 typedef Ptr<TonemapDrago> Ptr_TonemapDrago;
-typedef Ptr<TonemapReinhardDevlin> Ptr_TonemapReinhardDevlin;
+typedef Ptr<TonemapReinhard> Ptr_TonemapReinhard;
 typedef Ptr<TonemapDurand> Ptr_TonemapDurand;
 typedef Ptr<TonemapMantiuk> Ptr_TonemapMantiuk;
 typedef Ptr<AlignMTB> Ptr_AlignMTB;
 typedef Ptr<CalibrateDebevec> Ptr_CalibrateDebevec;
 typedef Ptr<CalibrateRobertson> Ptr_CalibrateRobertson;
 typedef Ptr<MergeDebevec> Ptr_MergeDebevec;
-typedef Ptr<MergeRobertson> Ptr_MergeRobertson;
 typedef Ptr<MergeMertens> Ptr_MergeMertens;
+typedef Ptr<MergeRobertson> Ptr_MergeRobertson;
 typedef Ptr<cv::softcascade::ChannelFeatureBuilder> Ptr_ChannelFeatureBuilder;
 typedef Ptr<CLAHE> Ptr_CLAHE;
......
@@ -7,21 +7,9 @@
 using namespace cv;
 using namespace std;
-void loadExposureSeq(String path, vector<Mat>& images, vector<float>& times)
-{
-    path += "/";
-    ifstream list_file((path + "list.txt").c_str());
-    string name;
-    float val;
-    while(list_file >> name >> val) {
-        Mat img = imread(path + name);
-        images.push_back(img);
-        times.push_back(1 / val);
-    }
-    list_file.close();
-}
+void loadExposureSeq(String, vector<Mat>&, vector<float>&);
-int main(int argc, char**argv)
+int main(int, char**argv)
 {
     vector<Mat> images;
     vector<float> times;
@@ -49,3 +37,17 @@ int main(int argc, char**argv)
     return 0;
 }
+void loadExposureSeq(String path, vector<Mat>& images, vector<float>& times)
+{
+    path = path + std::string("/");
+    ifstream list_file((path + "list.txt").c_str());
+    string name;
+    float val;
+    while(list_file >> name >> val) {
+        Mat img = imread(path + name);
+        images.push_back(img);
+        times.push_back(1 / val);
+    }
+    list_file.close();
+}
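The relocated loadExposureSeq above expects the target directory to contain a list.txt with one "<image name> <1/exposure time>" pair per line; the sample stores 1/val as the exposure time. A hedged call sketch with a placeholder directory, using the sample's own declarations:

    std::vector<cv::Mat> images;
    std::vector<float> times;
    loadExposureSeq("exposure_sequence_dir", images, times);   // placeholder path
    CV_Assert(!images.empty() && images.size() == times.size());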