Commit 22f58433 authored by Maksim Shabunin

Merge pull request #338 from patricksnape:msvc_python_fixes

parents 88a72a05 0507e168
@@ -454,7 +454,7 @@ void TransientAreasSegmentationModuleImpl::_run(const std::valarray<float> &inpu
 // first square the input in order to increase the signal to noise ratio
 // get motion local energy
-_squaringSpatiotemporalLPfilter(&inputToSegment[channelIndex*getNBpixels()], &_localMotion[0]);
+_squaringSpatiotemporalLPfilter(&const_cast<std::valarray<float>&>(inputToSegment)[channelIndex*getNBpixels()], &_localMotion[0]);
 // second low pass filter: access to the neighborhood motion energy
 _spatiotemporalLPfilter(&_localMotion[0], &_neighborhoodMotion[0], 1);
......
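The change above presumably works around MSVC's pre-C++11 `std::valarray`, whose const `operator[]` returns `float` by value rather than by reference, so taking the address of an element of a const valarray does not compile there; the `const_cast` selects the non-const overload, which returns a real reference. A minimal sketch of the pattern (names here are illustrative, not from the commit):

```cpp
#include <cstddef>
#include <valarray>

// Hypothetical consumer that only reads through the pointer.
void processBuffer(const float* data, std::size_t n);

void run(const std::valarray<float>& input, std::size_t offset, std::size_t n)
{
    // On implementations where the const operator[] returns by value,
    // &input[offset] is ill-formed (address of a temporary). Casting away
    // const picks the reference-returning overload; this stays safe as
    // long as nothing writes through the resulting pointer.
    const float* p = &const_cast<std::valarray<float>&>(input)[offset];
    processBuffer(p, n);
}
```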
@@ -179,7 +179,7 @@ void LSDDetector::detectImpl( const Mat& imageSrc, std::vector<KeyLine>& keyline
 kl.sPointInOctaveY = (float) extremes[1];
 kl.ePointInOctaveX = (float) extremes[2];
 kl.ePointInOctaveY = (float) extremes[3];
-kl.lineLength = (float) sqrt( pow( extremes[0] - extremes[2], 2 ) + pow( extremes[1] - extremes[3], 2 ) );
+kl.lineLength = (float) sqrt( pow( (float) extremes[0] - extremes[2], 2 ) + pow( (float) extremes[1] - extremes[3], 2 ) );
 /* compute number of pixels covered by line */
 LineIterator li( gaussianPyrs[j], Point( extremes[0], extremes[1] ), Point( extremes[2], extremes[3] ) );
......
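`extremes` is presumably integral here, so `pow( extremes[0] - extremes[2], 2 )` calls `pow` with two `int` arguments; MSVC's `<cmath>` has no integer overload and rejects the call as ambiguous among the `float`/`double`/`long double` versions. Casting the first operand to `float` pins the overload. A small illustration, assuming `int` inputs (the function name is made up):

```cpp
#include <cmath>

// Euclidean length between two integer endpoints. pow(int, int) is
// ambiguous under older MSVC, so one operand is promoted to float first.
float lineLength(int x0, int y0, int x1, int y1)
{
    return (float) std::sqrt( std::pow( (float) (x0 - x1), 2 ) +
                              std::pow( (float) (y0 - y1), 2 ) );
}
```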
@@ -653,7 +653,7 @@ void BinaryDescriptor::computeImpl( const Mat& imageSrc, std::vector<KeyLine>& k
 uchar* pointerToRow = descriptors.ptr( originalIndex );
 /* get LBD data */
-float* desVec = sl[k][lineC].descriptor.data();
+float* desVec = &sl[k][lineC].descriptor.front();
 /* fill current row with binary descriptor */
 for ( int comb = 0; comb < 32; comb++ )
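All of the `descriptor.data()` → `&descriptor.front()` changes in this file look like the same portability fix: `std::vector::data()` only arrived in C++11, and the MSVC toolchains this PR apparently targets do not provide it, while `&v.front()` yields the same pointer on any non-empty vector because vector storage is guaranteed contiguous. A minimal equivalent, under that assumption:

```cpp
#include <vector>

// Pre-C++11 stand-in for v.data(): valid only for non-empty vectors,
// relying on std::vector's contiguous-storage guarantee.
float* rawPointer(std::vector<float>& v)
{
    return v.empty() ? 0 : &v.front();
}
```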
@@ -692,7 +692,7 @@ int BinaryDescriptor::OctaveKeyLines( cv::Mat& image, ScaleLines &keyLines )
 /* sigma values and reduction factor used in Gaussian pyramids */
 float preSigma2 = 0; //original image is not blurred, has zero sigma;
 float curSigma2 = 1.0; //[sqrt(2)]^0=1;
-double factor = sqrt( 2 ); //the down sample factor between connective two octave images
+double factor = sqrt( 2.0 ); //the down sample factor between connective two octave images
 /* loop over number of octaves */
 for ( int octaveCount = 0; octaveCount < params.numOfOctave_; octaveCount++ )
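The `sqrt( 2 )` → `sqrt( 2.0 )` change is the same overload story: `<cmath>` declares `sqrt` for `float`, `double`, and `long double` only, so an `int` argument is ambiguous on MSVC, while a `double` literal selects the `double` overload unambiguously:

```cpp
#include <cmath>

// sqrt(2) would be ambiguous on older MSVC (no sqrt(int) overload);
// the explicit double literal resolves to sqrt(double).
double factor = std::sqrt( 2.0 );
```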
@@ -1241,7 +1241,7 @@ int BinaryDescriptor::computeLBD( ScaleLines &keyLines, bool useDetectionData )
 /* construct line descriptor */
 pSingleLine->descriptor.resize( descriptor_size );
-desVec = pSingleLine->descriptor.data();
+desVec = &pSingleLine->descriptor.front();
 short desID;
@@ -1280,7 +1280,7 @@ int BinaryDescriptor::computeLBD( ScaleLines &keyLines, bool useDetectionData )
 float tempM, tempS;
 tempM = 0;
 tempS = 0;
-desVec = pSingleLine->descriptor.data();
+desVec = &pSingleLine->descriptor.front();
 int base = 0;
 for ( short i = 0; i < (short) ( NUM_OF_BANDS * 8 ); ++base, i = (short) ( base * 8 ) )
@@ -1297,7 +1297,7 @@ int BinaryDescriptor::computeLBD( ScaleLines &keyLines, bool useDetectionData )
 tempM = 1 / sqrt( tempM );
 tempS = 1 / sqrt( tempS );
-desVec = pSingleLine->descriptor.data();
+desVec = &pSingleLine->descriptor.front();
 base = 0;
 for ( short i = 0; i < (short) ( NUM_OF_BANDS * 8 ); ++base, i = (short) ( base * 8 ) )
 {
@@ -1315,7 +1315,7 @@ int BinaryDescriptor::computeLBD( ScaleLines &keyLines, bool useDetectionData )
 * a threshold is used to limit the value of element in the unit feature
 * vector no larger than this threshold. In Z.Wang's work, a value of 0.4 is found
 * empirically to be a proper threshold.*/
-desVec = pSingleLine->descriptor.data();
+desVec = &pSingleLine->descriptor.front();
 for ( short i = 0; i < descriptor_size; i++ )
 {
 if( desVec[i] > 0.4 )
@@ -1344,7 +1344,7 @@ int BinaryDescriptor::computeLBD( ScaleLines &keyLines, bool useDetectionData )
 for ( int g = 0; g < 32; g++ )
 {
 /* get LBD data */
-float* des_Vec = keyLines[lineIDInScaleVec][0].descriptor.data();
+float* des_Vec = &keyLines[lineIDInScaleVec][0].descriptor.front();
 *pointerToRow = des_Vec[g];
 pointerToRow++;
@@ -2204,9 +2204,9 @@ int BinaryDescriptor::EDLineDetector::EdgeDrawing( cv::Mat &image, EdgeChains &e
 edgeChains.xCors.resize( offsetPFirst + offsetPSecond );
 edgeChains.yCors.resize( offsetPFirst + offsetPSecond );
 edgeChains.sId.resize( offsetPS + 1 );
-unsigned int *pxCors = edgeChains.xCors.data();
-unsigned int *pyCors = edgeChains.yCors.data();
-unsigned int *psId = edgeChains.sId.data();
+unsigned int *pxCors = &edgeChains.xCors.front();
+unsigned int *pyCors = &edgeChains.yCors.front();
+unsigned int *psId = &edgeChains.sId.front();
 offsetPFirst = 0;
 offsetPSecond = 0;
 unsigned int indexInCors = 0;
@@ -2252,12 +2252,12 @@ int BinaryDescriptor::EDLineDetector::EDline( cv::Mat &image, LineChains &lines
 lines.xCors.resize( linePixelID );
 lines.yCors.resize( linePixelID );
 lines.sId.resize( 5 * edges.numOfEdges );
-unsigned int *pEdgeXCors = edges.xCors.data();
-unsigned int *pEdgeYCors = edges.yCors.data();
-unsigned int *pEdgeSID = edges.sId.data();
-unsigned int *pLineXCors = lines.xCors.data();
-unsigned int *pLineYCors = lines.yCors.data();
-unsigned int *pLineSID = lines.sId.data();
+unsigned int *pEdgeXCors = &edges.xCors.front();
+unsigned int *pEdgeYCors = &edges.yCors.front();
+unsigned int *pEdgeSID = &edges.sId.front();
+unsigned int *pLineXCors = &lines.xCors.front();
+unsigned int *pLineYCors = &lines.yCors.front();
+unsigned int *pLineSID = &lines.sId.front();
 logNT_ = 2.0 * ( log10( (double) imageWidth ) + log10( (double) imageHeight ) );
 double lineFitErr = 0; //the line fit error;
 std::vector<double> lineEquation( 2, 0 );
@@ -2732,9 +2732,9 @@ int BinaryDescriptor::EDLineDetector::EDline( cv::Mat &image )
 lineSalience_.resize( lines_.numOfLines );
 unsigned char *pgImg = gImgWO_.ptr();
 unsigned int indexInLineArray;
-unsigned int *pXCor = lines_.xCors.data();
-unsigned int *pYCor = lines_.yCors.data();
-unsigned int *pSID = lines_.sId.data();
+unsigned int *pXCor = &lines_.xCors.front();
+unsigned int *pYCor = &lines_.yCors.front();
+unsigned int *pSID = &lines_.sId.front();
 for ( unsigned int i = 0; i < lineSalience_.size(); i++ )
 {
 int salience = 0;
......
@@ -39,6 +39,7 @@
 //
 //M*/
+#include <limits>
 #include "precomp.hpp"
 //TODO delete highgui include
 //#include <opencv2/highgui.hpp>
@@ -81,7 +82,7 @@ bool MotionSaliencyBinWangApr2014::init()
 // Since data is even, the median is estimated using two values that occupy
 // the position (n / 2) and ((n / 2) +1) (choose their arithmetic mean).
-potentialBackground = Mat( imgSize.height, imgSize.width, CV_32FC2, Scalar( NAN, 0 ) );
+potentialBackground = Mat( imgSize.height, imgSize.width, CV_32FC2, Scalar( std::numeric_limits<float>::quiet_NaN(), 0 ) );
 backgroundModel.resize( K + 1 );
@@ -89,7 +90,7 @@ bool MotionSaliencyBinWangApr2014::init()
 {
 Mat* tmpm = new Mat;
 tmpm->create( imgSize.height, imgSize.width, CV_32FC2 );
-tmpm->setTo( Scalar( NAN, 0 ) );
+tmpm->setTo( Scalar( std::numeric_limits<float>::quiet_NaN(), 0 ) );
 Ptr<Mat> tmp = Ptr<Mat>( tmpm );
 backgroundModel[i] = tmp;
 }
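The `NAN` macro comes from C99's `<math.h>`, which the MSVC runtimes of this era did not ship, and that is presumably why the hunks above (together with the `#include <limits>` added earlier) switch to `std::numeric_limits<float>::quiet_NaN()`, the portable C++ spelling. A minimal sketch of the substitution (the function is hypothetical):

```cpp
#include <limits>
#include <opencv2/core.hpp>

// Build a two-channel model image whose first channel starts as NaN.
// numeric_limits is mandated by the C++ standard, unlike the NAN macro.
cv::Mat makeModel( int rows, int cols )
{
    const float nan = std::numeric_limits<float>::quiet_NaN();
    return cv::Mat( rows, cols, CV_32FC2, cv::Scalar( nan, 0 ) );
}
```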
@@ -418,50 +419,50 @@ bool MotionSaliencyBinWangApr2014::templateReplacement( const Mat& finalBFMask,
 if( i > 0 && j > 0 && i < ( backgroundModel[z]->rows - 1 ) && j < ( backgroundModel[z]->cols - 1 ) )
 {
 split( *backgroundModel[z], mv );
-backgroundModelROI = mv[0]( Rect( j - (int) floor( roiSize / 2 ), i - (int) floor( roiSize / 2 ), roiSize, roiSize ) );
+backgroundModelROI = mv[0]( Rect( j - (int) floor((float) roiSize / 2 ), i - (int) floor((float) roiSize / 2 ), roiSize, roiSize ) );
 }
 else if( i == 0 && j == 0 ) // upper left
 {
 split( *backgroundModel[z], mv );
-backgroundModelROI = mv[0]( Rect( j, i, (int) ceil( roiSize / 2 ), (int) ceil( roiSize / 2 ) ) );
+backgroundModelROI = mv[0]( Rect( j, i, (int) ceil((float) roiSize / 2 ), (int) ceil((float) roiSize / 2 ) ) );
 }
 else if( j == 0 && i > 0 && i < ( backgroundModel[z]->rows - 1 ) ) // middle left
 {
 split( *backgroundModel[z], mv );
-backgroundModelROI = mv[0]( Rect( j, i - (int) floor( roiSize / 2 ), (int) ceil( roiSize / 2 ), roiSize ) );
+backgroundModelROI = mv[0]( Rect( j, i - (int) floor((float) roiSize / 2 ), (int) ceil((float) roiSize / 2 ), roiSize ) );
 }
 else if( i == ( backgroundModel[z]->rows - 1 ) && j == 0 ) //down left
 {
 split( *backgroundModel[z], mv );
-backgroundModelROI = mv[0]( Rect( j, i - (int) floor( roiSize / 2 ), (int) ceil( roiSize / 2 ), (int) ceil( roiSize / 2 ) ) );
+backgroundModelROI = mv[0]( Rect( j, i - (int) floor((float) roiSize / 2 ), (int) ceil((float) roiSize / 2 ), (int) ceil((float) roiSize / 2 ) ) );
 }
 else if( i == 0 && j > 0 && j < ( backgroundModel[z]->cols - 1 ) ) // upper - middle
 {
 split( *backgroundModel[z], mv );
-backgroundModelROI = mv[0]( Rect( ( j - (int) floor( roiSize / 2 ) ), i, roiSize, (int) ceil( roiSize / 2 ) ) );
+backgroundModelROI = mv[0]( Rect( ( j - (int) floor((float) roiSize / 2 ) ), i, roiSize, (int) ceil((float) roiSize / 2 ) ) );
 }
 else if( i == ( backgroundModel[z]->rows - 1 ) && j > 0 && j < ( backgroundModel[z]->cols - 1 ) ) //down middle
 {
 split( *backgroundModel[z], mv );
 backgroundModelROI = mv[0](
-Rect( j - (int) floor( roiSize / 2 ), i - (int) floor( roiSize / 2 ), roiSize, (int) ceil( roiSize / 2 ) ) );
+Rect( j - (int) floor((float) roiSize / 2 ), i - (int) floor((float) roiSize / 2 ), roiSize, (int) ceil((float) roiSize / 2 ) ) );
 }
 else if( i == 0 && j == ( backgroundModel[z]->cols - 1 ) ) // upper right
 {
 split( *backgroundModel[z], mv );
-backgroundModelROI = mv[0]( Rect( j - (int) floor( roiSize / 2 ), i, (int) ceil( roiSize / 2 ), (int) ceil( roiSize / 2 ) ) );
+backgroundModelROI = mv[0]( Rect( j - (int) floor((float) roiSize / 2 ), i, (int) ceil((float) roiSize / 2 ), (int) ceil((float) roiSize / 2 ) ) );
 }
 else if( j == ( backgroundModel[z]->cols - 1 ) && i > 0 && i < ( backgroundModel[z]->rows - 1 ) ) // middle - right
 {
 split( *backgroundModel[z], mv );
 backgroundModelROI = mv[0](
-Rect( j - (int) floor( roiSize / 2 ), i - (int) floor( roiSize / 2 ), (int) ceil( roiSize / 2 ), roiSize ) );
+Rect( j - (int) floor((float) roiSize / 2 ), i - (int) floor((float) roiSize / 2 ), (int) ceil((float) roiSize / 2 ), roiSize ) );
 }
 else if( i == ( backgroundModel[z]->rows - 1 ) && j == ( backgroundModel[z]->cols - 1 ) ) // down right
 {
 split( *backgroundModel[z], mv );
 backgroundModelROI = mv[0](
-Rect( j - (int) floor( roiSize / 2 ), i - (int) floor( roiSize / 2 ), (int) ceil( roiSize / 2 ), (int) ceil( roiSize / 2 ) ) );
+Rect( j - (int) floor((float) roiSize / 2 ), i - (int) floor((float) roiSize / 2 ), (int) ceil((float) roiSize / 2 ), (int) ceil((float) roiSize / 2 ) ) );
 }
 /* Check if the value of current pixel BA in potentialBackground model is already contained in at least one of its neighbors'
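`roiSize` is presumably an `int`, so the old `floor( roiSize / 2 )` truncated in integer division before `floor` could act, and `floor(int)` is itself an ambiguous call on MSVC. Promoting to `float` first both pins the overload and preserves the half-pixel fraction, so `floor` and `ceil` actually produce different halves for odd sizes. A sketch of the two spellings (hypothetical helper):

```cpp
#include <cmath>

// Split an odd ROI size into its two halves, e.g. roiSize = 3
// gives lo = floor(1.5) = 1 and hi = ceil(1.5) = 2. Without the
// float cast, 3 / 2 truncates to 1 before floor/ceil run (and
// floor(int) is ambiguous on MSVC anyway).
void roiHalves( int roiSize, int& lo, int& hi )
{
    lo = (int) std::floor( (float) roiSize / 2 );
    hi = (int) std::ceil( (float) roiSize / 2 );
}
```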
@@ -479,7 +480,7 @@ bool MotionSaliencyBinWangApr2014::templateReplacement( const Mat& finalBFMask,
 /////////////////// REPLACEMENT of backgroundModel template ///////////////////
 //replace TA with current TK
 backgroundModel[backgroundModel.size() - 1]->at<Vec2f>( i, j ) = potentialBackground.at<Vec2f>( i, j );
-potentialBackground.at<Vec2f>( i, j )[0] = (float)NAN;
+potentialBackground.at<Vec2f>( i, j )[0] = std::numeric_limits<float>::quiet_NaN();
 potentialBackground.at<Vec2f>( i, j )[1] = 0;
 break;
@@ -489,7 +490,7 @@ bool MotionSaliencyBinWangApr2014::templateReplacement( const Mat& finalBFMask,
 else
 {
 backgroundModel[backgroundModel.size() - 1]->at<Vec2f>( i, j ) = potentialBackground.at<Vec2f>( i, j );
-potentialBackground.at<Vec2f>( i, j )[0] = (float)NAN;
+potentialBackground.at<Vec2f>( i, j )[0] = std::numeric_limits<float>::quiet_NaN();
 potentialBackground.at<Vec2f>( i, j )[1] = 0;
 }
 } // close if of EVALUATION
......