Commit d648cc8c authored by baisheng lai

remove warnings

parent fc471beb
@@ -118,7 +118,7 @@ namespace omnidir
@param R Rotation transform between the original and object space : 3x3 1-channel, or vector: 3x1/1x3, with depth CV_32F or CV_64F
@param P New camera matrix (3x3) or new projection matrix (3x4)
@param size Undistorted image size.
@param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps()
@param mltype Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps()
for details.
@param map1 The first output map.
@param map2 The second output map.
@@ -135,6 +135,7 @@ namespace omnidir
@param K Camera matrix \f$K = \vecthreethree{f_x}{s}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.
@param D Input vector of distortion coefficients \f$(k_1, k_2, p_1, p_2)\f$.
@param xi The parameter xi for CMei's model.
@param flags Flags indicates the rectification type, RECTIFY_PERSPECTIVE, RECTIFY_CYLINDRICAL, RECTIFY_LONGLATI and RECTIFY_STEREOGRAPHIC
@param Knew Camera matrix of the distorted image. If it is not assigned, it is just K.
@param new_size The new image size. By default, it is the size of the distorted image.
@param R Rotation matrix between the input and output images. By default, it is identity matrix.
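As a quick illustration of these parameters, a hedged sketch of a single-image undistortion call (the Knew scaling and the helper name are illustrative choices, not part of the library):

```cpp
#include <opencv2/core.hpp>
#include <opencv2/ccalib/omnidir.hpp>

// Undistort one fisheye image with CMei's model. Halving the focal length in
// Knew keeps more of the wide field of view inside the output image.
void undistortOnce(const cv::Mat& distorted, const cv::Mat& K, const cv::Mat& D,
                   const cv::Mat& xi, cv::Mat& undistorted)
{
    cv::Mat Knew = K.clone();                  // assumes K is a 3x3 CV_64F matrix
    Knew.at<double>(0, 0) *= 0.5;
    Knew.at<double>(1, 1) *= 0.5;
    cv::omnidir::undistortImage(distorted, undistorted, K, D, xi,
                                cv::omnidir::RECTIFY_PERSPECTIVE,
                                Knew, distorted.size());
}
```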
@@ -152,14 +153,14 @@ namespace omnidir
@param K Output calibrated camera matrix.
@param xi Output parameter xi for CMei's model
@param D Output distortion parameters \f$(k_1, k_2, p_1, p_2)\f$
@param omAll Output rotations for each calibration images
@param tAll Output translation for each calibration images
@param rvecs Output rotations for each calibration images
@param tvecs Output translation for each calibration images
@param flags The flags that control calibrate
@param criteria Termination criteria for optimization
@param idx Indices of images that pass initialization, which are really used in calibration. So the size of rvecs is the
same as idx.total().
*/
CV_EXPORTS_W double calibrate(InputArray patternPoints, InputArray imagePoints, Size size,
CV_EXPORTS_W double calibrate(InputArray objectPoints, InputArray imagePoints, Size size,
InputOutputArray K, InputOutputArray xi, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
int flags, TermCriteria criteria, OutputArray idx=noArray());
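A hedged sketch of calling this function (the point containers, flags value and wrapper name are illustrative):

```cpp
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/ccalib/omnidir.hpp>

// Sketch of a calibrate() call; objectPoints/imagePoints are assumed to hold
// per-view board corners as CV_64FC3 / CV_64FC2 data.
double runCalibration(const std::vector<std::vector<cv::Vec3d> >& objectPoints,
                      const std::vector<std::vector<cv::Vec2d> >& imagePoints,
                      cv::Size imageSize)
{
    cv::Mat K, D, xi, idx;
    std::vector<cv::Mat> rvecs, tvecs;
    cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                              200, 1e-8);
    double rms = cv::omnidir::calibrate(objectPoints, imagePoints, imageSize,
                                        K, xi, D, rvecs, tvecs,
                                        0 /* no fixed parameters */, criteria, idx);
    // idx is a 1xN CV_32S row of the images that passed initialization, so
    // rvecs/tvecs line up with idx rather than with the raw input order.
    return rms;
}
```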
@@ -172,7 +173,8 @@ namespace omnidir
It must be the same size and the same type as objectPoints.
@param imagePoints2 The corresponding image points of the second camera, with type vector<vector<Vec2f> >.
It must be the same size and the same type as objectPoints.
@param imageSize Image size of calibration images.
@param imageSize1 Image size of calibration images of the first camera.
@param imageSize2 Image size of calibration images of the second camera.
@param K1 Output camera matrix for the first camera.
@param xi1 Output parameter xi of Mei's model for the first camera
@param D1 Output distortion parameters \f$(k_1, k_2, p_1, p_2)\f$ for the first camera
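For orientation, a hedged sketch of a stereo calibration call; the argument order follows the parameter list documented here, but the trailing per-view outputs (rvecsL/tvecsL) should be checked against omnidir.hpp before use:

```cpp
#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/ccalib/omnidir.hpp>

// Sketch only: all matrices start empty and are filled by the call.
void runStereoCalibration(std::vector<std::vector<cv::Vec3d> >& objectPoints,
                          std::vector<std::vector<cv::Vec2d> >& imagePoints1,
                          std::vector<std::vector<cv::Vec2d> >& imagePoints2,
                          cv::Size imageSize1, cv::Size imageSize2)
{
    cv::Mat K1, K2, D1, D2, xi1, xi2, rvec, tvec, idx;
    std::vector<cv::Mat> rvecsL, tvecsL;   // per-view poses of the first camera
    cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                              200, 1e-8);
    cv::omnidir::stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                                 imageSize1, imageSize2,
                                 K1, xi1, D1, K2, xi2, D2,
                                 rvec, tvec, rvecsL, tvecsL,
                                 0 /* no fixed parameters */, criteria, idx);
}
```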
@@ -215,7 +217,7 @@ namespace omnidir
@param flag Flag of rectification type, RECTIFY_PERSPECTIVE or RECTIFY_LONGLATI
@param numDisparities The parameter 'numDisparities' in StereoSGBM, see StereoSGBM for details.
@param SADWindowSize The parameter 'SADWindowSize' in StereoSGBM, see StereoSGBM for details.
@param depth Depth map generated by stereo matching
@param disparity Disparity map generated by stereo matching
@param pointCloud Point cloud of 3D reconstruction, with type CV_64FC3
@param newSize Image size of rectified image, see omnidir::undistortImage
@param Knew New camera matrix of rectified image, see omnidir::undistortImage
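The pointCloud output documented here is a dense CV_64FC3 matrix; a small sketch of consuming it (the function name is illustrative):

```cpp
#include <iostream>
#include <opencv2/core.hpp>

// Walk a CV_64FC3 point cloud as returned by omnidir::stereoReconstruct and
// print each (X, Y, Z) triple, expressed in the first camera's frame.
void dumpPointCloud(const cv::Mat& pointCloud)
{
    CV_Assert(pointCloud.type() == CV_64FC3);
    for (int r = 0; r < pointCloud.rows; ++r)
        for (int c = 0; c < pointCloud.cols; ++c)
        {
            const cv::Vec3d& p = pointCloud.at<cv::Vec3d>(r, c);
            std::cout << p[0] << " " << p[1] << " " << p[2] << "\n";
        }
}
```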
......
@@ -144,13 +144,13 @@ void multiCameraCalibration::loadImages()
{
int cameraVertex, timestamp;
std::string filename = file_list[i].substr(0, file_list[i].find('.'));
int spritPosition1 = filename.rfind('/');
int spritPosition2 = filename.rfind('\\');
if (spritPosition1!=(int)std::string::npos)
size_t spritPosition1 = filename.rfind('/');
size_t spritPosition2 = filename.rfind('\\');
if (spritPosition1!=std::string::npos)
{
filename = filename.substr(spritPosition1+1, filename.size() - 1);
}
else if(spritPosition2!= (int)std::string::npos)
else if(spritPosition2!= std::string::npos)
{
filename = filename.substr(spritPosition2+1, filename.size() - 1);
}
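For reference, the warning fixed in this hunk comes from storing std::string::rfind's size_t result in an int and then casting std::string::npos for the comparison. A standalone sketch of the corrected idiom (the helper name is illustrative):

```cpp
#include <string>

// rfind returns std::string::npos (a size_t) when the character is absent, so
// the result must stay a size_t for the npos comparison to be exact and
// warning-free on every platform.
std::string basenameOf(const std::string& path)
{
    size_t slash = path.rfind('/');
    size_t backslash = path.rfind('\\');
    if (slash != std::string::npos)
        return path.substr(slash + 1);
    if (backslash != std::string::npos)
        return path.substr(backslash + 1);
    return path;   // no separator found
}
```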
@@ -383,7 +383,7 @@ void multiCameraCalibration::computeJacobianExtrinsic(const Mat& extrinsicParams
for (int edgeIdx = 0; edgeIdx < nEdge; ++edgeIdx)
{
int nPoints = _objectPointsForEachCamera[_edgeList[edgeIdx].cameraVertex][_edgeList[edgeIdx].photoIndex].total();
int nPoints = (int)_objectPointsForEachCamera[_edgeList[edgeIdx].cameraVertex][_edgeList[edgeIdx].photoIndex].total();
pointsLocation[edgeIdx+1] = pointsLocation[edgeIdx] + nPoints*2;
}
......
@@ -894,7 +894,7 @@ void cv::omnidir::internal::computeJacobian(InputArrayOfArrays objectPoints, Inp
int nPointsAll = 0;
for (int i = 0; i < n; ++i)
{
nPointsAll += objectPoints.getMat(i).total();
nPointsAll += (int)objectPoints.getMat(i).total();
}
//Mat J = Mat::zeros(2*n*objectPoints.getMat(0).total(), 10+6*n, CV_64F);
@@ -940,8 +940,8 @@ void cv::omnidir::internal::computeJacobian(InputArrayOfArrays objectPoints, Inp
Mat(JIn.t()*JEx).copyTo(JTJ(Rect(i*6, 6*n, 6, 10)));
JTE(Rect(0, 6*n, 1, 10)) = JTE(Rect(0, 6*n,1, 10)) + JIn.t() * projError.reshape(1, 2*projError.total());
JTE(Rect(0, i*6, 1, 6)) = JEx.t() * projError.reshape(1, 2*projError.total());
JTE(Rect(0, 6*n, 1, 10)) = JTE(Rect(0, 6*n,1, 10)) + JIn.t() * projError.reshape(1, 2*(int)projError.total());
JTE(Rect(0, i*6, 1, 6)) = JEx.t() * projError.reshape(1, 2*(int)projError.total());
//int nPoints = objectPoints.getMat(i).total();
//JIn.copyTo(J(Rect(6*n, i*nPoints*2, 10, nPoints*2)));
@@ -970,8 +970,8 @@ void cv::omnidir::internal::computeJacobianStereo(InputArrayOfArrays objectPoint
CV_Assert((imagePoints1.total() == imagePoints2.total()) && (imagePoints1.total() == objectPoints.total()));
// compute Jacobian matrix by naive way
int n_img = objectPoints.total();
int n_points = objectPoints.getMat(0).total();
int n_img = (int)objectPoints.total();
int n_points = (int)objectPoints.getMat(0).total();
Mat J = Mat::zeros(4 * n_points * n_img, 20 + 6 * (n_img + 1), CV_64F);
Mat exAll = Mat::zeros(4 * n_points * n_img, 1, CV_64F);
double *para = parameters.getMat().ptr<double>();
@@ -1298,7 +1298,7 @@ double cv::omnidir::calibrate(InputArray patternPoints, InputArray imagePoints,
if (idx.needed())
{
idx.create(1, _idx.total(), CV_32S);
idx.create(1, (int)_idx.total(), CV_32S);
_idx.copyTo(idx.getMat());
}
@@ -1620,7 +1620,7 @@ void cv::omnidir::stereoReconstruct(InputArray image1, InputArray image2, InputA
}
}
}
int nPoints;
int nPoints = 0;
if (pointType == XYZ)
{
nPoints = (int)_pointCloud.size();
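This hunk addresses a different warning (-Wmaybe-uninitialized): nPoints was previously assigned only inside conditional branches. A self-contained sketch of the pattern, with an illustrative helper:

```cpp
#include <vector>

// Give the counter a defined starting value so every branch, including the
// fall-through case, leaves it initialized.
int countIfAvailable(const std::vector<double>* cloud)
{
    int nPoints = 0;                     // defined default instead of garbage
    if (cloud != nullptr)
        nPoints = (int)cloud->size();
    return nPoints;
}
```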
@@ -1638,7 +1638,7 @@ void cv::omnidir::internal::encodeParameters(InputArray K, InputArrayOfArrays om
{
CV_Assert(K.type() == CV_64F && K.size() == Size(3,3));
CV_Assert(distoaration.total() == 4 && distoaration.type() == CV_64F);
int n = omAll.total();
int n = (int)omAll.total();
Mat _omAll = omAll.getMat(), _tAll = tAll.getMat();
Mat tmp = Mat(_omAll.at<Vec3d>(0)).reshape(1,3).clone();
Matx33d _K = K.getMat();
@@ -1771,7 +1771,7 @@ void cv::omnidir::internal::encodeParametersStereo(InputArray K1, InputArray K2,
if(T.empty())
T.create(3, 1, CV_64F);
int n = (parameters.total() - 20) / 6 - 1;
int n = ((int)parameters.total() - 20) / 6 - 1;
if(omL.empty())
omL.create(1, n, CV_64FC3);
@@ -1839,7 +1839,7 @@ void cv::omnidir::internal::estimateUncertainties(InputArrayOfArrays objectPoint
int nPointsAll = 0;
for (int i = 0; i < n; ++i)
{
nPointsAll += objectPoints.getMat(i).total();
nPointsAll += (int)objectPoints.getMat(i).total();
}
Mat reprojError = Mat(nPointsAll, 1, CV_64FC2);
@@ -1907,7 +1907,7 @@ void cv::omnidir::internal::estimateUncertaintiesStereo(InputArrayOfArrays objec
CV_Assert(!objectPoints.empty() && objectPoints.type() == CV_64FC3);
CV_Assert(!imagePoints1.empty() && imagePoints1.type() == CV_64FC2 && imagePoints1.total() == objectPoints.total());
CV_Assert(!imagePoints2.empty() && imagePoints2.type() == CV_64FC2 && imagePoints1.total() == imagePoints2.total());
int n_img = objectPoints.total();
int n_img = (int)objectPoints.total();
CV_Assert((int)parameters.total() == (6*(n_img+1)+20));
Mat _K1, _K2, _D1, _D2;
@@ -1917,7 +1917,7 @@ void cv::omnidir::internal::estimateUncertaintiesStereo(InputArrayOfArrays objec
double _xi1, _xi2;
internal::decodeParametersStereo(_parameters, _K1, _K2, _om, _T, _omL, _tL, _D1, _D2, _xi1, _xi2);
int n_points = objectPoints.getMat(0).total();
int n_points = (int)objectPoints.getMat(0).total();
Mat reprojErrorAll = Mat::zeros(2*n_points*n_img, 1, CV_64FC2);
// error for left image
......