Commit 7f7965bc authored by Vadim Pisarevsky's avatar Vadim Pisarevsky

Separated the OpenCV 1.x and 2.x Python wrappers; moved tests/python/* to modules/python/test.

parent 0c9e5f6c
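The commit splits the Python bindings into two separate extension modules: the original 1.x wrapper generated from src1/ (built as the cv module) and the new 2.x wrapper generated from src2/ (built as the cv2 module, only when NumPy support is enabled). A minimal usage sketch, assuming both modules have been built and are importable, with a placeholder filename; the exact set of wrapped functions depends on the generated code:

import cv                          # 1.x API, generated from src1/api by gen.py
import cv2                         # 2.x API, generated from the C++ headers by src2/gen2.py (requires NumPy)

img1 = cv.LoadImage("example.jpg")     # 1.x style: returns an IplImage wrapper object
img2 = cv2.imread("example.jpg")       # 2.x style: returns a NumPy array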
@@ -5,7 +5,8 @@ project(opencv_python)
include_directories(${PYTHON_INCLUDE_PATH})
include_directories(
"${CMAKE_CURRENT_SOURCE_DIR}/src"
"${CMAKE_CURRENT_SOURCE_DIR}/src1"
"${CMAKE_CURRENT_SOURCE_DIR}/src2"
"${CMAKE_SOURCE_DIR}/modules/core/include"
"${CMAKE_SOURCE_DIR}/modules/imgproc/include"
"${CMAKE_SOURCE_DIR}/modules/video/include"
@@ -30,64 +31,75 @@ set(opencv_hdrs "${CMAKE_SOURCE_DIR}/modules/core/include/opencv2/core/core.hpp"
"${CMAKE_SOURCE_DIR}/modules/features2d/include/opencv2/features2d/features2d.hpp"
"${CMAKE_SOURCE_DIR}/modules/calib3d/include/opencv2/calib3d/calib3d.hpp"
"${CMAKE_SOURCE_DIR}/modules/objdetect/include/opencv2/objdetect/objdetect.hpp"
"${CMAKE_SOURCE_DIR}/modules/python/src/opencv_extra_api.hpp")
set(generated_hdrs
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_funcs.h"
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_func_tab.h"
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_types.h"
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_type_reg.h"
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_const_reg.h")
"${CMAKE_SOURCE_DIR}/modules/python/src2/opencv_extra_api.hpp")
if(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W3")
endif()
file(GLOB lib_srcs "src/*.cpp")
file(GLOB lib_hdrs "src/*.h")
add_custom_command(
OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/generated0.i
COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src/gen.py" "${CMAKE_CURRENT_SOURCE_DIR}/src"
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/api
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/defs
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/gen.py
COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src1/gen.py" "${CMAKE_CURRENT_SOURCE_DIR}/src1"
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src1/api
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src1/defs
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src1/gen.py
)
add_custom_command(
OUTPUT ${generated_hdrs}
COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src/gen2.py" ${CMAKE_CURRENT_BINARY_DIR} ${opencv_hdrs}
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/gen2.py
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src/hdr_parser.py
DEPENDS ${opencv_hdrs}
)
set(the_target "opencv_python")
add_library(${the_target} ${lib_srcs} ${lib_hdrs} ${lib_int_hdrs} ${CMAKE_CURRENT_BINARY_DIR}/generated0.i src/opencv2x.h src/opencv_extra_api.hpp ${generated_hdrs})
target_link_libraries(${the_target} ${PYTHON_LIBRARIES} opencv_core opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib)
set(cv_target "opencv_python")
add_library(${cv_target} src1/cv.cpp ${CMAKE_CURRENT_BINARY_DIR}/generated0.i)
target_link_libraries(${cv_target} ${PYTHON_LIBRARIES} opencv_core opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib)
set_target_properties(${the_target} PROPERTIES PREFIX "")
set_target_properties(${the_target} PROPERTIES OUTPUT_NAME "cv")
set_target_properties(${cv_target} PROPERTIES PREFIX "")
set_target_properties(${cv_target} PROPERTIES OUTPUT_NAME "cv")
execute_process(COMMAND ${PYTHON_EXECUTABLE} -c "import distutils.sysconfig; print distutils.sysconfig.get_config_var('SO')"
RESULT_VARIABLE PYTHON_CVPY_PROCESS
OUTPUT_VARIABLE CVPY_SUFFIX
OUTPUT_STRIP_TRAILING_WHITESPACE)
set_target_properties(${the_target} PROPERTIES SUFFIX ${CVPY_SUFFIX})
set_target_properties(${cv_target} PROPERTIES SUFFIX ${CVPY_SUFFIX})
set(cvpymodules ${cv_target})
if(PYTHON_USE_NUMPY)
set(cv2_generated_hdrs
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_funcs.h"
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_func_tab.h"
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_types.h"
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_type_reg.h"
"${CMAKE_CURRENT_BINARY_DIR}/pyopencv_generated_const_reg.h")
set(cvpy_files cv${CVPY_SUFFIX})
add_custom_command(
OUTPUT ${cv2_generated_hdrs}
COMMAND ${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_SOURCE_DIR}/src2/gen2.py" ${CMAKE_CURRENT_BINARY_DIR} ${opencv_hdrs}
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/gen2.py
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/src2/hdr_parser.py
DEPENDS ${opencv_hdrs})
set(cv2_target "opencv2_python")
add_library(${cv2_target} src2/cv2.cpp src2/opencv_extra_api.hpp ${cv2_generated_hdrs})
target_link_libraries(${cv2_target} ${PYTHON_LIBRARIES} opencv_core opencv_imgproc opencv_video opencv_ml opencv_features2d opencv_highgui opencv_calib3d opencv_objdetect opencv_legacy opencv_contrib)
set_target_properties(${cv2_target} PROPERTIES PREFIX "")
set_target_properties(${cv2_target} PROPERTIES OUTPUT_NAME "cv2")
set_target_properties(${cv2_target} PROPERTIES SUFFIX ${CVPY_SUFFIX})
set(cvpymodules ${cvpymodules} ${cv2_target})
endif()
if(WIN32)
install(TARGETS ${the_target}
RUNTIME DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
LIBRARY DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
ARCHIVE DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
)
install(TARGETS ${cvpymodules}
RUNTIME DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
LIBRARY DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
ARCHIVE DESTINATION "Python${PYTHON_VERSION_MAJOR_MINOR}/Lib/site-packages" COMPONENT main
)
else()
#install(FILES ${LIBRARY_OUTPUT_PATH}/cv${CVPY_SUFFIX} DESTINATION ${PYTHON_PACKAGES_PATH})
install(TARGETS ${the_target}
RUNTIME DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
LIBRARY DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
ARCHIVE DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main)
#install(FILES ${LIBRARY_OUTPUT_PATH}/cv${CVPY_SUFFIX} DESTINATION ${PYTHON_PACKAGES_PATH})
install(TARGETS ${cvpymodules}
RUNTIME DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
LIBRARY DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main
ARCHIVE DESTINATION ${PYTHON_PACKAGES_PATH} COMPONENT main)
endif()
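For reference, the execute_process() call above simply asks the target Python for its extension-module suffix; run by hand it amounts to the following (Python 2 syntax, matching the embedded command), with typical results such as ".so" on Linux and ".pyd" on Windows:

import distutils.sysconfig
print distutils.sysconfig.get_config_var('SO')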
# Macros
CV_RGB CvScalar
double red
double grn
double blu
CV_MAT_CN int
int i
CV_MAT_DEPTH int
int i
Scalar CvScalar
double val0
double val1 0
double val2 0
double val3 0
ScalarAll CvScalar
double val0123
RealScalar CvScalar
double val0
CV_IABS int
int a
CV_CMP int
int a
int b
CV_SIGN int
int a
CV_FOURCC int
char c1
char c2
char c3
char c4
CV_MAKETYPE int
int depth
int cn
CV_8UC int
int n
CV_8SC int
int n
CV_16UC int
int n
CV_16SC int
int n
CV_32SC int
int n
CV_32FC int
int n
CV_64FC int
int n
# Initialization
CloneImage IplImage*
IplImage image
SetImageCOI
IplImage image
int coi
GetImageCOI int
IplImage image
SetImageROI
IplImage image
CvRect rect
ResetImageROI
IplImage image
GetImageROI CvRect
IplImage image
CloneMat CvMat*
CvMat mat
CloneMatND CvMatND*
CvMatND mat
# Accessing Elements and sub-Arrays
Get1D CvScalar
CvArr arr
int idx
Get2D CvScalar
CvArr arr
int idx0
int idx1
Get3D CvScalar
CvArr arr
int idx0
int idx1
int idx2
GetND CvScalar
CvArr arr
ints indices
GetReal1D double
CvArr arr
int idx0
GetReal2D double
CvArr arr
int idx0
int idx1
GetReal3D double
CvArr arr
int idx0
int idx1
int idx2
GetRealND double
CvArr arr
ints idx
mGet double
CvMat mat
int row
int col
Set1D
CvArr arr
int idx
CvScalar value
Set2D
CvArr arr
int idx0
int idx1
CvScalar value
Set3D
CvArr arr
int idx0
int idx1
int idx2
CvScalar value
SetND
CvArr arr
ints indices
CvScalar value
SetReal1D
CvArr arr
int idx
double value
SetReal2D
CvArr arr
int idx0
int idx1
double value
SetReal3D
CvArr arr
int idx0
int idx1
int idx2
double value
SetRealND
CvArr arr
ints indices
double value
mSet
CvMat mat
int row
int col
double value
ClearND
CvArr arr
ints idx
# Sequences
CV_IS_SEQ_INDEX int
CvSeq s
CV_IS_SEQ_CURVE int
CvSeq s
CV_IS_SEQ_CLOSED int
CvSeq s
CV_IS_SEQ_CONVEX int
CvSeq s
CV_IS_SEQ_HOLE int
CvSeq s
CV_IS_SEQ_SIMPLE int
CvSeq s
# Curves and Shapes
Line
CvArr img
CvPoint pt1
CvPoint pt2
CvScalar color
int thickness 1
int lineType 8
int shift 0
Rectangle
CvArr img
CvPoint pt1
CvPoint pt2
CvScalar color
int thickness 1
int lineType 8
int shift 0
Circle
CvArr img
CvPoint center
int radius
CvScalar color
int thickness 1
int lineType 8
int shift 0
Ellipse
CvArr img
CvPoint center
CvSize axes
double angle
double start_angle
double end_angle
CvScalar color
int thickness 1
int lineType 8
int shift 0
EllipseBox
CvArr img
CvBox2D box
CvScalar color
int thickness 1
int lineType 8
int shift 0
FillPoly
CvArr img
pts_npts_contours polys
CvScalar color
int lineType 8
int shift 0
FillConvexPoly
CvArr img
CvPoints pn
CvScalar color
int lineType 8
int shift 0
PolyLine
CvArr img
pts_npts_contours polys
int is_closed
CvScalar color
int thickness 1
int lineType 8
int shift 0
#Text
InitFont font
CvFont font /O
int fontFace
double hscale
double vscale
double shear 0
int thickness 1
int lineType 8
PutText
CvArr img
char* text
CvPoint org
CvFont* font
CvScalar color
GetTextSize textSize,baseline
char* textString
CvFont* font
CvSize textSize /O
int baseline /O
# Point Sets and Contours
DrawContours
CvArr img
CvSeq contour
CvScalar external_color
CvScalar hole_color
int max_level
int thickness 1
int lineType 8
CvPoint offset cvPoint(0,0)
# RTTI and Generic Functions
Save
char* filename
generic structPtr
char* name NULL
char* comment NULL
Load generic
char* filename
CvMemStorage storage NULL
char* name NULL
# Accessing Elements and sub-Arrays
GetRow submat
CvArr arr
CvMat submat /J:arr,O,A
int row
GetRows submat
CvArr arr
CvMat submat /J:arr,O,A
int startRow
int endRow
int deltaRow 1
GetCol submat
CvArr arr
CvMat submat /J:arr,O,A
int col
GetCols submat
CvArr arr
CvMat submat /J:arr,O,A
int startCol
int endCol
GetDiag submat
CvArr arr
CvMat submat /J:arr,O,A
int diag 0
GetSubRect submat
CvArr arr
CvMat submat /J:arr,O,A
CvRect rect
GetSize CvSize
CvArr arr
GetElemType int
CvArr arr
# Copying and Filling
Copy
CvArr src
CvArr dst
CvArr mask NULL
Set
CvArr arr
CvScalar value
CvArr mask NULL
SetZero
CvArr arr
Zero
CvArr arr
SetIdentity
CvArr mat
CvScalar value cvRealScalar(1)
Range
CvArr mat
double start
double end
# Transforms and Permutations
# Reshape, ReshapeND - requires special data refcount code
Repeat
CvArr src
CvArr dst
Flip
CvArr src
CvArr dst NULL
int flipMode 0
Split
CvArr src
CvArr dst0
CvArr dst1
CvArr dst2
CvArr dst3
CvtPixToPlane
CvArr src
CvArr dst0
CvArr dst1
CvArr dst2
CvArr dst3
Merge
CvArr src0
CvArr src1
CvArr src2
CvArr src3
CvArr dst
MixChannels
cvarr_count src /K
cvarr_count dst
intpair fromTo
RandShuffle
CvArr mat
CvRNG* rng
double iter_factor 1.0
Sort
CvArr src
CvArr dst
CvArr idxmat
int flags 0
# Arithmetic, Logic and Comparison
LUT
CvArr src
CvArr dst
CvArr lut
ConvertScale
CvArr src
CvArr dst
double scale 1.0
double shift 0.0
CvtScale
CvArr src
CvArr dst
double scale 1.0
double shift 0.0
Scale
CvArr src
CvArr dst
double scale 1.0
double shift 0.0
Convert
CvArr src
CvArr dst
ConvertScaleAbs
CvArr src
CvArr dst
double scale 1.0
double shift 0.0
Add
CvArr src1
CvArr src2
CvArr dst
CvArr mask NULL
AddS
CvArr src
CvScalar value
CvArr dst
CvArr mask NULL
AddWeighted
CvArr src1
double alpha
CvArr src2
double beta
double gamma
CvArr dst
Sub
CvArr src1
CvArr src2
CvArr dst
CvArr mask NULL
SubS
CvArr src
CvScalar value
CvArr dst
CvArr mask NULL
SubRS
CvArr src
CvScalar value
CvArr dst
CvArr mask NULL
Mul
CvArr src1
CvArr src2
CvArr dst
double scale 1.0
Div
CvArr src1
CvArr src2
CvArr dst
double scale 1.0
And
CvArr src1
CvArr src2
CvArr dst
CvArr mask NULL
AndS
CvArr src
CvScalar value
CvArr dst
CvArr mask NULL
Or
CvArr src1
CvArr src2
CvArr dst
CvArr mask NULL
OrS
CvArr src
CvScalar value
CvArr dst
CvArr mask NULL
Xor
CvArr src1
CvArr src2
CvArr dst
CvArr mask NULL
XorS
CvArr src
CvScalar value
CvArr dst
CvArr mask NULL
Not
CvArr src
CvArr dst
Cmp
CvArr src1
CvArr src2
CvArr dst
int cmpOp
CmpS
CvArr src
double value
CvArr dst
int cmpOp
InRange
CvArr src
CvArr lower
CvArr upper
CvArr dst
InRangeS
CvArr src
CvScalar lower
CvScalar upper
CvArr dst
Max
CvArr src1
CvArr src2
CvArr dst
MaxS
CvArr src
double value
CvArr dst
Min
CvArr src1
CvArr src2
CvArr dst
MinS
CvArr src
double value
CvArr dst
AbsDiff
CvArr src1
CvArr src2
CvArr dst
AbsDiffS
CvArr src
CvArr dst
CvScalar value
Abs
CvArr src
CvArr dst
# Statistics
CountNonZero int
CvArr arr
Sum CvScalar
CvArr arr
Avg CvScalar
CvArr arr
CvArr mask NULL
AvgSdv mean,stdDev
CvArr arr
CvScalar mean /O
CvScalar stdDev /O
CvArr mask NULL
MinMaxLoc minVal,maxVal,minLoc,maxLoc
CvArr arr
double minVal /O
double maxVal /O
CvPoint minLoc /O
CvPoint maxLoc /O
CvArr mask NULL
Norm double
CvArr arr1
CvArr arr2
int normType CV_L2
CvArr mask NULL
Reduce
CvArr src
CvArr dst
int dim -1
int op CV_REDUCE_SUM
# Linear Algebra
DotProduct double
CvArr src1
CvArr src2
Normalize
CvArr src
CvArr dst
double a 1.0
double b 0.0
int norm_type CV_L2
CvArr mask NULL
CrossProduct
CvArr src1
CvArr src2
CvArr dst
ScaleAdd
CvArr src1
CvScalar scale
CvArr src2
CvArr dst
GEMM
CvArr src1
CvArr src2
double alpha
CvArr src3
double beta
CvArr dst
int tABC 0
MatMulAdd
CvArr src1
CvArr src2
CvArr src3
CvArr dst
MatMul
CvArr src1
CvArr src2
CvArr dst
Transform
CvArr src
CvArr dst
CvMat transmat
CvMat shiftvec NULL
PerspectiveTransform
CvArr src
CvArr dst
CvMat mat
MulTransposed
CvArr src
CvArr dst
int order
CvArr delta NULL
double scale 1.0
Trace CvScalar
CvArr mat
Transpose
CvArr src
CvArr dst
Det double
CvArr mat
Invert double
CvArr src
CvArr dst
int method CV_LU
Solve
CvArr A
CvArr B
CvArr X
int method CV_LU
SVD
CvArr A
CvArr W
CvArr U NULL
CvArr V NULL
int flags 0
SVBkSb
CvArr W
CvArr U
CvArr V
CvArr B
CvArr X
int flags
EigenVV
CvArr mat
CvArr evects
CvArr evals
double eps
int lowindex 0
int highindex 0
CalcCovarMatrix
cvarr_count vects /K
CvArr covMat
CvArr avg
int flags
Mahalonobis
CvArr vec1
CvArr vec2
CvArr mat
CalcPCA
CvArr data
CvArr avg
CvArr eigenvalues
CvArr eigenvectors
int flags
ProjectPCA
CvArr data
CvArr avg
CvArr eigenvectors
CvArr result
BackProjectPCA
CvArr proj
CvArr avg
CvArr eigenvects
CvArr result
# Math Functions
Round int
double value
Floor int
double value
Ceil int
double value
Sqrt float
float value
InvSqrt float
float value
Cbrt float
float value
FastArctan float
float y
float x
IsNaN int
double value
IsInf int
double value
CartToPolar
CvArr x
CvArr y
CvArr magnitude
CvArr angle NULL
int angleInDegrees 0
PolarToCart
CvArr magnitude
CvArr angle
CvArr x
CvArr y
int angleInDegrees 0
Pow
CvArr src
CvArr dst
double power
Exp
CvArr src
CvArr dst
Log
CvArr src
CvArr dst
SolveCubic
CvMat coeffs
CvMat roots
SolvePoly
CvMat coeffs
CvMat roots
int maxiter 10
int fig 10
# Random Number Generation
RNG CvRNG
int64 seed -1LL
RandArr
CvRNG* rng
CvArr arr
int distType
CvScalar param1
CvScalar param2
RandInt unsigned
CvRNG* rng
RandReal double
CvRNG* rng
# Discrete Transforms
DFT
CvArr src
CvArr dst
int flags
int nonzeroRows 0
GetOptimalDFTSize int
int size0
MulSpectrums
CvArr src1
CvArr src2
CvArr dst
int flags
DCT
CvArr src
CvArr dst
int flags
# Sequences
SeqRemove
CvSeq seq
int index
ClearSeq
CvSeq seq
CloneSeq
CvSeq seq
CvMemStorage storage
SeqRemoveSlice
CvSeq seq
CvSlice slice
SeqInvert
CvSeq seq
# Miscellaneous Functions
CheckArr int
CvArr arr
int flags 0
double min_val 0
double max_val 0
KMeans2 double
CvArr samples
int nclusters
CvArr labels
CvTermCriteria termcrit
int attempts 1
int flags 0
CvArr centers NULL
# Gradients, Edges, Corners and Features
Sobel
CvArr src
CvArr dst
int xorder
int yorder
int apertureSize 3
Laplace
CvArr src
CvArr dst
int apertureSize 3
Canny
CvArr image
CvArr edges
double threshold1
double threshold2
int aperture_size 3
PreCornerDetect
CvArr image
CvArr corners
int apertureSize 3
CornerEigenValsAndVecs
CvArr image
CvArr eigenvv
int blockSize
int aperture_size 3
CornerMinEigenVal
CvArr image
CvArr eigenval
int blockSize
int aperture_size 3
CornerHarris
CvArr image
CvArr harris_dst
int blockSize
int aperture_size 3
double k 0.04
FindCornerSubPix corners
CvArr image
CvPoint2D32fs corners
CvSize win
CvSize zero_zone
CvTermCriteria criteria
GoodFeaturesToTrack cornerCount
CvArr image
CvArr eigImage
CvArr tempImage
cvpoint2d32f_count cornerCount
double qualityLevel
double minDistance
CvArr mask NULL
int blockSize 3
int useHarris 0
double k 0.04
ExtractSURF keypoints,descriptors
CvArr image
CvArr mask
CvSeqOfCvSURFPoint* keypoints /O
CvSeqOfCvSURFDescriptor* descriptors /O
CvMemStorage storage
CvSURFParams params
GetStarKeypoints CvSeqOfCvStarKeypoint*
CvArr image
CvMemStorage storage
CvStarDetectorParams params cvStarDetectorParams()
# Sampling, Interpolation and Geometrical Transforms
GetRectSubPix
CvArr src
CvArr dst
CvPoint2D32f center
GetQuadrangleSubPix
CvArr src
CvArr dst
CvMat mapMatrix
Resize
CvArr src
CvArr dst
int interpolation CV_INTER_LINEAR
WarpAffine
CvArr src
CvArr dst
CvMat mapMatrix
int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS
CvScalar fillval cvScalarAll(0)
GetAffineTransform
CvPoint2D32f* src
CvPoint2D32f* dst
CvMat mapMatrix
GetRotationMatrix2D
CvPoint2D32f center
double angle
double scale
CvMat mapMatrix
WarpPerspective
CvArr src
CvArr dst
CvMat mapMatrix
int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS
CvScalar fillval cvScalarAll(0)
GetPerspectiveTransform
CvPoint2D32f* src
CvPoint2D32f* dst
CvMat mapMatrix
Remap
CvArr src
CvArr dst
CvArr mapx
CvArr mapy
int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS
CvScalar fillval cvScalarAll(0)
ConvertMaps
CvArr mapx
CvArr mapy
CvArr mapxy
CvArr mapalpha
LogPolar
CvArr src
CvArr dst
CvPoint2D32f center
double M
int flags CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS
# Morphological Operations
CreateStructuringElementEx IplConvKernel*
int cols
int rows
int anchorX
int anchorY
int shape
ints values {NULL,0}
Erode
CvArr src
CvArr dst
IplConvKernel* element NULL
int iterations 1
Dilate
CvArr src
CvArr dst
IplConvKernel* element NULL
int iterations 1
MorphologyEx
CvArr src
CvArr dst
CvArr temp
IplConvKernel* element
int operation
int iterations 1
# Filters and Color Conversion
Smooth
CvArr src
CvArr dst
int smoothtype CV_GAUSSIAN
int param1 3
int param2 0
double param3 0
double param4 0
Filter2D
CvArr src
CvArr dst
CvMat kernel
CvPoint anchor cvPoint(-1,-1)
CopyMakeBorder
CvArr src
CvArr dst
CvPoint offset
int bordertype
CvScalar value cvScalarAll(0)
Integral
CvArr image
CvArr sum
CvArr sqsum NULL
CvArr tiltedSum NULL
CvtColor
CvArr src
CvArr dst
int code
Threshold
CvArr src
CvArr dst
double threshold
double maxValue
int thresholdType
AdaptiveThreshold
CvArr src
CvArr dst
double maxValue
int adaptive_method CV_ADAPTIVE_THRESH_MEAN_C /ch_adaptive_method
int thresholdType CV_THRESH_BINARY /ch_threshold_type
int blockSize 3
double param1 5
# Pyramids and the Applications
PyrDown
CvArr src
CvArr dst
int filter CV_GAUSSIAN_5x5
PyrUp
CvArr src
CvArr dst
int filter CV_GAUSSIAN_5x5
PyrSegmentation comp
IplImage src
IplImage dst
CvMemStorage storage
CvSeq* comp /O
int level
double threshold1
double threshold2
PyrMeanShiftFiltering
CvArr src
CvArr dst
double sp
double sr
int max_level 1
CvTermCriteria termcrit cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,5,1)
# Image Segmentation, Connected Components and Contour Retrieval
FloodFill comp
CvArr image
CvPoint seed_point
CvScalar new_val
CvScalar lo_diff cvScalarAll(0)
CvScalar up_diff cvScalarAll(0)
CvConnectedComp comp /O
int flags 4
CvArr mask NULL
Watershed
CvArr image
CvArr markers
# Image and Contour Moments
Moments moments
cvarrseq arr
CvMoments moments /O
int binary 0
GetSpatialMoment double
CvMoments* moments
int x_order
int y_order
GetCentralMoment double
CvMoments* moments
int x_order
int y_order
GetNormalizedCentralMoment double
CvMoments* moments
int x_order
int y_order
# Special Image Transforms
HoughLines2 CvSeq*
CvArr image
CvMemStorage storage
int method
double rho
double theta
int threshold
double param1 0
double param2 0
HoughCircles
CvArr image
CvMat circle_storage
int method
double dp
double min_dist
double param1 100
double param2 100
int min_radius 0
int max_radius 0
DistTransform
CvArr src
CvArr dst
int distance_type CV_DIST_L2
int mask_size 3
floats mask {NULL,0}
CvArr labels NULL
Inpaint
CvArr src
CvArr mask
CvArr dst
double inpaintRadius
int flags
# Histograms
ClearHist
CvHistogram hist
CalcArrHist
CvArrs image
CvHistogram hist
int accumulate 0
CvArr mask NULL
CalcHist
IplImages image
CvHistogram hist
int accumulate 0
CvArr mask NULL
NormalizeHist
CvHistogram hist
double factor
ThreshHist
CvHistogram hist
double threshold
CompareHist double
CvHistogram hist1
CvHistogram hist2
int method
# CopyHist
CalcBackProject
IplImages image
CvArr back_project
CvHistogram hist
CalcArrBackProject
CvArrs image
CvArr back_project
CvHistogram hist
CalcBackProjectPatch
IplImages images
CvArr dst
CvSize patch_size
CvHistogram hist
int method
float factor
CalcProbDensity
CvHistogram hist1
CvHistogram hist2
CvHistogram dst_hist
double scale 255
EqualizeHist
CvArr src
CvArr dst
QueryHistValue_1D double
CvHistogram hist
int idx0
QueryHistValue_2D double
CvHistogram hist
int idx0
int idx1
QueryHistValue_3D double
CvHistogram hist
int idx0
int idx1
int idx2
QueryHistValue_nD double
CvHistogram hist
ints idx
# Matching
MatchTemplate
CvArr image
CvArr templ
CvArr result
int method
MatchShapes double
CvSeq object1
CvSeq object2
int method
double parameter 0
# Contour Processing Functions
ApproxChains CvSeq*
CvSeq src_seq
CvMemStorage storage
int method CV_CHAIN_APPROX_SIMPLE
double parameter 0
int minimal_perimeter 0
int recursive 0
BoundingRect CvRect
cvarrseq points
int update 0
ContourArea double
cvarrseq contour
CvSlice slice CV_WHOLE_SEQ
ArcLength double
cvarrseq curve
CvSlice slice CV_WHOLE_SEQ
int isClosed -1
# Computational Geometry
MaxRect CvRect
CvRect* rect1
CvRect* rect2
# TODO PointSeqFromMat
BoxPoints points
CvBox2D box
CvPoint2D32f_4 points /O,A
FitEllipse2 CvBox2D
CvArr points
ConvexHull2 CvSeq*
cvarrseq points
CvMemStorage storage
int orientation CV_CLOCKWISE
int return_points 0
CheckContourConvexity int
cvarrseq contour
ConvexityDefects CvSeqOfCvConvexityDefect*
cvarrseq contour
CvSeq convexhull
CvMemStorage storage
PointPolygonTest double
cvarrseq contour
CvPoint2D32f pt
int measure_dist
MinAreaRect2 CvBox2D
cvarrseq points
CvMemStorage storage NULL
MinEnclosingCircle int,center,radius
cvarrseq points
CvPoint2D32f center /O
float radius /O
# Planar Subdivisions
Subdiv2DGetEdge CvSubdiv2DEdge
CvSubdiv2DEdge edge
CvNextEdgeType type
Subdiv2DNextEdge CvSubdiv2DEdge
CvSubdiv2DEdge edge
Subdiv2DRotateEdge CvSubdiv2DEdge
CvSubdiv2DEdge edge
int rotate
Subdiv2DEdgeOrg CvSubdiv2DPoint*
CvSubdiv2DEdge edge
Subdiv2DEdgeDst CvSubdiv2DPoint*
CvSubdiv2DEdge edge
CreateSubdivDelaunay2D CvSubdiv2D*
CvRect rect
CvMemStorage storage
SubdivDelaunay2DInsert CvSubdiv2DPoint*
CvSubdiv2D* subdiv
CvPoint2D32f pt
CalcSubdivVoronoi2D
CvSubdiv2D* subdiv
ClearSubdivVoronoi2D
CvSubdiv2D* subdiv
FindNearestPoint2D CvSubdiv2DPoint*
CvSubdiv2D* subdiv
CvPoint2D32f pt
# Object Detection
HaarDetectObjects CvSeqOfCvAvgComp*
CvArr image
CvHaarClassifierCascade* cascade
CvMemStorage storage
double scale_factor 1.1 /ch_doubleAbove1
int min_neighbors 3
int flags 0
CvSize min_size cvSize(0,0)
ComputeCorrespondEpilines
CvMat points
int whichImage
CvMat F
CvMat lines
ConvertPointsHomogeneous
CvMat src
CvMat dst
ProjectPoints2
CvMat objectPoints
CvMat rvec
CvMat tvec
CvMat cameraMatrix
CvMat distCoeffs
CvMat imagePoints
CvMat dpdrot NULL
CvMat dpdt NULL
CvMat dpdf NULL
CvMat dpdc NULL
CvMat dpddist NULL
ReprojectImageTo3D
CvArr disparity
CvArr _3dImage
CvMat Q
int handleMissingValues 0
RQDecomp3x3 eulerAngles
CvMat M
CvMat R
CvMat Q
CvMat Qx NULL
CvMat Qy NULL
CvMat Qz NULL
CvPoint3D64f eulerAngles /O
FindHomography
CvMat srcPoints
CvMat dstPoints
CvMat H
int method 0
double ransacReprojThreshold 3.0
CvMat status NULL
CreateStereoBMState CvStereoBMState*
int preset CV_STEREO_BM_BASIC
int numberOfDisparities 0
CreateStereoGCState CvStereoGCState*
int numberOfDisparities
int maxIters
FindStereoCorrespondenceBM
CvArr left
CvArr right
CvArr disparity
CvStereoBMState* state
FindStereoCorrespondenceGC
CvArr left
CvArr right
CvArr dispLeft
CvArr dispRight
CvStereoGCState* state
int useDisparityGuess 0
CalibrateCamera2
CvMat objectPoints
CvMat imagePoints
CvMat pointCounts
CvSize imageSize
CvMat cameraMatrix
CvMat distCoeffs
CvMat rvecs
CvMat tvecs
int flags 0
CalibrationMatrixValues fovx,fovy,focalLength,principalPoint,pixelAspectRatio
CvMat calibMatr
CvSize image_size
double apertureWidth 0
double apertureHeight 0
double fovx /O
double fovy /O
double focalLength /O
CvPoint2D64f principalPoint /O
double pixelAspectRatio /O
FindExtrinsicCameraParams2
CvMat objectPoints
CvMat imagePoints
CvMat cameraMatrix
CvMat distCoeffs
CvMat rvec
CvMat tvec
int useExtrinsicGuess 0
FindFundamentalMat int
CvMat points1
CvMat points2
CvMat fundamentalMatrix
int method CV_FM_RANSAC
double param1 1.
double param2 0.99
CvMat status NULL
StereoCalibrate
CvMat objectPoints
CvMat imagePoints1
CvMat imagePoints2
CvMat pointCounts
CvMat cameraMatrix1
CvMat distCoeffs1
CvMat cameraMatrix2
CvMat distCoeffs2
CvSize imageSize
CvMat R
CvMat T
CvMat E NULL
CvMat F NULL
CvTermCriteria term_crit cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6)
int flags CV_CALIB_FIX_INTRINSIC
GetOptimalNewCameraMatrix
CvMat cameraMatrix
CvMat distCoeffs
CvSize imageSize
double alpha
CvMat newCameraMatrix
CvSize newImageSize cvSize(0,0)
CvRect* validPixROI NULL
InitIntrinsicParams2D
CvMat objectPoints
CvMat imagePoints
CvMat npoints
CvSize imageSize
CvMat cameraMatrix
double aspectRatio 1.
StereoRectify roi1,roi2
CvMat cameraMatrix1
CvMat cameraMatrix2
CvMat distCoeffs1
CvMat distCoeffs2
CvSize imageSize
CvMat R
CvMat T
CvMat R1
CvMat R2
CvMat P1
CvMat P2
CvMat Q NULL
int flags CV_CALIB_ZERO_DISPARITY
double alpha -1
CvSize newImageSize cvSize(0,0)
CvRect roi1 /O
CvRect roi2 /O
StereoRectifyUncalibrated
CvMat points1
CvMat points2
CvMat F
CvSize imageSize
CvMat H1
CvMat H2
double threshold 5
Rodrigues2
CvMat src
CvMat dst
CvMat jacobian 0
Undistort2
CvArr src
CvArr dst
CvMat cameraMatrix
CvMat distCoeffs
InitUndistortMap
CvMat cameraMatrix
CvMat distCoeffs
CvArr map1
CvArr map2
InitUndistortRectifyMap
CvMat cameraMatrix
CvMat distCoeffs
CvMat R
CvMat newCameraMatrix
CvArr map1
CvArr map2
UndistortPoints
CvMat src
CvMat dst
CvMat cameraMatrix
CvMat distCoeffs
CvMat R NULL
CvMat P NULL
DecomposeProjectionMatrix eulerAngles
CvMat projMatrix
CvMat cameraMatrix
CvMat rotMatrix
CvMat transVect
CvMat rotMatrX NULL
CvMat rotMatrY NULL
CvMat rotMatrZ NULL
CvPoint3D64f eulerAngles /O
DrawChessboardCorners
CvArr image
CvSize patternSize
CvPoint2D32fs corners
int patternWasFound
CreatePOSITObject CvPOSITObject*
CvPoint3D32fs points
POSIT rotationMatrix,translation_vector
CvPOSITObject* posit_object
CvPoint2D32f* imagePoints
double focal_length
CvTermCriteria criteria
CvMatr32f_i rotationMatrix /O,A
CvVect32f_i translation_vector /O,A
EstimateRigidTransform
CvArr A
CvArr B
CvMat M
int full_affine
# Accumulation of Background Statistics
Acc
CvArr image
CvArr sum
CvArr mask NULL
SquareAcc
CvArr image
CvArr sqsum
CvArr mask NULL
MultiplyAcc
CvArr image1
CvArr image2
CvArr acc
CvArr mask NULL
RunningAvg
CvArr image
CvArr acc
double alpha
CvArr mask NULL
# Motion Templates
UpdateMotionHistory
CvArr silhouette
CvArr mhi
double timestamp
double duration
CalcMotionGradient
CvArr mhi /ch_matF
CvArr mask
CvArr orientation /ch_matF
double delta1
double delta2
int apertureSize 3 /ch_aperture
CalcGlobalOrientation double
CvArr orientation
CvArr mask
CvArr mhi
double timestamp
double duration
SegmentMotion CvSeq*
CvArr mhi
CvArr seg_mask
CvMemStorage storage
double timestamp
double seg_thresh
# Object Tracking
MeanShift comp
CvArr prob_image
CvRect window
CvTermCriteria criteria
CvConnectedComp comp /O
CamShift int,comp,box
CvArr prob_image
CvRect window
CvTermCriteria criteria
CvConnectedComp comp /O
CvBox2D box /O
CreateKalman CvKalman*
int dynam_params
int measure_params
int control_params 0
KalmanCorrect ROCvMat*
CvKalman* kalman
CvMat measurement
KalmanPredict ROCvMat*
CvKalman* kalman
CvMat control NULL
SnakeImage points
IplImage image
CvPoints points
floats alpha
floats beta
floats gamma
CvSize win
CvTermCriteria criteria
int calc_gradient 1
# Optical Flow
CalcOpticalFlowLK
CvArr prev
CvArr curr
CvSize winSize
CvArr velx
CvArr vely
CalcOpticalFlowBM
CvArr prev /ch_image8
CvArr curr /ch_image8
CvSize blockSize
CvSize shiftSize
CvSize max_range
int usePrevious
CvArr velx /ch_vel
CvArr vely /ch_vel
CalcOpticalFlowHS
CvArr prev /ch_image8
CvArr curr /ch_image8
int usePrevious
CvArr velx /ch_vel_64
CvArr vely /ch_vel_64
double lambda
CvTermCriteria criteria
CalcOpticalFlowFarneback
CvArr prev /ch_image8
CvArr curr /ch_image8
CvArr flow
double pyr_scale 0.5
int levels 3
int winsize 15
int iterations 3
int poly_n 7
double poly_sigma 1.5
int flags 0
# Highgui
ConvertImage
CvArr src
CvArr dst
int flags 0
NamedWindow
char* name
int flags CV_WINDOW_AUTOSIZE
DestroyWindow
char* name
DestroyAllWindows
ResizeWindow
char* name
int width
int height
MoveWindow
char* name
int x
int y
ShowImage
char* name
CvArr image
GetTrackbarPos int
char* trackbarName
char* windowName
SetTrackbarPos
char* trackbarName
char* windowName
int pos
#WaitKey int
# int delay 0
SaveImage
char* filename
CvArr image
CaptureFromFile CvCapture*
char* filename
CreateFileCapture CvCapture*
char* filename
CaptureFromCAM CvCapture*
int index
CreateCameraCapture CvCapture*
int index
GrabFrame int
CvCapture* capture
RetrieveFrame ROIplImage*
CvCapture* capture
QueryFrame ROIplImage*
CvCapture* capture
GetCaptureProperty double
CvCapture* capture
int property_id
SetCaptureProperty int
CvCapture* capture
int property_id
double value
CreateVideoWriter CvVideoWriter*
char* filename
int fourcc
double fps
CvSize frame_size
int is_color 1
WriteFrame int
CvVideoWriter* writer
IplImage image
EncodeImage CvMat*
char* ext
CvArr image
ints0 params {&zero,1}
DecodeImage IplImage*
CvMat buf
int iscolor CV_LOAD_IMAGE_COLOR
DecodeImageM CvMat*
CvMat buf
int iscolor CV_LOAD_IMAGE_COLOR
StartWindowThread
SetWindowProperty
char* name
int prop_id
double prop_value
GetWindowProperty double
char* name
int prop_id
GetTickCount int64
GetTickFrequency int64
# cvaux stuff
HOGDetectMultiScale CvSeq*
CvArr image
CvMemStorage storage
CvArr svm_classifier NULL
CvSize win_stride cvSize(0,0)
double hit_threshold 0
double scale 1.05
int group_threshold 2
CvSize padding cvSize(0,0)
CvSize win_size cvSize(64,128)
CvSize block_size cvSize(16,16)
CvSize block_stride cvSize(8,8)
CvSize cell_size cvSize(8,8)
int nbins 9
int gammaCorrection 1
GrabCut
CvArr image
CvArr mask
CvRect rect
CvArr bgdModel
CvArr fgdModel
int iterCount
int mode
# These functions are handwritten in cv.cpp; they appear here as 'doconly' declarations
# so that their documentation can be auto-generated
ApproxPoly /doconly
cvarrseq src_seq
CvMemStorage storage
int method
double parameter 0.0
int parameter2 0
CalcEMD2 /doconly
CvArr signature1
CvArr signature2
int distance_type
PyCallableObject* distance_func NULL
CvArr cost_matrix NULL
CvArr flow NULL
float lower_bound 0.0
PyObject* userdata NULL
CalcOpticalFlowPyrLK currFeatures,status,track_error /doconly
CvArr prev
CvArr curr
CvArr prevPyr
CvArr currPyr
CvPoint2D32f* prevFeatures
CvSize winSize
int level
CvTermCriteria criteria
int flags
CvPoint2D32f* guesses
CvPoint2D32f currFeatures /O
char status /O
float track_error /O
ClipLine point1,point2 /doconly
CvSize imgSize
CvPoint pt1
CvPoint pt2
CreateData /doconly
CvArr arr
CreateHist CvHistogram /doconly
ints dims
int type
ranges ranges None
int uniform 1
CreateImageHeader IplImage* /doconly
CvSize size
int depth
int channels
CreateImage IplImage* /doconly
CvSize size
int depth
int channels
CreateMatHeader CvMat /doconly
int rows
int cols
int type
CreateMat CvMat /doconly
int rows
int cols
int type
CreateMatNDHeader CvMatND /doconly
ints dims
int type
CreateMatND CvMatND /doconly
ints dims
int type
CreateMemStorage CvMemStorage /doconly
int blockSize
CreateTrackbar /doconly
char* trackbarName
char* windowName
int value
int count
PyCallableObject* onChange
FindChessboardCorners corners /doconly
CvArr image
CvSize patternSize
CvPoint2D32fs corners /O
int flags CV_CALIB_CB_ADAPTIVE_THRESH
FindContours /doconly
CvArr image
CvMemStorage storage
int mode CV_RETR_LIST
int method CV_CHAIN_APPROX_SIMPLE
CvPoint offset (0,0)
FitLine line /doconly
CvArr points
int dist_type
double param
double reps
double aeps
PyObject* line /O
GetDims /doconly
CvArr arr
GetHuMoments hu /doconly
CvMoments moments
PyObject* hu /O
GetImage /doconly
CvMat arr
GetMat /doconly
IplImage arr
int allowND 0
GetMinMaxHistValue min_value,max_value,min_idx,max_idx /doconly
CvHistogram hist
CvScalar min_value /O
CvScalar max_value /O
ints min_idx /O
ints max_idx /O
InitLineIterator line_iterator /doconly
CvArr image
CvPoint pt1
CvPoint pt2
iter line_iterator /O
int connectivity 8
int left_to_right 0
LoadImageM /doconly
char* filename
int iscolor CV_LOAD_IMAGE_COLOR
LoadImage /doconly
char* filename
int iscolor CV_LOAD_IMAGE_COLOR
ReshapeMatND /doconly
CvMat arr
int newCn
ints newDims
Reshape /doconly
CvArr arr
int newCn
int newRows
SetData /doconly
CvArr arr
PyObject* data
int step
SetMouseCallback /doconly
char* windowName
PyCallableObject* onMouse
PyObject* param None
Subdiv2DLocate loc,where /doconly
CvSubdiv2D* subdiv
CvPoint2D32f pt
int loc /O
edgeorpoint where /O
WaitKey /doconly
int delay 0
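For orientation: in the api description above, each unindented line declares a wrapped function with its return value(s), if any, and optional /flags, while the indented lines beneath it list the arguments (type, name, optional default, optional /flags such as /O for output parameters). gen.py, shown further down, turns every entry into a function of the cv module in which arguments with defaults may also be passed as keywords. A rough usage sketch, assuming the module is built as cv and glossing over the converters handwritten in cv.cpp:

import cv

img = cv.CreateImage((320, 240), 8, 3)            # CreateImage(size, depth, channels) -> IplImage; 8 == IPL_DEPTH_8U
cv.Line(img, (0, 0), (319, 239), cv.CV_RGB(255, 0, 0),
        thickness=2, lineType=cv.CV_AA)           # optional arguments given as keywords
print cv.GetSize(img)                             # GetSize(arr) -> CvSize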
#define CV_BLUR_NO_SCALE 0
#define CV_BLUR 1
#define CV_GAUSSIAN 2
#define CV_MEDIAN 3
#define CV_BILATERAL 4
#define CV_INPAINT_NS 0
#define CV_INPAINT_TELEA 1
#define CV_SCHARR -1
#define CV_MAX_SOBEL_KSIZE 7
#define CV_BGR2BGRA 0
#define CV_RGB2RGBA CV_BGR2BGRA
#define CV_BGRA2BGR 1
#define CV_RGBA2RGB CV_BGRA2BGR
#define CV_BGR2RGBA 2
#define CV_RGB2BGRA CV_BGR2RGBA
#define CV_RGBA2BGR 3
#define CV_BGRA2RGB CV_RGBA2BGR
#define CV_BGR2RGB 4
#define CV_RGB2BGR CV_BGR2RGB
#define CV_BGRA2RGBA 5
#define CV_RGBA2BGRA CV_BGRA2RGBA
#define CV_BGR2GRAY 6
#define CV_RGB2GRAY 7
#define CV_GRAY2BGR 8
#define CV_GRAY2RGB CV_GRAY2BGR
#define CV_GRAY2BGRA 9
#define CV_GRAY2RGBA CV_GRAY2BGRA
#define CV_BGRA2GRAY 10
#define CV_RGBA2GRAY 11
#define CV_BGR2BGR565 12
#define CV_RGB2BGR565 13
#define CV_BGR5652BGR 14
#define CV_BGR5652RGB 15
#define CV_BGRA2BGR565 16
#define CV_RGBA2BGR565 17
#define CV_BGR5652BGRA 18
#define CV_BGR5652RGBA 19
#define CV_GRAY2BGR565 20
#define CV_BGR5652GRAY 21
#define CV_BGR2BGR555 22
#define CV_RGB2BGR555 23
#define CV_BGR5552BGR 24
#define CV_BGR5552RGB 25
#define CV_BGRA2BGR555 26
#define CV_RGBA2BGR555 27
#define CV_BGR5552BGRA 28
#define CV_BGR5552RGBA 29
#define CV_GRAY2BGR555 30
#define CV_BGR5552GRAY 31
#define CV_BGR2XYZ 32
#define CV_RGB2XYZ 33
#define CV_XYZ2BGR 34
#define CV_XYZ2RGB 35
#define CV_BGR2YCrCb 36
#define CV_RGB2YCrCb 37
#define CV_YCrCb2BGR 38
#define CV_YCrCb2RGB 39
#define CV_BGR2HSV 40
#define CV_RGB2HSV 41
#define CV_BGR2Lab 44
#define CV_RGB2Lab 45
#define CV_BayerBG2BGR 46
#define CV_BayerGB2BGR 47
#define CV_BayerRG2BGR 48
#define CV_BayerGR2BGR 49
#define CV_BayerBG2RGB CV_BayerRG2BGR
#define CV_BayerGB2RGB CV_BayerGR2BGR
#define CV_BayerRG2RGB CV_BayerBG2BGR
#define CV_BayerGR2RGB CV_BayerGB2BGR
#define CV_BayerBG2BGR_VNG 62
#define CV_BayerGB2BGR_VNG 63
#define CV_BayerRG2BGR_VNG 64
#define CV_BayerGR2BGR_VNG 65
#define CV_BGR2Luv 50
#define CV_RGB2Luv 51
#define CV_BGR2HLS 52
#define CV_RGB2HLS 53
#define CV_HSV2BGR 54
#define CV_HSV2RGB 55
#define CV_Lab2BGR 56
#define CV_Lab2RGB 57
#define CV_Luv2BGR 58
#define CV_Luv2RGB 59
#define CV_HLS2BGR 60
#define CV_HLS2RGB 61
#define CV_COLORCVT_MAX 100
#define CV_INTER_NN 0
#define CV_INTER_LINEAR 1
#define CV_INTER_CUBIC 2
#define CV_INTER_AREA 3
#define CV_WARP_FILL_OUTLIERS 8
#define CV_WARP_INVERSE_MAP 16
#define CV_SHAPE_RECT 0
#define CV_SHAPE_CROSS 1
#define CV_SHAPE_ELLIPSE 2
#define CV_SHAPE_CUSTOM 100
#define CV_MOP_OPEN 2
#define CV_MOP_CLOSE 3
#define CV_MOP_GRADIENT 4
#define CV_MOP_TOPHAT 5
#define CV_MOP_BLACKHAT 6
#define CV_TM_SQDIFF 0
#define CV_TM_SQDIFF_NORMED 1
#define CV_TM_CCORR 2
#define CV_TM_CCORR_NORMED 3
#define CV_TM_CCOEFF 4
#define CV_TM_CCOEFF_NORMED 5
#define CV_LKFLOW_PYR_A_READY 1
#define CV_LKFLOW_PYR_B_READY 2
#define CV_LKFLOW_INITIAL_GUESSES 4
#define CV_LKFLOW_GET_MIN_EIGENVALS 8
#define CV_POLY_APPROX_DP 0
#define CV_CONTOURS_MATCH_I1 1
#define CV_CONTOURS_MATCH_I2 2
#define CV_CONTOURS_MATCH_I3 3
#define CV_CLOCKWISE 1
#define CV_COUNTER_CLOCKWISE 2
#define CV_COMP_CORREL 0
#define CV_COMP_CHISQR 1
#define CV_COMP_INTERSECT 2
#define CV_COMP_BHATTACHARYYA 3
#define CV_DIST_MASK_3 3
#define CV_DIST_MASK_5 5
#define CV_DIST_MASK_PRECISE 0
#define CV_THRESH_BINARY 0 /* value = value > threshold ? max_value : 0 */
#define CV_THRESH_BINARY_INV 1 /* value = value > threshold ? 0 : max_value */
#define CV_THRESH_TRUNC 2 /* value = value > threshold ? threshold : value */
#define CV_THRESH_TOZERO 3 /* value = value > threshold ? value : 0 */
#define CV_THRESH_TOZERO_INV 4 /* value = value > threshold ? 0 : value */
#define CV_THRESH_MASK 7
#define CV_THRESH_OTSU 8 /* use Otsu algorithm to choose the optimal threshold value; combine the flag with one of the above CV_THRESH_* values */
#define CV_ADAPTIVE_THRESH_MEAN_C 0
#define CV_ADAPTIVE_THRESH_GAUSSIAN_C 1
#define CV_FLOODFILL_FIXED_RANGE (1 << 16)
#define CV_FLOODFILL_MASK_ONLY (1 << 17)
#define CV_CANNY_L2_GRADIENT (1 << 31)
#define CV_HOUGH_STANDARD 0
#define CV_HOUGH_PROBABILISTIC 1
#define CV_HOUGH_MULTI_SCALE 2
#define CV_HOUGH_GRADIENT 3
#define CV_HAAR_DO_CANNY_PRUNING 1
#define CV_HAAR_SCALE_IMAGE 2
#define CV_HAAR_FIND_BIGGEST_OBJECT 4
#define CV_HAAR_DO_ROUGH_SEARCH 8
#define CV_LMEDS 4
#define CV_RANSAC 8
#define CV_CALIB_CB_ADAPTIVE_THRESH 1
#define CV_CALIB_CB_NORMALIZE_IMAGE 2
#define CV_CALIB_CB_FILTER_QUADS 4
#define CV_CALIB_USE_INTRINSIC_GUESS 1
#define CV_CALIB_FIX_ASPECT_RATIO 2
#define CV_CALIB_FIX_PRINCIPAL_POINT 4
#define CV_CALIB_ZERO_TANGENT_DIST 8
#define CV_CALIB_FIX_FOCAL_LENGTH 16
#define CV_CALIB_FIX_K1 32
#define CV_CALIB_FIX_K2 64
#define CV_CALIB_FIX_K3 128
#define CV_CALIB_FIX_INTRINSIC 256
#define CV_CALIB_SAME_FOCAL_LENGTH 512
#define CV_CALIB_ZERO_DISPARITY 1024
#define CV_FM_7POINT 1
#define CV_FM_8POINT 2
#define CV_FM_LMEDS_ONLY CV_LMEDS
#define CV_FM_RANSAC_ONLY CV_RANSAC
#define CV_FM_LMEDS CV_LMEDS
#define CV_FM_RANSAC CV_RANSAC
#define CV_STEREO_BM_NORMALIZED_RESPONSE 0
#define CV_STEREO_BM_BASIC 0
#define CV_STEREO_BM_FISH_EYE 1
#define CV_STEREO_BM_NARROW 2
#define CV_STEREO_GC_OCCLUDED SHRT_MAX
#define CV_AUTOSTEP 0x7fffffff
#define CV_MAX_ARR 10
#define CV_NO_DEPTH_CHECK 1
#define CV_NO_CN_CHECK 2
#define CV_NO_SIZE_CHECK 4
#define CV_CMP_EQ 0
#define CV_CMP_GT 1
#define CV_CMP_GE 2
#define CV_CMP_LT 3
#define CV_CMP_LE 4
#define CV_CMP_NE 5
#define CV_CHECK_RANGE 1
#define CV_CHECK_QUIET 2
#define CV_RAND_UNI 0
#define CV_RAND_NORMAL 1
#define CV_SORT_EVERY_ROW 0
#define CV_SORT_EVERY_COLUMN 1
#define CV_SORT_ASCENDING 0
#define CV_SORT_DESCENDING 16
#define CV_GEMM_A_T 1
#define CV_GEMM_B_T 2
#define CV_GEMM_C_T 4
#define CV_SVD_MODIFY_A 1
#define CV_SVD_U_T 2
#define CV_SVD_V_T 4
#define CV_LU 0
#define CV_SVD 1
#define CV_SVD_SYM 2
#define CV_CHOLESKY 3
#define CV_QR 4
#define CV_NORMAL 16
#define CV_COVAR_SCRAMBLED 0
#define CV_COVAR_NORMAL 1
#define CV_COVAR_USE_AVG 2
#define CV_COVAR_SCALE 4
#define CV_COVAR_ROWS 8
#define CV_COVAR_COLS 16
#define CV_PCA_DATA_AS_ROW 0
#define CV_PCA_DATA_AS_COL 1
#define CV_PCA_USE_AVG 2
#define CV_C 1
#define CV_L1 2
#define CV_L2 4
#define CV_NORM_MASK 7
#define CV_RELATIVE 8
#define CV_DIFF 16
#define CV_MINMAX 32
#define CV_DIFF_C (CV_DIFF | CV_C)
#define CV_DIFF_L1 (CV_DIFF | CV_L1)
#define CV_DIFF_L2 (CV_DIFF | CV_L2)
#define CV_RELATIVE_C (CV_RELATIVE | CV_C)
#define CV_RELATIVE_L1 (CV_RELATIVE | CV_L1)
#define CV_RELATIVE_L2 (CV_RELATIVE | CV_L2)
#define CV_REDUCE_SUM 0
#define CV_REDUCE_AVG 1
#define CV_REDUCE_MAX 2
#define CV_REDUCE_MIN 3
#define CV_DXT_FORWARD 0
#define CV_DXT_INVERSE 1
#define CV_DXT_SCALE 2 /* divide result by size of array */
#define CV_DXT_INV_SCALE (CV_DXT_INVERSE + CV_DXT_SCALE)
#define CV_DXT_INVERSE_SCALE CV_DXT_INV_SCALE
#define CV_DXT_ROWS 4 /* transform each row individually */
#define CV_DXT_MUL_CONJ 8 /* conjugate the second argument of cvMulSpectrums */
#define CV_FRONT 1
#define CV_BACK 0
#define CV_GRAPH_VERTEX 1
#define CV_GRAPH_TREE_EDGE 2
#define CV_GRAPH_BACK_EDGE 4
#define CV_GRAPH_FORWARD_EDGE 8
#define CV_GRAPH_CROSS_EDGE 16
#define CV_GRAPH_ANY_EDGE 30
#define CV_GRAPH_NEW_TREE 32
#define CV_GRAPH_BACKTRACKING 64
#define CV_GRAPH_OVER -1
#define CV_GRAPH_ALL_ITEMS -1
#define CV_GRAPH_ITEM_VISITED_FLAG (1 << 30)
#define CV_GRAPH_SEARCH_TREE_NODE_FLAG (1 << 29)
#define CV_GRAPH_FORWARD_EDGE_FLAG (1 << 28)
#define CV_FILLED -1
#define CV_AA 16
#define CV_FONT_HERSHEY_SIMPLEX 0
#define CV_FONT_HERSHEY_PLAIN 1
#define CV_FONT_HERSHEY_DUPLEX 2
#define CV_FONT_HERSHEY_COMPLEX 3
#define CV_FONT_HERSHEY_TRIPLEX 4
#define CV_FONT_HERSHEY_COMPLEX_SMALL 5
#define CV_FONT_HERSHEY_SCRIPT_SIMPLEX 6
#define CV_FONT_HERSHEY_SCRIPT_COMPLEX 7
#define CV_FONT_ITALIC 16
#define CV_FONT_VECTOR0 CV_FONT_HERSHEY_SIMPLEX
#define CV_KMEANS_USE_INITIAL_LABELS 1
#define CV_ErrModeLeaf 0 /* Print error and exit program */
#define CV_ErrModeParent 1 /* Print error and continue */
#define CV_ErrModeSilent 2 /* Don't print and continue */
#define CV_RETR_EXTERNAL 0
#define CV_RETR_LIST 1
#define CV_RETR_CCOMP 2
#define CV_RETR_TREE 3
#define CV_CHAIN_CODE 0
#define CV_CHAIN_APPROX_NONE 1
#define CV_CHAIN_APPROX_SIMPLE 2
#define CV_CHAIN_APPROX_TC89_L1 3
#define CV_CHAIN_APPROX_TC89_KCOS 4
#define CV_LINK_RUNS 5
#define CV_SUBDIV2D_VIRTUAL_POINT_FLAG (1 << 30)
#define CV_DIST_USER -1 /* User defined distance */
#define CV_DIST_L1 1 /* distance = |x1-x2| + |y1-y2| */
#define CV_DIST_L2 2 /* the simple euclidean distance */
#define CV_DIST_C 3 /* distance = max(|x1-x2|,|y1-y2|) */
#define CV_DIST_L12 4 /* L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) */
#define CV_DIST_FAIR 5 /* distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 */
#define CV_DIST_WELSCH 6 /* distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 */
#define CV_DIST_HUBER 7 /* distance = |x|<c ? x^2/2 : c(|x|-c/2), c=1.345 */
#define CV_HAAR_MAGIC_VAL 0x42500000
#define CV_HAAR_FEATURE_MAX 3
#define CV_TERMCRIT_ITER 1
#define CV_TERMCRIT_NUMBER CV_TERMCRIT_ITER
#define CV_TERMCRIT_EPS 2
#define CV_EVENT_MOUSEMOVE 0
#define CV_EVENT_LBUTTONDOWN 1
#define CV_EVENT_RBUTTONDOWN 2
#define CV_EVENT_MBUTTONDOWN 3
#define CV_EVENT_LBUTTONUP 4
#define CV_EVENT_RBUTTONUP 5
#define CV_EVENT_MBUTTONUP 6
#define CV_EVENT_LBUTTONDBLCLK 7
#define CV_EVENT_RBUTTONDBLCLK 8
#define CV_EVENT_MBUTTONDBLCLK 9
#define CV_EVENT_FLAG_LBUTTON 1
#define CV_EVENT_FLAG_RBUTTON 2
#define CV_EVENT_FLAG_MBUTTON 4
#define CV_EVENT_FLAG_CTRLKEY 8
#define CV_EVENT_FLAG_SHIFTKEY 16
#define CV_EVENT_FLAG_ALTKEY 32
#define CV_MAX_DIM 32
#define CV_CAP_PROP_POS_MSEC 0
#define CV_CAP_PROP_POS_FRAMES 1
#define CV_CAP_PROP_POS_AVI_RATIO 2
#define CV_CAP_PROP_FRAME_WIDTH 3
#define CV_CAP_PROP_FRAME_HEIGHT 4
#define CV_CAP_PROP_FPS 5
#define CV_CAP_PROP_FOURCC 6
#define CV_CAP_PROP_FRAME_COUNT 7
#define CV_CAP_PROP_FORMAT 8
#define CV_CAP_PROP_MODE 9
#define CV_CAP_PROP_BRIGHTNESS 10
#define CV_CAP_PROP_CONTRAST 11
#define CV_CAP_PROP_SATURATION 12
#define CV_CAP_PROP_HUE 13
#define CV_CAP_PROP_GAIN 14
#define CV_CAP_PROP_EXPOSURE 15
#define CV_CAP_PROP_CONVERT_RGB 16
#define CV_CAP_PROP_RECTIFICATION 18
#define CV_CN_SHIFT 3
#define CV_IMWRITE_JPEG_QUALITY 1
#define CV_IMWRITE_PNG_COMPRESSION 16
#define CV_IMWRITE_PXM_BINARY 32
#define IPL_ORIGIN_TL 0
#define IPL_ORIGIN_BL 1
#define CV_GAUSSIAN_5x5
#define CV_CN_MAX
#define CV_WINDOW_AUTOSIZE 1
#define CV_WINDOW_NORMAL 0
#define CV_WINDOW_FULLSCREEN 1
#define HG_AUTOSIZE CV_WINDOW_AUTOSIZE
#define CV_CVTIMG_FLIP 1
#define CV_CVTIMG_SWAP_RB 2
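Each #define above is re-exported to Python by gen.py (see the loop over the defs file below), which emits one PUBLISH(NAME); line per constant. PUBLISH itself lives in the handwritten cv.cpp, which is not part of this excerpt, but the intent is that every constant becomes an attribute of the cv module:

import cv
print cv.CV_THRESH_BINARY, cv.CV_AA, cv.CV_WINDOW_AUTOSIZE   # -> 0 16 1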
import sys
from string import Template
class argument:
def __init__(self, fields):
self.ty = fields[0]
self.nm = fields[1]
self.flags = ""
self.init = None
if len(fields) > 2:
if fields[2][0] == '/':
self.flags = fields[2][1:].split(",")
else:
self.init = fields[2]
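# Format of the "api" file parsed below (one entry per wrapped function):
#   - lines starting with '#' are comments and are skipped
#   - an unindented line opens a new entry: NAME [RETURNTYPE[,other_outputs]] [/flags]
#   - each indented line that follows is one argument: TYPE NAME, optionally followed
#     by a default value or by /flags (e.g. /O marks an output parameter)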
api = []
for l in open("%s/api" % sys.argv[1]):
if l[0] == '#':
continue
l = l.rstrip()
if (not l.startswith(' ')) and ('/' in l):
(l, flags) = l.split('/')
else:
flags = ""
f = l.split()
if len(f) != 0:
if l[0] != ' ':
if len(f) > 1:
ty = f[1]
else:
ty = None
api.append((f[0], [], ty, flags))
else:
api[-1][1].append(argument(f))
# Validation: check that any optional arguments are last
had_error = False
for (f, args, ty, flags) in api:
if f == 'PolarToCart':
print f, [(a.init != None) for a in args]
has_init = [(a.init != None) for a in args if not 'O' in a.flags]
if True in has_init and not all(has_init[has_init.index(True):]):
print 'Error in definition for "%s", optional arguments must be last' % f
had_error = True
if had_error:
sys.exit(1)
def cname(n):
if n.startswith("CV"):
return '_' + n
elif n[0].isdigit():
return '_' + n
else:
return n
# RHS is how the aggregate gets expanded in the C call
aggregate = {
'pts_npts_contours' : '!.pts,!.npts,!.contours',
'cvarr_count' : '!.cvarr,!.count',
'cvarr_plane_count' : '!.cvarr,!.count',
'floats' : '!.f',
'ints' : '!.i',
'ints0' : '!.i',
'CvPoints' : '!.p,!.count',
'CvPoint2D32fs' : '!.p,!.count',
'CvPoint3D32fs' : '!.p,!.count',
'cvarrseq' : '!.seq',
'CvArrs' : '!.ims',
'IplImages' : '!.ims',
'intpair' : '!.pairs,!.count',
'cvpoint2d32f_count' : '!.points,&!.count'
}
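# Illustration of how these right-hand sides are used by invokename() further down:
#   aggregate['pts_npts_contours'].replace('!', 'polys')
# evaluates to 'polys.pts,polys.npts,polys.contours', i.e. the fields of the helper
# struct that are actually passed to the C function.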
conversion_types = [
'char',
'CvArr',
'CvArrSeq',
'CvBox2D', # '((ff)(ff)f)',
'CvBox2D*',
'CvCapture*',
'CvStereoBMState*',
'CvStereoGCState*',
'CvKalman*',
'CvVideoWriter*',
'CvContourTree*',
'CvFont',
'CvFont*',
'CvHaarClassifierCascade*',
'CvHistogram',
'CvMat',
'CvMatND',
'CvMemStorage',
'CvMoments',
'CvMoments*',
'CvNextEdgeType',
'CvPoint',
'CvPoint*',
'CvPoint2D32f', # '(ff)',
'CvPoint2D32f*',
'CvPoint3D32f*',
'CvPoint2D64f',
'CvPOSITObject*',
'CvRect',
'CvRect*',
'CvRNG*',
'CvScalar',
'CvSeq',
'CvSeqOfCvConvexityDefect',
'CvSize',
'CvSlice',
'CvStarDetectorParams',
'CvSubdiv2D*',
'CvSubdiv2DEdge',
'CvTermCriteria',
'generic',
'IplConvKernel*',
'IplImage',
'PyObject*',
'PyCallableObject*'
]
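# Arguments of the types listed above (and of the aggregate types) arrive from Python
# as a raw PyObject* ('O' in the PyArg_Parse format string) and are then converted with
# a convert_to_<type>() call inside the generated wrapper body; see gen() below.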
def safename(s):
return s.replace('*', 'PTR').replace('[', '_').replace(']', '_')
def has_optional(al):
""" return true if any argument is optional """
return any([a.init for a in al])
def gen(name, args, ty, flags):
yield ""
if has_optional(args):
yield "static PyObject *pycv%s(PyObject *self, PyObject *args, PyObject *kw)" % cname(name)
else:
yield "static PyObject *pycv%s(PyObject *self, PyObject *args)" % cname(name)
if 'doconly' in flags:
yield ";"
else:
yield "{"
destinations = []
for a in args:
remap = {
'CvArr' : 'CvArr*',
'CvMat' : 'CvMat*',
'CvMatND' : 'CvMatND*',
'IplImage' : 'IplImage*',
'CvMemStorage' : 'CvMemStorage*',
'CvHistogram':'CvHistogram*',
'CvSeq':'CvSeq*',
'CvHaarClassifierCascade' : 'CvHaarClassifierCascade*'
}
ctype = remap.get(a.ty, a.ty)
if a.init:
init = " = %s" % a.init
else:
init = ''
yield " %s %s%s;" % (ctype, a.nm, init)
if 'O' in a.flags:
continue
if a.ty in (conversion_types + aggregate.keys()):
yield ' PyObject *pyobj_%s = NULL;' % (a.nm)
destinations.append('&pyobj_%s' % (a.nm))
elif a.ty in [ 'CvPoint2D32f' ]:
destinations.append('&%s.x, &%s.y' % (a.nm, a.nm))
elif a.ty in [ 'CvTermCriteria' ]:
destinations.append('&%s.type, &%s.max_iter, &%s.epsilon' % ((a.nm,)*3))
elif a.ty in [ 'CvSURFParams' ]:
destinations.append('&%s.extended, &%s.hessianThreshold, &%s.nOctaves, &%s.nOctaveLayers' % ((a.nm,)*4))
elif a.nm in [ 'CvBox2D' ]:
s = ", ".join([('&' + a.nm +'.' + fld) for fld in [ 'center.x', 'center.y', 'size.width', 'size.height', 'angle' ] ])
destinations.append(s)
else:
destinations.append('&%s' % a.nm)
fmap = {
'CvSURFParams' : '(idii)',
'double' : 'd',
'float' : 'f',
'int' : 'i',
'int64' : 'L',
'char*' : 's',
}
for k in (conversion_types + aggregate.keys()):
fmap[k] = 'O'
in_args = [ a for a in args if not 'O' in a.flags ]
fmt0 = "".join([ fmap[a.ty] for a in in_args if not a.init])
fmt1 = "".join([ fmap[a.ty] for a in in_args if a.init])
yield ''
if len(fmt0 + fmt1) > 0:
if len(fmt1) > 0:
yield ' const char *keywords[] = { %s };' % (", ".join([ '"%s"' % arg.nm for arg in args if not 'O' in arg.flags ] + ['NULL']))
yield ' if (!PyArg_ParseTupleAndKeywords(args, kw, "%s|%s", %s))' % (fmt0, fmt1, ", ".join(['(char**)keywords'] + destinations))
if '(' in (fmt0 + fmt1):
print "Tuple with kwargs is not allowed, function", name
sys.exit(1)
else:
yield ' if (!PyArg_ParseTuple(args, "%s", %s))' % (fmt0, ", ".join(destinations))
yield ' return NULL;'
# Do the conversions:
for a in args:
joinwith = [f[2:] for f in a.flags if f.startswith("J:")]
if len(joinwith) > 0:
yield 'preShareData(%s, &%s);' % (joinwith[0], a.nm)
if 'O' in a.flags:
continue
if a.ty in (conversion_types + aggregate.keys()):
if a.init:
pred = '(pyobj_%s != NULL) && ' % a.nm
else:
pred = ''
yield ' if (%s!convert_to_%s(pyobj_%s, &%s, "%s")) return NULL;' % (pred, safename(a.ty), a.nm, a.nm, a.nm)
yield '#ifdef CVPY_VALIDATE_%s' % name
yield 'CVPY_VALIDATE_%s();' % name
yield '#endif'
def invokename(a):
if 'K' in a.flags:
prefix = "(const CvArr **)"
elif 'O' in a.flags and not 'A' in a.flags:
prefix = "&"
else:
prefix = ""
if a.ty in aggregate:
return prefix + aggregate[a.ty].replace('!', a.nm)
else:
return prefix + a.nm
def funcname(s):
# The name by which the function is called, in C
if s.startswith("CV"):
return s
else:
return "cv" + s
tocall = '%s(%s)' % (funcname(name), ", ".join(invokename(a) for a in args))
if 'stub' in flags:
yield ' return stub%s(%s);' % (name, ", ".join(invokename(a) for a in args))
elif ty == None:
yield ' ERRWRAP(%s);' % tocall
yield ' Py_RETURN_NONE;'
else:
Rtypes = [
'int',
'int64',
'double',
'CvCapture*',
'CvVideoWriter*',
'CvPOSITObject*',
'CvScalar',
'CvSize',
'CvRect',
'CvSeq*',
'CvBox2D',
'CvSeqOfCvAvgComp*',
'CvSeqOfCvConvexityDefect*',
'CvSeqOfCvStarKeypoint*',
'CvSeqOfCvSURFPoint*',
'CvSeqOfCvSURFDescriptor*',
'CvContourTree*',
'IplConvKernel*',
'IplImage*',
'CvMat*',
'constCvMat*',
'ROCvMat*',
'CvMatND*',
'CvPoint2D32f_4',
'CvRNG',
'CvSubdiv2D*',
'CvSubdiv2DPoint*',
'CvSubdiv2DEdge',
'ROIplImage*',
'CvStereoBMState*',
'CvStereoGCState*',
'CvKalman*',
'float',
'generic',
'unsigned' ]
if ty in Rtypes:
yield ' %s r;' % (ty)
yield ' ERRWRAP(r = %s);' % (tocall)
yield ' return FROM_%s(r);' % safename(ty)
else:
all_returns = ty.split(",")
return_value_from_call = len(set(Rtypes) & set(all_returns)) != 0
if return_value_from_call:
yield ' %s r;' % list(set(Rtypes) & set(all_returns))[0]
yield ' ERRWRAP(r = %s);' % (tocall)
else:
yield ' ERRWRAP(%s);' % (tocall)
typed = dict([ (a.nm,a.ty) for a in args])
for i in range(len(all_returns)):
if all_returns[i] in Rtypes:
typed['r'] = all_returns[i]
all_returns[i] = "r"
if len(all_returns) == 1:
af = dict([ (a.nm,a.flags) for a in args])
joinwith = [f[2:] for f in af.get(all_returns[0], []) if f.startswith("J:")]
if len(joinwith) > 0:
yield ' return shareData(pyobj_%s, %s, %s);' % (joinwith[0], joinwith[0], all_returns[0])
else:
yield ' return FROM_%s(%s);' % (safename(typed[all_returns[0]]), all_returns[0])
else:
yield ' return Py_BuildValue("%s", %s);' % ("N" * len(all_returns), ", ".join(["FROM_%s(%s)" % (safename(typed[n]), n) for n in all_returns]))
yield '}'
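# Output streams generated0.i .. generated4.i, used when building the cv module:
#   gen_c[0] - wrapper function bodies produced by gen()
#   gen_c[1] - PyMethodDef table entries
#   gen_c[2] - PUBLISH() lines for the constants in the "defs" file
#   gen_c[3] - Python object wrappers for the structs in `objects` below
#   gen_c[4] - MKTYPE() initialization calls for those object types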
gen_c = [ open("generated%d.i" % i, "w") for i in range(5) ]
print "Generated %d functions" % len(api)
for nm,args,ty,flags in sorted(api):
# Figure out docstring into ds_*
ds_args = []
mandatory = [a.nm for a in args if not ('O' in a.flags) and not a.init]
optional = [a.nm for a in args if not ('O' in a.flags) and a.init]
ds_args = ", ".join(mandatory)
def o2s(o):
if o == []:
return ""
else:
return ' [, %s%s]' % (o[0], o2s(o[1:]))
ds_args += o2s(optional)
ds = "%s(%s) -> %s" % (nm, ds_args, str(ty))
print ds
if has_optional(args):
entry = '{"%%s", (PyCFunction)pycv%s, METH_KEYWORDS, "%s"},' % (cname(nm), ds)
else:
entry = '{"%%s", pycv%s, METH_VARARGS, "%s"},' % (cname(nm), ds)
print >>gen_c[1], entry % (nm)
if nm.startswith('CV_'):
print >>gen_c[1], entry % (nm[3:])
for l in gen(nm,args,ty,flags):
print >>gen_c[0], l
for l in open("%s/defs" % sys.argv[1]):
print >>gen_c[2], "PUBLISH(%s);" % l.split()[1]
########################################################################
# Generated objects.
########################################################################
# gen_c[3] is the code, gen_c[4] initializers
gensimple = Template("""
/*
${cvtype} is the OpenCV C struct
${ourname}_t is the Python object
*/
struct ${ourname}_t {
PyObject_HEAD
${cvtype} v;
};
static PyObject *${ourname}_repr(PyObject *self)
{
${ourname}_t *p = (${ourname}_t*)self;
char str[1000];
sprintf(str, "<${ourname} %p>", p);
return PyString_FromString(str);
}
${getset_funcs}
static PyGetSetDef ${ourname}_getseters[] = {
${getset_inits}
{NULL} /* Sentinel */
};
static PyTypeObject ${ourname}_Type = {
PyObject_HEAD_INIT(&PyType_Type)
0, /*size*/
MODULESTR".${ourname}", /*name*/
sizeof(${ourname}_t), /*basicsize*/
};
static void ${ourname}_specials(void)
{
${ourname}_Type.tp_repr = ${ourname}_repr;
${ourname}_Type.tp_getset = ${ourname}_getseters;
}
static PyObject *FROM_${cvtype}(${cvtype} r)
{
${ourname}_t *m = PyObject_NEW(${ourname}_t, &${ourname}_Type);
m->v = r;
return (PyObject*)m;
}
static int convert_to_${cvtype}PTR(PyObject *o, ${cvtype}** dst, const char *name = "no_name")
{
${allownull}
if (PyType_IsSubtype(o->ob_type, &${ourname}_Type)) {
*dst = &(((${ourname}_t*)o)->v);
return 1;
} else {
(*dst) = (${cvtype}*)NULL;
return failmsg("Expected ${cvtype} for argument '%s'", name);
}
}
""")
genptr = Template("""
/*
${cvtype} is the OpenCV C struct
${ourname}_t is the Python object
*/
struct ${ourname}_t {
PyObject_HEAD
${cvtype} *v;
};
static void ${ourname}_dealloc(PyObject *self)
{
${ourname}_t *p = (${ourname}_t*)self;
cvRelease${ourname}(&p->v);
PyObject_Del(self);
}
static PyObject *${ourname}_repr(PyObject *self)
{
${ourname}_t *p = (${ourname}_t*)self;
char str[1000];
sprintf(str, "<${ourname} %p>", p);
return PyString_FromString(str);
}
${getset_funcs}
static PyGetSetDef ${ourname}_getseters[] = {
${getset_inits}
{NULL} /* Sentinel */
};
static PyTypeObject ${ourname}_Type = {
PyObject_HEAD_INIT(&PyType_Type)
0, /*size*/
MODULESTR".${ourname}", /*name*/
sizeof(${ourname}_t), /*basicsize*/
};
static void ${ourname}_specials(void)
{
${ourname}_Type.tp_dealloc = ${ourname}_dealloc;
${ourname}_Type.tp_repr = ${ourname}_repr;
${ourname}_Type.tp_getset = ${ourname}_getseters;
}
static PyObject *FROM_${cvtype}PTR(${cvtype} *r)
{
${ourname}_t *m = PyObject_NEW(${ourname}_t, &${ourname}_Type);
m->v = r;
return (PyObject*)m;
}
static int convert_to_${cvtype}PTR(PyObject *o, ${cvtype}** dst, const char *name = "no_name")
{
${allownull}
if (PyType_IsSubtype(o->ob_type, &${ourname}_Type)) {
*dst = ((${ourname}_t*)o)->v;
return 1;
} else {
(*dst) = (${cvtype}*)NULL;
return failmsg("Expected ${cvtype} for argument '%s'", name);
}
}
""")
getset_func_template = Template("""
static PyObject *${ourname}_get_${member}(${ourname}_t *p, void *closure)
{
return ${rconverter}(p->v${accessor}${member});
}
static int ${ourname}_set_${member}(${ourname}_t *p, PyObject *value, void *closure)
{
if (value == NULL) {
PyErr_SetString(PyExc_TypeError, "Cannot delete the ${member} attribute");
return -1;
}
if (! ${checker}(value)) {
PyErr_SetString(PyExc_TypeError, "The ${member} attribute value must be a ${typename}");
return -1;
}
p->v${accessor}${member} = ${converter}(value);
return 0;
}
""")
getset_init_template = Template("""
{(char*)"${member}", (getter)${ourname}_get_${member}, (setter)${ourname}_set_${member}, (char*)"${member}", NULL},
""")
objects = [
( 'IplConvKernel', ['allownull'], {
"nCols" : 'i',
"nRows" : 'i',
"anchorX" : 'i',
"anchorY" : 'i',
}),
( 'CvCapture', [], {}),
( 'CvHaarClassifierCascade', [], {}),
( 'CvPOSITObject', [], {}),
( 'CvVideoWriter', [], {}),
( 'CvStereoBMState', [], {
"preFilterType" : 'i',
"preFilterSize" : 'i',
"preFilterCap" : 'i',
"SADWindowSize" : 'i',
"minDisparity" : 'i',
"numberOfDisparities" : 'i',
"textureThreshold" : 'i',
"uniquenessRatio" : 'i',
"speckleWindowSize" : 'i',
"speckleRange" : 'i',
}),
( 'CvStereoGCState', [], {
"Ithreshold" : 'i',
"interactionRadius" : 'i',
"K" : 'f',
"lambda" : 'f',
"lambda1" : 'f',
"lambda2" : 'f',
"occlusionCost" : 'i',
"minDisparity" : 'i',
"numberOfDisparities" : 'i',
"maxIters" : 'i',
}),
( 'CvKalman', [], {
"MP" : 'i',
"DP" : 'i',
"CP" : 'i',
"state_pre" : 'mr',
"state_post" : 'mr',
"transition_matrix" : 'mr',
"control_matrix" : 'mr',
"measurement_matrix" : 'mr',
"control_matrix" : 'mr',
"process_noise_cov" : 'mr',
"measurement_noise_cov" : 'mr',
"error_cov_pre" : 'mr',
"gain" : 'mr',
"error_cov_post" : 'mr',
}),
( 'CvMoments', ['copy'], {
"m00" : 'f',
"m10" : 'f',
"m01" : 'f',
"m20" : 'f',
"m11" : 'f',
"m02" : 'f',
"m30" : 'f',
"m21" : 'f',
"m12" : 'f',
"m03" : 'f',
"mu20" : 'f',
"mu11" : 'f',
"mu02" : 'f',
"mu30" : 'f',
"mu21" : 'f',
"mu12" : 'f',
"mu03" : 'f',
"inv_sqrt_m00" : 'f',
}),
]
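# Per-type-code predicates used by the generated setters to validate the
# incoming Python value before conversion.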
checkers = {
'i' : 'PyNumber_Check',
'f' : 'PyNumber_Check',
'm' : 'is_cvmat',
'mr' : 'is_cvmat'
}
# Python -> C
converters = {
'i' : 'PyInt_AsLong',
'f' : 'PyFloat_AsDouble',
'm' : 'PyCvMat_AsCvMat',
'mr' : 'PyCvMat_AsCvMat'
}
# C -> Python
rconverters = {
'i' : 'PyInt_FromLong',
'f' : 'PyFloat_FromDouble',
'm' : 'FROM_CvMat',
'mr' : 'FROM_ROCvMatPTR'
}
# Human-readable type names
typenames = {
'i' : 'integer',
'f' : 'float',
'm' : 'list of CvMat',
'mr' : 'list of CvMat',
}
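# Emit one wrapper per entry: member accessors use '.' when the struct is
# embedded ('copy') and '->' when only a pointer is held, and 'allownull'
# inserts an early check that maps None to a NULL pointer.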
for (t, flags, members) in objects:
map = {'cvtype' : t,
'ourname' : t.replace('Cv', '')}
# gsf is all the generated code for the member accessors
if 'copy' in flags:
a = '.'
else:
a = '->'
gsf = "".join([getset_func_template.substitute(map, accessor = a, member = m, checker = checkers[t], converter = converters[t], rconverter = rconverters[t], typename = typenames[t]) for (m, t) in members.items()])
# gsi is the generated code for the initializer for each accessor
gsi = "".join([getset_init_template.substitute(map, member = m) for (m, t) in members.items()])
# build the optional None-handling code, then pick the template
# (gensimple for 'copy' types, genptr otherwise) and emit the wrapper
if 'allownull' in flags:
nullcode = """if (o == Py_None) { *dst = (%s*)NULL; return 1; }""" % map['cvtype']
else:
nullcode = ""
if 'copy' in flags:
print >>gen_c[3], gensimple.substitute(map, getset_funcs = gsf, getset_inits = gsi, allownull = nullcode)
else:
print >>gen_c[3], genptr.substitute(map, getset_funcs = gsf, getset_inits = gsi, allownull = nullcode)
print >>gen_c[4], "MKTYPE(%s);" % map['ourname']
for f in gen_c:
f.close()
#ifndef OPENCV2X_PYTHON_WRAPPERS
#define OPENCV2X_PYTHON_WRAPPERS
#include <Python.h>
#if !PYTHON_USE_NUMPY
#error "The module can only be built if NumPy is available"
#endif
#define MODULESTR "cv2"
#include "numpy/ndarrayobject.h"
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
......@@ -12,6 +19,21 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv_extra_api.hpp"
static PyObject* opencv_error = 0;
static int failmsg(const char *fmt, ...)
{
char str[1000];
va_list ap;
va_start(ap, fmt);
vsnprintf(str, sizeof(str), fmt, ap);
va_end(ap);
PyErr_SetString(PyExc_TypeError, str);
return 0;
}
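/* failmsg() raises a TypeError and returns 0, so converters can simply
   "return failmsg(...)" to report a failed conversion to the caller. */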
#define ERRWRAP2(expr) \
try \
{ \
......@@ -693,4 +715,131 @@ static inline PyObject* pyopencv_from(const CvDTreeNode* node)
return value == ivalue ? PyInt_FromLong(ivalue) : PyFloat_FromDouble(value);
}
#define MKTYPE2(NAME) pyopencv_##NAME##_specials(); if (!to_ok(&pyopencv_##NAME##_Type)) return
#include "pyopencv_generated_types.h"
#include "pyopencv_generated_funcs.h"
static PyMethodDef methods[] = {
#include "pyopencv_generated_func_tab.h"
{NULL, NULL},
};
/************************************************************************/
/* Module init */
static int to_ok(PyTypeObject *to)
{
to->tp_alloc = PyType_GenericAlloc;
to->tp_new = PyType_GenericNew;
to->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
return (PyType_Ready(to) == 0);
}
extern "C"
#if defined WIN32 || defined _WIN32
__declspec(dllexport)
#endif
void initcv2()
{
#if PYTHON_USE_NUMPY
import_array();
#endif
#if PYTHON_USE_NUMPY
#include "pyopencv_generated_type_reg.h"
#endif
PyObject* m = Py_InitModule(MODULESTR"", methods);
PyObject* d = PyModule_GetDict(m);
PyDict_SetItemString(d, "__version__", PyString_FromString("$Rev: 4557 $"));
opencv_error = PyErr_NewException((char*)MODULESTR".error", NULL, NULL);
PyDict_SetItemString(d, "error", opencv_error);
// AFAIK the only floating-point constant
PyDict_SetItemString(d, "CV_PI", PyFloat_FromDouble(CV_PI));
#define PUBLISH(I) PyDict_SetItemString(d, #I, PyInt_FromLong(I))
#define PUBLISHU(I) PyDict_SetItemString(d, #I, PyLong_FromUnsignedLong(I))
#define PUBLISH2(I, value) PyDict_SetItemString(d, #I, PyLong_FromLong(value))
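// Each PUBLISH* macro stringizes the constant name and stores its value in the
// module dictionary, e.g. PUBLISH(CV_8U) exposes the constant as cv2.CV_8U.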
PUBLISHU(IPL_DEPTH_8U);
PUBLISHU(IPL_DEPTH_8S);
PUBLISHU(IPL_DEPTH_16U);
PUBLISHU(IPL_DEPTH_16S);
PUBLISHU(IPL_DEPTH_32S);
PUBLISHU(IPL_DEPTH_32F);
PUBLISHU(IPL_DEPTH_64F);
PUBLISH(CV_LOAD_IMAGE_COLOR);
PUBLISH(CV_LOAD_IMAGE_GRAYSCALE);
PUBLISH(CV_LOAD_IMAGE_UNCHANGED);
PUBLISH(CV_HIST_ARRAY);
PUBLISH(CV_HIST_SPARSE);
PUBLISH(CV_8U);
PUBLISH(CV_8UC1);
PUBLISH(CV_8UC2);
PUBLISH(CV_8UC3);
PUBLISH(CV_8UC4);
PUBLISH(CV_8S);
PUBLISH(CV_8SC1);
PUBLISH(CV_8SC2);
PUBLISH(CV_8SC3);
PUBLISH(CV_8SC4);
PUBLISH(CV_16U);
PUBLISH(CV_16UC1);
PUBLISH(CV_16UC2);
PUBLISH(CV_16UC3);
PUBLISH(CV_16UC4);
PUBLISH(CV_16S);
PUBLISH(CV_16SC1);
PUBLISH(CV_16SC2);
PUBLISH(CV_16SC3);
PUBLISH(CV_16SC4);
PUBLISH(CV_32S);
PUBLISH(CV_32SC1);
PUBLISH(CV_32SC2);
PUBLISH(CV_32SC3);
PUBLISH(CV_32SC4);
PUBLISH(CV_32F);
PUBLISH(CV_32FC1);
PUBLISH(CV_32FC2);
PUBLISH(CV_32FC3);
PUBLISH(CV_32FC4);
PUBLISH(CV_64F);
PUBLISH(CV_64FC1);
PUBLISH(CV_64FC2);
PUBLISH(CV_64FC3);
PUBLISH(CV_64FC4);
PUBLISH(CV_NEXT_AROUND_ORG);
PUBLISH(CV_NEXT_AROUND_DST);
PUBLISH(CV_PREV_AROUND_ORG);
PUBLISH(CV_PREV_AROUND_DST);
PUBLISH(CV_NEXT_AROUND_LEFT);
PUBLISH(CV_NEXT_AROUND_RIGHT);
PUBLISH(CV_PREV_AROUND_LEFT);
PUBLISH(CV_PREV_AROUND_RIGHT);
PUBLISH(CV_WINDOW_AUTOSIZE);
PUBLISH(CV_PTLOC_INSIDE);
PUBLISH(CV_PTLOC_ON_EDGE);
PUBLISH(CV_PTLOC_VERTEX);
PUBLISH(CV_PTLOC_OUTSIDE_RECT);
PUBLISH(GC_BGD);
PUBLISH(GC_FGD);
PUBLISH(GC_PR_BGD);
PUBLISH(GC_PR_FGD);
PUBLISH(GC_INIT_WITH_RECT);
PUBLISH(GC_INIT_WITH_MASK);
PUBLISH(GC_EVAL);
#include "pyopencv_generated_const_reg.h"
}
import sys
import math
import time
import random
import numpy
import transformations
import cv
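# Synthetic calibration experiment: ray-trace a distorted view of a
# checkerboard plane, detect the corners with cv.FindChessboardCorners,
# optionally fit a polynomial undistortion via scipy, and finally run
# cv.CalibrateCamera2 on the detected corners.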
def clamp(a, x, b):
return numpy.maximum(a, numpy.minimum(x, b))
def norm(v):
mag = numpy.sqrt(sum([e * e for e in v]))
return v / mag
class Vec3:
def __init__(self, x, y, z):
self.v = (x, y, z)
def x(self):
return self.v[0]
def y(self):
return self.v[1]
def z(self):
return self.v[2]
def __repr__(self):
return "<Vec3 (%s,%s,%s)>" % tuple([repr(c) for c in self.v])
def __add__(self, other):
return Vec3(*[self.v[i] + other.v[i] for i in range(3)])
def __sub__(self, other):
return Vec3(*[self.v[i] - other.v[i] for i in range(3)])
def __mul__(self, other):
if isinstance(other, Vec3):
return Vec3(*[self.v[i] * other.v[i] for i in range(3)])
else:
return Vec3(*[self.v[i] * other for i in range(3)])
def mag2(self):
return sum([e * e for e in self.v])
def __abs__(self):
return numpy.sqrt(sum([e * e for e in self.v]))
def norm(self):
return self * (1.0 / abs(self))
def dot(self, other):
return sum([self.v[i] * other.v[i] for i in range(3)])
def cross(self, other):
(ax, ay, az) = self.v
(bx, by, bz) = other.v
return Vec3(ay * bz - by * az, az * bx - bz * ax, ax * by - bx * ay)
class Ray:
def __init__(self, o, d):
self.o = o
self.d = d
def project(self, d):
return self.o + self.d * d
class Camera:
def __init__(self, F):
R = Vec3(1., 0., 0.)
U = Vec3(0, 1., 0)
self.center = Vec3(0, 0, 0)
self.pcenter = Vec3(0, 0, F)
self.up = U
self.right = R
def genray(self, x, y):
""" -1 <= y <= 1 """
r = numpy.sqrt(x * x + y * y)
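# Radial remapping of the ray direction: the disabled branch is the forward
# distortion r -> r + 0.17*r**2; the active branch is its closed-form inverse.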
if 0:
rprime = r + (0.17 * r**2)
else:
rprime = (10 * numpy.sqrt(17 * r + 25) - 50) / 17
print "scale", rprime / r
x *= rprime / r
y *= rprime / r
o = self.center
r = (self.pcenter + (self.right * x) + (self.up * y)) - o
return Ray(o, r.norm())
class Sphere:
def __init__(self, center, radius):
self.center = center
self.radius = radius
def hit(self, r):
# a = mag2(r.d)
a = 1.
v = r.o - self.center
b = 2 * r.d.dot(v)
c = self.center.mag2() + r.o.mag2() + -2 * self.center.dot(r.o) - (self.radius ** 2)
det = (b * b) - (4 * c)
pred = 0 < det
sq = numpy.sqrt(abs(det))
h0 = (-b - sq) / (2)
h1 = (-b + sq) / (2)
h = numpy.minimum(h0, h1)
pred = pred & (h > 0)
normal = (r.project(h) - self.center) * (1.0 / self.radius)
return (pred, numpy.where(pred, h, 999999.), normal)
def pt2plane(p, plane):
return p.dot(plane) * (1. / abs(plane))
class Plane:
def __init__(self, p, n, right):
self.D = -pt2plane(p, n)
self.Pn = n
self.right = right
self.rightD = -pt2plane(p, right)
self.up = n.cross(right)
self.upD = -pt2plane(p, self.up)
def hit(self, r):
Vd = self.Pn.dot(r.d)
V0 = -(self.Pn.dot(r.o) + self.D)
h = V0 / Vd
pred = (0 <= h)
return (pred, numpy.where(pred, h, 999999.), self.Pn)
def localxy(self, loc):
x = (loc.dot(self.right) + self.rightD)
y = (loc.dot(self.up) + self.upD)
return (x, y)
# lena = numpy.fromstring(cv.LoadImage("../samples/c/lena.jpg", 0).tostring(), numpy.uint8) / 255.0
def texture(xy):
x,y = xy
xa = numpy.floor(x * 512)
ya = numpy.floor(y * 512)
a = (512 * ya) + xa
safe = (0 <= x) & (0 <= y) & (x < 1) & (y < 1)
if 0:
a = numpy.where(safe, a, 0).astype(numpy.int)
return numpy.where(safe, numpy.take(lena, a), 0.0)
else:
xi = numpy.floor(x * 11).astype(numpy.int)
yi = numpy.floor(y * 11).astype(numpy.int)
inside = (1 <= xi) & (xi < 10) & (2 <= yi) & (yi < 9)
checker = (xi & 1) ^ (yi & 1)
final = numpy.where(inside, checker, 1.0)
return numpy.where(safe, final, 0.5)
def under(vv, m):
return Vec3(*(numpy.dot(m, vv.v + (1,))[:3]))
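# Renderer shoots 'oversample' jittered rays per pixel at a textured plane and
# averages the samples; frame(i) animates the plane depth and returns the
# rendered image as an 8-bit CvMat.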
class Renderer:
def __init__(self, w, h, oversample):
self.w = w
self.h = h
random.seed(1)
x = numpy.arange(self.w*self.h) % self.w
y = numpy.floor(numpy.arange(self.w*self.h) / self.w)
h2 = h / 2.0
w2 = w / 2.0
self.r = [ None ] * oversample
for o in range(oversample):
stoch_x = numpy.random.rand(self.w * self.h)
stoch_y = numpy.random.rand(self.w * self.h)
nx = (x + stoch_x - 0.5 - w2) / h2
ny = (y + stoch_y - 0.5 - h2) / h2
self.r[o] = cam.genray(nx, ny)
self.rnds = [random.random() for i in range(10)]
def frame(self, i):
rnds = self.rnds
roll = math.sin(i * .01 * rnds[0] + rnds[1])
pitch = math.sin(i * .01 * rnds[2] + rnds[3])
yaw = math.pi * math.sin(i * .01 * rnds[4] + rnds[5])
x = math.sin(i * 0.01 * rnds[6])
y = math.sin(i * 0.01 * rnds[7])
x,y,z = -0.5,0.5,1
roll,pitch,yaw = (0,0,0)
z = 4 + 3 * math.sin(i * 0.1 * rnds[8])
print z
rz = transformations.euler_matrix(roll, pitch, yaw)
p = Plane(Vec3(x, y, z), under(Vec3(0,0,-1), rz), under(Vec3(1, 0, 0), rz))
acc = 0
for r in self.r:
(pred, h, norm) = p.hit(r)
l = numpy.where(pred, texture(p.localxy(r.project(h))), 0.0)
acc += l
acc *= (1.0 / len(self.r))
# print "took", time.time() - st
img = cv.CreateMat(self.h, self.w, cv.CV_8UC1)
cv.SetData(img, (clamp(0, acc, 1) * 255).astype(numpy.uint8).tostring(), self.w)
return img
#########################################################################
num_x_ints = 8
num_y_ints = 6
num_pts = num_x_ints * num_y_ints
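# Helpers for cv.CalibrateCamera2: get_corners() finds (and optionally refines)
# chessboard corners, mk_object_points() builds the 3-D grid coordinates,
# mk_image_points() flattens the detected corners into a single matrix, and
# mk_point_counts() records the number of points per view.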
def get_corners(mono, refine = False):
(ok, corners) = cv.FindChessboardCorners(mono, (num_x_ints, num_y_ints), cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE)
if refine and ok:
corners = cv.FindCornerSubPix(mono, corners, (5,5), (-1,-1), ( cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER, 30, 0.1 ))
return (ok, corners)
def mk_object_points(nimages, squaresize = 1):
opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1)
for i in range(nimages):
for j in range(num_pts):
opts[i * num_pts + j, 0] = (j / num_x_ints) * squaresize
opts[i * num_pts + j, 1] = (j % num_x_ints) * squaresize
opts[i * num_pts + j, 2] = 0
return opts
def mk_image_points(goodcorners):
ipts = cv.CreateMat(len(goodcorners) * num_pts, 2, cv.CV_32FC1)
for (i, co) in enumerate(goodcorners):
for j in range(num_pts):
ipts[i * num_pts + j, 0] = co[j][0]
ipts[i * num_pts + j, 1] = co[j][1]
return ipts
def mk_point_counts(nimages):
npts = cv.CreateMat(nimages, 1, cv.CV_32SC1)
for i in range(nimages):
npts[i, 0] = num_pts
return npts
def cvmat_iterator(cvmat):
for i in range(cvmat.rows):
for j in range(cvmat.cols):
yield cvmat[i,j]
cam = Camera(3.0)
rend = Renderer(640, 480, 2)
cv.NamedWindow("snap")
#images = [rend.frame(i) for i in range(0, 2000, 400)]
images = [rend.frame(i) for i in [1200]]
if 0:
for i,img in enumerate(images):
cv.SaveImage("final/%06d.png" % i, img)
size = cv.GetSize(images[0])
corners = [get_corners(i) for i in images]
goodcorners = [co for (im, (ok, co)) in zip(images, corners) if ok]
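# checkerboard_error(): for each detected board, sum the distance of every
# interior corner in a row from the straight line through that row's two end
# corners -- a measure of how far the distorted rows deviate from straight.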
def checkerboard_error(xformed):
def pt2line(a, b, c):
x0,y0 = a
x1,y1 = b
x2,y2 = c
return abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1)) / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
errorsum = 0.
for im in xformed:
for row in range(6):
l0 = im[8 * row]
l1 = im[8 * row + 7]
for col in range(1, 7):
e = pt2line(im[8 * row + col], l0, l1)
#print "row", row, "e", e
errorsum += e
return errorsum
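# Fit a polynomial radial undistortion: scipy's fmin searches for coefficients
# that minimize checkerboard_error() over the remapped corners, and the result
# is visualized with cv.DrawChessboardCorners.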
if True:
from scipy.optimize import fmin
def xf(pt, poly):
x, y = pt
r = math.sqrt((x - 320) ** 2 + (y - 240) ** 2)
fr = poly(r) / r
return (320 + (x - 320) * fr, 240 + (y - 240) * fr)
def silly(p, goodcorners):
# print "eval", p
d = 1.0 # - sum(p)
poly = numpy.poly1d(list(p) + [d, 0.])
xformed = [[xf(pt, poly) for pt in co] for co in goodcorners]
return checkerboard_error(xformed)
x0 = [ 0. ]
#print silly(x0, goodcorners)
print "initial error", silly(x0, goodcorners)
xopt = fmin(silly, x0, args=(goodcorners,))
print "xopt", xopt
print "final error", silly(xopt, goodcorners)
d = 1.0 # - sum(xopt)
poly = numpy.poly1d(list(xopt) + [d, 0.])
print "final polynomial"
print poly
for co in goodcorners:
scrib = cv.CreateMat(480, 640, cv.CV_8UC3)
cv.SetZero(scrib)
cv.DrawChessboardCorners(scrib, (num_x_ints, num_y_ints), [xf(pt, poly) for pt in co], True)
cv.ShowImage("snap", scrib)
cv.WaitKey()
sys.exit(0)
for (i, (img, (ok, co))) in enumerate(zip(images, corners)):
scrib = cv.CreateMat(img.rows, img.cols, cv.CV_8UC3)
cv.CvtColor(img, scrib, cv.CV_GRAY2BGR)
if ok:
cv.DrawChessboardCorners(scrib, (num_x_ints, num_y_ints), co, True)
cv.ShowImage("snap", scrib)
cv.WaitKey()
print len(goodcorners)
ipts = mk_image_points(goodcorners)
opts = mk_object_points(len(goodcorners), .1)
npts = mk_point_counts(len(goodcorners))
intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
distortion = cv.CreateMat(4, 1, cv.CV_64FC1)
cv.SetZero(intrinsics)
cv.SetZero(distortion)
# focal lengths have 1/1 ratio
intrinsics[0,0] = 1.0
intrinsics[1,1] = 1.0
cv.CalibrateCamera2(opts, ipts, npts,
cv.GetSize(images[0]),
intrinsics,
distortion,
cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
flags = 0) # cv.CV_CALIB_ZERO_TANGENT_DIST)
print "D =", list(cvmat_iterator(distortion))
print "K =", list(cvmat_iterator(intrinsics))
mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
cv.InitUndistortMap(intrinsics, distortion, mapx, mapy)
for img in images:
r = cv.CloneMat(img)
cv.Remap(img, r, mapx, mapy)
cv.ShowImage("snap", r)
cv.WaitKey()
import cv
import unittest
class TestGoodFeaturesToTrack(unittest.TestCase):
def test(self):
arr = cv.LoadImage("../samples/c/lena.jpg", 0)
original = cv.CloneImage(arr)
size = cv.GetSize(arr)
eig_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
temp_image = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
threshes = [ x / 100. for x in range(1,10) ]
results = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, use_harris = 1)) for t in threshes])
# Check that GoodFeaturesToTrack has not modified input image
self.assert_(arr.tostring() == original.tostring())
# Check for repeatability
for i in range(10):
results2 = dict([(t, cv.GoodFeaturesToTrack(arr, eig_image, temp_image, 20000, t, 2, use_harris = 1)) for t in threshes])
self.assert_(results == results2)
for t0,t1 in zip(threshes, threshes[1:]):
r0 = results[t0]
r1 = results[t1]
# Increasing thresh should make result list shorter
self.assert_(len(r0) > len(r1))
# Increasing thresh should only truncate result list
self.assert_(r0[:len(r1)] == r1)
if __name__ == '__main__':
unittest.main()
import cv
import numpy as np
cv.NamedWindow('Leak')
while 1:
leak = np.random.random((480, 640)) * 255
cv.ShowImage('Leak', leak.astype(np.uint8))
cv.WaitKey(10)
import cv
import numpy as np
import time
while True:
for i in range(4000):
a = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1)
b = cv.CreateMat(1024, 1024, cv.CV_8UC1)
c = cv.CreateMatND([1024,1024], cv.CV_8UC1)
print "pause..."
import cv
import math
import time
while True:
h = cv.CreateHist([40], cv.CV_HIST_ARRAY, [[0,255]], 1)
import cv
import math
import time
N=50000
print "leak4"
while True:
seq=list((i*1., i*1.) for i in range(N))
cv.Moments(seq)
import unittest
import random
import time
import math
import sys
import array
import urllib
import tarfile
import hashlib
import os
import getopt
import operator
import functools
import cv
class OpenCVTests(unittest.TestCase):
depths = [ cv.IPL_DEPTH_8U, cv.IPL_DEPTH_8S, cv.IPL_DEPTH_16U, cv.IPL_DEPTH_16S, cv.IPL_DEPTH_32S, cv.IPL_DEPTH_32F, cv.IPL_DEPTH_64F ]
mat_types = [
cv.CV_8UC1,
cv.CV_8UC2,
cv.CV_8UC3,
cv.CV_8UC4,
cv.CV_8SC1,
cv.CV_8SC2,
cv.CV_8SC3,
cv.CV_8SC4,
cv.CV_16UC1,
cv.CV_16UC2,
cv.CV_16UC3,
cv.CV_16UC4,
cv.CV_16SC1,
cv.CV_16SC2,
cv.CV_16SC3,
cv.CV_16SC4,
cv.CV_32SC1,
cv.CV_32SC2,
cv.CV_32SC3,
cv.CV_32SC4,
cv.CV_32FC1,
cv.CV_32FC2,
cv.CV_32FC3,
cv.CV_32FC4,
cv.CV_64FC1,
cv.CV_64FC2,
cv.CV_64FC3,
cv.CV_64FC4,
]
mat_types_single = [
cv.CV_8UC1,
cv.CV_8SC1,
cv.CV_16UC1,
cv.CV_16SC1,
cv.CV_32SC1,
cv.CV_32FC1,
cv.CV_64FC1,
]
def depthsize(self, d):
return { cv.IPL_DEPTH_8U : 1,
cv.IPL_DEPTH_8S : 1,
cv.IPL_DEPTH_16U : 2,
cv.IPL_DEPTH_16S : 2,
cv.IPL_DEPTH_32S : 4,
cv.IPL_DEPTH_32F : 4,
cv.IPL_DEPTH_64F : 8 }[d]
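# get_sample() fetches test images over HTTP from the OpenCV SVN tree and
# caches the decoded result per test case (the cache is reset in setUp).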
def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR):
if not filename in self.image_cache:
filedata = urllib.urlopen("https://code.ros.org/svn/opencv/trunk/opencv/" + filename).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
self.image_cache[filename] = cv.DecodeImageM(imagefiledata, iscolor)
return self.image_cache[filename]
def setUp(self):
self.image_cache = {}
def snap(self, img):
self.snapL([img])
def snapL(self, L):
for i,img in enumerate(L):
cv.NamedWindow("snap-%d" % i, 1)
cv.ShowImage("snap-%d" % i, img)
cv.WaitKey()
cv.DestroyAllWindows()
def hashimg(self, im):
""" Compute a hash for an image, useful for image comparisons """
return hashlib.md5(im.tostring()).digest()
# Tests to run first; check the handful of basic operations that the later tests rely on
class PreliminaryTests(OpenCVTests):
def test_lena(self):
# Check that the lena jpg image has loaded correctly
# This test uses a 'golden' MD5 hash of the Lena image
# If the JPEG decompressor changes, it is possible that the MD5 hash will change,
# so the hash here will need to change.
im = self.get_sample("samples/c/lena.jpg")
# self.snap(im) # uncomment this line to view the image, when regilding
self.assertEqual(hashlib.md5(im.tostring()).hexdigest(), "9dcd9247f9811c6ce86675ba7b0297b6")
def test_LoadImage(self):
self.assertRaises(TypeError, lambda: cv.LoadImage())
self.assertRaises(TypeError, lambda: cv.LoadImage(4))
self.assertRaises(TypeError, lambda: cv.LoadImage('foo.jpg', 1, 1))
self.assertRaises(TypeError, lambda: cv.LoadImage('foo.jpg', xiscolor=cv.CV_LOAD_IMAGE_COLOR))
def test_types(self):
self.assert_(type(cv.CreateImage((7,5), cv.IPL_DEPTH_8U, 1)) == cv.iplimage)
self.assert_(type(cv.CreateMat(5, 7, cv.CV_32FC1)) == cv.cvmat)
for i,t in enumerate(self.mat_types):
basefunc = [
cv.CV_8UC,
cv.CV_8SC,
cv.CV_16UC,
cv.CV_16SC,
cv.CV_32SC,
cv.CV_32FC,
cv.CV_64FC,
][i / 4]
self.assertEqual(basefunc(1 + (i % 4)), t)
def test_tostring(self):
for w in [ 1, 4, 64, 512, 640]:
for h in [ 1, 4, 64, 480, 512]:
for c in [1, 2, 3, 4]:
for d in self.depths:
a = cv.CreateImage((w,h), d, c);
self.assert_(len(a.tostring()) == w * h * c * self.depthsize(d))
for w in [ 32, 96, 480 ]:
for h in [ 32, 96, 480 ]:
depth_size = {
cv.IPL_DEPTH_8U : 1,
cv.IPL_DEPTH_8S : 1,
cv.IPL_DEPTH_16U : 2,
cv.IPL_DEPTH_16S : 2,
cv.IPL_DEPTH_32S : 4,
cv.IPL_DEPTH_32F : 4,
cv.IPL_DEPTH_64F : 8
}
for f in self.depths:
for channels in (1,2,3,4):
img = cv.CreateImage((w, h), f, channels)
esize = (w * h * channels * depth_size[f])
self.assert_(len(img.tostring()) == esize)
cv.SetData(img, " " * esize, w * channels * depth_size[f])
self.assert_(len(img.tostring()) == esize)
mattype_size = {
cv.CV_8UC1 : 1,
cv.CV_8UC2 : 1,
cv.CV_8UC3 : 1,
cv.CV_8UC4 : 1,
cv.CV_8SC1 : 1,
cv.CV_8SC2 : 1,
cv.CV_8SC3 : 1,
cv.CV_8SC4 : 1,
cv.CV_16UC1 : 2,
cv.CV_16UC2 : 2,
cv.CV_16UC3 : 2,
cv.CV_16UC4 : 2,
cv.CV_16SC1 : 2,
cv.CV_16SC2 : 2,
cv.CV_16SC3 : 2,
cv.CV_16SC4 : 2,
cv.CV_32SC1 : 4,
cv.CV_32SC2 : 4,
cv.CV_32SC3 : 4,
cv.CV_32SC4 : 4,
cv.CV_32FC1 : 4,
cv.CV_32FC2 : 4,
cv.CV_32FC3 : 4,
cv.CV_32FC4 : 4,
cv.CV_64FC1 : 8,
cv.CV_64FC2 : 8,
cv.CV_64FC3 : 8,
cv.CV_64FC4 : 8
}
for t in self.mat_types:
for im in [cv.CreateMat(h, w, t), cv.CreateMatND([h, w], t)]:
elemsize = cv.CV_MAT_CN(cv.GetElemType(im)) * mattype_size[cv.GetElemType(im)]
cv.SetData(im, " " * (w * h * elemsize), (w * elemsize))
esize = (w * h * elemsize)
self.assert_(len(im.tostring()) == esize)
cv.SetData(im, " " * esize, w * elemsize)
self.assert_(len(im.tostring()) == esize)
# Tests for specific OpenCV functions
class FunctionTests(OpenCVTests):
def test_AvgSdv(self):
m = cv.CreateMat(1, 8, cv.CV_32FC1)
for i,v in enumerate([2, 4, 4, 4, 5, 5, 7, 9]):
m[0,i] = (v,)
self.assertAlmostEqual(cv.Avg(m)[0], 5.0, 3)
avg,sdv = cv.AvgSdv(m)
self.assertAlmostEqual(avg[0], 5.0, 3)
self.assertAlmostEqual(sdv[0], 2.0, 3)
def test_CalcEMD2(self):
cc = {}
for r in [ 5, 10, 37, 38 ]:
scratch = cv.CreateImage((100,100), 8, 1)
cv.SetZero(scratch)
cv.Circle(scratch, (50,50), r, 255, -1)
storage = cv.CreateMemStorage()
seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
arr = cv.CreateMat(len(seq), 3, cv.CV_32FC1)
for i,e in enumerate(seq):
arr[i,0] = 1
arr[i,1] = e[0]
arr[i,2] = e[1]
cc[r] = arr
def myL1(A, B, D):
return abs(A[0]-B[0]) + abs(A[1]-B[1])
def myL2(A, B, D):
return math.sqrt((A[0]-B[0])**2 + (A[1]-B[1])**2)
def myC(A, B, D):
return max(abs(A[0]-B[0]), abs(A[1]-B[1]))
contours = set(cc.values())
for c0 in contours:
for c1 in contours:
self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_L1) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myL1)) < 1e-3)
self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_L2) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myL2)) < 1e-3)
self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_C) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myC)) < 1e-3)
def test_CalcOpticalFlowBM(self):
a = self.get_sample("samples/c/lena.jpg", 0)
b = self.get_sample("samples/c/lena.jpg", 0)
(w,h) = cv.GetSize(a)
vel_size = (w - 8, h - 8)
velx = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1)
vely = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1)
cv.CalcOpticalFlowBM(a, b, (8,8), (1,1), (8,8), 0, velx, vely)
def test_CalcOpticalFlowPyrLK(self):
a = self.get_sample("samples/c/lena.jpg", 0)
map = cv.CreateMat(2, 3, cv.CV_32FC1)
cv.GetRotationMatrix2D((256, 256), 10, 1.0, map)
b = cv.CloneMat(a)
cv.WarpAffine(a, b, map)
eig_image = cv.CreateMat(a.rows, a.cols, cv.CV_32FC1)
temp_image = cv.CreateMat(a.rows, a.cols, cv.CV_32FC1)
prevPyr = cv.CreateMat(a.rows / 3, a.cols + 8, cv.CV_8UC1)
currPyr = cv.CreateMat(a.rows / 3, a.cols + 8, cv.CV_8UC1)
prevFeatures = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 400, 0.01, 0.01)
(currFeatures, status, track_error) = cv.CalcOpticalFlowPyrLK(a,
b,
prevPyr,
currPyr,
prevFeatures,
(10, 10),
3,
(cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS,20, 0.03),
0)
if 0: # enable visualization
print
print sum(status), "Points found in curr image"
for prev,this in zip(prevFeatures, currFeatures):
iprev = tuple([int(c) for c in prev])
ithis = tuple([int(c) for c in this])
cv.Circle(a, iprev, 3, 255)
cv.Circle(a, ithis, 3, 0)
cv.Line(a, iprev, ithis, 128)
self.snapL([a, b])
def test_CartToPolar(self):
x = cv.CreateMat(5, 5, cv.CV_32F)
y = cv.CreateMat(5, 5, cv.CV_32F)
mag = cv.CreateMat(5, 5, cv.CV_32F)
angle = cv.CreateMat(5, 5, cv.CV_32F)
x2 = cv.CreateMat(5, 5, cv.CV_32F)
y2 = cv.CreateMat(5, 5, cv.CV_32F)
for i in range(5):
for j in range(5):
x[i, j] = i
y[i, j] = j
for in_degrees in [False, True]:
cv.CartToPolar(x, y, mag, angle, in_degrees)
cv.PolarToCart(mag, angle, x2, y2, in_degrees)
for i in range(5):
for j in range(5):
self.assertAlmostEqual(x[i, j], x2[i, j], 1)
self.assertAlmostEqual(y[i, j], y2[i, j], 1)
def test_Circle(self):
for w,h in [(2,77), (77,2), (256, 256), (640,480)]:
img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.SetZero(img)
tricky = [ -8000, -2, -1, 0, 1, h/2, h-1, h, h+1, w/2, w-1, w, w+1, 8000]
for x0 in tricky:
for y0 in tricky:
for r in [ 0, 1, 2, 3, 4, 5, w/2, w-1, w, w+1, h/2, h-1, h, h+1, 8000 ]:
for thick in [1, 2, 10]:
for t in [0, 8, 4, cv.CV_AA]:
cv.Circle(img, (x0,y0), r, 255, thick, t)
# just check that something was drawn
self.assert_(cv.Sum(img)[0] > 0)
def test_ConvertImage(self):
i1 = cv.GetImage(self.get_sample("samples/c/lena.jpg", 1))
i2 = cv.CloneImage(i1)
i3 = cv.CloneImage(i1)
cv.ConvertImage(i1, i2, cv.CV_CVTIMG_FLIP + cv.CV_CVTIMG_SWAP_RB)
self.assertNotEqual(self.hashimg(i1), self.hashimg(i2))
cv.ConvertImage(i2, i3, cv.CV_CVTIMG_FLIP + cv.CV_CVTIMG_SWAP_RB)
self.assertEqual(self.hashimg(i1), self.hashimg(i3))
def test_ConvexHull2(self):
# Draw a series of N-pointed stars, find contours, assert the contour is not convex,
# assert the hull has N segments, assert that there are N convexity defects.
def polar2xy(th, r):
return (int(400 + r * math.cos(th)), int(400 + r * math.sin(th)))
storage = cv.CreateMemStorage(0)
for way in ['CvSeq', 'CvMat', 'list']:
for points in range(3,20):
scratch = cv.CreateImage((800,800), 8, 1)
cv.SetZero(scratch)
sides = 2 * points
cv.FillPoly(scratch, [ [ polar2xy(i * 2 * math.pi / sides, [100,350][i&1]) for i in range(sides) ] ], 255)
seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
if way == 'CvSeq':
# pts is a CvSeq
pts = seq
elif way == 'CvMat':
# pts is a CvMat
arr = cv.CreateMat(len(seq), 1, cv.CV_32SC2)
for i,e in enumerate(seq):
arr[i,0] = e
pts = arr
elif way == 'list':
# pts is a list of 2-tuples
pts = list(seq)
else:
assert False
self.assert_(cv.CheckContourConvexity(pts) == 0)
hull = cv.ConvexHull2(pts, storage, return_points = 1)
self.assert_(cv.CheckContourConvexity(hull) == 1)
self.assert_(len(hull) == points)
if way in [ 'CvSeq', 'CvMat' ]:
defects = cv.ConvexityDefects(pts, cv.ConvexHull2(pts, storage), storage)
self.assert_(len([depth for (_,_,_,depth) in defects if (depth > 5)]) == points)
def test_CreateImage(self):
for w in [ 1, 4, 64, 512, 640]:
for h in [ 1, 4, 64, 480, 512]:
for c in [1, 2, 3, 4]:
for d in self.depths:
a = cv.CreateImage((w,h), d, c);
self.assert_(a.width == w)
self.assert_(a.height == h)
self.assert_(a.nChannels == c)
self.assert_(a.depth == d)
self.assert_(cv.GetSize(a) == (w, h))
# self.assert_(cv.GetElemType(a) == d)
self.assertRaises(cv.error, lambda: cv.CreateImage((100, 100), 9, 1))
def test_CreateMat(self):
for rows in [1, 2, 4, 16, 64, 512, 640]:
for cols in [1, 2, 4, 16, 64, 512, 640]:
for t in self.mat_types:
m = cv.CreateMat(rows, cols, t)
self.assertEqual(cv.GetElemType(m), t)
self.assertEqual(m.type, t)
self.assertRaises(cv.error, lambda: cv.CreateMat(-1, 100, cv.CV_8SC4))
self.assertRaises(cv.error, lambda: cv.CreateMat(100, -1, cv.CV_8SC4))
self.assertRaises(cv.error, lambda: cv.cvmat())
def test_DrawChessboardCorners(self):
im = cv.CreateImage((512,512), cv.IPL_DEPTH_8U, 3)
cv.SetZero(im)
cv.DrawChessboardCorners(im, (5, 5), [ (100,100) for i in range(5 * 5) ], 1)
self.assert_(cv.Sum(im)[0] > 0)
self.assertRaises(TypeError, lambda: cv.DrawChessboardCorners(im, (4, 5), [ (100,100) for i in range(5 * 5) ], 1))
def test_ExtractSURF(self):
img = self.get_sample("samples/c/lena.jpg", 0)
w,h = cv.GetSize(img)
for hessthresh in [ 300,400,500]:
for dsize in [0,1]:
for layers in [1,3,10]:
kp,desc = cv.ExtractSURF(img, None, cv.CreateMemStorage(), (dsize, hessthresh, 3, layers))
self.assert_(len(kp) == len(desc))
for d in desc:
self.assert_(len(d) == {0:64, 1:128}[dsize])
for pt,laplacian,size,dir,hessian in kp:
self.assert_((0 <= pt[0]) and (pt[0] <= w))
self.assert_((0 <= pt[1]) and (pt[1] <= h))
self.assert_(laplacian in [-1, 0, 1])
self.assert_((0 <= dir) and (dir <= 360))
self.assert_(hessian >= hessthresh)
def test_FillPoly(self):
scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1)
random.seed(0)
for i in range(50):
cv.SetZero(scribble)
self.assert_(cv.CountNonZero(scribble) == 0)
cv.FillPoly(scribble, [ [ (random.randrange(640), random.randrange(480)) for i in range(100) ] ], (255,))
self.assert_(cv.CountNonZero(scribble) != 0)
def test_FindChessboardCorners(self):
im = cv.CreateImage((512,512), cv.IPL_DEPTH_8U, 1)
cv.Set(im, 128)
# Empty image run
status,corners = cv.FindChessboardCorners( im, (7,7) )
# Perfect checkerboard
def xf(i,j, o):
return ((96 + o) + 40 * i, (96 + o) + 40 * j)
for i in range(8):
for j in range(8):
color = ((i ^ j) & 1) * 255
cv.Rectangle(im, xf(i,j, 0), xf(i,j, 39), color, cv.CV_FILLED)
status,corners = cv.FindChessboardCorners( im, (7,7) )
self.assert_(status)
self.assert_(len(corners) == (7 * 7))
# Exercise corner display
im3 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 3)
cv.Merge(im, im, im, None, im3)
cv.DrawChessboardCorners(im3, (7,7), corners, status)
if 0:
self.snap(im3)
# Run it with too many corners
cv.Set(im, 128)
for i in range(40):
for j in range(40):
color = ((i ^ j) & 1) * 255
x = 30 + 6 * i
y = 30 + 4 * j
cv.Rectangle(im, (x, y), (x+4, y+4), color, cv.CV_FILLED)
status,corners = cv.FindChessboardCorners( im, (7,7) )
# XXX - this is very slow
if 0:
rng = cv.RNG(0)
cv.RandArr(rng, im, cv.CV_RAND_UNI, 0, 255.0)
self.snap(im)
status,corners = cv.FindChessboardCorners( im, (7,7) )
def test_FindContours(self):
random.seed(0)
storage = cv.CreateMemStorage()
# First run FindContours on a black image.
for mode in [cv.CV_RETR_EXTERNAL, cv.CV_RETR_LIST, cv.CV_RETR_CCOMP, cv.CV_RETR_TREE]:
for method in [cv.CV_CHAIN_CODE, cv.CV_CHAIN_APPROX_NONE, cv.CV_CHAIN_APPROX_SIMPLE, cv.CV_CHAIN_APPROX_TC89_L1, cv.CV_CHAIN_APPROX_TC89_KCOS, cv.CV_LINK_RUNS]:
scratch = cv.CreateImage((800,800), 8, 1)
cv.SetZero(scratch)
seq = cv.FindContours(scratch, storage, mode, method)
x = len(seq)
if seq:
pass
for s in seq:
pass
for trial in range(10):
scratch = cv.CreateImage((800,800), 8, 1)
cv.SetZero(scratch)
def plot(center, radius, mode):
cv.Circle(scratch, center, radius, mode, -1)
if radius < 20:
return 0
else:
newmode = 255 - mode
subs = random.choice([1,2,3])
if subs == 1:
return [ plot(center, radius - 5, newmode) ]
else:
newradius = int({ 2: radius / 2, 3: radius / 2.3 }[subs] - 5)
r = radius / 2
ret = []
for i in range(subs):
th = i * (2 * math.pi) / subs
ret.append(plot((int(center[0] + r * math.cos(th)), int(center[1] + r * math.sin(th))), newradius, newmode))
return sorted(ret)
actual = plot((400,400), 390, 255 )
seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
def traverse(s):
if s == None:
return 0
else:
self.assert_(abs(cv.ContourArea(s)) > 0.0)
((x,y),(w,h),th) = cv.MinAreaRect2(s, cv.CreateMemStorage())
self.assert_(((w / h) - 1.0) < 0.01)
self.assert_(abs(cv.ContourArea(s)) > 0.0)
r = []
while s:
r.append(traverse(s.v_next()))
s = s.h_next()
return sorted(r)
self.assert_(traverse(seq.v_next()) == actual)
if 1:
original = cv.CreateImage((800,800), 8, 1)
cv.SetZero(original)
cv.Circle(original, (400, 400), 200, 255, -1)
cv.Circle(original, (100, 100), 20, 255, -1)
else:
original = self.get_sample("samples/c/lena.jpg", 0)
cv.Threshold(original, original, 128, 255, cv.CV_THRESH_BINARY);
contours = cv.FindContours(original, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
def contour_iterator(contour):
while contour:
yield contour
contour = contour.h_next()
# Should be 2 contours from the two circles above
self.assertEqual(len(list(contour_iterator(contours))), 2)
# Smoke DrawContours
sketch = cv.CreateImage(cv.GetSize(original), 8, 3)
cv.SetZero(sketch)
red = cv.RGB(255, 0, 0)
green = cv.RGB(0, 255, 0)
for c in contour_iterator(contours):
cv.DrawContours(sketch, c, red, green, 0)
# self.snap(sketch)
def test_GetAffineTransform(self):
mapping = cv.CreateMat(2, 3, cv.CV_32FC1)
cv.GetAffineTransform([ (0,0), (1,0), (0,1) ], [ (0,0), (17,0), (0,17) ], mapping)
self.assertAlmostEqual(mapping[0,0], 17, 2)
self.assertAlmostEqual(mapping[1,1], 17, 2)
def test_GetRotationMatrix2D(self):
mapping = cv.CreateMat(2, 3, cv.CV_32FC1)
for scale in [0.0, 1.0, 2.0]:
for angle in [0.0, 360.0]:
cv.GetRotationMatrix2D((0,0), angle, scale, mapping)
for r in [0, 1]:
for c in [0, 1, 2]:
if r == c:
e = scale
else:
e = 0.0
self.assertAlmostEqual(mapping[r, c], e, 2)
def test_GetSize(self):
self.assert_(cv.GetSize(cv.CreateMat(5, 7, cv.CV_32FC1)) == (7,5))
self.assert_(cv.GetSize(cv.CreateImage((7,5), cv.IPL_DEPTH_8U, 1)) == (7,5))
def test_GetStarKeypoints(self):
src = self.get_sample("samples/c/lena.jpg", 0)
storage = cv.CreateMemStorage()
kp = cv.GetStarKeypoints(src, storage)
self.assert_(len(kp) > 0)
for (x,y),scale,r in kp:
self.assert_(0 <= x)
self.assert_(x <= cv.GetSize(src)[0])
self.assert_(0 <= y)
self.assert_(y <= cv.GetSize(src)[1])
return
scribble = cv.CreateImage(cv.GetSize(src), 8, 3)
cv.CvtColor(src, scribble, cv.CV_GRAY2BGR)
for (x,y),scale,r in kp:
print x,y,scale,r
cv.Circle(scribble, (x,y), scale, cv.RGB(255,0,0))
self.snap(scribble)
def test_GetSubRect(self):
src = cv.CreateImage((100,100), 8, 1)
data = "z" * (100 * 100)
cv.SetData(src, data, 100)
start_count = sys.getrefcount(data)
iter = 77
subs = []
for i in range(iter):
sub = cv.GetSubRect(src, (0, 0, 10, 10))
subs.append(sub)
self.assert_(sys.getrefcount(data) == (start_count + iter))
src = self.get_sample("samples/c/lena.jpg", 0)
made = cv.CreateImage(cv.GetSize(src), 8, 1)
sub = cv.CreateMat(32, 32, cv.CV_8UC1)
for x in range(0, 512, 32):
for y in range(0, 512, 32):
sub = cv.GetSubRect(src, (x, y, 32, 32))
cv.SetImageROI(made, (x, y, 32, 32))
cv.Copy(sub, made)
cv.ResetImageROI(made)
cv.AbsDiff(made, src, made)
self.assert_(cv.CountNonZero(made) == 0)
for m1 in [cv.CreateMat(1, 10, cv.CV_8UC1), cv.CreateImage((10, 1), 8, 1)]:
for i in range(10):
m1[0, i] = i
def aslist(cvmat): return list(array.array('B', cvmat.tostring()))
m2 = cv.GetSubRect(m1, (5, 0, 4, 1))
m3 = cv.GetSubRect(m2, (1, 0, 2, 1))
self.assertEqual(aslist(m1), range(10))
self.assertEqual(aslist(m2), range(5, 9))
self.assertEqual(aslist(m3), range(6, 8))
def xtest_grabCut(self):
image = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_COLOR)
tmp1 = cv.CreateMat(1, 13 * 5, cv.CV_32FC1)
tmp2 = cv.CreateMat(1, 13 * 5, cv.CV_32FC1)
mask = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
cv.GrabCut(image, mask, (10,10,200,200), tmp1, tmp2, 10, cv.GC_INIT_WITH_RECT)
def test_HoughLines2_PROBABILISTIC(self):
li = cv.HoughLines2(self.yield_line_image(),
cv.CreateMemStorage(),
cv.CV_HOUGH_PROBABILISTIC,
1,
math.pi/180,
50,
50,
10)
self.assert_(len(li) > 0)
self.assert_(li[0] != None)
def test_HoughLines2_STANDARD(self):
li = cv.HoughLines2(self.yield_line_image(),
cv.CreateMemStorage(),
cv.CV_HOUGH_STANDARD,
1,
math.pi/180,
100,
0,
0)
self.assert_(len(li) > 0)
self.assert_(li[0] != None)
def test_InPaint(self):
src = self.get_sample("doc/pics/building.jpg")
msk = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
damaged = cv.CloneMat(src)
repaired = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 3)
difference = cv.CloneImage(repaired)
cv.SetZero(msk)
for method in [ cv.CV_INPAINT_NS, cv.CV_INPAINT_TELEA ]:
for (p0,p1) in [ ((10,10), (400,400)) ]:
cv.Line(damaged, p0, p1, cv.RGB(255, 0, 255), 2)
cv.Line(msk, p0, p1, 255, 2)
cv.Inpaint(damaged, msk, repaired, 10., cv.CV_INPAINT_NS)
cv.AbsDiff(src, repaired, difference)
#self.snapL([src, damaged, repaired, difference])
def test_InitLineIterator(self):
scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1)
self.assert_(len(list(cv.InitLineIterator(scribble, (20,10), (30,10)))) == 11)
def test_InRange(self):
sz = (256,256)
Igray1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Ilow1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Ihi1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Igray2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Ilow2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Ihi2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Imask = cv.CreateImage(sz, cv.IPL_DEPTH_8U,1)
Imaskt = cv.CreateImage(sz,cv.IPL_DEPTH_8U,1)
cv.InRange(Igray1, Ilow1, Ihi1, Imask);
cv.InRange(Igray2, Ilow2, Ihi2, Imaskt);
cv.Or(Imask, Imaskt, Imask);
def test_Line(self):
w,h = 640,480
img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.SetZero(img)
tricky = [ -8000, -2, -1, 0, 1, h/2, h-1, h, h+1, w/2, w-1, w, w+1, 8000]
for x0 in tricky:
for y0 in tricky:
for x1 in tricky:
for y1 in tricky:
for thickness in [ 0, 1, 8 ]:
for line_type in [0, 4, 8, cv.CV_AA ]:
cv.Line(img, (x0,y0), (x1,y1), 255, thickness, line_type)
# just check that something was drawn
self.assert_(cv.Sum(img)[0] > 0)
def test_MinMaxLoc(self):
scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1)
los = [ (random.randrange(480), random.randrange(640)) for i in range(100) ]
his = [ (random.randrange(480), random.randrange(640)) for i in range(100) ]
for (lo,hi) in zip(los,his):
cv.Set(scribble, 128)
scribble[lo] = 0
scribble[hi] = 255
r = cv.MinMaxLoc(scribble)
self.assert_(r == (0, 255, tuple(reversed(lo)), tuple(reversed(hi))))
def xxx_test_PyrMeanShiftFiltering(self): # XXX - ticket #306
if 0:
src = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_COLOR)
dst = cv.CloneMat(src)
cv.PyrMeanShiftFiltering(src, dst, 5, 5)
print src, dst
self.snap(src)
else:
r = cv.temp_test()
print r
print len(r.tostring())
self.snap(r)
def test_Reshape(self):
# 97 rows
# 12 cols
rows = 97
cols = 12
im = cv.CreateMat( rows, cols, cv.CV_32FC1 )
elems = rows * cols * 1
def crd(im):
return cv.GetSize(im) + (cv.CV_MAT_CN(cv.GetElemType(im)),)
for c in (1, 2, 3, 4):
nc,nr,nd = crd(cv.Reshape(im, c))
self.assert_(nd == c)
self.assert_((nc * nr * nd) == elems)
nc,nr,nd = crd(cv.Reshape(im, 0, 97*2))
self.assert_(nr == 97*2)
self.assert_((nc * nr * nd) == elems)
nc,nr,nd = crd(cv.Reshape(im, 3, 97*2))
self.assert_(nr == 97*2)
self.assert_(nd == 3)
self.assert_((nc * nr * nd) == elems)
# Now test ReshapeMatND
mat = cv.CreateMatND([24], cv.CV_32FC1)
cv.Set(mat, 1.0)
self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 0, [24, 1])), (24, 1))
self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 0, [6, 4])), (6, 4))
self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 24, [1])), (1,))
self.assertRaises(TypeError, lambda: cv.ReshapeMatND(mat, 12, [1]))
def test_Save(self):
for o in [ cv.CreateImage((128,128), cv.IPL_DEPTH_8U, 1), cv.CreateMat(16, 16, cv.CV_32FC1), cv.CreateMatND([7,9,4], cv.CV_32FC1) ]:
cv.Save("test.save", o)
loaded = cv.Load("test.save", cv.CreateMemStorage())
self.assert_(type(o) == type(loaded))
def test_SetIdentity(self):
for r in range(1,16):
for c in range(1, 16):
for t in self.mat_types_single:
M = cv.CreateMat(r, c, t)
cv.SetIdentity(M)
for rj in range(r):
for cj in range(c):
if rj == cj:
expected = 1.0
else:
expected = 0.0
self.assertEqual(M[rj,cj], expected)
def test_SnakeImage(self):
src = self.get_sample("samples/c/lena.jpg", 0)
pts = [ (512-i,i) for i in range(0, 512, 8) ]
# Make sure that weight arguments get validated
self.assertRaises(TypeError, lambda: cv.SnakeImage(cv.GetImage(src), pts, [1,2], .01, .01, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1)))
# Smoke test: make sure the points are changed by the call
r = cv.SnakeImage(cv.GetImage(src), pts, .01, .01, .01, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1))
if 0:
cv.PolyLine(src, [ r ], 0, 255)
self.snap(src)
self.assertEqual(len(r), len(pts))
self.assertNotEqual(r, pts)
# Ensure that a list of weights gives the same result as a scalar weight
w = [.01] * len(pts)
r2 = cv.SnakeImage(cv.GetImage(src), pts, w, w, w, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1))
self.assertEqual(r, r2)
def test_KMeans2(self):
size = 500
samples = cv.CreateMat(size, 1, cv.CV_32FC3)
labels = cv.CreateMat(size, 1, cv.CV_32SC1)
centers = cv.CreateMat(2, 3, cv.CV_32FC1)
cv.Zero(samples)
cv.Zero(labels)
cv.Zero(centers)
cv.Set(cv.GetSubRect(samples, (0, 0, 1, size/2)), (255, 255, 255))
compact = cv.KMeans2(samples, 2, labels, (cv.CV_TERMCRIT_ITER, 100, 0.1), 1, 0, centers)
self.assertEqual(int(compact), 0)
random.seed(0)
for i in range(50):
index = random.randrange(size)
if index < size/2:
self.assertEqual(samples[index, 0], (255, 255, 255))
self.assertEqual(labels[index, 0], 1)
else:
self.assertEqual(samples[index, 0], (0, 0, 0))
self.assertEqual(labels[index, 0], 0)
for cluster in (0, 1):
for channel in (0, 1, 2):
self.assertEqual(int(centers[cluster, channel]), cluster*255)
def test_Sum(self):
for r in range(1,11):
for c in range(1, 11):
for t in self.mat_types_single:
M = cv.CreateMat(r, c, t)
cv.Set(M, 1)
self.assertEqual(cv.Sum(M)[0], r * c)
def test_Threshold(self):
#""" directed test for bug 2790622 """
src = self.get_sample("samples/c/lena.jpg", 0)
results = set()
for i in range(10):
dst = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
cv.Threshold(src, dst, 128, 128, cv.CV_THRESH_BINARY)
results.add(dst.tostring())
# Should have produced the same answer every time, so results set should have size 1
self.assert_(len(results) == 1)
# ticket #71 repro attempt
image = self.get_sample("samples/c/lena.jpg", 0)
red = cv.CreateImage(cv.GetSize(image), 8, 1)
binary = cv.CreateImage(cv.GetSize(image), 8, 1)
cv.Split(image, red, None, None, None)
cv.Threshold(red, binary, 42, 255, cv.CV_THRESH_BINARY)
##############################################################################
def yield_line_image(self):
""" Needed by HoughLines tests """
src = self.get_sample("doc/pics/building.jpg", 0)
dst = cv.CreateImage(cv.GetSize(src), 8, 1)
cv.Canny(src, dst, 50, 200, 3)
return dst
# Tests for functional areas
class AreaTests(OpenCVTests):
def test_numpy(self):
if 'fromarray' in dir(cv):
import numpy
def convert(numpydims):
""" Create a numpy array with specified dims, return the OpenCV CvMat """
a1 = numpy.array([1] * reduce(operator.__mul__, numpydims)).reshape(*numpydims).astype(numpy.float32)
return cv.fromarray(a1)
def row_col_chan(m):
col = m.cols
row = m.rows
chan = cv.CV_MAT_CN(cv.GetElemType(m))
return (row, col, chan)
self.assertEqual(row_col_chan(convert((2, 13))), (2, 13, 1))
self.assertEqual(row_col_chan(convert((2, 13, 4))), (2, 13, 4))
self.assertEqual(row_col_chan(convert((2, 13, cv.CV_CN_MAX))), (2, 13, cv.CV_CN_MAX))
self.assertRaises(TypeError, lambda: convert((2,)))
self.assertRaises(TypeError, lambda: convert((11, 17, cv.CV_CN_MAX + 1)))
for t in [cv.CV_16UC1, cv.CV_32SC1, cv.CV_32FC1]:
for d in [ (8,), (1,7), (2,3,4), (7,9,2,1,8), (1,2,3,4,5,6,7,8) ]:
total = reduce(operator.__mul__, d)
m = cv.CreateMatND(d, t)
for i in range(total):
cv.Set1D(m, i, i)
na = numpy.asarray(m).reshape((total,))
self.assertEqual(list(na), range(total))
# now do numpy -> cvmat, and verify
m2 = cv.fromarray(na, True)
# Check that new cvmat m2 contains same counting sequence
for i in range(total):
self.assertEqual(cv.Get1D(m2, i)[0], i)
# Verify round-trip for 2D arrays
for rows in [2, 3, 7, 13]:
for cols in [2, 3, 7, 13]:
for allowND in [False, True]:
im = cv.CreateMatND([rows, cols], cv.CV_16UC1)
cv.SetZero(im)
a = numpy.asarray(im)
self.assertEqual(a.shape, (rows, cols))
cvmatnd = cv.fromarray(a, allowND)
self.assertEqual(cv.GetDims(cvmatnd), (rows, cols))
# im, a and cvmatnd all point to the same data, so...
for i,coord in enumerate([(0,0), (0,1), (1,0), (1,1)]):
v = 5 + i + 7
a[coord] = v
self.assertEqual(im[coord], v)
self.assertEqual(cvmatnd[coord], v)
# Cv -> Numpy 3 channel check
im = cv.CreateMatND([2, 13], cv.CV_16UC3)
self.assertEqual(numpy.asarray(im).shape, (2, 13, 3))
# multi-dimensional NumPy array
na = numpy.ones([7,9,2,1,8])
cm = cv.fromarray(na, True)
self.assertEqual(cv.GetDims(cm), (7,9,2,1,8))
# Using an array object for a CvArr parameter
ones = numpy.ones((640, 480))
r = numpy.ones((640, 480))
cv.AddS(ones, 7, r)
self.assert_(numpy.alltrue(r == (8 * ones)))
# create arrays, use them in OpenCV and replace the arrays,
# looking for leaks
def randdim():
return [random.randrange(1,6) for i in range(random.randrange(1, 6))]
arrays = [numpy.ones(randdim()).astype(numpy.uint8) for i in range(10)]
cs = [cv.fromarray(a, True) for a in arrays]
for i in range(1000):
arrays[random.randrange(10)] = numpy.ones(randdim()).astype(numpy.uint8)
cs[random.randrange(10)] = cv.fromarray(arrays[random.randrange(10)], True)
for j in range(10):
self.assert_(all([c == chr(1) for c in cs[j].tostring()]))
#
m = numpy.identity(4, dtype = numpy.float32)
rvec = cv.CreateMat(3, 1, cv.CV_32FC1)
rvec[0,0] = 1
rvec[1,0] = 1
rvec[2,0] = 1
cv.Rodrigues2(rvec, m[:3,:3])
#print m
else:
print "SKIPPING test_numpy - numpy support not built"
def test_boundscatch(self):
l2 = cv.CreateMat(256, 1, cv.CV_8U)
l2[0,0] # should be OK
self.assertRaises(cv.error, lambda: l2[1,1])
l2[0] # should be OK
self.assertRaises(cv.error, lambda: l2[299])
for n in range(1, 8):
l = cv.CreateMatND([2] * n, cv.CV_8U)
l[0] # should be OK
self.assertRaises(cv.error, lambda: l[999])
tup0 = (0,) * n
l[tup0] # should be OK
tup2 = (2,) * n
self.assertRaises(cv.error, lambda: l[tup2])
def test_stereo(self):
bm = cv.CreateStereoBMState()
def illegal_delete():
bm = cv.CreateStereoBMState()
del bm.preFilterType
def illegal_assign():
bm = cv.CreateStereoBMState()
bm.preFilterType = "foo"
self.assertRaises(TypeError, illegal_delete)
self.assertRaises(TypeError, illegal_assign)
left = self.get_sample("samples/c/lena.jpg", 0)
right = self.get_sample("samples/c/lena.jpg", 0)
disparity = cv.CreateMat(512, 512, cv.CV_16SC1)
cv.FindStereoCorrespondenceBM(left, right, disparity, bm)
gc = cv.CreateStereoGCState(16, 2)
left_disparity = cv.CreateMat(512, 512, cv.CV_16SC1)
right_disparity = cv.CreateMat(512, 512, cv.CV_16SC1)
cv.FindStereoCorrespondenceGC(left, right, left_disparity, right_disparity, gc)
def test_kalman(self):
k = cv.CreateKalman(2, 1, 0)
def failing_test_exception(self):
a = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
b = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
self.assertRaises(cv.error, lambda: cv.Laplace(a, b))
def test_cvmat_accessors(self):
cvm = cv.CreateMat(20, 10, cv.CV_32FC1)
def test_depths(self):
#""" Make sure that the depth enums are unique """
self.assert_(len(self.depths) == len(set(self.depths)))
def test_leak(self):
#""" If CreateImage is not releasing image storage, then the loop below should use ~4GB of memory. """
for i in range(64000):
a = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1)
for i in range(64000):
a = cv.CreateMat(1024, 1024, cv.CV_8UC1)
def test_histograms(self):
def split(im):
nchans = cv.CV_MAT_CN(cv.GetElemType(im))
c = [ cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1) for i in range(nchans) ] + [None] * (4 - nchans)
cv.Split(im, c[0], c[1], c[2], c[3])
return c[:nchans]
def imh(im):
s = split(im)
hist = cv.CreateHist([256] * len(s), cv.CV_HIST_ARRAY, [ (0,255) ] * len(s), 1)
cv.CalcHist(s, hist, 0)
return hist
dims = [180]
ranges = [(0,180)]
a = cv.CreateHist(dims, cv.CV_HIST_ARRAY , ranges, 1)
src = self.get_sample("samples/c/lena.jpg", 0)
h = imh(src)
(minv, maxv, minl, maxl) = cv.GetMinMaxHistValue(h)
self.assert_(cv.QueryHistValue_nD(h, minl) == minv)
self.assert_(cv.QueryHistValue_nD(h, maxl) == maxv)
bp = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
cv.CalcBackProject(split(src), bp, h)
bp = cv.CreateImage((cv.GetSize(src)[0]-2, cv.GetSize(src)[1]-2), cv.IPL_DEPTH_32F, 1)
cv.CalcBackProjectPatch(split(src), bp, (3,3), h, cv.CV_COMP_INTERSECT, 1)
for meth,expected in [(cv.CV_COMP_CORREL, 1.0), (cv.CV_COMP_CHISQR, 0.0), (cv.CV_COMP_INTERSECT, 1.0), (cv.CV_COMP_BHATTACHARYYA, 0.0)]:
self.assertEqual(cv.CompareHist(h, h, meth), expected)
def test_arithmetic(self):
a = cv.CreateMat(4, 4, cv.CV_8UC1)
a[0,0] = 50.0
b = cv.CreateMat(4, 4, cv.CV_8UC1)
b[0,0] = 4.0
d = cv.CreateMat(4, 4, cv.CV_8UC1)
cv.Add(a, b, d)
self.assertEqual(d[0,0], 54.0)
cv.Mul(a, b, d)
self.assertEqual(d[0,0], 200.0)
def failing_test_cvtcolor(self):
src3 = self.get_sample("samples/c/lena.jpg")
src1 = self.get_sample("samples/c/lena.jpg", 0)
dst8u = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_8U, c)) for c in (1,2,3,4)])
dst16u = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_16U, c)) for c in (1,2,3,4)])
dst32f = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_32F, c)) for c in (1,2,3,4)])
for srcf in ["BGR", "RGB"]:
for dstf in ["Luv"]:
cv.CvtColor(src3, dst8u[3], eval("cv.CV_%s2%s" % (srcf, dstf)))
cv.CvtColor(src3, dst32f[3], eval("cv.CV_%s2%s" % (srcf, dstf)))
cv.CvtColor(src3, dst8u[3], eval("cv.CV_%s2%s" % (dstf, srcf)))
for srcf in ["BayerBG", "BayerGB", "BayerGR"]:
for dstf in ["RGB", "BGR"]:
cv.CvtColor(src1, dst8u[3], eval("cv.CV_%s2%s" % (srcf, dstf)))
def test_voronoi(self):
w,h = 500,500
storage = cv.CreateMemStorage(0)
def facet_edges(e0):
e = e0
while True:
e = cv.Subdiv2DGetEdge(e, cv.CV_NEXT_AROUND_LEFT)
yield e
if e == e0:
break
def areas(edges):
seen = []
seensorted = []
for edge in edges:
pts = [ cv.Subdiv2DEdgeOrg(e) for e in facet_edges(edge) ]
if not (None in pts):
l = [p.pt for p in pts]
ls = sorted(l)
if not(ls in seensorted):
seen.append(l)
seensorted.append(ls)
return seen
for npoints in range(1, 200):
points = [ (random.randrange(w), random.randrange(h)) for i in range(npoints) ]
subdiv = cv.CreateSubdivDelaunay2D( (0,0,w,h), storage )
for p in points:
cv.SubdivDelaunay2DInsert( subdiv, p)
cv.CalcSubdivVoronoi2D(subdiv)
ars = areas([ cv.Subdiv2DRotateEdge(e, 1) for e in subdiv.edges ] + [ cv.Subdiv2DRotateEdge(e, 3) for e in subdiv.edges ])
self.assert_(len(ars) == len(set(points)))
if False:
img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 3)
cv.SetZero(img)
def T(x): return int(x) # int(300+x/16)
for pts in ars:
cv.FillConvexPoly( img, [(T(x),T(y)) for (x,y) in pts], cv.RGB(100+random.randrange(156),random.randrange(256),random.randrange(256)), cv.CV_AA, 0 );
for x,y in points:
cv.Circle(img, (T(x), T(y)), 3, cv.RGB(0,0,0), -1)
cv.ShowImage("snap", img)
if cv.WaitKey(10) > 0:
break
def perf_test_pow(self):
mt = cv.CreateMat(1000, 1000, cv.CV_32FC1)
dst = cv.CreateMat(1000, 1000, cv.CV_32FC1)
rng = cv.RNG(0)
cv.RandArr(rng, mt, cv.CV_RAND_UNI, 0, 1000.0)
mt[0,0] = 10
print
for a in [0.5, 2.0, 2.3, 2.4, 3.0, 37.1786] + [2.4]*10:
started = time.time()
for i in range(10):
cv.Pow(mt, dst, a)
took = (time.time() - started) / 1e7
print "%4.1f took %f ns" % (a, took * 1e9)
print dst[0,0], 10 ** 2.4
def test_access_row_col(self):
src = cv.CreateImage((8,3), 8, 1)
# Put these words
# Achilles
# Benedict
# Congreve
# in an array (3 rows, 8 columns).
# Then extract the array in various ways.
for r,w in enumerate(("Achilles", "Benedict", "Congreve")):
for c,v in enumerate(w):
src[r,c] = ord(v)
self.assertEqual(src.tostring(), "AchillesBenedictCongreve")
self.assertEqual(src[:,:].tostring(), "AchillesBenedictCongreve")
self.assertEqual(src[:,:4].tostring(), "AchiBeneCong")
self.assertEqual(src[:,0].tostring(), "ABC")
self.assertEqual(src[:,4:].tostring(), "llesdictreve")
self.assertEqual(src[::2,:].tostring(), "AchillesCongreve")
self.assertEqual(src[1:,:].tostring(), "BenedictCongreve")
self.assertEqual(src[1:2,:].tostring(), "Benedict")
self.assertEqual(src[::2,:4].tostring(), "AchiCong")
# The mats share the same storage, so updating one should update them all
lastword = src[2]
self.assertEqual(lastword.tostring(), "Congreve")
src[2,0] = ord('K')
self.assertEqual(lastword.tostring(), "Kongreve")
src[2,0] = ord('C')
# ABCD
# EFGH
# IJKL
#
# MNOP
# QRST
# UVWX
mt = cv.CreateMatND([2,3,4], cv.CV_8UC1)
for i in range(2):
for j in range(3):
for k in range(4):
mt[i,j,k] = ord('A') + k + 4 * (j + 3 * i)
self.assertEqual(mt[:,:,:1].tostring(), "AEIMQU")
self.assertEqual(mt[:,:1,:].tostring(), "ABCDMNOP")
self.assertEqual(mt[:1,:,:].tostring(), "ABCDEFGHIJKL")
self.assertEqual(mt[1,1].tostring(), "QRST")
self.assertEqual(mt[:,::2,:].tostring(), "ABCDIJKLMNOPUVWX")
# Exercise explicit GetRows
self.assertEqual(cv.GetRows(src, 0, 3).tostring(), "AchillesBenedictCongreve")
self.assertEqual(cv.GetRows(src, 0, 3, 1).tostring(), "AchillesBenedictCongreve")
self.assertEqual(cv.GetRows(src, 0, 3, 2).tostring(), "AchillesCongreve")
self.assertEqual(cv.GetRow(src, 0).tostring(), "Achilles")
self.assertEqual(cv.GetCols(src, 0, 4).tostring(), "AchiBeneCong")
self.assertEqual(cv.GetCol(src, 0).tostring(), "ABC")
self.assertEqual(cv.GetCol(src, 1).tostring(), "ceo")
self.assertEqual(cv.GetDiag(src, 0).tostring(), "Aen")
# Check that matrix type is preserved by the various operators
for mt in self.mat_types:
m = cv.CreateMat(5, 3, mt)
self.assertEqual(mt, cv.GetElemType(cv.GetRows(m, 0, 2)))
self.assertEqual(mt, cv.GetElemType(cv.GetRow(m, 0)))
self.assertEqual(mt, cv.GetElemType(cv.GetCols(m, 0, 2)))
self.assertEqual(mt, cv.GetElemType(cv.GetCol(m, 0)))
self.assertEqual(mt, cv.GetElemType(cv.GetDiag(m, 0)))
self.assertEqual(mt, cv.GetElemType(m[0]))
self.assertEqual(mt, cv.GetElemType(m[::2]))
self.assertEqual(mt, cv.GetElemType(m[:,0]))
self.assertEqual(mt, cv.GetElemType(m[:,:]))
self.assertEqual(mt, cv.GetElemType(m[::2,:]))
def test_addS_3D(self):
for dim in [ [1,1,4], [2,2,3], [7,4,3] ]:
for ty,ac in [ (cv.CV_32FC1, 'f'), (cv.CV_64FC1, 'd')]:
mat = cv.CreateMatND(dim, ty)
mat2 = cv.CreateMatND(dim, ty)
for increment in [ 0, 3, -1 ]:
cv.SetData(mat, array.array(ac, range(dim[0] * dim[1] * dim[2])), 0)
cv.AddS(mat, increment, mat2)
for i in range(dim[0]):
for j in range(dim[1]):
for k in range(dim[2]):
self.assert_(mat2[i,j,k] == mat[i,j,k] + increment)
def test_buffers(self):
ar = array.array('f', [7] * (360*640))
m = cv.CreateMat(360, 640, cv.CV_32FC1)
cv.SetData(m, ar, 4 * 640)
self.assert_(m[0,0] == 7.0)
m = cv.CreateMatND((360, 640), cv.CV_32FC1)
cv.SetData(m, ar, 4 * 640)
self.assert_(m[0,0] == 7.0)
m = cv.CreateImage((640, 360), cv.IPL_DEPTH_32F, 1)
cv.SetData(m, ar, 4 * 640)
self.assert_(m[0,0] == 7.0)
def xxtest_Filters(self):
print
m = cv.CreateMat(360, 640, cv.CV_32FC1)
d = cv.CreateMat(360, 640, cv.CV_32FC1)
for k in range(3, 21, 2):
started = time.time()
for i in range(1000):
cv.Smooth(m, m, param1=k)
print k, "took", time.time() - started
def assertSame(self, a, b):
w,h = cv.GetSize(a)
d = cv.CreateMat(h, w, cv.CV_8UC1)
cv.AbsDiff(a, b, d)
self.assert_(cv.CountNonZero(d) == 0)
def test_text(self):
img = cv.CreateImage((640,40), cv.IPL_DEPTH_8U, 1)
cv.SetZero(img)
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1)
message = "XgfooX"
cv.PutText(img, message, (320,30), font, 255)
((w,h),bl) = cv.GetTextSize(message, font)
# Find nonzero in X and Y
Xs = []
for x in range(640):
cv.SetImageROI(img, (x, 0, 1, 40))
Xs.append(cv.Sum(img)[0] > 0)
def firstlast(l):
return (l.index(True), len(l) - list(reversed(l)).index(True))
Ys = []
for y in range(40):
cv.SetImageROI(img, (0, y, 640, 1))
Ys.append(cv.Sum(img)[0] > 0)
x0,x1 = firstlast(Xs)
y0,y1 = firstlast(Ys)
actual_width = x1 - x0
actual_height = y1 - y0
# actual_width can be up to 8 pixels smaller than GetTextSize says
self.assert_(actual_width <= w)
self.assert_((w - actual_width) <= 8)
# actual_height can be up to 4 pixels smaller than GetTextSize says
self.assert_(actual_height <= (h + bl))
self.assert_(((h + bl) - actual_height) <= 4)
cv.ResetImageROI(img)
self.assert_(w != 0)
self.assert_(h != 0)
def test_sizes(self):
sizes = [ 1, 2, 3, 97, 255, 256, 257, 947 ]
for w in sizes:
for h in sizes:
# Create an IplImage
im = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.Set(im, 1)
self.assert_(cv.Sum(im)[0] == (w * h))
del im
# Create a CvMat
mt = cv.CreateMat(h, w, cv.CV_8UC1)
cv.Set(mt, 1)
self.assert_(cv.Sum(mt)[0] == (w * h))
random.seed(7)
for dim in range(1, cv.CV_MAX_DIM + 1):
for attempt in range(10):
dims = [ random.choice([1,1,1,1,2,3]) for i in range(dim) ]
mt = cv.CreateMatND(dims, cv.CV_8UC1)
cv.SetZero(mt)
self.assert_(cv.Sum(mt)[0] == 0)
# Set to all-ones, verify the sum
cv.Set(mt, 1)
expected = 1
for d in dims:
expected *= d
self.assert_(cv.Sum(mt)[0] == expected)
def test_random(self):
seeds = [ 0, 1, 2**48, 2**48 + 1 ]
sequences = set()
for s in seeds:
rng = cv.RNG(s)
sequences.add(str([cv.RandInt(rng) for i in range(10)]))
self.assert_(len(seeds) == len(sequences))
rng = cv.RNG(0)
im = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1)
cv.RandArr(rng, im, cv.CV_RAND_UNI, 0, 256)
cv.RandArr(rng, im, cv.CV_RAND_NORMAL, 128, 30)
if 1:
hist = cv.CreateHist([ 256 ], cv.CV_HIST_ARRAY, [ (0,255) ], 1)
cv.CalcHist([im], hist)
rng = cv.RNG()
for i in range(1000):
v = cv.RandReal(rng)
self.assert_(0 <= v)
self.assert_(v < 1)
for mode in [ cv.CV_RAND_UNI, cv.CV_RAND_NORMAL ]:
for fmt in self.mat_types:
mat = cv.CreateMat(64, 64, fmt)
cv.RandArr(cv.RNG(), mat, mode, (0,0,0,0), (1,1,1,1))
def test_MixChannels(self):
# First part - test the single case described in the documentation
rgba = cv.CreateMat(100, 100, cv.CV_8UC4)
bgr = cv.CreateMat(100, 100, cv.CV_8UC3)
alpha = cv.CreateMat(100, 100, cv.CV_8UC1)
cv.Set(rgba, (1,2,3,4))
cv.MixChannels([rgba], [bgr, alpha], [
(0, 2), # rgba[0] -> bgr[2]
(1, 1), # rgba[1] -> bgr[1]
(2, 0), # rgba[2] -> bgr[0]
(3, 3) # rgba[3] -> alpha[0]
])
self.assert_(bgr[0,0] == (3,2,1))
self.assert_(alpha[0,0] == 4)
# Second part. Choose random sets of sources and destinations,
# fill them with known values, choose random channel assignments,
# run cvMixChannels and check that the result is as expected.
random.seed(1)
for rows in [1,2,4,13,64,1000]:
for cols in [1,2,4,13,64,1000]:
for loop in range(5):
sources = [random.choice([1, 2, 3, 4]) for i in range(8)]
dests = [random.choice([1, 2, 3, 4]) for i in range(8)]
# make sure that fromTo does not contain duplicate destination channels, otherwise the result is undefined
while 1:
fromTo = [(random.randrange(-1, sum(sources)), random.randrange(sum(dests))) for i in range(random.randrange(1, 30))]
dests_set = list(set([j for (i, j) in fromTo]))
if len(dests_set) == len(dests):
break
# print sources
# print dests
# print fromTo
def CV_8UC(n):
return [cv.CV_8UC1, cv.CV_8UC2, cv.CV_8UC3, cv.CV_8UC4][n-1]
source_m = [cv.CreateMat(rows, cols, CV_8UC(c)) for c in sources]
dest_m = [cv.CreateMat(rows, cols, CV_8UC(c)) for c in dests]
def m00(m):
# return the contents of the N-channel mat m[0,0] as an N-length list
chans = cv.CV_MAT_CN(cv.GetElemType(m))
if chans == 1:
return [m[0,0]]
else:
return list(m[0,0])[:chans]
# Sources numbered from 50, destinations numbered from 100
for i in range(len(sources)):
s = sum(sources[:i]) + 50
cv.Set(source_m[i], (s, s+1, s+2, s+3))
self.assertEqual(m00(source_m[i]), [s, s+1, s+2, s+3][:sources[i]])
for i in range(len(dests)):
s = sum(dests[:i]) + 100
cv.Set(dest_m[i], (s, s+1, s+2, s+3))
self.assertEqual(m00(dest_m[i]), [s, s+1, s+2, s+3][:dests[i]])
# now run the sanity check
for i in range(len(sources)):
s = sum(sources[:i]) + 50
self.assertEqual(m00(source_m[i]), [s, s+1, s+2, s+3][:sources[i]])
for i in range(len(dests)):
s = sum(dests[:i]) + 100
self.assertEqual(m00(dest_m[i]), [s, s+1, s+2, s+3][:dests[i]])
cv.MixChannels(source_m, dest_m, fromTo)
expected = range(100, 100 + sum(dests))
for (i, j) in fromTo:
if i == -1:
expected[j] = 0.0
else:
expected[j] = 50 + i
actual = sum([m00(m) for m in dest_m], [])
self.assertEqual(actual, expected)
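# Allocation churn: create 1000 random-sized matrices while keeping only 20 alive at a time.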
def test_allocs(self):
mats = [ 0 for i in range(20) ]
for i in range(1000):
m = cv.CreateMat(random.randrange(10, 512), random.randrange(10, 512), cv.CV_8UC1)
j = random.randrange(len(mats))
mats[j] = m
cv.SetZero(m)
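# Element access: write a shuffled index pattern into mats/images, read it back, then check row-slice dims.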
def test_access(self):
cnames = { 1:cv.CV_32FC1, 2:cv.CV_32FC2, 3:cv.CV_32FC3, 4:cv.CV_32FC4 }
for w in range(1,11):
for h in range(2,11):
for c in [1,2]:
for o in [ cv.CreateMat(h, w, cnames[c]), cv.CreateImage((w,h), cv.IPL_DEPTH_32F, c) ][1:]:
pattern = [ (i,j) for i in range(w) for j in range(h) ]
random.shuffle(pattern)
for k,(i,j) in enumerate(pattern):
if c == 1:
o[j,i] = k
else:
o[j,i] = (k,) * c
for k,(i,j) in enumerate(pattern):
if c == 1:
self.assert_(o[j,i] == k)
else:
self.assert_(o[j,i] == (k,)*c)
test_mat = cv.CreateMat(2, 3, cv.CV_32FC1)
cv.SetData(test_mat, array.array('f', range(6)), 12)
self.assertEqual(cv.GetDims(test_mat[0]), (1, 3))
self.assertEqual(cv.GetDims(test_mat[1]), (1, 3))
self.assertEqual(cv.GetDims(test_mat[0:1]), (1, 3))
self.assertEqual(cv.GetDims(test_mat[1:2]), (1, 3))
self.assertEqual(cv.GetDims(test_mat[-1:]), (1, 3))
self.assertEqual(cv.GetDims(test_mat[-1]), (1, 3))
def xxxtest_corners(self):
a = cv.LoadImage("foo-mono.png", 0)
cv.AdaptiveThreshold(a, a, 255, param1=5)
scribble = cv.CreateImage(cv.GetSize(a), 8, 3)
cv.CvtColor(a, scribble, cv.CV_GRAY2BGR)
if 0:
eig_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
temp_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
pts = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 100, 0.04, 2, use_harris=1)
for p in pts:
cv.Circle( scribble, p, 1, cv.RGB(255,0,0), -1 )
self.snap(scribble)
canny = cv.CreateImage(cv.GetSize(a), 8, 1)
cv.SubRS(a, 255, canny)
self.snap(canny)
li = cv.HoughLines2(canny,
cv.CreateMemStorage(),
cv.CV_HOUGH_STANDARD,
1,
math.pi/180,
60,
0,
0)
for (rho,theta) in li:
print rho,theta
c = math.cos(theta)
s = math.sin(theta)
x0 = c*rho
y0 = s*rho
cv.Line(scribble,
(x0 + 1000*(-s), y0 + 1000*c),
(x0 + -1000*(-s), y0 - 1000*c),
(0,255,0))
self.snap(scribble)
def test_calibration(self):
def get_corners(mono, refine = False):
(ok, corners) = cv.FindChessboardCorners(mono, (num_x_ints, num_y_ints), cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE)
if refine and ok:
corners = cv.FindCornerSubPix(mono, corners, (5,5), (-1,-1), ( cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER, 30, 0.1 ))
return (ok, corners)
def mk_object_points(nimages, squaresize = 1):
opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1)
for i in range(nimages):
for j in range(num_pts):
opts[i * num_pts + j, 0] = (j / num_x_ints) * squaresize
opts[i * num_pts + j, 1] = (j % num_x_ints) * squaresize
opts[i * num_pts + j, 2] = 0
return opts
def mk_image_points(goodcorners):
ipts = cv.CreateMat(len(goodcorners) * num_pts, 2, cv.CV_32FC1)
for (i, co) in enumerate(goodcorners):
for j in range(num_pts):
ipts[i * num_pts + j, 0] = co[j][0]
ipts[i * num_pts + j, 1] = co[j][1]
return ipts
def mk_point_counts(nimages):
npts = cv.CreateMat(nimages, 1, cv.CV_32SC1)
for i in range(nimages):
npts[i, 0] = num_pts
return npts
def cvmat_iterator(cvmat):
for i in range(cvmat.rows):
for j in range(cvmat.cols):
yield cvmat[i,j]
def image_from_archive(tar, name):
member = tar.getmember(name)
filedata = tar.extractfile(member).read()
imagefiledata = cv.CreateMat(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
return cv.DecodeImageM(imagefiledata)
urllib.urlretrieve("http://pr.willowgarage.com/data/camera_calibration/camera_calibration.tar.gz", "camera_calibration.tar.gz")
tf = tarfile.open("camera_calibration.tar.gz")
num_x_ints = 8
num_y_ints = 6
num_pts = num_x_ints * num_y_ints
leftimages = [image_from_archive(tf, "wide/left%04d.pgm" % i) for i in range(3, 15)]
size = cv.GetSize(leftimages[0])
# Monocular test
if True:
corners = [get_corners(i) for i in leftimages]
goodcorners = [co for (im, (ok, co)) in zip(leftimages, corners) if ok]
ipts = mk_image_points(goodcorners)
opts = mk_object_points(len(goodcorners), .1)
npts = mk_point_counts(len(goodcorners))
intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
distortion = cv.CreateMat(4, 1, cv.CV_64FC1)
cv.SetZero(intrinsics)
cv.SetZero(distortion)
# focal lengths have 1/1 ratio
intrinsics[0,0] = 1.0
intrinsics[1,1] = 1.0
cv.CalibrateCamera2(opts, ipts, npts,
cv.GetSize(leftimages[0]),
intrinsics,
distortion,
cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
flags = 0) # cv.CV_CALIB_ZERO_TANGENT_DIST)
# print "D =", list(cvmat_iterator(distortion))
# print "K =", list(cvmat_iterator(intrinsics))
newK = cv.CreateMat(3, 3, cv.CV_64FC1)
cv.GetOptimalNewCameraMatrix(intrinsics, distortion, size, 1.0, newK)
# print "newK =", list(cvmat_iterator(newK))
mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
for K in [ intrinsics, newK ]:
cv.InitUndistortMap(K, distortion, mapx, mapy)
for img in leftimages[:1]:
r = cv.CloneMat(img)
cv.Remap(img, r, mapx, mapy)
# cv.ShowImage("snap", r)
# cv.WaitKey()
rightimages = [image_from_archive(tf, "wide/right%04d.pgm" % i) for i in range(3, 15)]
# Stereo test
if True:
lcorners = [get_corners(i) for i in leftimages]
rcorners = [get_corners(i) for i in rightimages]
good = [(lco, rco) for ((lok, lco), (rok, rco)) in zip(lcorners, rcorners) if (lok and rok)]
lipts = mk_image_points([l for (l, r) in good])
ripts = mk_image_points([r for (l, r) in good])
opts = mk_object_points(len(good), .108)
npts = mk_point_counts(len(good))
flags = cv.CV_CALIB_FIX_ASPECT_RATIO | cv.CV_CALIB_FIX_INTRINSIC
flags = cv.CV_CALIB_SAME_FOCAL_LENGTH + cv.CV_CALIB_FIX_PRINCIPAL_POINT + cv.CV_CALIB_ZERO_TANGENT_DIST
flags = 0
T = cv.CreateMat(3, 1, cv.CV_64FC1)
R = cv.CreateMat(3, 3, cv.CV_64FC1)
lintrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
ldistortion = cv.CreateMat(4, 1, cv.CV_64FC1)
rintrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
rdistortion = cv.CreateMat(4, 1, cv.CV_64FC1)
lR = cv.CreateMat(3, 3, cv.CV_64FC1)
rR = cv.CreateMat(3, 3, cv.CV_64FC1)
lP = cv.CreateMat(3, 4, cv.CV_64FC1)
rP = cv.CreateMat(3, 4, cv.CV_64FC1)
lmapx = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
lmapy = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
rmapx = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
rmapy = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
cv.SetIdentity(lintrinsics)
cv.SetIdentity(rintrinsics)
lintrinsics[0,2] = size[0] * 0.5
lintrinsics[1,2] = size[1] * 0.5
rintrinsics[0,2] = size[0] * 0.5
rintrinsics[1,2] = size[1] * 0.5
cv.SetZero(ldistortion)
cv.SetZero(rdistortion)
cv.StereoCalibrate(opts, lipts, ripts, npts,
lintrinsics, ldistortion,
rintrinsics, rdistortion,
size,
R, # R
T, # T
cv.CreateMat(3, 3, cv.CV_32FC1), # E
cv.CreateMat(3, 3, cv.CV_32FC1), # F
(cv.CV_TERMCRIT_ITER + cv.CV_TERMCRIT_EPS, 30, 1e-5),
flags)
for a in [-1, 0, 1]:
cv.StereoRectify(lintrinsics,
rintrinsics,
ldistortion,
rdistortion,
size,
R,
T,
lR, rR, lP, rP,
alpha = a)
cv.InitUndistortRectifyMap(lintrinsics, ldistortion, lR, lP, lmapx, lmapy)
cv.InitUndistortRectifyMap(rintrinsics, rdistortion, rR, rP, rmapx, rmapy)
for l,r in zip(leftimages, rightimages)[:1]:
l_ = cv.CloneMat(l)
r_ = cv.CloneMat(r)
cv.Remap(l, l_, lmapx, lmapy)
cv.Remap(r, r_, rmapx, rmapy)
# cv.ShowImage("snap", l_)
# cv.WaitKey()
def xxx_test_Disparity(self):
print
for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F" ]:
for c in [1,2,3,4]:
nm = "%sC%d" % (t, c)
print "int32 CV_%s=%d" % (nm, eval("cv.CV_%s" % nm))
return
integral = cv.CreateImage((641,481), cv.IPL_DEPTH_32S, 1)
L = cv.LoadImage("f0-left.png", 0)
R = cv.LoadImage("f0-right.png", 0)
d = cv.CreateImage(cv.GetSize(L), cv.IPL_DEPTH_8U, 1)
Rn = cv.CreateImage(cv.GetSize(L), cv.IPL_DEPTH_8U, 1)
started = time.time()
for i in range(100):
cv.AbsDiff(L, R, d)
cv.Integral(d, integral)
cv.SetImageROI(R, (1, 1, 639, 479))
cv.SetImageROI(Rn, (0, 0, 639, 479))
cv.Copy(R, Rn)
R = Rn
cv.ResetImageROI(R)
print 1e3 * (time.time() - started) / 100, "ms"
# self.snap(d)
def local_test_lk(self):
seq = [cv.LoadImage("track/%06d.png" % i, 0) for i in range(40)]
crit = (cv.CV_TERMCRIT_ITER, 100, 0.1)
crit = (cv.CV_TERMCRIT_EPS, 0, 0.001)
for i in range(1,40):
r = cv.CalcOpticalFlowPyrLK(seq[0], seq[i], None, None, [(32,32)], (7,7), 0, crit, 0)
pos = r[0][0]
#print pos, r[2]
a = cv.CreateImage((1024,1024), 8, 1)
b = cv.CreateImage((1024,1024), 8, 1)
cv.Resize(seq[0], a, cv.CV_INTER_NN)
cv.Resize(seq[i], b, cv.CV_INTER_NN)
cv.Line(a, (0, 512), (1024, 512), 255)
cv.Line(a, (512,0), (512,1024), 255)
x,y = [int(c) for c in pos]
cv.Line(b, (0, y*16), (1024, y*16), 255)
cv.Line(b, (x*16,0), (x*16,1024), 255)
#self.snapL([a,b])
def local_test_Haar(self):
import os
hcfile = os.environ['OPENCV_ROOT'] + '/share/opencv/haarcascades/haarcascade_frontalface_default.xml'
hc = cv.Load(hcfile)
img = cv.LoadImage('Stu.jpg', 0)
faces = cv.HaarDetectObjects(img, hc, cv.CreateMemStorage())
self.assert_(len(faces) > 0)
for (x,y,w,h),n in faces:
cv.Rectangle(img, (x,y), (x+w,y+h), 255)
#self.snap(img)
def test_create(self):
#""" CvCreateImage, CvCreateMat and the header-only form """
for (w,h) in [ (320,400), (640,480), (1024, 768) ]:
data = "z" * (w * h)
im = cv.CreateImage((w,h), 8, 1)
cv.SetData(im, data, w)
im2 = cv.CreateImageHeader((w,h), 8, 1)
cv.SetData(im2, data, w)
self.assertSame(im, im2)
m = cv.CreateMat(h, w, cv.CV_8UC1)
cv.SetData(m, data, w)
m2 = cv.CreateMatHeader(h, w, cv.CV_8UC1)
cv.SetData(m2, data, w)
self.assertSame(m, m2)
self.assertSame(im, m)
self.assertSame(im2, m2)
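# GetImage/GetMat conversions should alias the same buffer; verify via refcounts on the shared data.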
def test_casts(self):
im = cv.GetImage(self.get_sample("samples/c/lena.jpg", 0))
data = im.tostring()
cv.SetData(im, data, cv.GetSize(im)[0])
start_count = sys.getrefcount(data)
# Conversions should produce same data
self.assertSame(im, cv.GetImage(im))
m = cv.GetMat(im)
self.assertSame(im, m)
self.assertSame(m, cv.GetImage(m))
im2 = cv.GetImage(m)
self.assertSame(im, im2)
self.assertEqual(sys.getrefcount(data), start_count + 2)
del im2
self.assertEqual(sys.getrefcount(data), start_count + 1)
del m
self.assertEqual(sys.getrefcount(data), start_count)
del im
self.assertEqual(sys.getrefcount(data), start_count - 1)
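# Morphology: default-argument equivalence for Dilate, then Dilate/Erode/MorphologyEx over assorted structuring elements.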
def test_morphological(self):
im = cv.CreateImage((128, 128), cv.IPL_DEPTH_8U, 1)
cv.Resize(cv.GetImage(self.get_sample("samples/c/lena.jpg", 0)), im)
dst = cv.CloneImage(im)
# Check defaults by asserting that all these operations produce the same image
funs = [
lambda: cv.Dilate(im, dst),
lambda: cv.Dilate(im, dst, None),
lambda: cv.Dilate(im, dst, iterations = 1),
lambda: cv.Dilate(im, dst, element = None),
lambda: cv.Dilate(im, dst, iterations = 1, element = None),
lambda: cv.Dilate(im, dst, element = None, iterations = 1),
]
src_h = self.hashimg(im)
hashes = set()
for f in funs:
f()
hashes.add(self.hashimg(dst))
self.assertNotEqual(src_h, self.hashimg(dst))
# Source image should be untouched
self.assertEqual(self.hashimg(im), src_h)
# All results should be same
self.assertEqual(len(hashes), 1)
# self.snap(dst)
shapes = [eval("cv.CV_SHAPE_%s" % s) for s in ['RECT', 'CROSS', 'ELLIPSE']]
elements = [cv.CreateStructuringElementEx(sz, sz, sz / 2 + 1, sz / 2 + 1, shape) for sz in [3, 4, 7, 20] for shape in shapes]
elements += [cv.CreateStructuringElementEx(7, 7, 3, 3, cv.CV_SHAPE_CUSTOM, [1] * 49)]
for e in elements:
for iter in [1, 2]:
cv.Dilate(im, dst, e, iter)
cv.Erode(im, dst, e, iter)
temp = cv.CloneImage(im)
for op in ["OPEN", "CLOSE", "GRADIENT", "TOPHAT", "BLACKHAT"]:
cv.MorphologyEx(im, dst, temp, e, eval("cv.CV_MOP_%s" % op), iter)
def test_getmat_nd(self):
# 1D CvMatND should yield (N,1) CvMat
matnd = cv.CreateMatND([13], cv.CV_8UC1)
self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (13, 1))
# 2D CvMatND should yield 2D CvMat
matnd = cv.CreateMatND([11, 12], cv.CV_8UC1)
self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (11, 12))
if 0: # XXX - ticket #149
# 3D CvMatND should yield (N,1) CvMat
matnd = cv.CreateMatND([7, 8, 9], cv.CV_8UC1)
self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (7 * 8 * 9, 1))
def test_clipline(self):
self.assert_(cv.ClipLine((100,100), (-100,0), (500,0)) == ((0,0), (99,0)))
self.assert_(cv.ClipLine((100,100), (-100,0), (-200,0)) == None)
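# Smoke test: run Sobel/Laplace/corner operators on lena at several aperture sizes and just check they execute.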
def test_smoke_image_processing(self):
src = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)
#dst = cv.CloneImage(src)
for aperture_size in [1, 3, 5, 7]:
dst_16s = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_16S, 1)
dst_32f = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_32F, 1)
cv.Sobel(src, dst_16s, 1, 1, aperture_size)
cv.Laplace(src, dst_16s, aperture_size)
cv.PreCornerDetect(src, dst_32f)
eigendst = cv.CreateImage((6*cv.GetSize(src)[0], cv.GetSize(src)[1]), cv.IPL_DEPTH_32F, 1)
cv.CornerEigenValsAndVecs(src, eigendst, 8, aperture_size)
cv.CornerMinEigenVal(src, dst_32f, 8, aperture_size)
cv.CornerHarris(src, dst_32f, 8, aperture_size)
cv.CornerHarris(src, dst_32f, 8, aperture_size, 0.1)
#self.snap(dst)
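# FitLine on 2D/3D point lists and on the convex hull of GoodFeaturesToTrack output.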
def test_fitline(self):
cv.FitLine([ (1,1), (10,10) ], cv.CV_DIST_L2, 0, 0.01, 0.01)
cv.FitLine([ (1,1,1), (10,10,10) ], cv.CV_DIST_L2, 0, 0.01, 0.01)
a = self.get_sample("samples/c/lena.jpg", 0)
eig_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
temp_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
pts = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 100, 0.04, 2, useHarris=1)
hull = cv.ConvexHull2(pts, cv.CreateMemStorage(), return_points = 1)
cv.FitLine(hull, cv.CV_DIST_L2, 0, 0.01, 0.01)
def test_moments(self):
im = self.get_sample("samples/c/lena.jpg", 0)
mo = cv.Moments(im)
for fld in ["m00", "m10", "m01", "m20", "m11", "m02", "m30", "m21", "m12", "m03", "mu20", "mu11", "mu02", "mu30", "mu21", "mu12", "mu03", "inv_sqrt_m00"]:
self.assert_(isinstance(getattr(mo, fld), float))
x = getattr(mo, fld)
self.assert_(isinstance(x, float))
orders = []
for x_order in range(4):
for y_order in range(4 - x_order):
orders.append((x_order, y_order))
# Just a smoke test for these three functions
[ cv.GetSpatialMoment(mo, xo, yo) for (xo,yo) in orders ]
[ cv.GetCentralMoment(mo, xo, yo) for (xo,yo) in orders ]
[ cv.GetNormalizedCentralMoment(mo, xo, yo) for (xo,yo) in orders ]
# For the Hu moments we can do slightly better: check that the first
# six are invariant w.r.t. image reflection, and that the 7th
# is negated.
hu0 = cv.GetHuMoments(cv.Moments(im))
cv.Flip(im, im, 1)
hu1 = cv.GetHuMoments(cv.Moments(im))
self.assert_(len(hu0) == 7)
self.assert_(len(hu1) == 7)
for i in range(5):
self.assert_(abs(hu0[i] - hu1[i]) < 1e-6)
self.assert_(abs(hu0[i] + hu1[i]) < 1e-6)
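# JPEG EncodeImage/DecodeImage: quality parameter sweep, default quality, and round-trip size check.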
def test_encode(self):
im = self.get_sample("samples/c/lena.jpg", 1)
jpeg = cv.EncodeImage(".jpeg", im)
# Smoke jpeg compression at various qualities
sizes = dict([(qual, cv.EncodeImage(".jpeg", im, [cv.CV_IMWRITE_JPEG_QUALITY, qual]).cols) for qual in range(5, 100, 5)])
# Check that the default QUALITY is 95
self.assertEqual(cv.EncodeImage(".jpeg", im).cols, sizes[95])
# Check that the 'round-trip' gives an image of the same size
round_trip = cv.DecodeImage(cv.EncodeImage(".jpeg", im, [cv.CV_IMWRITE_JPEG_QUALITY, 10]))
self.assert_(cv.GetSize(round_trip) == cv.GetSize(im))
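# cv.Reduce along rows and columns with SUM/AVG/MAX/MIN.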
def test_reduce(self):
srcmat = cv.CreateMat(2, 3, cv.CV_32FC1)
# 0 1 2
# 3 4 5
srcmat[0,0] = 0
srcmat[0,1] = 1
srcmat[0,2] = 2
srcmat[1,0] = 3
srcmat[1,1] = 4
srcmat[1,2] = 5
def doreduce(siz, rfunc):
dst = cv.CreateMat(siz[0], siz[1], cv.CV_32FC1)
rfunc(dst)
if siz[0] != 1:
return [dst[i,0] for i in range(siz[0])]
else:
return [dst[0,i] for i in range(siz[1])]
# exercise dim
self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst)), [3, 5, 7])
self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, -1)), [3, 5, 7])
self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, 0)), [3, 5, 7])
self.assertEqual(doreduce((2,1), lambda dst: cv.Reduce(srcmat, dst, 1)), [3, 12])
# exercise op
self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_SUM)), [3, 5, 7])
self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_AVG)), [1.5, 2.5, 3.5])
self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_MAX)), [3, 4, 5])
self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_MIN)), [0, 1, 2])
# exercise both dim and op
self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, 0, cv.CV_REDUCE_MAX)), [3, 4, 5])
self.assertEqual(doreduce((2,1), lambda dst: cv.Reduce(srcmat, dst, 1, cv.CV_REDUCE_MAX)), [2, 5])
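# Wrap a 1x32 CvMat in a helper class with Python arithmetic operators and compare elementwise against scalar math.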
def test_operations(self):
class Im:
def __init__(self, data = None):
self.m = cv.CreateMat(1, 32, cv.CV_32FC1)
if data:
cv.SetData(self.m, array.array('f', data), 128)
def __add__(self, other):
r = Im()
if isinstance(other, Im):
cv.Add(self.m, other.m, r.m)
else:
cv.AddS(self.m, (other,), r.m)
return r
def __sub__(self, other):
r = Im()
if isinstance(other, Im):
cv.Sub(self.m, other.m, r.m)
else:
cv.SubS(self.m, (other,), r.m)
return r
def __rsub__(self, other):
r = Im()
cv.SubRS(self.m, (other,), r.m)
return r
def __mul__(self, other):
r = Im()
if isinstance(other, Im):
cv.Mul(self.m, other.m, r.m)
else:
cv.ConvertScale(self.m, r.m, other)
return r
def __rmul__(self, other):
r = Im()
cv.ConvertScale(self.m, r.m, other)
return r
def __div__(self, other):
r = Im()
if isinstance(other, Im):
cv.Div(self.m, other.m, r.m)
else:
cv.ConvertScale(self.m, r.m, 1.0 / other)
return r
def __pow__(self, other):
r = Im()
cv.Pow(self.m, r.m, other)
return r
def __abs__(self):
r = Im()
cv.Abs(self.m, r.m)
return r
def __getitem__(self, i):
return self.m[0,i]
def verify(op):
r = op(a, b)
for i in range(32):
expected = op(a[i], b[i])
self.assertAlmostEqual(expected, r[i], 4)
a = Im([random.randrange(1, 256) for i in range(32)])
b = Im([random.randrange(1, 256) for i in range(32)])
# simple operations first
verify(lambda x, y: x + y)
verify(lambda x, y: x + 3)
verify(lambda x, y: x + 0)
verify(lambda x, y: x + -8)
verify(lambda x, y: x - y)
verify(lambda x, y: x - 1)
verify(lambda x, y: 1 - x)
verify(lambda x, y: abs(x))
verify(lambda x, y: x * y)
verify(lambda x, y: x * 3)
verify(lambda x, y: x / y)
verify(lambda x, y: x / 2)
for p in [-2, -1, -0.5, -0.1, 0, 0.1, 0.5, 1, 2 ]:
verify(lambda x, y: (x ** p) + (y ** p))
# Combinations...
verify(lambda x, y: x - 4 * abs(y))
verify(lambda x, y: abs(y) / x)
# a polynomial
verify(lambda x, y: 2 * x + 3 * (y ** 0.5))
def temp_test(self):
cv.temp_test()
def failing_test_rand_GetStarKeypoints(self):
# GetStarKeypoints [<cvmat(type=4242400d rows=64 cols=64 step=512 )>, <cv.cvmemstorage object at 0xb7cc40d0>, (45, 0.73705234376883488, 0.64282591451367344, 0.1567738743689836, 3)]
print cv.CV_MAT_CN(0x4242400d)
mat = cv.CreateMat( 64, 64, cv.CV_32FC2)
cv.GetStarKeypoints(mat, cv.CreateMemStorage(), (45, 0.73705234376883488, 0.64282591451367344, 0.1567738743689836, 3))
print mat
def test_rand_PutText(self):
#""" Test for bug 2829336 """
mat = cv.CreateMat( 64, 64, cv.CV_8UC1)
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1)
cv.PutText(mat, chr(127), (20, 20), font, 255)
def failing_test_rand_FindNearestPoint2D(self):
subdiv = cv.CreateSubdivDelaunay2D((0,0,100,100), cv.CreateMemStorage())
cv.SubdivDelaunay2DInsert( subdiv, (50, 50))
cv.CalcSubdivVoronoi2D(subdiv)
print
for e in subdiv.edges:
print e,
print " ", cv.Subdiv2DEdgeOrg(e)
print " ", cv.Subdiv2DEdgeOrg(cv.Subdiv2DRotateEdge(e, 1)), cv.Subdiv2DEdgeDst(cv.Subdiv2DRotateEdge(e, 1))
print "nearest", cv.FindNearestPoint2D(subdiv, (1.0, 1.0))
class DocumentFragmentTests(OpenCVTests):
""" Test the fragments of code that are included in the documentation """
def setUp(self):
OpenCVTests.setUp(self)
sys.path.append("../doc/python_fragments")
def test_precornerdetect(self):
from precornerdetect import precornerdetect
im = self.get_sample("samples/c/right01.jpg", 0)
imf = cv.CreateMat(im.rows, im.cols, cv.CV_32FC1)
cv.ConvertScale(im, imf)
(r0,r1) = precornerdetect(imf)
for r in (r0, r1):
self.assertEqual(im.cols, r.cols)
self.assertEqual(im.rows, r.rows)
def test_findstereocorrespondence(self):
from findstereocorrespondence import findstereocorrespondence
(l,r) = [self.get_sample("doc/pics/tsukuba_%s.png" % c, cv.CV_LOAD_IMAGE_GRAYSCALE) for c in "lr"]
(disparity_left, disparity_right) = findstereocorrespondence(l, r)
disparity_left_visual = cv.CreateMat(l.rows, l.cols, cv.CV_8U)
cv.ConvertScale(disparity_left, disparity_left_visual, -16)
# self.snap(disparity_left_visual)
def test_calchist(self):
from calchist import hs_histogram
i1 = self.get_sample("samples/c/lena.jpg")
i2 = self.get_sample("doc/pics/building.jpg")
i3 = cv.CloneMat(i1)
cv.Flip(i3, i3, 1)
h1 = hs_histogram(i1)
h2 = hs_histogram(i2)
h3 = hs_histogram(i3)
self.assertEqual(self.hashimg(h1), self.hashimg(h3))
self.assertNotEqual(self.hashimg(h1), self.hashimg(h2))
class NewTests(OpenCVTests):
pass
if __name__ == '__main__':
print "testing", cv.__version__
random.seed(0)
unittest.main()
# optlist, args = getopt.getopt(sys.argv[1:], 'l:rd')
# loops = 1
# shuffle = 0
# doc_frags = False
# for o,a in optlist:
# if o == '-l':
# loops = int(a)
# if o == '-r':
# shuffle = 1
# if o == '-d':
# doc_frags = True
#
# cases = [PreliminaryTests, FunctionTests, AreaTests]
# if doc_frags:
# cases += [DocumentFragmentTests]
# everything = [(tc, t) for tc in cases for t in unittest.TestLoader().getTestCaseNames(tc) ]
# if len(args) == 0:
# # cases = [NewTests]
# args = everything
# else:
# args = [(tc, t) for (tc, t) in everything if t in args]
#
# suite = unittest.TestSuite()
# for l in range(loops):
# if shuffle:
# random.shuffle(args)
# for tc,t in args:
# suite.addTest(tc(t))
# unittest.TextTestRunner(verbosity=2).run(suite)
import urllib
import cv
import Image
import unittest
class TestLoadImage(unittest.TestCase):
def setUp(self):
open("large.jpg", "w").write(urllib.urlopen("http://www.cs.ubc.ca/labs/lci/curious_george/img/ROS_bug_imgs/IMG_3560.jpg").read())
def test_load(self):
pilim = Image.open("large.jpg")
cvim = cv.LoadImage("large.jpg")
self.assert_(len(pilim.tostring()) == len(cvim.tostring()))
if __name__ == '__main__':
unittest.main()
import unittest
import random
import time
import math
import sys
import array
import os
import cv
def find_sample(s):
for d in ["../samples/c/", "../doc/pics/"]:
path = os.path.join(d, s)
if os.access(path, os.R_OK):
return path
return s
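# Regression tests named after their bug-tracker ticket numbers.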
class TestTickets(unittest.TestCase):
def test_2542670(self):
xys = [(94, 121), (94, 122), (93, 123), (92, 123), (91, 124), (91, 125), (91, 126), (92, 127), (92, 128), (92, 129), (92, 130), (92, 131), (91, 132), (90, 131), (90, 130), (90, 131), (91, 132), (92, 133), (92, 134), (93, 135), (94, 136), (94, 137), (94, 138), (95, 139), (96, 140), (96, 141), (96, 142), (96, 143), (97, 144), (97, 145), (98, 146), (99, 146), (100, 146), (101, 146), (102, 146), (103, 146), (104, 146), (105, 146), (106, 146), (107, 146), (108, 146), (109, 146), (110, 146), (111, 146), (112, 146), (113, 146), (114, 146), (115, 146), (116, 146), (117, 146), (118, 146), (119, 146), (120, 146), (121, 146), (122, 146), (123, 146), (124, 146), (125, 146), (126, 146), (126, 145), (126, 144), (126, 143), (126, 142), (126, 141), (126, 140), (127, 139), (127, 138), (127, 137), (127, 136), (127, 135), (127, 134), (127, 133), (128, 132), (129, 132), (130, 131), (131, 130), (131, 129), (131, 128), (132, 127), (133, 126), (134, 125), (134, 124), (135, 123), (136, 122), (136, 121), (135, 121), (134, 121), (133, 121), (132, 121), (131, 121), (130, 121), (129, 121), (128, 121), (127, 121), (126, 121), (125, 121), (124, 121), (123, 121), (122, 121), (121, 121), (120, 121), (119, 121), (118, 121), (117, 121), (116, 121), (115, 121), (114, 121), (113, 121), (112, 121), (111, 121), (110, 121), (109, 121), (108, 121), (107, 121), (106, 121), (105, 121), (104, 121), (103, 121), (102, 121), (101, 121), (100, 121), (99, 121), (98, 121), (97, 121), (96, 121), (95, 121)]
#xys = xys[:12] + xys[16:]
pts = cv.CreateMat(len(xys), 1, cv.CV_32SC2)
for i,(x,y) in enumerate(xys):
pts[i,0] = (x, y)
storage = cv.CreateMemStorage()
hull = cv.ConvexHull2(pts, storage)
hullp = cv.ConvexHull2(pts, storage, return_points = 1)
defects = cv.ConvexityDefects(pts, hull, storage)
vis = cv.CreateImage((1000,1000), 8, 3)
x0 = min([x for (x,y) in xys]) - 10
x1 = max([x for (x,y) in xys]) + 10
y0 = min([y for (x,y) in xys]) - 10
y1 = max([y for (x,y) in xys]) + 10
def xform(pt):
x,y = pt
return (1000 * (x - x0) / (x1 - x0),
1000 * (y - y0) / (y1 - y0))
for d in defects[:2]:
cv.Zero(vis)
# First draw the defect as a red triangle
cv.FillConvexPoly(vis, [xform(p) for p in d[:3]], cv.RGB(255,0,0))
# Draw the convex hull as a thick green line
for a,b in zip(hullp, hullp[1:]):
cv.Line(vis, xform(a), xform(b), cv.RGB(0,128,0), 3)
# Draw the original contour as a white line
for a,b in zip(xys, xys[1:]):
cv.Line(vis, xform(a), xform(b), (255,255,255))
self.snap(vis)
def test_2686307(self):
lena = cv.LoadImage(find_sample("lena.jpg"), 1)
dst = cv.CreateImage((512,512), 8, 3)
cv.Set(dst, (128,192,255))
mask = cv.CreateImage((512,512), 8, 1)
cv.Zero(mask)
cv.Rectangle(mask, (10,10), (300,100), 255, -1)
cv.Copy(lena, dst, mask)
self.snapL([lena, dst, mask])
m = cv.CreateMat(480, 640, cv.CV_8UC1)
print "ji", m
print m.rows, m.cols, m.type, m.step
def snap(self, img):
self.snapL([img])
def snapL(self, L):
for i,img in enumerate(L):
cv.NamedWindow("snap-%d" % i, 1)
cv.ShowImage("snap-%d" % i, img)
cv.WaitKey()
cv.DestroyAllWindows()
if __name__ == '__main__':
random.seed(0)
if len(sys.argv) == 1:
suite = unittest.TestLoader().loadTestsFromTestCase(TestTickets)
unittest.TextTestRunner(verbosity=2).run(suite)
else:
suite = unittest.TestSuite()
suite.addTest(TestTickets(sys.argv[1]))
unittest.TextTestRunner(verbosity=2).run(suite)
# -*- coding: utf-8 -*-
# transformations.py
# Copyright (c) 2006, Christoph Gohlke
# Copyright (c) 2006-2009, The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holders nor the names of any
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Homogeneous Transformation Matrices and Quaternions.
A library for calculating 4x4 matrices for translating, rotating, reflecting,
scaling, shearing, projecting, orthogonalizing, and superimposing arrays of
3D homogeneous coordinates as well as for converting between rotation matrices,
Euler angles, and quaternions. Also includes an Arcball control object and
functions to decompose transformation matrices.
:Authors:
`Christoph Gohlke <http://www.lfd.uci.edu/~gohlke/>`__,
Laboratory for Fluorescence Dynamics, University of California, Irvine
:Version: 20090418
Requirements
------------
* `Python 2.6 <http://www.python.org>`__
* `Numpy 1.3 <http://numpy.scipy.org>`__
* `transformations.c 20090418 <http://www.lfd.uci.edu/~gohlke/>`__
(optional implementation of some functions in C)
Notes
-----
Matrices (M) can be inverted using numpy.linalg.inv(M), concatenated using
numpy.dot(M0, M1), or used to transform homogeneous coordinates (v) using
numpy.dot(M, v) for shape (4, \*) arrays of points stored as columns, or
numpy.dot(v, M.T) for shape (\*, 4) arrays of points stored as rows.
Calculations are carried out with numpy.float64 precision.
This Python implementation is not optimized for speed.
Vector, point, quaternion, and matrix function arguments are expected to be
"array like", i.e. tuple, list, or numpy arrays.
Return types are numpy arrays unless specified otherwise.
Angles are in radians unless specified otherwise.
Quaternions ix+jy+kz+w are represented as [x, y, z, w].
Use the transpose of transformation matrices for OpenGL glMultMatrixd().
A triple of Euler angles can be applied/interpreted in 24 ways, which can
be specified using a 4 character string or encoded 4-tuple:
*Axes 4-string*: e.g. 'sxyz' or 'ryxy'
- first character : rotations are applied to 's'tatic or 'r'otating frame
- remaining characters : successive rotation axis 'x', 'y', or 'z'
*Axes 4-tuple*: e.g. (0, 0, 0, 0) or (1, 1, 1, 1)
- inner axis: code of axis ('x':0, 'y':1, 'z':2) of rightmost matrix.
- parity : even (0) if inner axis 'x' is followed by 'y', 'y' is followed
by 'z', or 'z' is followed by 'x'. Otherwise odd (1).
- repetition : first and last axis are same (1) or different (0).
- frame : rotations are applied to static (0) or rotating (1) frame.
References
----------
(1) Matrices and transformations. Ronald Goldman.
In "Graphics Gems I", pp 472-475. Morgan Kaufmann, 1990.
(2) More matrices and transformations: shear and pseudo-perspective.
Ronald Goldman. In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(3) Decomposing a matrix into simple transformations. Spencer Thomas.
In "Graphics Gems II", pp 320-323. Morgan Kaufmann, 1991.
(4) Recovering the data from the transformation matrix. Ronald Goldman.
In "Graphics Gems II", pp 324-331. Morgan Kaufmann, 1991.
(5) Euler angle conversion. Ken Shoemake.
In "Graphics Gems IV", pp 222-229. Morgan Kaufmann, 1994.
(6) Arcball rotation control. Ken Shoemake.
In "Graphics Gems IV", pp 175-192. Morgan Kaufmann, 1994.
(7) Representing attitude: Euler angles, unit quaternions, and rotation
vectors. James Diebel. 2006.
(8) A discussion of the solution for the best rotation to relate two sets
of vectors. W Kabsch. Acta Cryst. 1978. A34, 827-828.
(9) Closed-form solution of absolute orientation using unit quaternions.
BKP Horn. J Opt Soc Am A. 1987. 4(4), 629-642.
(10) Quaternions. Ken Shoemake.
http://www.sfu.ca/~jwa3/cmpt461/files/quatut.pdf
(11) From quaternion to matrix and back. JMP van Waveren. 2005.
http://www.intel.com/cd/ids/developer/asmo-na/eng/293748.htm
(12) Uniform random rotations. Ken Shoemake.
In "Graphics Gems III", pp 124-132. Morgan Kaufmann, 1992.
Examples
--------
>>> alpha, beta, gamma = 0.123, -1.234, 2.345
>>> origin, xaxis, yaxis, zaxis = (0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)
>>> I = identity_matrix()
>>> Rx = rotation_matrix(alpha, xaxis)
>>> Ry = rotation_matrix(beta, yaxis)
>>> Rz = rotation_matrix(gamma, zaxis)
>>> R = concatenate_matrices(Rx, Ry, Rz)
>>> euler = euler_from_matrix(R, 'rxyz')
>>> numpy.allclose([alpha, beta, gamma], euler)
True
>>> Re = euler_matrix(alpha, beta, gamma, 'rxyz')
>>> is_same_transform(R, Re)
True
>>> al, be, ga = euler_from_matrix(Re, 'rxyz')
>>> is_same_transform(Re, euler_matrix(al, be, ga, 'rxyz'))
True
>>> qx = quaternion_about_axis(alpha, xaxis)
>>> qy = quaternion_about_axis(beta, yaxis)
>>> qz = quaternion_about_axis(gamma, zaxis)
>>> q = quaternion_multiply(qx, qy)
>>> q = quaternion_multiply(q, qz)
>>> Rq = quaternion_matrix(q)
>>> is_same_transform(R, Rq)
True
>>> S = scale_matrix(1.23, origin)
>>> T = translation_matrix((1, 2, 3))
>>> Z = shear_matrix(beta, xaxis, origin, zaxis)
>>> R = random_rotation_matrix(numpy.random.rand(3))
>>> M = concatenate_matrices(T, R, Z, S)
>>> scale, shear, angles, trans, persp = decompose_matrix(M)
>>> numpy.allclose(scale, 1.23)
True
>>> numpy.allclose(trans, (1, 2, 3))
True
>>> numpy.allclose(shear, (0, math.tan(beta), 0))
True
>>> is_same_transform(R, euler_matrix(axes='sxyz', *angles))
True
>>> M1 = compose_matrix(scale, shear, angles, trans, persp)
>>> is_same_transform(M, M1)
True
"""
from __future__ import division
import warnings
import math
import numpy
# Documentation in HTML format can be generated with Epydoc
__docformat__ = "restructuredtext en"
def identity_matrix():
"""Return 4x4 identity/unit matrix.
>>> I = identity_matrix()
>>> numpy.allclose(I, numpy.dot(I, I))
True
>>> numpy.sum(I), numpy.trace(I)
(4.0, 4.0)
>>> numpy.allclose(I, numpy.identity(4, dtype=numpy.float64))
True
"""
return numpy.identity(4, dtype=numpy.float64)
def translation_matrix(direction):
"""Return matrix to translate by direction vector.
>>> v = numpy.random.random(3) - 0.5
>>> numpy.allclose(v, translation_matrix(v)[:3, 3])
True
"""
M = numpy.identity(4)
M[:3, 3] = direction[:3]
return M
def translation_from_matrix(matrix):
"""Return translation vector from translation matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = translation_from_matrix(translation_matrix(v0))
>>> numpy.allclose(v0, v1)
True
"""
return numpy.array(matrix, copy=False)[:3, 3].copy()
def reflection_matrix(point, normal):
"""Return matrix to mirror at plane defined by point and normal vector.
>>> v0 = numpy.random.random(4) - 0.5
>>> v0[3] = 1.0
>>> v1 = numpy.random.random(3) - 0.5
>>> R = reflection_matrix(v0, v1)
>>> numpy.allclose(2., numpy.trace(R))
True
>>> numpy.allclose(v0, numpy.dot(R, v0))
True
>>> v2 = v0.copy()
>>> v2[:3] += v1
>>> v3 = v0.copy()
>>> v2[:3] -= v1
>>> numpy.allclose(v2, numpy.dot(R, v3))
True
"""
normal = unit_vector(normal[:3])
M = numpy.identity(4)
M[:3, :3] -= 2.0 * numpy.outer(normal, normal)
M[:3, 3] = (2.0 * numpy.dot(point[:3], normal)) * normal
return M
def reflection_from_matrix(matrix):
"""Return mirror plane point and normal vector from reflection matrix.
>>> v0 = numpy.random.random(3) - 0.5
>>> v1 = numpy.random.random(3) - 0.5
>>> M0 = reflection_matrix(v0, v1)
>>> point, normal = reflection_from_matrix(M0)
>>> M1 = reflection_matrix(point, normal)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
# normal: unit eigenvector corresponding to eigenvalue -1
l, V = numpy.linalg.eig(M[:3, :3])
i = numpy.where(abs(numpy.real(l) + 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue -1")
normal = numpy.real(V[:, i[0]]).squeeze()
# point: any unit eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return point, normal
def rotation_matrix(angle, direction, point=None):
"""Return matrix to rotate about axis defined by point and direction.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(angle-2*math.pi, direc, point)
>>> is_same_transform(R0, R1)
True
>>> R0 = rotation_matrix(angle, direc, point)
>>> R1 = rotation_matrix(-angle, -direc, point)
>>> is_same_transform(R0, R1)
True
>>> I = numpy.identity(4, numpy.float64)
>>> numpy.allclose(I, rotation_matrix(math.pi*2, direc))
True
>>> numpy.allclose(2., numpy.trace(rotation_matrix(math.pi/2,
... direc, point)))
True
"""
sina = math.sin(angle)
cosa = math.cos(angle)
direction = unit_vector(direction[:3])
# rotation matrix around unit vector
R = numpy.array(((cosa, 0.0, 0.0),
(0.0, cosa, 0.0),
(0.0, 0.0, cosa)), dtype=numpy.float64)
R += numpy.outer(direction, direction) * (1.0 - cosa)
direction *= sina
R += numpy.array((( 0.0, -direction[2], direction[1]),
( direction[2], 0.0, -direction[0]),
(-direction[1], direction[0], 0.0)),
dtype=numpy.float64)
M = numpy.identity(4)
M[:3, :3] = R
if point is not None:
# rotation not around origin
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
M[:3, 3] = point - numpy.dot(R, point)
return M
def rotation_from_matrix(matrix):
"""Return rotation angle and axis from rotation matrix.
>>> angle = (random.random() - 0.5) * (2*math.pi)
>>> direc = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> R0 = rotation_matrix(angle, direc, point)
>>> angle, direc, point = rotation_from_matrix(R0)
>>> R1 = rotation_matrix(angle, direc, point)
>>> is_same_transform(R0, R1)
True
"""
R = numpy.array(matrix, dtype=numpy.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
l, W = numpy.linalg.eig(R33.T)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
direction = numpy.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R33 corresponding to eigenvalue of 1
l, Q = numpy.linalg.eig(R)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
point = numpy.real(Q[:, i[-1]]).squeeze()
point /= point[3]
# rotation angle depending on direction
cosa = (numpy.trace(R33) - 1.0) / 2.0
if abs(direction[2]) > 1e-8:
sina = (R[1, 0] + (cosa-1.0)*direction[0]*direction[1]) / direction[2]
elif abs(direction[1]) > 1e-8:
sina = (R[0, 2] + (cosa-1.0)*direction[0]*direction[2]) / direction[1]
else:
sina = (R[2, 1] + (cosa-1.0)*direction[1]*direction[2]) / direction[0]
angle = math.atan2(sina, cosa)
return angle, direction, point
def scale_matrix(factor, origin=None, direction=None):
"""Return matrix to scale by factor around origin in direction.
Use factor -1 for point symmetry.
>>> v = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v[3] = 1.0
>>> S = scale_matrix(-1.234)
>>> numpy.allclose(numpy.dot(S, v)[:3], -1.234*v[:3])
True
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S = scale_matrix(factor, origin)
>>> S = scale_matrix(factor, origin, direct)
"""
if direction is None:
# uniform scaling
M = numpy.array(((factor, 0.0, 0.0, 0.0),
(0.0, factor, 0.0, 0.0),
(0.0, 0.0, factor, 0.0),
(0.0, 0.0, 0.0, 1.0)), dtype=numpy.float64)
if origin is not None:
M[:3, 3] = origin[:3]
M[:3, 3] *= 1.0 - factor
else:
# nonuniform scaling
direction = unit_vector(direction[:3])
factor = 1.0 - factor
M = numpy.identity(4)
M[:3, :3] -= factor * numpy.outer(direction, direction)
if origin is not None:
M[:3, 3] = (factor * numpy.dot(origin[:3], direction)) * direction
return M
def scale_from_matrix(matrix):
"""Return scaling factor, origin and direction from scaling matrix.
>>> factor = random.random() * 10 - 5
>>> origin = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> S0 = scale_matrix(factor, origin)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
>>> S0 = scale_matrix(factor, origin, direct)
>>> factor, origin, direction = scale_from_matrix(S0)
>>> S1 = scale_matrix(factor, origin, direction)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
factor = numpy.trace(M33) - 2.0
try:
# direction: unit eigenvector corresponding to eigenvalue factor
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l) - factor) < 1e-8)[0][0]
direction = numpy.real(V[:, i]).squeeze()
direction /= vector_norm(direction)
except IndexError:
# uniform scaling
factor = (factor + 2.0) / 3.0
direction = None
# origin: any eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
origin = numpy.real(V[:, i[-1]]).squeeze()
origin /= origin[3]
return factor, origin, direction
def projection_matrix(point, normal, direction=None,
perspective=None, pseudo=False):
"""Return matrix to project onto plane defined by point and normal.
Using either a perspective point, a projection direction, or neither.
If pseudo is True, perspective projections will preserve relative depth
such that Perspective = dot(Orthogonal, PseudoPerspective).
>>> P = projection_matrix((0, 0, 0), (1, 0, 0))
>>> numpy.allclose(P[1:, 1:], numpy.identity(4)[1:, 1:])
True
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> P1 = projection_matrix(point, normal, direction=direct)
>>> P2 = projection_matrix(point, normal, perspective=persp)
>>> P3 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> is_same_transform(P2, numpy.dot(P0, P3))
True
>>> P = projection_matrix((3, 0, 0), (1, 1, 0), (1, 0, 0))
>>> v0 = (numpy.random.rand(4, 5) - 0.5) * 20.0
>>> v0[3] = 1.0
>>> v1 = numpy.dot(P, v0)
>>> numpy.allclose(v1[1], v0[1])
True
>>> numpy.allclose(v1[0], 3.0-v1[1])
True
"""
M = numpy.identity(4)
point = numpy.array(point[:3], dtype=numpy.float64, copy=False)
normal = unit_vector(normal[:3])
if perspective is not None:
# perspective projection
perspective = numpy.array(perspective[:3], dtype=numpy.float64,
copy=False)
M[0, 0] = M[1, 1] = M[2, 2] = numpy.dot(perspective-point, normal)
M[:3, :3] -= numpy.outer(perspective, normal)
if pseudo:
# preserve relative depth
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * (perspective+normal)
else:
M[:3, 3] = numpy.dot(point, normal) * perspective
M[3, :3] = -normal
M[3, 3] = numpy.dot(perspective, normal)
elif direction is not None:
# parallel projection
direction = numpy.array(direction[:3], dtype=numpy.float64, copy=False)
scale = numpy.dot(direction, normal)
M[:3, :3] -= numpy.outer(direction, normal) / scale
M[:3, 3] = direction * (numpy.dot(point, normal) / scale)
else:
# orthogonal projection
M[:3, :3] -= numpy.outer(normal, normal)
M[:3, 3] = numpy.dot(point, normal) * normal
return M
def projection_from_matrix(matrix, pseudo=False):
"""Return projection plane and perspective point from projection matrix.
Return values are same as arguments for projection_matrix function:
point, normal, direction, perspective, and pseudo.
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.random.random(3) - 0.5
>>> direct = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(3) - 0.5
>>> P0 = projection_matrix(point, normal)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, direct)
>>> result = projection_from_matrix(P0)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=False)
>>> result = projection_from_matrix(P0, pseudo=False)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
>>> P0 = projection_matrix(point, normal, perspective=persp, pseudo=True)
>>> result = projection_from_matrix(P0, pseudo=True)
>>> P1 = projection_matrix(*result)
>>> is_same_transform(P0, P1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not pseudo and len(i):
# point: any eigenvector corresponding to eigenvalue 1
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
# direction: unit eigenvector corresponding to eigenvalue 0
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 0")
direction = numpy.real(V[:, i[0]]).squeeze()
direction /= vector_norm(direction)
# normal: unit eigenvector of M33.T corresponding to eigenvalue 0
l, V = numpy.linalg.eig(M33.T)
i = numpy.where(abs(numpy.real(l)) < 1e-8)[0]
if len(i):
# parallel projection
normal = numpy.real(V[:, i[0]]).squeeze()
normal /= vector_norm(normal)
return point, normal, direction, None, False
else:
# orthogonal projection, where normal equals direction vector
return point, direction, None, None, False
else:
# perspective projection
i = numpy.where(abs(numpy.real(l)) > 1e-8)[0]
if not len(i):
raise ValueError(
"no eigenvector not corresponding to eigenvalue 0")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
normal = - M[3, :3]
perspective = M[:3, 3] / numpy.dot(point[:3], normal)
if pseudo:
perspective -= normal
return point, normal, None, perspective, pseudo
def clip_matrix(left, right, bottom, top, near, far, perspective=False):
"""Return matrix to obtain normalized device coordinates from frustrum.
The frustrum bounds are axis-aligned along x (left, right),
y (bottom, top) and z (near, far).
Normalized device coordinates are in range [-1, 1] if coordinates are
inside the frustrum.
If perspective is True the frustrum is a truncated pyramid with the
perspective point at origin and direction along z axis, otherwise an
orthographic canonical view volume (a box).
Homogeneous coordinates transformed by the perspective clip matrix
need to be dehomogenized (divided by the w coordinate).
>>> frustrum = numpy.random.rand(6)
>>> frustrum[1] += frustrum[0]
>>> frustrum[3] += frustrum[2]
>>> frustrum[5] += frustrum[4]
>>> M = clip_matrix(*frustrum, perspective=False)
>>> numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
array([-1., -1., -1., 1.])
>>> numpy.dot(M, [frustrum[1], frustrum[3], frustrum[5], 1.0])
array([ 1., 1., 1., 1.])
>>> M = clip_matrix(*frustrum, perspective=True)
>>> v = numpy.dot(M, [frustrum[0], frustrum[2], frustrum[4], 1.0])
>>> v / v[3]
array([-1., -1., -1., 1.])
>>> v = numpy.dot(M, [frustrum[1], frustrum[3], frustrum[4], 1.0])
>>> v / v[3]
array([ 1., 1., -1., 1.])
"""
if left >= right or bottom >= top or near >= far:
raise ValueError("invalid frustrum")
if perspective:
if near <= _EPS:
raise ValueError("invalid frustrum: near <= 0")
t = 2.0 * near
M = ((-t/(right-left), 0.0, (right+left)/(right-left), 0.0),
(0.0, -t/(top-bottom), (top+bottom)/(top-bottom), 0.0),
(0.0, 0.0, -(far+near)/(far-near), t*far/(far-near)),
(0.0, 0.0, -1.0, 0.0))
else:
M = ((2.0/(right-left), 0.0, 0.0, (right+left)/(left-right)),
(0.0, 2.0/(top-bottom), 0.0, (top+bottom)/(bottom-top)),
(0.0, 0.0, 2.0/(far-near), (far+near)/(near-far)),
(0.0, 0.0, 0.0, 1.0))
return numpy.array(M, dtype=numpy.float64)
def shear_matrix(angle, direction, point, normal):
"""Return matrix to shear by angle along direction vector on shear plane.
The shear plane is defined by a point and normal vector. The direction
vector must be orthogonal to the plane's normal vector.
A point P is transformed by the shear matrix into P" such that
the vector P-P" is parallel to the direction vector and its extent is
given by the angle of P-P'-P", where P' is the orthogonal projection
of P onto the shear plane.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S = shear_matrix(angle, direct, point, normal)
>>> numpy.allclose(1.0, numpy.linalg.det(S))
True
"""
normal = unit_vector(normal[:3])
direction = unit_vector(direction[:3])
if abs(numpy.dot(normal, direction)) > 1e-6:
raise ValueError("direction and normal vectors are not orthogonal")
angle = math.tan(angle)
M = numpy.identity(4)
M[:3, :3] += angle * numpy.outer(direction, normal)
M[:3, 3] = -angle * numpy.dot(point[:3], normal) * direction
return M
def shear_from_matrix(matrix):
"""Return shear angle, direction and plane from shear matrix.
>>> angle = (random.random() - 0.5) * 4*math.pi
>>> direct = numpy.random.random(3) - 0.5
>>> point = numpy.random.random(3) - 0.5
>>> normal = numpy.cross(direct, numpy.random.random(3))
>>> S0 = shear_matrix(angle, direct, point, normal)
>>> angle, direct, point, normal = shear_from_matrix(S0)
>>> S1 = shear_matrix(angle, direct, point, normal)
>>> is_same_transform(S0, S1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=False)
M33 = M[:3, :3]
# normal: cross independent eigenvectors corresponding to the eigenvalue 1
l, V = numpy.linalg.eig(M33)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-4)[0]
if len(i) < 2:
raise ValueError("No two linear independent eigenvectors found %s" % l)
V = numpy.real(V[:, i]).squeeze().T
lenorm = -1.0
for i0, i1 in ((0, 1), (0, 2), (1, 2)):
n = numpy.cross(V[i0], V[i1])
l = vector_norm(n)
if l > lenorm:
lenorm = l
normal = n
normal /= lenorm
# direction and angle
direction = numpy.dot(M33 - numpy.identity(3), normal)
angle = vector_norm(direction)
direction /= angle
angle = math.atan(angle)
# point: eigenvector corresponding to eigenvalue 1
l, V = numpy.linalg.eig(M)
i = numpy.where(abs(numpy.real(l) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no eigenvector corresponding to eigenvalue 1")
point = numpy.real(V[:, i[-1]]).squeeze()
point /= point[3]
return angle, direction, point, normal
def decompose_matrix(matrix):
"""Return sequence of transformations from transformation matrix.
matrix : array_like
Non-degenerate homogeneous transformation matrix
Return tuple of:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
Raise ValueError if matrix is of wrong type or degenerate.
>>> T0 = translation_matrix((1, 2, 3))
>>> scale, shear, angles, trans, persp = decompose_matrix(T0)
>>> T1 = translation_matrix(trans)
>>> numpy.allclose(T0, T1)
True
>>> S = scale_matrix(0.123)
>>> scale, shear, angles, trans, persp = decompose_matrix(S)
>>> scale[0]
0.123
>>> R0 = euler_matrix(1, 2, 3)
>>> scale, shear, angles, trans, persp = decompose_matrix(R0)
>>> R1 = euler_matrix(*angles)
>>> numpy.allclose(R0, R1)
True
"""
M = numpy.array(matrix, dtype=numpy.float64, copy=True).T
if abs(M[3, 3]) < _EPS:
raise ValueError("M[3, 3] is zero")
M /= M[3, 3]
P = M.copy()
P[:, 3] = 0, 0, 0, 1
if not numpy.linalg.det(P):
raise ValueError("Matrix is singular")
scale = numpy.zeros((3, ), dtype=numpy.float64)
shear = [0, 0, 0]
angles = [0, 0, 0]
if any(abs(M[:3, 3]) > _EPS):
perspective = numpy.dot(M[:, 3], numpy.linalg.inv(P.T))
M[:, 3] = 0, 0, 0, 1
else:
perspective = numpy.array((0, 0, 0, 1), dtype=numpy.float64)
translate = M[3, :3].copy()
M[3, :3] = 0
row = M[:3, :3].copy()
scale[0] = vector_norm(row[0])
row[0] /= scale[0]
shear[0] = numpy.dot(row[0], row[1])
row[1] -= row[0] * shear[0]
scale[1] = vector_norm(row[1])
row[1] /= scale[1]
shear[0] /= scale[1]
shear[1] = numpy.dot(row[0], row[2])
row[2] -= row[0] * shear[1]
shear[2] = numpy.dot(row[1], row[2])
row[2] -= row[1] * shear[2]
scale[2] = vector_norm(row[2])
row[2] /= scale[2]
shear[1:] /= scale[2]
if numpy.dot(row[0], numpy.cross(row[1], row[2])) < 0:
scale *= -1
row *= -1
angles[1] = math.asin(-row[0, 2])
if math.cos(angles[1]):
angles[0] = math.atan2(row[1, 2], row[2, 2])
angles[2] = math.atan2(row[0, 1], row[0, 0])
else:
#angles[0] = math.atan2(row[1, 0], row[1, 1])
angles[0] = math.atan2(-row[2, 1], row[1, 1])
angles[2] = 0.0
return scale, shear, angles, translate, perspective
def compose_matrix(scale=None, shear=None, angles=None, translate=None,
perspective=None):
"""Return transformation matrix from sequence of transformations.
This is the inverse of the decompose_matrix function.
Sequence of transformations:
scale : vector of 3 scaling factors
shear : list of shear factors for x-y, x-z, y-z axes
angles : list of Euler angles about static x, y, z axes
translate : translation vector along x, y, z axes
perspective : perspective partition of matrix
>>> scale = numpy.random.random(3) - 0.5
>>> shear = numpy.random.random(3) - 0.5
>>> angles = (numpy.random.random(3) - 0.5) * (2*math.pi)
>>> trans = numpy.random.random(3) - 0.5
>>> persp = numpy.random.random(4) - 0.5
>>> M0 = compose_matrix(scale, shear, angles, trans, persp)
>>> result = decompose_matrix(M0)
>>> M1 = compose_matrix(*result)
>>> is_same_transform(M0, M1)
True
"""
M = numpy.identity(4)
if perspective is not None:
P = numpy.identity(4)
P[3, :] = perspective[:4]
M = numpy.dot(M, P)
if translate is not None:
T = numpy.identity(4)
T[:3, 3] = translate[:3]
M = numpy.dot(M, T)
if angles is not None:
R = euler_matrix(angles[0], angles[1], angles[2], 'sxyz')
M = numpy.dot(M, R)
if shear is not None:
Z = numpy.identity(4)
Z[1, 2] = shear[2]
Z[0, 2] = shear[1]
Z[0, 1] = shear[0]
M = numpy.dot(M, Z)
if scale is not None:
S = numpy.identity(4)
S[0, 0] = scale[0]
S[1, 1] = scale[1]
S[2, 2] = scale[2]
M = numpy.dot(M, S)
M /= M[3, 3]
return M
def orthogonalization_matrix(lengths, angles):
"""Return orthogonalization matrix for crystallographic cell coordinates.
Angles are expected in degrees.
The de-orthogonalization matrix is the inverse.
>>> O = orthogonalization_matrix((10., 10., 10.), (90., 90., 90.))
>>> numpy.allclose(O[:3, :3], numpy.identity(3, float) * 10)
True
>>> O = orthogonalization_matrix([9.8, 12.0, 15.5], [87.2, 80.7, 69.7])
>>> numpy.allclose(numpy.sum(O), 43.063229)
True
"""
a, b, c = lengths
angles = numpy.radians(angles)
sina, sinb, _ = numpy.sin(angles)
cosa, cosb, cosg = numpy.cos(angles)
co = (cosa * cosb - cosg) / (sina * sinb)
return numpy.array((
( a*sinb*math.sqrt(1.0-co*co), 0.0, 0.0, 0.0),
(-a*sinb*co, b*sina, 0.0, 0.0),
( a*cosb, b*cosa, c, 0.0),
( 0.0, 0.0, 0.0, 1.0)),
dtype=numpy.float64)
def superimposition_matrix(v0, v1, scaling=False, usesvd=True):
"""Return matrix to transform given vector set into second vector set.
v0 and v1 are shape (3, \*) or (4, \*) arrays of at least 3 vectors.
If usesvd is True, the weighted sum of squared deviations (RMSD) is
minimized according to the algorithm by W. Kabsch [8]. Otherwise the
quaternion based algorithm by B. Horn [9] is used (slower when using
this Python implementation).
The returned matrix performs rotation, translation and uniform scaling
(if specified).
>>> v0 = numpy.random.rand(3, 10)
>>> M = superimposition_matrix(v0, v0)
>>> numpy.allclose(M, numpy.identity(4))
True
>>> R = random_rotation_matrix(numpy.random.random(3))
>>> v0 = ((1,0,0), (0,1,0), (0,0,1), (1,1,1))
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v0 = (numpy.random.rand(4, 100) - 0.5) * 20.0
>>> v0[3] = 1.0
>>> v1 = numpy.dot(R, v0)
>>> M = superimposition_matrix(v0, v1)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> S = scale_matrix(random.random())
>>> T = translation_matrix(numpy.random.random(3)-0.5)
>>> M = concatenate_matrices(T, R, S)
>>> v1 = numpy.dot(M, v0)
>>> v0[:3] += numpy.random.normal(0.0, 1e-9, 300).reshape(3, -1)
>>> M = superimposition_matrix(v0, v1, scaling=True)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v0))
True
>>> v = numpy.empty((4, 100, 3), dtype=numpy.float64)
>>> v[:, :, 0] = v0
>>> M = superimposition_matrix(v0, v1, scaling=True, usesvd=False)
>>> numpy.allclose(v1, numpy.dot(M, v[:, :, 0]))
True
"""
v0 = numpy.array(v0, dtype=numpy.float64, copy=False)[:3]
v1 = numpy.array(v1, dtype=numpy.float64, copy=False)[:3]
if v0.shape != v1.shape or v0.shape[1] < 3:
raise ValueError("Vector sets are of wrong shape or type.")
# move centroids to origin
t0 = numpy.mean(v0, axis=1)
t1 = numpy.mean(v1, axis=1)
v0 = v0 - t0.reshape(3, 1)
v1 = v1 - t1.reshape(3, 1)
if usesvd:
# Singular Value Decomposition of covariance matrix
u, s, vh = numpy.linalg.svd(numpy.dot(v1, v0.T))
# rotation matrix from SVD orthonormal bases
R = numpy.dot(u, vh)
if numpy.linalg.det(R) < 0.0:
# R does not constitute right handed system
R -= numpy.outer(u[:, 2], vh[2, :]*2.0)
s[-1] *= -1.0
# homogeneous transformation matrix
M = numpy.identity(4)
M[:3, :3] = R
else:
# compute symmetric matrix N
xx, yy, zz = numpy.sum(v0 * v1, axis=1)
xy, yz, zx = numpy.sum(v0 * numpy.roll(v1, -1, axis=0), axis=1)
xz, yx, zy = numpy.sum(v0 * numpy.roll(v1, -2, axis=0), axis=1)
N = ((xx+yy+zz, yz-zy, zx-xz, xy-yx),
(yz-zy, xx-yy-zz, xy+yx, zx+xz),
(zx-xz, xy+yx, -xx+yy-zz, yz+zy),
(xy-yx, zx+xz, yz+zy, -xx-yy+zz))
# quaternion: eigenvector corresponding to most positive eigenvalue
l, V = numpy.linalg.eig(N)
q = V[:, numpy.argmax(l)]
q /= vector_norm(q) # unit quaternion
q = numpy.roll(q, -1) # move w component to end
# homogeneous transformation matrix
M = quaternion_matrix(q)
# scale: ratio of rms deviations from centroid
if scaling:
v0 *= v0
v1 *= v1
M[:3, :3] *= math.sqrt(numpy.sum(v1) / numpy.sum(v0))
# translation
M[:3, 3] = t1
T = numpy.identity(4)
T[:3, 3] = -t0
M = numpy.dot(M, T)
return M
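# Illustrative sketch, not part of the original module: recover the rigid
# transform that maps one exact point set onto another; the estimated
# matrix should reproduce the transformed points. Helper name hypothetical.
def _example_superimposition():
    v0 = numpy.random.rand(3, 20)
    v0h = numpy.vstack((v0, numpy.ones((1, 20))))  # homogeneous coordinates
    M_true = concatenate_matrices(translation_matrix([1.0, -2.0, 0.5]),
                                  random_rotation_matrix())
    v1 = numpy.dot(M_true, v0h)
    M_est = superimposition_matrix(v0, v1[:3])
    return numpy.allclose(numpy.dot(M_est, v0h), v1)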
def euler_matrix(ai, aj, ak, axes='sxyz'):
"""Return homogeneous rotation matrix from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> R = euler_matrix(1, 2, 3, 'syxz')
>>> numpy.allclose(numpy.sum(R[0]), -1.34786452)
True
>>> R = euler_matrix(1, 2, 3, (0, 1, 0, 1))
>>> numpy.allclose(numpy.sum(R[0]), -0.383436184)
True
>>> ai, aj, ak = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R = euler_matrix(ai, aj, ak, axes)
>>> for axes in _TUPLE2AXES.keys():
... R = euler_matrix(ai, aj, ak, axes)
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
ai, aj, ak = -ai, -aj, -ak
si, sj, sk = math.sin(ai), math.sin(aj), math.sin(ak)
ci, cj, ck = math.cos(ai), math.cos(aj), math.cos(ak)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
M = numpy.identity(4)
if repetition:
M[i, i] = cj
M[i, j] = sj*si
M[i, k] = sj*ci
M[j, i] = sj*sk
M[j, j] = -cj*ss+cc
M[j, k] = -cj*cs-sc
M[k, i] = -sj*ck
M[k, j] = cj*sc+cs
M[k, k] = cj*cc-ss
else:
M[i, i] = cj*ck
M[i, j] = sj*sc-cs
M[i, k] = sj*cc+ss
M[j, i] = cj*sk
M[j, j] = sj*ss+cc
M[j, k] = sj*cs-sc
M[k, i] = -sj
M[k, j] = cj*si
M[k, k] = cj*ci
return M
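# Illustrative sketch, not part of the original module: in the static
# 'sxyz' convention, euler_matrix(ai, aj, ak) should equal the product of
# single-axis rotations Rz(ak) * Ry(aj) * Rx(ai) about the fixed frame
# axes. The helper name is hypothetical.
def _example_euler_sxyz_order():
    ai, aj, ak = 0.4, -0.7, 1.1
    Rx = rotation_matrix(ai, [1, 0, 0])
    Ry = rotation_matrix(aj, [0, 1, 0])
    Rz = rotation_matrix(ak, [0, 0, 1])
    return numpy.allclose(euler_matrix(ai, aj, ak, 'sxyz'),
                          concatenate_matrices(Rz, Ry, Rx))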
def euler_from_matrix(matrix, axes='sxyz'):
"""Return Euler angles from rotation matrix for specified axis sequence.
axes : One of 24 axis sequences as string or encoded tuple
Note that many Euler angle triplets can describe one matrix.
>>> R0 = euler_matrix(1, 2, 3, 'syxz')
>>> al, be, ga = euler_from_matrix(R0, 'syxz')
>>> R1 = euler_matrix(al, be, ga, 'syxz')
>>> numpy.allclose(R0, R1)
True
>>> angles = (4.0*math.pi) * (numpy.random.random(3) - 0.5)
>>> for axes in _AXES2TUPLE.keys():
... R0 = euler_matrix(axes=axes, *angles)
... R1 = euler_matrix(axes=axes, *euler_from_matrix(R0, axes))
... if not numpy.allclose(R0, R1): print axes, "failed"
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:3, :3]
if repetition:
sy = math.sqrt(M[i, j]*M[i, j] + M[i, k]*M[i, k])
if sy > _EPS:
ax = math.atan2( M[i, j], M[i, k])
ay = math.atan2( sy, M[i, i])
az = math.atan2( M[j, i], -M[k, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2( sy, M[i, i])
az = 0.0
else:
cy = math.sqrt(M[i, i]*M[i, i] + M[j, i]*M[j, i])
if cy > _EPS:
ax = math.atan2( M[k, j], M[k, k])
ay = math.atan2(-M[k, i], cy)
az = math.atan2( M[j, i], M[i, i])
else:
ax = math.atan2(-M[j, k], M[j, j])
ay = math.atan2(-M[k, i], cy)
az = 0.0
if parity:
ax, ay, az = -ax, -ay, -az
if frame:
ax, az = az, ax
return ax, ay, az
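# Illustrative sketch, not part of the original module: the recovered
# Euler angles need not equal the inputs, but rebuilding the matrix from
# them reproduces the same rotation, here for a repeated-axis sequence.
# The helper name is hypothetical.
def _example_euler_from_matrix_roundtrip():
    R0 = euler_matrix(2.4, -1.9, 3.0, 'szxz')
    al, be, ga = euler_from_matrix(R0, 'szxz')
    return numpy.allclose(R0, euler_matrix(al, be, ga, 'szxz'))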
def euler_from_quaternion(quaternion, axes='sxyz'):
"""Return Euler angles from quaternion for specified axis sequence.
>>> angles = euler_from_quaternion([0.06146124, 0, 0, 0.99810947])
>>> numpy.allclose(angles, [0.123, 0, 0])
True
"""
return euler_from_matrix(quaternion_matrix(quaternion), axes)
def quaternion_from_euler(ai, aj, ak, axes='sxyz'):
"""Return quaternion from Euler angles and axis sequence.
ai, aj, ak : Euler's roll, pitch and yaw angles
axes : One of 24 axis sequences as string or encoded tuple
>>> q = quaternion_from_euler(1, 2, 3, 'ryxz')
>>> numpy.allclose(q, [0.310622, -0.718287, 0.444435, 0.435953])
True
"""
try:
firstaxis, parity, repetition, frame = _AXES2TUPLE[axes.lower()]
except (AttributeError, KeyError):
_ = _TUPLE2AXES[axes]
firstaxis, parity, repetition, frame = axes
i = firstaxis
j = _NEXT_AXIS[i+parity]
k = _NEXT_AXIS[i-parity+1]
if frame:
ai, ak = ak, ai
if parity:
aj = -aj
ai /= 2.0
aj /= 2.0
ak /= 2.0
ci = math.cos(ai)
si = math.sin(ai)
cj = math.cos(aj)
sj = math.sin(aj)
ck = math.cos(ak)
sk = math.sin(ak)
cc = ci*ck
cs = ci*sk
sc = si*ck
ss = si*sk
quaternion = numpy.empty((4, ), dtype=numpy.float64)
if repetition:
quaternion[i] = cj*(cs + sc)
quaternion[j] = sj*(cc + ss)
quaternion[k] = sj*(cs - sc)
quaternion[3] = cj*(cc - ss)
else:
quaternion[i] = cj*sc - sj*cs
quaternion[j] = cj*ss + sj*cc
quaternion[k] = cj*cs - sj*sc
quaternion[3] = cj*cc + sj*ss
if parity:
quaternion[j] *= -1
return quaternion
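# Illustrative sketch, not part of the original module: the quaternion
# from quaternion_from_euler describes the same rotation as the matrix
# from euler_matrix for the same angles and axis sequence. The helper
# name is hypothetical.
def _example_quaternion_euler_consistency():
    q = quaternion_from_euler(0.1, 0.2, 0.3, 'sxyz')
    return numpy.allclose(quaternion_matrix(q),
                          euler_matrix(0.1, 0.2, 0.3, 'sxyz'))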
def quaternion_about_axis(angle, axis):
"""Return quaternion for rotation about axis.
>>> q = quaternion_about_axis(0.123, (1, 0, 0))
>>> numpy.allclose(q, [0.06146124, 0, 0, 0.99810947])
True
"""
quaternion = numpy.zeros((4, ), dtype=numpy.float64)
quaternion[:3] = axis[:3]
qlen = vector_norm(quaternion)
if qlen > _EPS:
quaternion *= math.sin(angle/2.0) / qlen
quaternion[3] = math.cos(angle/2.0)
return quaternion
def quaternion_matrix(quaternion):
"""Return homogeneous rotation matrix from quaternion.
>>> R = quaternion_matrix([0.06146124, 0, 0, 0.99810947])
>>> numpy.allclose(R, rotation_matrix(0.123, (1, 0, 0)))
True
"""
q = numpy.array(quaternion[:4], dtype=numpy.float64, copy=True)
nq = numpy.dot(q, q)
if nq < _EPS:
return numpy.identity(4)
q *= math.sqrt(2.0 / nq)
q = numpy.outer(q, q)
return numpy.array((
(1.0-q[1, 1]-q[2, 2], q[0, 1]-q[2, 3], q[0, 2]+q[1, 3], 0.0),
( q[0, 1]+q[2, 3], 1.0-q[0, 0]-q[2, 2], q[1, 2]-q[0, 3], 0.0),
( q[0, 2]-q[1, 3], q[1, 2]+q[0, 3], 1.0-q[0, 0]-q[1, 1], 0.0),
( 0.0, 0.0, 0.0, 1.0)
), dtype=numpy.float64)
def quaternion_from_matrix(matrix):
"""Return quaternion from rotation matrix.
>>> R = rotation_matrix(0.123, (1, 2, 3))
>>> q = quaternion_from_matrix(R)
>>> numpy.allclose(q, [0.0164262, 0.0328524, 0.0492786, 0.9981095])
True
"""
q = numpy.empty((4, ), dtype=numpy.float64)
M = numpy.array(matrix, dtype=numpy.float64, copy=False)[:4, :4]
t = numpy.trace(M)
if t > M[3, 3]:
q[3] = t
q[2] = M[1, 0] - M[0, 1]
q[1] = M[0, 2] - M[2, 0]
q[0] = M[2, 1] - M[1, 2]
else:
i, j, k = 0, 1, 2
if M[1, 1] > M[0, 0]:
i, j, k = 1, 2, 0
if M[2, 2] > M[i, i]:
i, j, k = 2, 0, 1
t = M[i, i] - (M[j, j] + M[k, k]) + M[3, 3]
q[i] = t
q[j] = M[i, j] + M[j, i]
q[k] = M[k, i] + M[i, k]
q[3] = M[k, j] - M[j, k]
q *= 0.5 / math.sqrt(t * M[3, 3])
return q
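# Illustrative sketch, not part of the original module: converting a unit
# quaternion to a matrix and back recovers it up to sign, since q and -q
# describe the same rotation. The helper name is hypothetical.
def _example_quaternion_matrix_roundtrip():
    q0 = random_quaternion()
    q1 = quaternion_from_matrix(quaternion_matrix(q0))
    return numpy.allclose(q0, q1) or numpy.allclose(q0, -q1)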
def quaternion_multiply(quaternion1, quaternion0):
"""Return multiplication of two quaternions.
>>> q = quaternion_multiply([1, -2, 3, 4], [-5, 6, 7, 8])
>>> numpy.allclose(q, [-44, -14, 48, 28])
True
"""
x0, y0, z0, w0 = quaternion0
x1, y1, z1, w1 = quaternion1
return numpy.array((
x1*w0 + y1*z0 - z1*y0 + w1*x0,
-x1*z0 + y1*w0 + z1*x0 + w1*y0,
x1*y0 - y1*x0 + z1*w0 + w1*z0,
-x1*x0 - y1*y0 - z1*z0 + w1*w0), dtype=numpy.float64)
def quaternion_conjugate(quaternion):
"""Return conjugate of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_conjugate(q0)
>>> q1[3] == q0[3] and all(q1[:3] == -q0[:3])
True
"""
return numpy.array((-quaternion[0], -quaternion[1],
-quaternion[2], quaternion[3]), dtype=numpy.float64)
def quaternion_inverse(quaternion):
"""Return inverse of quaternion.
>>> q0 = random_quaternion()
>>> q1 = quaternion_inverse(q0)
>>> numpy.allclose(quaternion_multiply(q0, q1), [0, 0, 0, 1])
True
"""
return quaternion_conjugate(quaternion) / numpy.dot(quaternion, quaternion)
def quaternion_slerp(quat0, quat1, fraction, spin=0, shortestpath=True):
"""Return spherical linear interpolation between two quaternions.
>>> q0 = random_quaternion()
>>> q1 = random_quaternion()
>>> q = quaternion_slerp(q0, q1, 0.0)
>>> numpy.allclose(q, q0)
True
>>> q = quaternion_slerp(q0, q1, 1.0, 1)
>>> numpy.allclose(q, q1)
True
>>> q = quaternion_slerp(q0, q1, 0.5)
>>> angle = math.acos(numpy.dot(q0, q))
>>> numpy.allclose(2.0, math.acos(numpy.dot(q0, q1)) / angle) or \
numpy.allclose(2.0, math.acos(-numpy.dot(q0, q1)) / angle)
True
"""
q0 = unit_vector(quat0[:4])
q1 = unit_vector(quat1[:4])
if fraction == 0.0:
return q0
elif fraction == 1.0:
return q1
d = numpy.dot(q0, q1)
if abs(abs(d) - 1.0) < _EPS:
return q0
if shortestpath and d < 0.0:
# invert rotation
d = -d
q1 *= -1.0
angle = math.acos(d) + spin * math.pi
if abs(angle) < _EPS:
return q0
isin = 1.0 / math.sin(angle)
q0 *= math.sin((1.0 - fraction) * angle) * isin
q1 *= math.sin(fraction * angle) * isin
q0 += q1
return q0
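# Illustrative sketch, not part of the original module: slerping halfway
# between the identity quaternion and a 90 degree rotation about z yields
# the 45 degree rotation about z. The helper name is hypothetical.
def _example_slerp_halfway():
    q0 = quaternion_about_axis(0.0, [0, 0, 1])
    q1 = quaternion_about_axis(math.pi / 2.0, [0, 0, 1])
    q = quaternion_slerp(q0, q1, 0.5)
    return numpy.allclose(q, quaternion_about_axis(math.pi / 4.0, [0, 0, 1]))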
def random_quaternion(rand=None):
"""Return uniform random unit quaternion.
rand: array like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
>>> q = random_quaternion()
>>> numpy.allclose(1.0, vector_norm(q))
True
>>> q = random_quaternion(numpy.random.random(3))
>>> q.shape
(4,)
"""
if rand is None:
rand = numpy.random.rand(3)
else:
assert len(rand) == 3
r1 = numpy.sqrt(1.0 - rand[0])
r2 = numpy.sqrt(rand[0])
pi2 = math.pi * 2.0
t1 = pi2 * rand[1]
t2 = pi2 * rand[2]
return numpy.array((numpy.sin(t1)*r1,
numpy.cos(t1)*r1,
numpy.sin(t2)*r2,
numpy.cos(t2)*r2), dtype=numpy.float64)
def random_rotation_matrix(rand=None):
"""Return uniform random rotation matrix.
rand: array like or None
Three independent random variables that are uniformly distributed
between 0 and 1.
>>> R = random_rotation_matrix()
>>> numpy.allclose(numpy.dot(R.T, R), numpy.identity(4))
True
"""
return quaternion_matrix(random_quaternion(rand))
class Arcball(object):
"""Virtual Trackball Control.
>>> ball = Arcball()
>>> ball = Arcball(initial=numpy.identity(4))
>>> ball.place([320, 320], 320)
>>> ball.down([500, 250])
>>> ball.drag([475, 275])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 3.90583455)
True
>>> ball = Arcball(initial=[0, 0, 0, 1])
>>> ball.place([320, 320], 320)
>>> ball.setaxes([1,1,0], [-1, 1, 0])
>>> ball.setconstrain(True)
>>> ball.down([400, 200])
>>> ball.drag([200, 400])
>>> R = ball.matrix()
>>> numpy.allclose(numpy.sum(R), 0.2055924)
True
>>> ball.next()
"""
def __init__(self, initial=None):
"""Initialize virtual trackball control.
initial : quaternion or rotation matrix
"""
self._axis = None
self._axes = None
self._radius = 1.0
self._center = [0.0, 0.0]
self._vdown = numpy.array([0, 0, 1], dtype=numpy.float64)
self._constrain = False
if initial is None:
self._qdown = numpy.array([0, 0, 0, 1], dtype=numpy.float64)
else:
initial = numpy.array(initial, dtype=numpy.float64)
if initial.shape == (4, 4):
self._qdown = quaternion_from_matrix(initial)
elif initial.shape == (4, ):
initial /= vector_norm(initial)
self._qdown = initial
else:
raise ValueError("initial not a quaternion or matrix.")
self._qnow = self._qpre = self._qdown
def place(self, center, radius):
"""Place Arcball, e.g. when window size changes.
center : sequence[2]
Window coordinates of trackball center.
radius : float
Radius of trackball in window coordinates.
"""
self._radius = float(radius)
self._center[0] = center[0]
self._center[1] = center[1]
def setaxes(self, *axes):
"""Set axes to constrain rotations."""
if axes is None:
self._axes = None
else:
self._axes = [unit_vector(axis) for axis in axes]
def setconstrain(self, constrain):
"""Set state of constrain to axis mode."""
self._constrain = constrain == True
def getconstrain(self):
"""Return state of constrain to axis mode."""
return self._constrain
def down(self, point):
"""Set initial cursor window coordinates and pick constrain-axis."""
self._vdown = arcball_map_to_sphere(point, self._center, self._radius)
self._qdown = self._qpre = self._qnow
if self._constrain and self._axes is not None:
self._axis = arcball_nearest_axis(self._vdown, self._axes)
self._vdown = arcball_constrain_to_axis(self._vdown, self._axis)
else:
self._axis = None
def drag(self, point):
"""Update current cursor window coordinates."""
vnow = arcball_map_to_sphere(point, self._center, self._radius)
if self._axis is not None:
vnow = arcball_constrain_to_axis(vnow, self._axis)
self._qpre = self._qnow
t = numpy.cross(self._vdown, vnow)
if numpy.dot(t, t) < _EPS:
self._qnow = self._qdown
else:
q = [t[0], t[1], t[2], numpy.dot(self._vdown, vnow)]
self._qnow = quaternion_multiply(q, self._qdown)
def next(self, acceleration=0.0):
"""Continue rotation in direction of last drag."""
q = quaternion_slerp(self._qpre, self._qnow, 2.0+acceleration, False)
self._qpre, self._qnow = self._qnow, q
def matrix(self):
"""Return homogeneous rotation matrix."""
return quaternion_matrix(self._qnow)
def arcball_map_to_sphere(point, center, radius):
"""Return unit sphere coordinates from window coordinates."""
v = numpy.array(((point[0] - center[0]) / radius,
(center[1] - point[1]) / radius,
0.0), dtype=numpy.float64)
n = v[0]*v[0] + v[1]*v[1]
if n > 1.0:
v /= math.sqrt(n) # position outside of sphere
else:
v[2] = math.sqrt(1.0 - n)
return v
def arcball_constrain_to_axis(point, axis):
"""Return sphere point perpendicular to axis."""
v = numpy.array(point, dtype=numpy.float64, copy=True)
a = numpy.array(axis, dtype=numpy.float64, copy=True)
v -= a * numpy.dot(a, v) # on plane
n = vector_norm(v)
if n > _EPS:
if v[2] < 0.0:
v *= -1.0
v /= n
return v
if a[2] == 1.0:
return numpy.array([1, 0, 0], dtype=numpy.float64)
return unit_vector([-a[1], a[0], 0])
def arcball_nearest_axis(point, axes):
"""Return axis, which arc is nearest to point."""
point = numpy.array(point, dtype=numpy.float64, copy=False)
nearest = None
mx = -1.0
for axis in axes:
t = numpy.dot(arcball_constrain_to_axis(point, axis), point)
if t > mx:
nearest = axis
mx = t
return nearest
# epsilon for testing whether a number is close to zero
_EPS = numpy.finfo(float).eps * 4.0
# axis sequences for Euler angles
_NEXT_AXIS = [1, 2, 0, 1]
# map axes strings to/from tuples of inner axis, parity, repetition, frame
_AXES2TUPLE = {
'sxyz': (0, 0, 0, 0), 'sxyx': (0, 0, 1, 0), 'sxzy': (0, 1, 0, 0),
'sxzx': (0, 1, 1, 0), 'syzx': (1, 0, 0, 0), 'syzy': (1, 0, 1, 0),
'syxz': (1, 1, 0, 0), 'syxy': (1, 1, 1, 0), 'szxy': (2, 0, 0, 0),
'szxz': (2, 0, 1, 0), 'szyx': (2, 1, 0, 0), 'szyz': (2, 1, 1, 0),
'rzyx': (0, 0, 0, 1), 'rxyx': (0, 0, 1, 1), 'ryzx': (0, 1, 0, 1),
'rxzx': (0, 1, 1, 1), 'rxzy': (1, 0, 0, 1), 'ryzy': (1, 0, 1, 1),
'rzxy': (1, 1, 0, 1), 'ryxy': (1, 1, 1, 1), 'ryxz': (2, 0, 0, 1),
'rzxz': (2, 0, 1, 1), 'rxyz': (2, 1, 0, 1), 'rzyz': (2, 1, 1, 1)}
_TUPLE2AXES = dict((v, k) for k, v in _AXES2TUPLE.items())
# helper functions
def vector_norm(data, axis=None, out=None):
"""Return length, i.e. eucledian norm, of ndarray along axis.
>>> v = numpy.random.random(3)
>>> n = vector_norm(v)
>>> numpy.allclose(n, numpy.linalg.norm(v))
True
>>> v = numpy.random.rand(6, 5, 3)
>>> n = vector_norm(v, axis=-1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=2)))
True
>>> n = vector_norm(v, axis=1)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> v = numpy.random.rand(5, 4, 3)
>>> n = numpy.empty((5, 3), dtype=numpy.float64)
>>> vector_norm(v, axis=1, out=n)
>>> numpy.allclose(n, numpy.sqrt(numpy.sum(v*v, axis=1)))
True
>>> vector_norm([])
0.0
>>> vector_norm([1.0])
1.0
"""
data = numpy.array(data, dtype=numpy.float64, copy=True)
if out is None:
if data.ndim == 1:
return math.sqrt(numpy.dot(data, data))
data *= data
out = numpy.atleast_1d(numpy.sum(data, axis=axis))
numpy.sqrt(out, out)
return out
else:
data *= data
numpy.sum(data, axis=axis, out=out)
numpy.sqrt(out, out)
def unit_vector(data, axis=None, out=None):
"""Return ndarray normalized by length, i.e. eucledian norm, along axis.
>>> v0 = numpy.random.random(3)
>>> v1 = unit_vector(v0)
>>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0))
True
>>> v0 = numpy.random.rand(5, 4, 3)
>>> v1 = unit_vector(v0, axis=-1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2)
>>> numpy.allclose(v1, v2)
True
>>> v1 = unit_vector(v0, axis=1)
>>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1)
>>> numpy.allclose(v1, v2)
True
>>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64)
>>> unit_vector(v0, axis=1, out=v1)
>>> numpy.allclose(v1, v2)
True
>>> list(unit_vector([]))
[]
>>> list(unit_vector([1.0]))
[1.0]
"""
if out is None:
data = numpy.array(data, dtype=numpy.float64, copy=True)
if data.ndim == 1:
data /= math.sqrt(numpy.dot(data, data))
return data
else:
if out is not data:
out[:] = numpy.array(data, copy=False)
data = out
length = numpy.atleast_1d(numpy.sum(data*data, axis))
numpy.sqrt(length, length)
if axis is not None:
length = numpy.expand_dims(length, axis)
data /= length
if out is None:
return data
def random_vector(size):
"""Return array of random doubles in the half-open interval [0.0, 1.0).
>>> v = random_vector(10000)
>>> numpy.all(v >= 0.0) and numpy.all(v < 1.0)
True
>>> v0 = random_vector(10)
>>> v1 = random_vector(10)
>>> numpy.any(v0 == v1)
False
"""
return numpy.random.random(size)
def inverse_matrix(matrix):
"""Return inverse of square transformation matrix.
>>> M0 = random_rotation_matrix()
>>> M1 = inverse_matrix(M0.T)
>>> numpy.allclose(M1, numpy.linalg.inv(M0.T))
True
>>> for size in range(1, 7):
... M0 = numpy.random.rand(size, size)
... M1 = inverse_matrix(M0)
... if not numpy.allclose(M1, numpy.linalg.inv(M0)): print size
"""
return numpy.linalg.inv(matrix)
def concatenate_matrices(*matrices):
"""Return concatenation of series of transformation matrices.
>>> M = numpy.random.rand(16).reshape((4, 4)) - 0.5
>>> numpy.allclose(M, concatenate_matrices(M))
True
>>> numpy.allclose(numpy.dot(M, M.T), concatenate_matrices(M, M.T))
True
"""
M = numpy.identity(4)
for i in matrices:
M = numpy.dot(M, i)
return M
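# Illustrative sketch, not part of the original module: with the
# column-vector convention used here, concatenate_matrices(A, B) returns
# A * B, i.e. B is applied to a point first and A second. The helper name
# is hypothetical.
def _example_concatenation_order():
    T = translation_matrix([1.0, 0.0, 0.0])
    R = rotation_matrix(math.pi / 2.0, [0, 0, 1])
    p = numpy.array([1.0, 0.0, 0.0, 1.0])
    # rotate (1, 0, 0) to (0, 1, 0), then translate by (1, 0, 0)
    return numpy.allclose(numpy.dot(concatenate_matrices(T, R), p),
                          [1.0, 1.0, 0.0, 1.0])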
def is_same_transform(matrix0, matrix1):
"""Return True if two matrices perform same transformation.
>>> is_same_transform(numpy.identity(4), numpy.identity(4))
True
>>> is_same_transform(numpy.identity(4), random_rotation_matrix())
False
"""
matrix0 = numpy.array(matrix0, dtype=numpy.float64, copy=True)
matrix0 /= matrix0[3, 3]
matrix1 = numpy.array(matrix1, dtype=numpy.float64, copy=True)
matrix1 /= matrix1[3, 3]
return numpy.allclose(matrix0, matrix1)
def _import_module(module_name, warn=True, prefix='_py_', ignore='_'):
"""Try import all public attributes from module into global namespace.
Existing attributes with name clashes are renamed with prefix.
Attributes starting with underscore are ignored by default.
Return True on successful import.
"""
try:
module = __import__(module_name)
except ImportError:
if warn:
warnings.warn("Failed to import module " + module_name)
else:
for attr in dir(module):
if ignore and attr.startswith(ignore):
continue
if prefix:
if attr in globals():
globals()[prefix + attr] = globals()[attr]
elif warn:
warnings.warn("No Python implementation of " + attr)
globals()[attr] = getattr(module, attr)
return True