Commit f886651c authored by Andrey Kamaev's avatar Andrey Kamaev

Drop old python samples and tests

parent b2ba8b99
......@@ -1102,6 +1102,17 @@ static PyObject *pycvCreateTrackbar(PyObject*, PyObject *args)
///////////////////////////////////////////////////////////////////////////////////////
static int convert_to_char(PyObject *o, char *dst, const char *name = "no_name")
{
    // Reject anything that is not a Python string of exactly one character.
    if (!PyString_Check(o) || PyString_Size(o) != 1) {
        (*dst) = 0;
        return failmsg("Expected single character string for argument '%s'", name);
    }
    // Copy the single character into the caller's slot and report success.
    *dst = PyString_AsString(o)[0];
    return 1;
}
#define MKTYPE2(NAME) pyopencv_##NAME##_specials(); if (!to_ok(&pyopencv_##NAME##_Type)) return
#ifdef __GNUC__
......
......@@ -213,7 +213,6 @@ gen_template_rw_prop_init = Template("""
simple_argtype_mapping = {
"bool": ("bool", "b", "0"),
"char": ("char", "b", "0"),
"int": ("int", "i", "0"),
"float": ("float", "f", "0.f"),
"double": ("double", "d", "0"),
......@@ -619,7 +618,10 @@ class FuncInfo(object):
if amapping[1] == "O":
code_decl += " PyObject* pyobj_%s = NULL;\n" % (a.name,)
parse_name = "pyobj_" + a.name
code_cvt_list.append("pyopencv_to(pyobj_%s, %s, %s)" % (a.name, a.name, a.crepr()))
if a.tp == 'char':
code_cvt_list.append("convert_to_char(pyobj_%s, &%s, %s)"% (a.name, a.name, a.crepr()))
else:
code_cvt_list.append("pyopencv_to(pyobj_%s, %s, %s)" % (a.name, a.name, a.crepr()))
all_cargs.append([amapping, parse_name])
......
......@@ -13,75 +13,12 @@ import os
import getopt
import operator
import functools
import numpy as np
import cv2
import cv2.cv as cv
from test2 import *
class OpenCVTests(unittest.TestCase):
depths = [ cv.IPL_DEPTH_8U, cv.IPL_DEPTH_8S, cv.IPL_DEPTH_16U, cv.IPL_DEPTH_16S, cv.IPL_DEPTH_32S, cv.IPL_DEPTH_32F, cv.IPL_DEPTH_64F ]
mat_types = [
cv.CV_8UC1,
cv.CV_8UC2,
cv.CV_8UC3,
cv.CV_8UC4,
cv.CV_8SC1,
cv.CV_8SC2,
cv.CV_8SC3,
cv.CV_8SC4,
cv.CV_16UC1,
cv.CV_16UC2,
cv.CV_16UC3,
cv.CV_16UC4,
cv.CV_16SC1,
cv.CV_16SC2,
cv.CV_16SC3,
cv.CV_16SC4,
cv.CV_32SC1,
cv.CV_32SC2,
cv.CV_32SC3,
cv.CV_32SC4,
cv.CV_32FC1,
cv.CV_32FC2,
cv.CV_32FC3,
cv.CV_32FC4,
cv.CV_64FC1,
cv.CV_64FC2,
cv.CV_64FC3,
cv.CV_64FC4,
]
mat_types_single = [
cv.CV_8UC1,
cv.CV_8SC1,
cv.CV_16UC1,
cv.CV_16SC1,
cv.CV_32SC1,
cv.CV_32FC1,
cv.CV_64FC1,
]
def depthsize(self, d):
    """Return the per-element size in bytes for the IPL depth constant *d*."""
    byte_sizes = {
        cv.IPL_DEPTH_8U:  1,
        cv.IPL_DEPTH_8S:  1,
        cv.IPL_DEPTH_16U: 2,
        cv.IPL_DEPTH_16S: 2,
        cv.IPL_DEPTH_32S: 4,
        cv.IPL_DEPTH_32F: 4,
        cv.IPL_DEPTH_64F: 8,
    }
    return byte_sizes[d]
class NewOpenCVTests(unittest.TestCase):
def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR):
if not filename in self.image_cache:
filedata = urllib.urlopen("https://raw.github.com/Itseez/opencv/master/" + filename).read()
imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
cv.SetData(imagefiledata, filedata, len(filedata))
self.image_cache[filename] = cv.DecodeImageM(imagefiledata, iscolor)
return self.image_cache[filename]
def get_sample2(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR):
def get_sample(self, filename, iscolor = cv2.IMREAD_COLOR):
if not filename in self.image_cache:
filedata = urllib.urlopen("https://raw.github.com/Itseez/opencv/master/" + filename).read()
self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
......@@ -90,2123 +27,106 @@ class OpenCVTests(unittest.TestCase):
def setUp(self):
self.image_cache = {}
def snap(self, img):
    """Display a single image; thin convenience wrapper around snapL."""
    self.snapL([img])
def snapL(self, L):
    """Open one window per image in *L*, wait for a keypress, then close all windows."""
    for idx, image in enumerate(L):
        window = "snap-%d" % idx
        cv.NamedWindow(window, 1)
        cv.ShowImage(window, image)
    cv.WaitKey()
    cv.DestroyAllWindows()
def hashimg(self, im):
    """Return an MD5 digest of the image's raw bytes, for cheap equality checks."""
    raw = im.tostring()
    return hashlib.md5(raw).digest()
# Tests to run first; check the handful of basic operations that the later tests rely on
class PreliminaryTests(OpenCVTests):
def test_lena(self):
# Check that the lena jpg image has loaded correctly
# This test uses a 'golden' MD5 hash of the Lena image
# If the JPEG decompressor changes, it is possible that the MD5 hash will change,
# so the hash here will need to change.
im = self.get_sample("samples/c/lena.jpg")
# self.snap(im) # uncomment this line to view the image, when regilding
self.assertEqual(hashlib.md5(im.tostring()).hexdigest(), "9dcd9247f9811c6ce86675ba7b0297b6")
def test_LoadImage(self):
self.assertRaises(TypeError, lambda: cv.LoadImage())
self.assertRaises(TypeError, lambda: cv.LoadImage(4))
self.assertRaises(TypeError, lambda: cv.LoadImage('foo.jpg', 1, 1))
self.assertRaises(TypeError, lambda: cv.LoadImage('foo.jpg', xiscolor=cv.CV_LOAD_IMAGE_COLOR))
def test_types(self):
self.assert_(type(cv.CreateImage((7,5), cv.IPL_DEPTH_8U, 1)) == cv.iplimage)
self.assert_(type(cv.CreateMat(5, 7, cv.CV_32FC1)) == cv.cvmat)
for i,t in enumerate(self.mat_types):
basefunc = [
cv.CV_8UC,
cv.CV_8SC,
cv.CV_16UC,
cv.CV_16SC,
cv.CV_32SC,
cv.CV_32FC,
cv.CV_64FC,
][i / 4]
self.assertEqual(basefunc(1 + (i % 4)), t)
def test_tostring(self):
for w in [ 1, 4, 64, 512, 640]:
for h in [ 1, 4, 64, 480, 512]:
for c in [1, 2, 3, 4]:
for d in self.depths:
a = cv.CreateImage((w,h), d, c);
self.assert_(len(a.tostring()) == w * h * c * self.depthsize(d))
for w in [ 32, 96, 480 ]:
for h in [ 32, 96, 480 ]:
depth_size = {
cv.IPL_DEPTH_8U : 1,
cv.IPL_DEPTH_8S : 1,
cv.IPL_DEPTH_16U : 2,
cv.IPL_DEPTH_16S : 2,
cv.IPL_DEPTH_32S : 4,
cv.IPL_DEPTH_32F : 4,
cv.IPL_DEPTH_64F : 8
}
for f in self.depths:
for channels in (1,2,3,4):
img = cv.CreateImage((w, h), f, channels)
esize = (w * h * channels * depth_size[f])
self.assert_(len(img.tostring()) == esize)
cv.SetData(img, " " * esize, w * channels * depth_size[f])
self.assert_(len(img.tostring()) == esize)
mattype_size = {
cv.CV_8UC1 : 1,
cv.CV_8UC2 : 1,
cv.CV_8UC3 : 1,
cv.CV_8UC4 : 1,
cv.CV_8SC1 : 1,
cv.CV_8SC2 : 1,
cv.CV_8SC3 : 1,
cv.CV_8SC4 : 1,
cv.CV_16UC1 : 2,
cv.CV_16UC2 : 2,
cv.CV_16UC3 : 2,
cv.CV_16UC4 : 2,
cv.CV_16SC1 : 2,
cv.CV_16SC2 : 2,
cv.CV_16SC3 : 2,
cv.CV_16SC4 : 2,
cv.CV_32SC1 : 4,
cv.CV_32SC2 : 4,
cv.CV_32SC3 : 4,
cv.CV_32SC4 : 4,
cv.CV_32FC1 : 4,
cv.CV_32FC2 : 4,
cv.CV_32FC3 : 4,
cv.CV_32FC4 : 4,
cv.CV_64FC1 : 8,
cv.CV_64FC2 : 8,
cv.CV_64FC3 : 8,
cv.CV_64FC4 : 8
}
for t in self.mat_types:
for im in [cv.CreateMat(h, w, t), cv.CreateMatND([h, w], t)]:
elemsize = cv.CV_MAT_CN(cv.GetElemType(im)) * mattype_size[cv.GetElemType(im)]
cv.SetData(im, " " * (w * h * elemsize), (w * elemsize))
esize = (w * h * elemsize)
self.assert_(len(im.tostring()) == esize)
cv.SetData(im, " " * esize, w * elemsize)
self.assert_(len(im.tostring()) == esize)
# Tests for specific OpenCV functions
class FunctionTests(OpenCVTests):
def test_AvgSdv(self):
m = cv.CreateMat(1, 8, cv.CV_32FC1)
for i,v in enumerate([2, 4, 4, 4, 5, 5, 7, 9]):
m[0,i] = (v,)
self.assertAlmostEqual(cv.Avg(m)[0], 5.0, 3)
avg,sdv = cv.AvgSdv(m)
self.assertAlmostEqual(avg[0], 5.0, 3)
self.assertAlmostEqual(sdv[0], 2.0, 3)
def test_CalcEMD2(self):
cc = {}
for r in [ 5, 10, 37, 38 ]:
scratch = cv.CreateImage((100,100), 8, 1)
cv.SetZero(scratch)
cv.Circle(scratch, (50,50), r, 255, -1)
storage = cv.CreateMemStorage()
seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
arr = cv.CreateMat(len(seq), 3, cv.CV_32FC1)
for i,e in enumerate(seq):
arr[i,0] = 1
arr[i,1] = e[0]
arr[i,2] = e[1]
cc[r] = arr
def myL1(A, B, D):
return abs(A[0]-B[0]) + abs(A[1]-B[1])
def myL2(A, B, D):
return math.sqrt((A[0]-B[0])**2 + (A[1]-B[1])**2)
def myC(A, B, D):
return max(abs(A[0]-B[0]), abs(A[1]-B[1]))
contours = set(cc.values())
for c0 in contours:
for c1 in contours:
self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_L1) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myL1)) < 1e-3)
self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_L2) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myL2)) < 1e-3)
self.assert_(abs(cv.CalcEMD2(c0, c1, cv.CV_DIST_C) - cv.CalcEMD2(c0, c1, cv.CV_DIST_USER, myC)) < 1e-3)
def test_CalcOpticalFlowBM(self):
a = self.get_sample("samples/c/lena.jpg", 0)
b = self.get_sample("samples/c/lena.jpg", 0)
(w,h) = cv.GetSize(a)
vel_size = (w - 8 + 1, h - 8 + 1)
velx = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1)
vely = cv.CreateImage(vel_size, cv.IPL_DEPTH_32F, 1)
cv.CalcOpticalFlowBM(a, b, (8,8), (1,1), (8,8), 0, velx, vely)
def test_CalcOpticalFlowPyrLK(self):
a = self.get_sample("samples/c/lena.jpg", 0)
map = cv.CreateMat(2, 3, cv.CV_32FC1)
cv.GetRotationMatrix2D((256, 256), 10, 1.0, map)
b = cv.CloneMat(a)
cv.WarpAffine(a, b, map)
eig_image = cv.CreateMat(a.rows, a.cols, cv.CV_32FC1)
temp_image = cv.CreateMat(a.rows, a.cols, cv.CV_32FC1)
prevPyr = cv.CreateMat(a.rows / 3, a.cols + 8, cv.CV_8UC1)
currPyr = cv.CreateMat(a.rows / 3, a.cols + 8, cv.CV_8UC1)
prevFeatures = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 400, 0.01, 0.01)
(currFeatures, status, track_error) = cv.CalcOpticalFlowPyrLK(a,
b,
prevPyr,
currPyr,
prevFeatures,
(10, 10),
3,
(cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS,20, 0.03),
0)
if 0: # enable visualization
print
print sum(status), "Points found in curr image"
for prev,this in zip(prevFeatures, currFeatures):
iprev = tuple([int(c) for c in prev])
ithis = tuple([int(c) for c in this])
cv.Circle(a, iprev, 3, 255)
cv.Circle(a, ithis, 3, 0)
cv.Line(a, iprev, ithis, 128)
self.snapL([a, b])
def test_CartToPolar(self):
x = cv.CreateMat(5, 5, cv.CV_32F)
y = cv.CreateMat(5, 5, cv.CV_32F)
mag = cv.CreateMat(5, 5, cv.CV_32F)
angle = cv.CreateMat(5, 5, cv.CV_32F)
x2 = cv.CreateMat(5, 5, cv.CV_32F)
y2 = cv.CreateMat(5, 5, cv.CV_32F)
for i in range(5):
for j in range(5):
x[i, j] = i
y[i, j] = j
for in_degrees in [False, True]:
cv.CartToPolar(x, y, mag, angle, in_degrees)
cv.PolarToCart(mag, angle, x2, y2, in_degrees)
for i in range(5):
for j in range(5):
self.assertAlmostEqual(x[i, j], x2[i, j], 1)
self.assertAlmostEqual(y[i, j], y2[i, j], 1)
def test_Circle(self):
for w,h in [(2,77), (77,2), (256, 256), (640,480)]:
img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.SetZero(img)
tricky = [ -8000, -2, -1, 0, 1, h/2, h-1, h, h+1, w/2, w-1, w, w+1, 8000]
for x0 in tricky:
for y0 in tricky:
for r in [ 0, 1, 2, 3, 4, 5, w/2, w-1, w, w+1, h/2, h-1, h, h+1, 8000 ]:
for thick in [1, 2, 10]:
for t in [0, 8, 4, cv.CV_AA]:
cv.Circle(img, (x0,y0), r, 255, thick, t)
# just check that something was drawn
self.assert_(cv.Sum(img)[0] > 0)
def test_ConvertImage(self):
i1 = cv.GetImage(self.get_sample("samples/c/lena.jpg", 1))
i2 = cv.CloneImage(i1)
i3 = cv.CloneImage(i1)
cv.ConvertImage(i1, i2, cv.CV_CVTIMG_FLIP + cv.CV_CVTIMG_SWAP_RB)
self.assertNotEqual(self.hashimg(i1), self.hashimg(i2))
cv.ConvertImage(i2, i3, cv.CV_CVTIMG_FLIP + cv.CV_CVTIMG_SWAP_RB)
self.assertEqual(self.hashimg(i1), self.hashimg(i3))
def test_ConvexHull2(self):
# Draw a series of N-pointed stars, find contours, assert the contour is not convex,
# assert the hull has N segments, assert that there are N convexity defects.
def polar2xy(th, r):
return (int(400 + r * math.cos(th)), int(400 + r * math.sin(th)))
storage = cv.CreateMemStorage(0)
for way in ['CvSeq', 'CvMat', 'list']:
for points in range(3,20):
scratch = cv.CreateImage((800,800), 8, 1)
cv.SetZero(scratch)
sides = 2 * points
cv.FillPoly(scratch, [ [ polar2xy(i * 2 * math.pi / sides, [100,350][i&1]) for i in range(sides) ] ], 255)
seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
if way == 'CvSeq':
# pts is a CvSeq
pts = seq
elif way == 'CvMat':
# pts is a CvMat
arr = cv.CreateMat(len(seq), 1, cv.CV_32SC2)
for i,e in enumerate(seq):
arr[i,0] = e
pts = arr
elif way == 'list':
# pts is a list of 2-tuples
pts = list(seq)
else:
assert False
self.assert_(cv.CheckContourConvexity(pts) == 0)
hull = cv.ConvexHull2(pts, storage, return_points = 1)
self.assert_(cv.CheckContourConvexity(hull) == 1)
self.assert_(len(hull) == points)
if way in [ 'CvSeq', 'CvMat' ]:
defects = cv.ConvexityDefects(pts, cv.ConvexHull2(pts, storage), storage)
self.assert_(len([depth for (_,_,_,depth) in defects if (depth > 5)]) == points)
def test_CreateImage(self):
for w in [ 1, 4, 64, 512, 640]:
for h in [ 1, 4, 64, 480, 512]:
for c in [1, 2, 3, 4]:
for d in self.depths:
a = cv.CreateImage((w,h), d, c);
self.assert_(a.width == w)
self.assert_(a.height == h)
self.assert_(a.nChannels == c)
self.assert_(a.depth == d)
self.assert_(cv.GetSize(a) == (w, h))
# self.assert_(cv.GetElemType(a) == d)
self.assertRaises(cv.error, lambda: cv.CreateImage((100, 100), 9, 1))
def test_CreateMat(self):
for rows in [1, 2, 4, 16, 64, 512, 640]:
for cols in [1, 2, 4, 16, 64, 512, 640]:
for t in self.mat_types:
m = cv.CreateMat(rows, cols, t)
self.assertEqual(cv.GetElemType(m), t)
self.assertEqual(m.type, t)
self.assertRaises(cv.error, lambda: cv.CreateMat(-1, 100, cv.CV_8SC4))
self.assertRaises(cv.error, lambda: cv.CreateMat(100, -1, cv.CV_8SC4))
self.assertRaises(cv.error, lambda: cv.cvmat())
def test_DrawChessboardCorners(self):
im = cv.CreateImage((512,512), cv.IPL_DEPTH_8U, 3)
cv.SetZero(im)
cv.DrawChessboardCorners(im, (5, 5), [ ((i/5)*100+50,(i%5)*100+50) for i in range(5 * 5) ], 1)
def test_ExtractSURF(self):
img = self.get_sample("samples/c/lena.jpg", 0)
w,h = cv.GetSize(img)
for hessthresh in [ 300,400,500]:
for dsize in [0,1]:
for layers in [1,3,10]:
kp,desc = cv.ExtractSURF(img, None, cv.CreateMemStorage(), (dsize, hessthresh, 3, layers))
self.assert_(len(kp) == len(desc))
for d in desc:
self.assert_(len(d) == {0:64, 1:128}[dsize])
for pt,laplacian,size,dir,hessian in kp:
self.assert_((0 <= pt[0]) and (pt[0] <= w))
self.assert_((0 <= pt[1]) and (pt[1] <= h))
self.assert_(laplacian in [-1, 0, 1])
self.assert_((0 <= dir) and (dir <= 360))
self.assert_(hessian >= hessthresh)
def test_FillPoly(self):
scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1)
random.seed(0)
for i in range(50):
cv.SetZero(scribble)
self.assert_(cv.CountNonZero(scribble) == 0)
cv.FillPoly(scribble, [ [ (random.randrange(640), random.randrange(480)) for i in range(100) ] ], (255,))
self.assert_(cv.CountNonZero(scribble) != 0)
def test_FindChessboardCorners(self):
im = cv.CreateImage((512,512), cv.IPL_DEPTH_8U, 1)
cv.Set(im, 128)
# Empty image run
status,corners = cv.FindChessboardCorners( im, (7,7) )
# Perfect checkerboard
def xf(i,j, o):
return ((96 + o) + 40 * i, (96 + o) + 40 * j)
for i in range(8):
for j in range(8):
color = ((i ^ j) & 1) * 255
cv.Rectangle(im, xf(i,j, 0), xf(i,j, 39), color, cv.CV_FILLED)
status,corners = cv.FindChessboardCorners( im, (7,7) )
self.assert_(status)
self.assert_(len(corners) == (7 * 7))
# Exercise corner display
im3 = cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 3)
cv.Merge(im, im, im, None, im3)
cv.DrawChessboardCorners(im3, (7,7), corners, status)
if 0:
self.snap(im3)
# Run it with too many corners
cv.Set(im, 128)
for i in range(40):
for j in range(40):
color = ((i ^ j) & 1) * 255
x = 30 + 6 * i
y = 30 + 4 * j
cv.Rectangle(im, (x, y), (x+4, y+4), color, cv.CV_FILLED)
status,corners = cv.FindChessboardCorners( im, (7,7) )
# XXX - this is very slow
if 0:
rng = cv.RNG(0)
cv.RandArr(rng, im, cv.CV_RAND_UNI, 0, 255.0)
self.snap(im)
status,corners = cv.FindChessboardCorners( im, (7,7) )
def test_FindContours(self):
random.seed(0)
storage = cv.CreateMemStorage()
# First run FindContours on a black image.
for mode in [cv.CV_RETR_EXTERNAL, cv.CV_RETR_LIST, cv.CV_RETR_CCOMP, cv.CV_RETR_TREE]:
for method in [cv.CV_CHAIN_CODE, cv.CV_CHAIN_APPROX_NONE, cv.CV_CHAIN_APPROX_SIMPLE, cv.CV_CHAIN_APPROX_TC89_L1, cv.CV_CHAIN_APPROX_TC89_KCOS, cv.CV_LINK_RUNS]:
scratch = cv.CreateImage((800,800), 8, 1)
cv.SetZero(scratch)
seq = cv.FindContours(scratch, storage, mode, method)
x = len(seq)
if seq:
pass
for s in seq:
pass
for trial in range(10):
scratch = cv.CreateImage((800,800), 8, 1)
cv.SetZero(scratch)
def plot(center, radius, mode):
cv.Circle(scratch, center, radius, mode, -1)
if radius < 20:
return 0
else:
newmode = 255 - mode
subs = random.choice([1,2,3])
if subs == 1:
return [ plot(center, radius - 5, newmode) ]
else:
newradius = int({ 2: radius / 2, 3: radius / 2.3 }[subs] - 5)
r = radius / 2
ret = []
for i in range(subs):
th = i * (2 * math.pi) / subs
ret.append(plot((int(center[0] + r * math.cos(th)), int(center[1] + r * math.sin(th))), newradius, newmode))
return sorted(ret)
actual = plot((400,400), 390, 255 )
seq = cv.FindContours(scratch, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE)
def traverse(s):
if s == None:
return 0
else:
self.assert_(abs(cv.ContourArea(s)) > 0.0)
((x,y),(w,h),th) = cv.MinAreaRect2(s, cv.CreateMemStorage())
self.assert_(((w / h) - 1.0) < 0.01)
self.assert_(abs(cv.ContourArea(s)) > 0.0)
r = []
while s:
r.append(traverse(s.v_next()))
s = s.h_next()
return sorted(r)
self.assert_(traverse(seq.v_next()) == actual)
if 1:
original = cv.CreateImage((800,800), 8, 1)
cv.SetZero(original)
cv.Circle(original, (400, 400), 200, 255, -1)
cv.Circle(original, (100, 100), 20, 255, -1)
else:
original = self.get_sample("samples/c/lena.jpg", 0)
cv.Threshold(original, original, 128, 255, cv.CV_THRESH_BINARY);
contours = cv.FindContours(original, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
def contour_iterator(contour):
while contour:
yield contour
contour = contour.h_next()
# Should be 2 contours from the two circles above
self.assertEqual(len(list(contour_iterator(contours))), 2)
# Smoke DrawContours
sketch = cv.CreateImage(cv.GetSize(original), 8, 3)
cv.SetZero(sketch)
red = cv.RGB(255, 0, 0)
green = cv.RGB(0, 255, 0)
for c in contour_iterator(contours):
cv.DrawContours(sketch, c, red, green, 0)
# self.snap(sketch)
def test_GetAffineTransform(self):
    """A pure 17x scaling between point triples must produce 17s on the diagonal."""
    src_pts = [(0, 0), (1, 0), (0, 1)]
    dst_pts = [(0, 0), (17, 0), (0, 17)]
    xform = cv.CreateMat(2, 3, cv.CV_32FC1)
    cv.GetAffineTransform(src_pts, dst_pts, xform)
    self.assertAlmostEqual(xform[0, 0], 17, 2)
    self.assertAlmostEqual(xform[1, 1], 17, 2)
def test_GetRotationMatrix2D(self):
mapping = cv.CreateMat(2, 3, cv.CV_32FC1)
for scale in [0.0, 1.0, 2.0]:
for angle in [0.0, 360.0]:
cv.GetRotationMatrix2D((0,0), angle, scale, mapping)
for r in [0, 1]:
for c in [0, 1, 2]:
if r == c:
e = scale
else:
e = 0.0
self.assertAlmostEqual(mapping[r, c], e, 2)
def test_GetSize(self):
    """GetSize reports (width, height) for both CvMat and IplImage."""
    mat = cv.CreateMat(5, 7, cv.CV_32FC1)
    self.assert_(cv.GetSize(mat) == (7, 5))
    img = cv.CreateImage((7, 5), cv.IPL_DEPTH_8U, 1)
    self.assert_(cv.GetSize(img) == (7, 5))
def test_GetStarKeypoints(self):
src = self.get_sample("samples/c/lena.jpg", 0)
storage = cv.CreateMemStorage()
kp = cv.GetStarKeypoints(src, storage)
self.assert_(len(kp) > 0)
for (x,y),scale,r in kp:
self.assert_(0 <= x)
self.assert_(x <= cv.GetSize(src)[0])
self.assert_(0 <= y)
self.assert_(y <= cv.GetSize(src)[1])
return
scribble = cv.CreateImage(cv.GetSize(src), 8, 3)
cv.CvtColor(src, scribble, cv.CV_GRAY2BGR)
for (x,y),scale,r in kp:
print x,y,scale,r
cv.Circle(scribble, (x,y), scale, cv.RGB(255,0,0))
self.snap(scribble)
def test_GetSubRect(self):
src = cv.CreateImage((100,100), 8, 1)
data = "z" * (100 * 100)
cv.SetData(src, data, 100)
start_count = sys.getrefcount(data)
iter = 77
subs = []
for i in range(iter):
sub = cv.GetSubRect(src, (0, 0, 10, 10))
subs.append(sub)
self.assert_(sys.getrefcount(data) == (start_count + iter))
src = self.get_sample("samples/c/lena.jpg", 0)
made = cv.CreateImage(cv.GetSize(src), 8, 1)
sub = cv.CreateMat(32, 32, cv.CV_8UC1)
for x in range(0, 512, 32):
for y in range(0, 512, 32):
sub = cv.GetSubRect(src, (x, y, 32, 32))
cv.SetImageROI(made, (x, y, 32, 32))
cv.Copy(sub, made)
cv.ResetImageROI(made)
cv.AbsDiff(made, src, made)
self.assert_(cv.CountNonZero(made) == 0)
for m1 in [cv.CreateMat(1, 10, cv.CV_8UC1), cv.CreateImage((10, 1), 8, 1)]:
for i in range(10):
m1[0, i] = i
def aslist(cvmat): return list(array.array('B', cvmat.tostring()))
m2 = cv.GetSubRect(m1, (5, 0, 4, 1))
m3 = cv.GetSubRect(m2, (1, 0, 2, 1))
self.assertEqual(aslist(m1), range(10))
self.assertEqual(aslist(m2), range(5, 9))
self.assertEqual(aslist(m3), range(6, 8))
def xtest_grabCut(self):
image = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_COLOR)
tmp1 = cv.CreateMat(1, 13 * 5, cv.CV_32FC1)
tmp2 = cv.CreateMat(1, 13 * 5, cv.CV_32FC1)
mask = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
cv.GrabCut(image, mask, (10,10,200,200), tmp1, tmp2, 10, cv.GC_INIT_WITH_RECT)
def test_HoughLines2_PROBABILISTIC(self):
li = cv.HoughLines2(self.yield_line_image(),
cv.CreateMemStorage(),
cv.CV_HOUGH_PROBABILISTIC,
1,
math.pi/180,
50,
50,
10)
self.assert_(len(li) > 0)
self.assert_(li[0] != None)
def test_HoughLines2_STANDARD(self):
li = cv.HoughLines2(self.yield_line_image(),
cv.CreateMemStorage(),
cv.CV_HOUGH_STANDARD,
1,
math.pi/180,
100,
0,
0)
self.assert_(len(li) > 0)
self.assert_(li[0] != None)
def test_InPaint(self):
src = self.get_sample("samples/cpp/building.jpg")
msk = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
damaged = cv.CloneMat(src)
repaired = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 3)
difference = cv.CloneImage(repaired)
cv.SetZero(msk)
for method in [ cv.CV_INPAINT_NS, cv.CV_INPAINT_TELEA ]:
for (p0,p1) in [ ((10,10), (400,400)) ]:
cv.Line(damaged, p0, p1, cv.RGB(255, 0, 255), 2)
cv.Line(msk, p0, p1, 255, 2)
cv.Inpaint(damaged, msk, repaired, 10., cv.CV_INPAINT_NS)
cv.AbsDiff(src, repaired, difference)
#self.snapL([src, damaged, repaired, difference])
def test_InitLineIterator(self):
scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1)
self.assert_(len(list(cv.InitLineIterator(scribble, (20,10), (30,10)))) == 11)
def test_InRange(self):
sz = (256,256)
Igray1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Ilow1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Ihi1 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Igray2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Ilow2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Ihi2 = cv.CreateImage(sz,cv.IPL_DEPTH_32F,1)
Imask = cv.CreateImage(sz, cv.IPL_DEPTH_8U,1)
Imaskt = cv.CreateImage(sz,cv.IPL_DEPTH_8U,1)
cv.InRange(Igray1, Ilow1, Ihi1, Imask);
cv.InRange(Igray2, Ilow2, Ihi2, Imaskt);
cv.Or(Imask, Imaskt, Imask);
def test_Line(self):
w,h = 640,480
img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
cv.SetZero(img)
tricky = [ -8000, -2, -1, 0, 1, h/2, h-1, h, h+1, w/2, w-1, w, w+1, 8000]
for x0 in tricky:
for y0 in tricky:
for x1 in tricky:
for y1 in tricky:
for thickness in [ 0, 1, 8 ]:
for line_type in [0, 4, 8, cv.CV_AA ]:
cv.Line(img, (x0,y0), (x1,y1), 255, thickness, line_type)
# just check that something was drawn
self.assert_(cv.Sum(img)[0] > 0)
def test_MinMaxLoc(self):
scribble = cv.CreateImage((640,480), cv.IPL_DEPTH_8U, 1)
los = [ (random.randrange(480), random.randrange(640)) for i in range(100) ]
his = [ (random.randrange(480), random.randrange(640)) for i in range(100) ]
for (lo,hi) in zip(los,his):
cv.Set(scribble, 128)
scribble[lo] = 0
scribble[hi] = 255
r = cv.MinMaxLoc(scribble)
self.assert_(r == (0, 255, tuple(reversed(lo)), tuple(reversed(hi))))
def xxx_test_PyrMeanShiftFiltering(self): # XXX - ticket #306
if 0:
src = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_COLOR)
dst = cv.CloneMat(src)
cv.PyrMeanShiftFiltering(src, dst, 5, 5)
print src, dst
self.snap(src)
else:
r = cv.temp_test()
print r
print len(r.tostring())
self.snap(r)
def test_Reshape(self):
# 97 rows
# 12 cols
rows = 97
cols = 12
im = cv.CreateMat( rows, cols, cv.CV_32FC1 )
elems = rows * cols * 1
def crd(im):
return cv.GetSize(im) + (cv.CV_MAT_CN(cv.GetElemType(im)),)
for c in (1, 2, 3, 4):
nc,nr,nd = crd(cv.Reshape(im, c))
self.assert_(nd == c)
self.assert_((nc * nr * nd) == elems)
nc,nr,nd = crd(cv.Reshape(im, 0, 97*2))
self.assert_(nr == 97*2)
self.assert_((nc * nr * nd) == elems)
nc,nr,nd = crd(cv.Reshape(im, 3, 97*2))
self.assert_(nr == 97*2)
self.assert_(nd == 3)
self.assert_((nc * nr * nd) == elems)
# Now test ReshapeMatND
mat = cv.CreateMatND([24], cv.CV_32FC1)
cv.Set(mat, 1.0)
self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 0, [24, 1])), (24, 1))
self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 0, [6, 4])), (6, 4))
self.assertEqual(cv.GetDims(cv.ReshapeMatND(mat, 24, [1])), (1,))
self.assertRaises(TypeError, lambda: cv.ReshapeMatND(mat, 12, [1]))
def test_Save(self):
for o in [ cv.CreateImage((128,128), cv.IPL_DEPTH_8U, 1), cv.CreateMat(16, 16, cv.CV_32FC1), cv.CreateMatND([7,9,4], cv.CV_32FC1) ]:
cv.Save("test.save", o)
loaded = cv.Load("test.save", cv.CreateMemStorage())
self.assert_(type(o) == type(loaded))
def test_SetIdentity(self):
for r in range(1,16):
for c in range(1, 16):
for t in self.mat_types_single:
M = cv.CreateMat(r, c, t)
cv.SetIdentity(M)
for rj in range(r):
for cj in range(c):
if rj == cj:
expected = 1.0
else:
expected = 0.0
self.assertEqual(M[rj,cj], expected)
def test_SnakeImage(self):
src = self.get_sample("samples/c/lena.jpg", 0)
pts = [ (512-i,i) for i in range(0, 512, 8) ]
# Make sure that weight arguments get validated
self.assertRaises(TypeError, lambda: cv.SnakeImage(cv.GetImage(src), pts, [1,2], .01, .01, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1)))
# Smoke by making sure that points are changed by call
r = cv.SnakeImage(cv.GetImage(src), pts, .01, .01, .01, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1))
if 0:
cv.PolyLine(src, [ r ], 0, 255)
self.snap(src)
self.assertEqual(len(r), len(pts))
self.assertNotEqual(r, pts)
# Ensure that list of weights is same as scalar weight
w = [.01] * len(pts)
r2 = cv.SnakeImage(cv.GetImage(src), pts, w, w, w, (7,7), (cv.CV_TERMCRIT_ITER, 100, 0.1))
self.assertEqual(r, r2)
def test_KMeans2(self):
size = 500
samples = cv.CreateMat(size, 1, cv.CV_32FC3)
labels = cv.CreateMat(size, 1, cv.CV_32SC1)
centers = cv.CreateMat(2, 3, cv.CV_32FC1)
cv.Zero(samples)
cv.Zero(labels)
cv.Zero(centers)
cv.Set(cv.GetSubRect(samples, (0, 0, 1, size/2)), (255, 255, 255))
compact = cv.KMeans2(samples, 2, labels, (cv.CV_TERMCRIT_ITER, 100, 0.1), 1, 0, centers)
self.assertEqual(int(compact), 0)
random.seed(0)
for i in range(50):
index = random.randrange(size)
if index < size/2:
self.assertEqual(samples[index, 0], (255, 255, 255))
self.assertEqual(labels[index, 0], 1)
else:
self.assertEqual(samples[index, 0], (0, 0, 0))
self.assertEqual(labels[index, 0], 0)
for cluster in (0, 1):
for channel in (0, 1, 2):
self.assertEqual(int(centers[cluster, channel]), cluster*255)
def test_Sum(self):
for r in range(1,11):
for c in range(1, 11):
for t in self.mat_types_single:
M = cv.CreateMat(r, c, t)
cv.Set(M, 1)
self.assertEqual(cv.Sum(M)[0], r * c)
def test_Threshold(self):
#""" directed test for bug 2790622 """
src = self.get_sample("samples/c/lena.jpg", 0)
results = set()
for i in range(10):
dst = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
cv.Threshold(src, dst, 128, 128, cv.CV_THRESH_BINARY)
results.add(dst.tostring())
# Should have produced the same answer every time, so results set should have size 1
self.assert_(len(results) == 1)
# ticket #71 repro attempt
image = self.get_sample("samples/c/lena.jpg", 0)
red = cv.CreateImage(cv.GetSize(image), 8, 1)
binary = cv.CreateImage(cv.GetSize(image), 8, 1)
cv.Split(image, red, None, None, None)
cv.Threshold(red, binary, 42, 255, cv.CV_THRESH_BINARY)
##############################################################################
def yield_line_image(self):
    """Return a Canny edge map of the building sample (input for the HoughLines tests)."""
    src = self.get_sample("samples/cpp/building.jpg", 0)
    edges = cv.CreateImage(cv.GetSize(src), 8, 1)
    cv.Canny(src, edges, 50, 200, 3)
    return edges
# Tests for functional areas
class AreaTests(OpenCVTests):
def test_numpy(self):
if 'fromarray' in dir(cv):
import numpy
def convert(numpydims):
""" Create a numpy array with specified dims, return the OpenCV CvMat """
a1 = numpy.array([1] * reduce(operator.__mul__, numpydims)).reshape(*numpydims).astype(numpy.float32)
return cv.fromarray(a1)
def row_col_chan(m):
col = m.cols
row = m.rows
chan = cv.CV_MAT_CN(cv.GetElemType(m))
return (row, col, chan)
self.assertEqual(row_col_chan(convert((2, 13))), (2, 13, 1))
self.assertEqual(row_col_chan(convert((2, 13, 4))), (2, 13, 4))
self.assertEqual(row_col_chan(convert((2, 13, cv.CV_CN_MAX))), (2, 13, cv.CV_CN_MAX))
self.assertRaises(TypeError, lambda: convert((2,)))
self.assertRaises(TypeError, lambda: convert((11, 17, cv.CV_CN_MAX + 1)))
for t in [cv.CV_16UC1, cv.CV_32SC1, cv.CV_32FC1]:
for d in [ (8,), (1,7), (2,3,4), (7,9,2,1,8), (1,2,3,4,5,6,7,8) ]:
total = reduce(operator.__mul__, d)
m = cv.CreateMatND(d, t)
for i in range(total):
cv.Set1D(m, i, i)
na = numpy.asarray(m).reshape((total,))
self.assertEqual(list(na), range(total))
# now do numpy -> cvmat, and verify
m2 = cv.fromarray(na, True)
# Check that new cvmat m2 contains same counting sequence
for i in range(total):
self.assertEqual(cv.Get1D(m, i)[0], i)
# Verify round-trip for 2D arrays
for rows in [2, 3, 7, 13]:
for cols in [2, 3, 7, 13]:
for allowND in [False, True]:
im = cv.CreateMatND([rows, cols], cv.CV_16UC1)
cv.SetZero(im)
a = numpy.asarray(im)
self.assertEqual(a.shape, (rows, cols))
cvmatnd = cv.fromarray(a, allowND)
self.assertEqual(cv.GetDims(cvmatnd), (rows, cols))
# im, a and cvmatnd all point to the same data, so...
for i,coord in enumerate([(0,0), (0,1), (1,0), (1,1)]):
v = 5 + i + 7
a[coord] = v
self.assertEqual(im[coord], v)
self.assertEqual(cvmatnd[coord], v)
# Cv -> Numpy 3 channel check
im = cv.CreateMatND([2, 13], cv.CV_16UC3)
self.assertEqual(numpy.asarray(im).shape, (2, 13, 3))
# multi-dimensional NumPy array
na = numpy.ones([7,9,2,1,8])
cm = cv.fromarray(na, True)
self.assertEqual(cv.GetDims(cm), (7,9,2,1,8))
# Using an array object for a CvArr parameter
ones = numpy.ones((640, 480))
r = cv.fromarray(numpy.ones((640, 480)))
cv.AddS(cv.fromarray(ones), 7, r)
self.assert_(numpy.alltrue(r == (8 * ones)))
# create arrays, use them in OpenCV and replace the the array
# looking for leaks
def randdim():
return [random.randrange(1,6) for i in range(random.randrange(1, 6))]
arrays = [numpy.ones(randdim()).astype(numpy.uint8) for i in range(10)]
cs = [cv.fromarray(a, True) for a in arrays]
for i in range(1000):
arrays[random.randrange(10)] = numpy.ones(randdim()).astype(numpy.uint8)
cs[random.randrange(10)] = cv.fromarray(arrays[random.randrange(10)], True)
for j in range(10):
self.assert_(all([c == chr(1) for c in cs[j].tostring()]))
#
m = numpy.identity(4, dtype = numpy.float32)
m = cv.fromarray(m[:3, :3])
rvec = cv.CreateMat(3, 1, cv.CV_32FC1)
rvec[0,0] = 1
rvec[1,0] = 1
rvec[2,0] = 1
cv.Rodrigues2(rvec, m)
#print m
else:
print "SKIPPING test_numpy - numpy support not built"
def test_boundscatch(self):
l2 = cv.CreateMat(256, 1, cv.CV_8U)
l2[0,0] # should be OK
self.assertRaises(cv.error, lambda: l2[1,1])
l2[0] # should be OK
self.assertRaises(cv.error, lambda: l2[299])
for n in range(1, 8):
l = cv.CreateMatND([2] * n, cv.CV_8U)
l[0] # should be OK
self.assertRaises(cv.error, lambda: l[999])
tup0 = (0,) * n
l[tup0] # should be OK
tup2 = (2,) * n
self.assertRaises(cv.error, lambda: l[tup2])
def test_stereo(self):
left = self.get_sample2("samples/cpp/tsukuba_l.png", 0)
right = self.get_sample2("samples/cpp/tsukuba_r.png", 0)
bm = cv2.createStereoBM(32, 11)
disparity = bm.compute(left, right)
self.assertEqual(left.shape, disparity.shape)
sgbm = cv2.createStereoSGBM(0, 32, 5)
disparity2 = sgbm.compute(left, right)
self.assertEqual(left.shape, disparity2.shape)
def test_kalman(self):
k = cv.CreateKalman(2, 1, 0)
def failing_test_exception(self):
a = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
b = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
self.assertRaises(cv.error, lambda: cv.Laplace(a, b))
def test_cvmat_accessors(self):
cvm = cv.CreateMat(20, 10, cv.CV_32FC1)
def test_depths(self):
    """The depth enum values must be pairwise distinct."""
    unique_depths = set(self.depths)
    self.assert_(len(self.depths) == len(unique_depths))
def test_leak(self):
#""" If CreateImage is not releasing image storage, then the loop below should use ~4GB of memory. """
for i in range(64000):
a = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1)
for i in range(64000):
a = cv.CreateMat(1024, 1024, cv.CV_8UC1)
    def test_histograms(self):
        """Exercise CreateHist/CalcHist, min/max queries, back-projection
        and histogram comparison on the lena sample image."""
        def split(im):
            # Split a multi-channel image into a list of single-channel 8U images.
            nchans = cv.CV_MAT_CN(cv.GetElemType(im))
            c = [ cv.CreateImage(cv.GetSize(im), cv.IPL_DEPTH_8U, 1) for i in range(nchans) ] + [None] * (4 - nchans)
            cv.Split(im, c[0], c[1], c[2], c[3])
            return c[:nchans]
        def imh(im):
            # Build an N-dimensional histogram over all channels of im.
            s = split(im)
            hist = cv.CreateHist([256] * len(s), cv.CV_HIST_ARRAY, [ (0,255) ] * len(s), 1)
            cv.CalcHist(s, hist, 0)
            return hist
        dims = [180]
        ranges = [(0,180)]
        a = cv.CreateHist(dims, cv.CV_HIST_ARRAY , ranges, 1)
        src = self.get_sample("samples/c/lena.jpg", 0)
        h = imh(src)
        # The bin locations reported by GetMinMaxHistValue must hold the
        # reported min/max values.
        (minv, maxv, minl, maxl) = cv.GetMinMaxHistValue(h)
        self.assert_(cv.QueryHistValue_nD(h, minl) == minv)
        self.assert_(cv.QueryHistValue_nD(h, maxl) == maxv)
        # Smoke-test both back-projection variants.
        bp = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_8U, 1)
        cv.CalcBackProject(split(src), bp, h)
        bp = cv.CreateImage((cv.GetSize(src)[0]-2, cv.GetSize(src)[1]-2), cv.IPL_DEPTH_32F, 1)
        cv.CalcBackProjectPatch(split(src), bp, (3,3), h, cv.CV_COMP_INTERSECT, 1)
        # Comparing a histogram with itself must give each method's listed value.
        for meth,expected in [(cv.CV_COMP_CORREL, 1.0), (cv.CV_COMP_CHISQR, 0.0), (cv.CV_COMP_INTERSECT, 1.0), (cv.CV_COMP_BHATTACHARYYA, 0.0)]:
            self.assertEqual(cv.CompareHist(h, h, meth), expected)
    def test_remap(self):
        """Smoke-test undistortion maps, Remap/Undistort2 and ProjectPoints2
        across all float-type combinations of its arguments."""
        rng = cv.RNG(0)
        maxError = 1e-6
        # Draw a grid pattern to remap.
        raw = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
        for x in range(0, 640, 20):
            cv.Line(raw, (x,0), (x,480), 255, 1)
        for y in range(0, 480, 20):
            cv.Line(raw, (0,y), (640,y), 255, 1)
        # Simple pinhole camera with a single radial distortion coefficient.
        intrinsic_mat = cv.CreateMat(3, 3, cv.CV_32FC1)
        distortion_coeffs = cv.CreateMat(1, 4, cv.CV_32FC1)
        cv.SetZero(intrinsic_mat)
        intrinsic_mat[0,2] = 320.0
        intrinsic_mat[1,2] = 240.0
        intrinsic_mat[0,0] = 320.0
        intrinsic_mat[1,1] = 320.0
        intrinsic_mat[2,2] = 1.0
        cv.SetZero(distortion_coeffs)
        distortion_coeffs[0,0] = 1e-1
        mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
        cv.SetZero(mapx)
        cv.SetZero(mapy)
        cv.InitUndistortMap(intrinsic_mat, distortion_coeffs, mapx, mapy)
        rect = cv.CreateImage((640, 480), cv.IPL_DEPTH_8U, 1)
        (w,h) = (640,480)
        # Also exercise the fixed-point converted-map form of Remap.
        rMapxy = cv.CreateMat(h, w, cv.CV_16SC2)
        rMapa = cv.CreateMat(h, w, cv.CV_16UC1)
        cv.ConvertMaps(mapx,mapy,rMapxy,rMapa)
        cv.Remap(raw, rect, mapx, mapy)
        cv.Remap(raw, rect, rMapxy, rMapa)
        cv.Undistort2(raw, rect, intrinsic_mat, distortion_coeffs)
        # Widths around 4096 probe row-alignment edge cases.
        for w in [1, 4, 4095, 4096, 4097, 4100]:
            p = cv.CreateImage((w,256), 8, 1)
            up = cv.CreateImage((w,256), 8, 1)
            cv.Undistort2(p, up, intrinsic_mat, distortion_coeffs)
        # ProjectPoints2 must give the same projections regardless of the
        # float depth (32F/64F) of any of its inputs, and regardless of the
        # row/column layout of the output points.
        fptypes = [cv.CV_32FC1, cv.CV_64FC1]
        pointsCount = 7
        for t0 in fptypes:
            for t1 in fptypes:
                for t2 in fptypes:
                    for t3 in fptypes:
                        rotation_vector = cv.CreateMat(1, 3, t0)
                        translation_vector = cv.CreateMat(1, 3, t1)
                        cv.RandArr(rng, rotation_vector, cv.CV_RAND_UNI, -1.0, 1.0)
                        cv.RandArr(rng, translation_vector, cv.CV_RAND_UNI, -1.0, 1.0)
                        object_points = cv.CreateMat(pointsCount, 3, t2)
                        image_points = cv.CreateMat(pointsCount, 2, t3)
                        cv.RandArr(rng, object_points, cv.CV_RAND_UNI, -100.0, 100.0)
                        cv.ProjectPoints2(object_points, rotation_vector, translation_vector, intrinsic_mat, distortion_coeffs, image_points)
                        reshaped_object_points = cv.Reshape(object_points, 1, 3)
                        reshaped_image_points = cv.CreateMat(2, pointsCount, t3)
                        cv.ProjectPoints2(object_points, rotation_vector, translation_vector, intrinsic_mat, distortion_coeffs, reshaped_image_points)
                        error = cv.Norm(reshaped_image_points, cv.Reshape(image_points, 1, 2))
                        self.assert_(error < maxError)
def test_arithmetic(self):
a = cv.CreateMat(4, 4, cv.CV_8UC1)
a[0,0] = 50.0
b = cv.CreateMat(4, 4, cv.CV_8UC1)
b[0,0] = 4.0
d = cv.CreateMat(4, 4, cv.CV_8UC1)
cv.Add(a, b, d)
self.assertEqual(d[0,0], 54.0)
cv.Mul(a, b, d)
self.assertEqual(d[0,0], 200.0)
    def failing_test_cvtcolor(self):
        """Smoke-test CvtColor conversions (known failing; see test name prefix)."""
        src3 = self.get_sample("samples/c/lena.jpg")
        src1 = self.get_sample("samples/c/lena.jpg", 0)
        # Destination images of every channel count at three bit depths.
        dst8u = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_8U, c)) for c in (1,2,3,4)])
        dst16u = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_16U, c)) for c in (1,2,3,4)])
        dst32f = dict([(c,cv.CreateImage(cv.GetSize(src1), cv.IPL_DEPTH_32F, c)) for c in (1,2,3,4)])
        # RGB/BGR <-> Luv round trips; conversion codes looked up by name.
        for srcf in ["BGR", "RGB"]:
            for dstf in ["Luv"]:
                cv.CvtColor(src3, dst8u[3], eval("cv.CV_%s2%s" % (srcf, dstf)))
                cv.CvtColor(src3, dst32f[3], eval("cv.CV_%s2%s" % (srcf, dstf)))
                cv.CvtColor(src3, dst8u[3], eval("cv.CV_%s2%s" % (dstf, srcf)))
        # Bayer demosaicing from a single-channel source.
        for srcf in ["BayerBG", "BayerGB", "BayerGR"]:
            for dstf in ["RGB", "BGR"]:
                cv.CvtColor(src1, dst8u[3], eval("cv.CV_%s2%s" % (srcf, dstf)))
    def test_voronoi(self):
        """Insert random points into a Delaunay subdivision and check that the
        Voronoi diagram has one facet per distinct input point.
        NOTE(review): the assertLess/assertLessEqual/assertGreater defs below
        look like Python-2.6 compatibility shims that belong at class level;
        they appear interleaved here — confirm against the original file."""
        w,h = 500,500
        storage = cv.CreateMemStorage(0)
        def facet_edges(e0):
            # Walk the edges of the facet containing e0 (generator).
            e = e0
            while True:
                e = cv.Subdiv2DGetEdge(e, cv.CV_NEXT_AROUND_LEFT)
                yield e
                if e == e0:
                    break
        if sys.version_info[:2] == (2, 6):
            def assertLess(self, a, b, msg=None):
                if not a < b:
                    self.fail('%s not less than %s' % (repr(a), repr(b)))
        def areas(edges):
            # Collect each distinct facet (as its vertex list) exactly once.
            seen = []
            seensorted = []
            for edge in edges:
                pts = [ cv.Subdiv2DEdgeOrg(e) for e in facet_edges(edge) ]
                if not (None in pts):
                    l = [p.pt for p in pts]
                    ls = sorted(l)
                    if not(ls in seensorted):
                        seen.append(l)
                        seensorted.append(ls)
            return seen
        def assertLessEqual(self, a, b, msg=None):
            if not a <= b:
                self.fail('%s not less than or equal to %s' % (repr(a), repr(b)))
        for npoints in range(1, 200):
            points = [ (random.randrange(w), random.randrange(h)) for i in range(npoints) ]
            subdiv = cv.CreateSubdivDelaunay2D( (0,0,w,h), storage )
            for p in points:
                cv.SubdivDelaunay2DInsert( subdiv, p)
            cv.CalcSubdivVoronoi2D(subdiv)
            # One Voronoi facet per distinct inserted point.
            ars = areas([ cv.Subdiv2DRotateEdge(e, 1) for e in subdiv.edges ] + [ cv.Subdiv2DRotateEdge(e, 3) for e in subdiv.edges ])
            self.assert_(len(ars) == len(set(points)))
        def assertGreater(self, a, b, msg=None):
            if not a > b:
                self.fail('%s not greater than %s' % (repr(a), repr(b)))
        # Optional visualization, disabled.
        if False:
            img = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 3)
            cv.SetZero(img)
            def T(x): return int(x) # int(300+x/16)
            for pts in ars:
                cv.FillConvexPoly( img, [(T(x),T(y)) for (x,y) in pts], cv.RGB(100+random.randrange(156),random.randrange(256),random.randrange(256)), cv.CV_AA, 0 );
            for x,y in points:
                cv.Circle(img, (T(x), T(y)), 3, cv.RGB(0,0,0), -1)
            cv.ShowImage("snap", img)
            if cv.WaitKey(10) > 0:
                break
    def perf_test_pow(self):
        """Timing probe for cv.Pow at various exponents (perf_ prefix: not a
        correctness test; prints timings to stdout)."""
        mt = cv.CreateMat(1000, 1000, cv.CV_32FC1)
        dst = cv.CreateMat(1000, 1000, cv.CV_32FC1)
        rng = cv.RNG(0)
        cv.RandArr(rng, mt, cv.CV_RAND_UNI, 0, 1000.0)
        mt[0,0] = 10
        print
        # 2.4 repeated to observe steady-state timing of the non-integer path.
        for a in [0.5, 2.0, 2.3, 2.4, 3.0, 37.1786] + [2.4]*10:
            started = time.time()
            for i in range(10):
                cv.Pow(mt, dst, a)
            took = (time.time() - started) / 1e7
            print "%4.1f took %f ns" % (a, took * 1e9)
        print dst[0,0], 10 ** 2.4
    def test_access_row_col(self):
        """Verify slicing, GetRow(s)/GetCol(s)/GetDiag views and that views
        share storage with the source; also check type preservation."""
        src = cv.CreateImage((8,3), 8, 1)
        # Put these words
        #  Achilles
        #  Benedict
        #  Congreve
        # in an array (3 rows, 8 columns).
        # Then extract the array in various ways.
        for r,w in enumerate(("Achilles", "Benedict", "Congreve")):
            for c,v in enumerate(w):
                src[r,c] = ord(v)
        self.assertEqual(src.tostring(), "AchillesBenedictCongreve")
        self.assertEqual(src[:,:].tostring(), "AchillesBenedictCongreve")
        self.assertEqual(src[:,:4].tostring(), "AchiBeneCong")
        self.assertEqual(src[:,0].tostring(), "ABC")
        self.assertEqual(src[:,4:].tostring(), "llesdictreve")
        self.assertEqual(src[::2,:].tostring(), "AchillesCongreve")
        self.assertEqual(src[1:,:].tostring(), "BenedictCongreve")
        self.assertEqual(src[1:2,:].tostring(), "Benedict")
        self.assertEqual(src[::2,:4].tostring(), "AchiCong")
        # The mats share the same storage, so updating one should update them all
        lastword = src[2]
        self.assertEqual(lastword.tostring(), "Congreve")
        src[2,0] = ord('K')
        self.assertEqual(lastword.tostring(), "Kongreve")
        src[2,0] = ord('C')
        # 3-D case: fill a 2x3x4 mat with the letters
        # ABCD
        # EFGH
        # IJKL
        #
        # MNOP
        # QRST
        # UVWX
        mt = cv.CreateMatND([2,3,4], cv.CV_8UC1)
        for i in range(2):
            for j in range(3):
                for k in range(4):
                    mt[i,j,k] = ord('A') + k + 4 * (j + 3 * i)
        self.assertEqual(mt[:,:,:1].tostring(), "AEIMQU")
        self.assertEqual(mt[:,:1,:].tostring(), "ABCDMNOP")
        self.assertEqual(mt[:1,:,:].tostring(), "ABCDEFGHIJKL")
        self.assertEqual(mt[1,1].tostring(), "QRST")
        self.assertEqual(mt[:,::2,:].tostring(), "ABCDIJKLMNOPUVWX")
        # Exercise explicit GetRows
        self.assertEqual(cv.GetRows(src, 0, 3).tostring(), "AchillesBenedictCongreve")
        self.assertEqual(cv.GetRows(src, 0, 3, 1).tostring(), "AchillesBenedictCongreve")
        self.assertEqual(cv.GetRows(src, 0, 3, 2).tostring(), "AchillesCongreve")
        self.assertEqual(cv.GetRow(src, 0).tostring(), "Achilles")
        self.assertEqual(cv.GetCols(src, 0, 4).tostring(), "AchiBeneCong")
        self.assertEqual(cv.GetCol(src, 0).tostring(), "ABC")
        self.assertEqual(cv.GetCol(src, 1).tostring(), "ceo")
        self.assertEqual(cv.GetDiag(src, 0).tostring(), "Aen")
        # Check that matrix type is preserved by the various operators
        for mt in self.mat_types:
            m = cv.CreateMat(5, 3, mt)
            self.assertEqual(mt, cv.GetElemType(cv.GetRows(m, 0, 2)))
            self.assertEqual(mt, cv.GetElemType(cv.GetRow(m, 0)))
            self.assertEqual(mt, cv.GetElemType(cv.GetCols(m, 0, 2)))
            self.assertEqual(mt, cv.GetElemType(cv.GetCol(m, 0)))
            self.assertEqual(mt, cv.GetElemType(cv.GetDiag(m, 0)))
            self.assertEqual(mt, cv.GetElemType(m[0]))
            self.assertEqual(mt, cv.GetElemType(m[::2]))
            self.assertEqual(mt, cv.GetElemType(m[:,0]))
            self.assertEqual(mt, cv.GetElemType(m[:,:]))
            self.assertEqual(mt, cv.GetElemType(m[::2,:]))
    def test_addS_3D(self):
        """cv.AddS on a 3-D CvMatND adds a scalar to every element, for both
        32F and 64F types and several increments (including 0 and negative)."""
        for dim in [ [1,1,4], [2,2,3], [7,4,3] ]:
            for ty,ac in [ (cv.CV_32FC1, 'f'), (cv.CV_64FC1, 'd')]:
                mat = cv.CreateMatND(dim, ty)
                mat2 = cv.CreateMatND(dim, ty)
                for increment in [ 0, 3, -1 ]:
                    # Fill mat with 0..N-1 via a matching array.array buffer.
                    cv.SetData(mat, array.array(ac, range(dim[0] * dim[1] * dim[2])), 0)
                    cv.AddS(mat, increment, mat2)
                    for i in range(dim[0]):
                        for j in range(dim[1]):
                            for k in range(dim[2]):
                                self.assert_(mat2[i,j,k] == mat[i,j,k] + increment)
def test_buffers(self):
ar = array.array('f', [7] * (360*640))
m = cv.CreateMat(360, 640, cv.CV_32FC1)
cv.SetData(m, ar, 4 * 640)
self.assert_(m[0,0] == 7.0)
m = cv.CreateMatND((360, 640), cv.CV_32FC1)
cv.SetData(m, ar, 4 * 640)
self.assert_(m[0,0] == 7.0)
m = cv.CreateImage((640, 360), cv.IPL_DEPTH_32F, 1)
cv.SetData(m, ar, 4 * 640)
self.assert_(m[0,0] == 7.0)
    def xxtest_Filters(self):
        """Timing probe for cv.Smooth at odd kernel sizes 3..19
        (xx prefix disables it as a unit test; prints timings)."""
        print
        m = cv.CreateMat(360, 640, cv.CV_32FC1)
        d = cv.CreateMat(360, 640, cv.CV_32FC1)
        for k in range(3, 21, 2):
            started = time.time()
            for i in range(1000):
                cv.Smooth(m, m, param1=k)
            print k, "took", time.time() - started
def assertSame(self, a, b):
w,h = cv.GetSize(a)
d = cv.CreateMat(h, w, cv.CV_8UC1)
cv.AbsDiff(a, b, d)
self.assert_(cv.CountNonZero(d) == 0)
    def test_text(self):
        """cv.GetTextSize must bound the actual rendered extent of cv.PutText
        (within a small tolerance)."""
        img = cv.CreateImage((640,40), cv.IPL_DEPTH_8U, 1)
        cv.SetZero(img)
        font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1)
        message = "XgfooX"
        cv.PutText(img, message, (320,30), font, 255)
        ((w,h),bl) = cv.GetTextSize(message, font)
        # Find nonzero in X and Y
        Xs = []
        for x in range(640):
            cv.SetImageROI(img, (x, 0, 1, 40))
            Xs.append(cv.Sum(img)[0] > 0)
        def firstlast(l):
            # Index range [first True, one past last True) in a boolean list.
            return (l.index(True), len(l) - list(reversed(l)).index(True))
        Ys = []
        for y in range(40):
            cv.SetImageROI(img, (0, y, 640, 1))
            Ys.append(cv.Sum(img)[0] > 0)
        x0,x1 = firstlast(Xs)
        y0,y1 = firstlast(Ys)
        actual_width = x1 - x0
        actual_height = y1 - y0
        # actual_width can be up to 8 pixels smaller than GetTextSize says
        self.assert_(actual_width <= w)
        self.assert_((w - actual_width) <= 8)
        # actual_height can be up to 4 pixels smaller than GetTextSize says
        self.assert_(actual_height <= (h + bl))
        self.assert_(((h + bl) - actual_height) <= 4)
        cv.ResetImageROI(img)
        self.assert_(w != 0)
        self.assert_(h != 0)
    def test_sizes(self):
        """Create images/mats at awkward sizes (including sizes straddling
        alignment boundaries) and verify Set/Sum agree with the element count."""
        sizes = [ 1, 2, 3, 97, 255, 256, 257, 947 ]
        for w in sizes:
            for h in sizes:
                # Create an IplImage
                im = cv.CreateImage((w,h), cv.IPL_DEPTH_8U, 1)
                cv.Set(im, 1)
                self.assert_(cv.Sum(im)[0] == (w * h))
                del im
                # Create a CvMat
                mt = cv.CreateMat(h, w, cv.CV_8UC1)
                cv.Set(mt, 1)
                self.assert_(cv.Sum(mt)[0] == (w * h))
        # Same for N-dimensional mats of random small shapes, at every rank.
        random.seed(7)
        for dim in range(1, cv.CV_MAX_DIM + 1):
            for attempt in range(10):
                dims = [ random.choice([1,1,1,1,2,3]) for i in range(dim) ]
                mt = cv.CreateMatND(dims, cv.CV_8UC1)
                cv.SetZero(mt)
                self.assert_(cv.Sum(mt)[0] == 0)
                # Set to all-ones, verify the sum
                cv.Set(mt, 1)
                expected = 1
                for d in dims:
                    expected *= d
                self.assert_(cv.Sum(mt)[0] == expected)
    def test_random(self):
        """Exercise cv.RNG seeding, RandInt/RandReal ranges and RandArr on
        every mat type."""
        # Different seeds must yield different RandInt sequences.
        seeds = [ 0, 1, 2**48, 2**48 + 1 ]
        sequences = set()
        for s in seeds:
            rng = cv.RNG(s)
            sequences.add(str([cv.RandInt(rng) for i in range(10)]))
        self.assert_(len(seeds) == len(sequences))
        rng = cv.RNG(0)
        im = cv.CreateImage((1024,1024), cv.IPL_DEPTH_8U, 1)
        cv.RandArr(rng, im, cv.CV_RAND_UNI, 0, 256)
        cv.RandArr(rng, im, cv.CV_RAND_NORMAL, 128, 30)
        if 1:
            hist = cv.CreateHist([ 256 ], cv.CV_HIST_ARRAY, [ (0,255) ], 1)
            cv.CalcHist([im], hist)
        # RandReal must stay in [0, 1).
        rng = cv.RNG()
        for i in range(1000):
            v = cv.RandReal(rng)
            self.assert_(0 <= v)
            self.assert_(v < 1)
        # RandArr smoke test over every mode and mat type.
        for mode in [ cv.CV_RAND_UNI, cv.CV_RAND_NORMAL ]:
            for fmt in self.mat_types:
                mat = cv.CreateMat(64, 64, fmt)
                cv.RandArr(cv.RNG(), mat, mode, (0,0,0,0), (1,1,1,1))
    def test_MixChannels(self):
        """cv.MixChannels: check the documented RGBA->BGR+alpha example, then
        fuzz random source/destination channel assignments."""
        # First part - test the single case described in the documentation
        rgba = cv.CreateMat(100, 100, cv.CV_8UC4)
        bgr = cv.CreateMat(100, 100, cv.CV_8UC3)
        alpha = cv.CreateMat(100, 100, cv.CV_8UC1)
        cv.Set(rgba, (1,2,3,4))
        cv.MixChannels([rgba], [bgr, alpha], [
           (0, 2),    # rgba[0] -> bgr[2]
           (1, 1),    # rgba[1] -> bgr[1]
           (2, 0),    # rgba[2] -> bgr[0]
           (3, 3)     # rgba[3] -> alpha[0]
        ])
        self.assert_(bgr[0,0] == (3,2,1))
        self.assert_(alpha[0,0] == 4)
        # Second part. Choose random sets of sources and destinations,
        # fill them with known values, choose random channel assignments,
        # run cvMixChannels and check that the result is as expected.
        random.seed(1)
        for rows in [1,2,4,13,64,1000]:
            for cols in [1,2,4,13,64,1000]:
                for loop in range(5):
                    sources = [random.choice([1, 2, 3, 4]) for i in range(8)]
                    dests = [random.choice([1, 2, 3, 4]) for i in range(8)]
                    # make sure that fromTo does not have duplicates in dests, otherwise the result is not determined
                    while 1:
                        fromTo = [(random.randrange(-1, sum(sources)), random.randrange(sum(dests))) for i in range(random.randrange(1, 30))]
                        dests_set = list(set([j for (i, j) in fromTo]))
                        if len(dests_set) == len(dests):
                            break
                    # print sources
                    # print dests
                    # print fromTo
                    def CV_8UC(n):
                        # Map channel count 1..4 to the matching 8U type enum.
                        return [cv.CV_8UC1, cv.CV_8UC2, cv.CV_8UC3, cv.CV_8UC4][n-1]
                    source_m = [cv.CreateMat(rows, cols, CV_8UC(c)) for c in sources]
                    dest_m = [cv.CreateMat(rows, cols, CV_8UC(c)) for c in dests]
                    def m00(m):
                        # return the contents of the N channel mat m[0,0] as a N-length list
                        chans = cv.CV_MAT_CN(cv.GetElemType(m))
                        if chans == 1:
                            return [m[0,0]]
                        else:
                            return list(m[0,0])[:chans]
                    # Sources numbered from 50, destinations numbered from 100
                    for i in range(len(sources)):
                        s = sum(sources[:i]) + 50
                        cv.Set(source_m[i], (s, s+1, s+2, s+3))
                        self.assertEqual(m00(source_m[i]), [s, s+1, s+2, s+3][:sources[i]])
                    for i in range(len(dests)):
                        s = sum(dests[:i]) + 100
                        cv.Set(dest_m[i], (s, s+1, s+2, s+3))
                        self.assertEqual(m00(dest_m[i]), [s, s+1, s+2, s+3][:dests[i]])
                    # now run the sanity check
                    for i in range(len(sources)):
                        s = sum(sources[:i]) + 50
                        self.assertEqual(m00(source_m[i]), [s, s+1, s+2, s+3][:sources[i]])
                    for i in range(len(dests)):
                        s = sum(dests[:i]) + 100
                        self.assertEqual(m00(dest_m[i]), [s, s+1, s+2, s+3][:dests[i]])
                    cv.MixChannels(source_m, dest_m, fromTo)
                    # Channel j should now hold 50+i (its source index), or 0.0
                    # where the source index was -1.
                    expected = range(100, 100 + sum(dests))
                    for (i, j) in fromTo:
                        if i == -1:
                            expected[j] = 0.0
                        else:
                            expected[j] = 50 + i
                    actual = sum([m00(m) for m in dest_m], [])
                    self.assertEqual(sum([m00(m) for m in dest_m], []), expected)
def test_allocs(self):
mats = [ 0 for i in range(20) ]
for i in range(1000):
m = cv.CreateMat(random.randrange(10, 512), random.randrange(10, 512), cv.CV_8UC1)
j = random.randrange(len(mats))
mats[j] = m
cv.SetZero(m)
    def test_access(self):
        """Write then read every element of small float images in a shuffled
        order; also check GetDims on single-row slices of a CvMat."""
        cnames = { 1:cv.CV_32FC1, 2:cv.CV_32FC2, 3:cv.CV_32FC3, 4:cv.CV_32FC4 }
        for w in range(1,11):
            for h in range(2,11):
                for c in [1,2]:
                    # NOTE(review): the [1:] keeps only the IplImage case,
                    # skipping the CvMat — presumably deliberate; confirm.
                    for o in [ cv.CreateMat(h, w, cnames[c]), cv.CreateImage((w,h), cv.IPL_DEPTH_32F, c) ][1:]:
                        pattern = [ (i,j) for i in range(w) for j in range(h) ]
                        random.shuffle(pattern)
                        for k,(i,j) in enumerate(pattern):
                            if c == 1:
                                o[j,i] = k
                            else:
                                o[j,i] = (k,) * c
                        for k,(i,j) in enumerate(pattern):
                            if c == 1:
                                self.assert_(o[j,i] == k)
                            else:
                                self.assert_(o[j,i] == (k,)*c)
        # Every single-row view of a 2x3 mat should report dims (1, 3).
        test_mat = cv.CreateMat(2, 3, cv.CV_32FC1)
        cv.SetData(test_mat, array.array('f', range(6)), 12)
        self.assertEqual(cv.GetDims(test_mat[0]), (1, 3))
        self.assertEqual(cv.GetDims(test_mat[1]), (1, 3))
        self.assertEqual(cv.GetDims(test_mat[0:1]), (1, 3))
        self.assertEqual(cv.GetDims(test_mat[1:2]), (1, 3))
        self.assertEqual(cv.GetDims(test_mat[-1:]), (1, 3))
        self.assertEqual(cv.GetDims(test_mat[-1]), (1, 3))
    def xxxtest_corners(self):
        """Interactive corner/line-detection scratch test (xxx prefix disables
        it; needs local file foo-mono.png and a display)."""
        a = cv.LoadImage("foo-mono.png", 0)
        cv.AdaptiveThreshold(a, a, 255, param1=5)
        scribble = cv.CreateImage(cv.GetSize(a), 8, 3)
        cv.CvtColor(a, scribble, cv.CV_GRAY2BGR)
        if 0:
            eig_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
            temp_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
            pts = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 100, 0.04, 2, use_harris=1)
            for p in pts:
                cv.Circle( scribble, p, 1, cv.RGB(255,0,0), -1 )
            self.snap(scribble)
        canny = cv.CreateImage(cv.GetSize(a), 8, 1)
        cv.SubRS(a, 255, canny)
        self.snap(canny)
        li = cv.HoughLines2(canny,
                            cv.CreateMemStorage(),
                            cv.CV_HOUGH_STANDARD,
                            1,
                            math.pi/180,
                            60,
                            0,
                            0)
        # Draw each detected (rho, theta) line across the image.
        for (rho,theta) in li:
            print rho,theta
            c = math.cos(theta)
            s = math.sin(theta)
            x0 = c*rho
            y0 = s*rho
            cv.Line(scribble,
                    (x0 + 1000*(-s), y0 + 1000*c),
                    (x0 + -1000*(-s), y0 - 1000*c),
                    (0,255,0))
        self.snap(scribble)
    def test_calibration(self):
        """End-to-end smoke test of monocular and stereo calibration on a
        downloaded chessboard dataset (requires network access)."""
        def get_corners(mono, refine = False):
            # Find (and optionally sub-pixel refine) chessboard corners.
            (ok, corners) = cv.FindChessboardCorners(mono, (num_x_ints, num_y_ints), cv.CV_CALIB_CB_ADAPTIVE_THRESH | cv.CV_CALIB_CB_NORMALIZE_IMAGE)
            if refine and ok:
                corners = cv.FindCornerSubPix(mono, corners, (5,5), (-1,-1), ( cv.CV_TERMCRIT_EPS+cv.CV_TERMCRIT_ITER, 30, 0.1 ))
            return (ok, corners)
        def mk_object_points(nimages, squaresize = 1):
            # Planar (z=0) chessboard model points for nimages views.
            opts = cv.CreateMat(nimages * num_pts, 3, cv.CV_32FC1)
            for i in range(nimages):
                for j in range(num_pts):
                    opts[i * num_pts + j, 0] = (j / num_x_ints) * squaresize
                    opts[i * num_pts + j, 1] = (j % num_x_ints) * squaresize
                    opts[i * num_pts + j, 2] = 0
            return opts
        def mk_image_points(goodcorners):
            # Stack the detected corner coordinates into one (N*num_pts)x2 mat.
            ipts = cv.CreateMat(len(goodcorners) * num_pts, 2, cv.CV_32FC1)
            for (i, co) in enumerate(goodcorners):
                for j in range(num_pts):
                    ipts[i * num_pts + j, 0] = co[j][0]
                    ipts[i * num_pts + j, 1] = co[j][1]
            return ipts
        def mk_point_counts(nimages):
            # Per-view point-count column vector (constant num_pts).
            npts = cv.CreateMat(nimages, 1, cv.CV_32SC1)
            for i in range(nimages):
                npts[i, 0] = num_pts
            return npts
        def cvmat_iterator(cvmat):
            # Yield every element of a 2-D mat in row-major order.
            for i in range(cvmat.rows):
                for j in range(cvmat.cols):
                    yield cvmat[i,j]
        def image_from_archive(tar, name):
            """Load image from tarfile member without touching the filesystem."""
            member = tar.getmember(name)
            filedata = tar.extractfile(member).read()
            imagefiledata = cv.CreateMat(1, len(filedata), cv.CV_8UC1)
            cv.SetData(imagefiledata, filedata, len(filedata))
            return cv.DecodeImageM(imagefiledata)
        urllib.urlretrieve("http://opencv.itseez.com/data/camera_calibration.tar.gz", "camera_calibration.tar.gz")
        tf = tarfile.open("camera_calibration.tar.gz")
        num_x_ints = 8
        num_y_ints = 6
        num_pts = num_x_ints * num_y_ints
        leftimages = [image_from_archive(tf, "wide/left%04d.pgm" % i) for i in range(3, 15)]
        size = cv.GetSize(leftimages[0])
        # Monocular test
        if True:
            corners = [get_corners(i) for i in leftimages]
            goodcorners = [co for (im, (ok, co)) in zip(leftimages, corners) if ok]
            ipts = mk_image_points(goodcorners)
            opts = mk_object_points(len(goodcorners), .1)
            npts = mk_point_counts(len(goodcorners))
            intrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
            distortion = cv.CreateMat(4, 1, cv.CV_64FC1)
            cv.SetZero(intrinsics)
            cv.SetZero(distortion)
            # focal lengths have 1/1 ratio
            intrinsics[0,0] = 1.0
            intrinsics[1,1] = 1.0
            cv.CalibrateCamera2(opts, ipts, npts,
                                cv.GetSize(leftimages[0]),
                                intrinsics,
                                distortion,
                                cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
                                cv.CreateMat(len(goodcorners), 3, cv.CV_32FC1),
                                flags = 0) # cv.CV_CALIB_ZERO_TANGENT_DIST)
            # print "D =", list(cvmat_iterator(distortion))
            # print "K =", list(cvmat_iterator(intrinsics))
            newK = cv.CreateMat(3, 3, cv.CV_64FC1)
            cv.GetOptimalNewCameraMatrix(intrinsics, distortion, size, 1.0, newK)
            # print "newK =", list(cvmat_iterator(newK))
            mapx = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
            mapy = cv.CreateImage((640, 480), cv.IPL_DEPTH_32F, 1)
            for K in [ intrinsics, newK ]:
                cv.InitUndistortMap(K, distortion, mapx, mapy)
                for img in leftimages[:1]:
                    r = cv.CloneMat(img)
                    cv.Remap(img, r, mapx, mapy)
                    # cv.ShowImage("snap", r)
                    # cv.WaitKey()
        rightimages = [image_from_archive(tf, "wide/right%04d.pgm" % i) for i in range(3, 15)]
        # Stereo test
        if True:
            lcorners = [get_corners(i) for i in leftimages]
            rcorners = [get_corners(i) for i in rightimages]
            good = [(lco, rco) for ((lok, lco), (rok, rco)) in zip(lcorners, rcorners) if (lok and rok)]
            lipts = mk_image_points([l for (l, r) in good])
            ripts = mk_image_points([r for (l, r) in good])
            opts = mk_object_points(len(good), .108)
            npts = mk_point_counts(len(good))
            # NOTE(review): the two assignments below are immediately
            # overwritten — only flags = 0 takes effect.
            flags = cv.CV_CALIB_FIX_ASPECT_RATIO | cv.CV_CALIB_FIX_INTRINSIC
            flags = cv.CV_CALIB_SAME_FOCAL_LENGTH + cv.CV_CALIB_FIX_PRINCIPAL_POINT + cv.CV_CALIB_ZERO_TANGENT_DIST
            flags = 0
            T = cv.CreateMat(3, 1, cv.CV_64FC1)
            R = cv.CreateMat(3, 3, cv.CV_64FC1)
            lintrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
            ldistortion = cv.CreateMat(4, 1, cv.CV_64FC1)
            rintrinsics = cv.CreateMat(3, 3, cv.CV_64FC1)
            rdistortion = cv.CreateMat(4, 1, cv.CV_64FC1)
            lR = cv.CreateMat(3, 3, cv.CV_64FC1)
            rR = cv.CreateMat(3, 3, cv.CV_64FC1)
            lP = cv.CreateMat(3, 4, cv.CV_64FC1)
            rP = cv.CreateMat(3, 4, cv.CV_64FC1)
            lmapx = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
            lmapy = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
            rmapx = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
            rmapy = cv.CreateImage(size, cv.IPL_DEPTH_32F, 1)
            cv.SetIdentity(lintrinsics)
            cv.SetIdentity(rintrinsics)
            lintrinsics[0,2] = size[0] * 0.5
            lintrinsics[1,2] = size[1] * 0.5
            rintrinsics[0,2] = size[0] * 0.5
            rintrinsics[1,2] = size[1] * 0.5
            cv.SetZero(ldistortion)
            cv.SetZero(rdistortion)
            cv.StereoCalibrate(opts, lipts, ripts, npts,
                               lintrinsics, ldistortion,
                               rintrinsics, rdistortion,
                               size,
                               R, # R
                               T, # T
                               cv.CreateMat(3, 3, cv.CV_32FC1), # E
                               cv.CreateMat(3, 3, cv.CV_32FC1), # F
                               (cv.CV_TERMCRIT_ITER + cv.CV_TERMCRIT_EPS, 30, 1e-5),
                               flags)
            # Rectify and remap at each supported alpha.
            for a in [-1, 0, 1]:
                cv.StereoRectify(lintrinsics,
                                 rintrinsics,
                                 ldistortion,
                                 rdistortion,
                                 size,
                                 R,
                                 T,
                                 lR, rR, lP, rP,
                                 alpha = a)
                cv.InitUndistortRectifyMap(lintrinsics, ldistortion, lR, lP, lmapx, lmapy)
                cv.InitUndistortRectifyMap(rintrinsics, rdistortion, rR, rP, rmapx, rmapy)
                for l,r in zip(leftimages, rightimages)[:1]:
                    l_ = cv.CloneMat(l)
                    r_ = cv.CloneMat(r)
                    cv.Remap(l, l_, lmapx, lmapy)
                    cv.Remap(r, r_, rmapx, rmapy)
                    # cv.ShowImage("snap", l_)
                    # cv.WaitKey()
    def xxx_test_Disparity(self):
        """Disabled scratch test (xxx prefix): dumps type enum values then
        returns early; the timing code below the return never runs."""
        print
        for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F" ]:
            for c in [1,2,3,4]:
                nm = "%sC%d" % (t, c)
                print "int32 CV_%s=%d" % (nm, eval("cv.CV_%s" % nm))
        return
        # Dead code from here on (needs local f0-left.png / f0-right.png).
        integral = cv.CreateImage((641,481), cv.IPL_DEPTH_32S, 1)
        L = cv.LoadImage("f0-left.png", 0)
        R = cv.LoadImage("f0-right.png", 0)
        d = cv.CreateImage(cv.GetSize(L), cv.IPL_DEPTH_8U, 1)
        Rn = cv.CreateImage(cv.GetSize(L), cv.IPL_DEPTH_8U, 1)
        started = time.time()
        for i in range(100):
            cv.AbsDiff(L, R, d)
            cv.Integral(d, integral)
            cv.SetImageROI(R, (1, 1, 639, 479))
            cv.SetImageROI(Rn, (0, 0, 639, 479))
            cv.Copy(R, Rn)
            R = Rn
            cv.ResetImageROI(R)
        print 1e3 * (time.time() - started) / 100, "ms"
        # self.snap(d)
    def local_test_lk(self):
        """Local-only Lucas-Kanade tracking scratch test (needs a local
        track/ image sequence; local_ prefix keeps it out of the suite)."""
        seq = [cv.LoadImage("track/%06d.png" % i, 0) for i in range(40)]
        # NOTE(review): the first crit is immediately overwritten.
        crit = (cv.CV_TERMCRIT_ITER, 100, 0.1)
        crit = (cv.CV_TERMCRIT_EPS, 0, 0.001)
        for i in range(1,40):
            r = cv.CalcOpticalFlowPyrLK(seq[0], seq[i], None, None, [(32,32)], (7,7), 0, crit, 0)
            pos = r[0][0]
            #print pos, r[2]
            # Upscale both frames 16x and draw crosshairs at the tracked point.
            a = cv.CreateImage((1024,1024), 8, 1)
            b = cv.CreateImage((1024,1024), 8, 1)
            cv.Resize(seq[0], a, cv.CV_INTER_NN)
            cv.Resize(seq[i], b, cv.CV_INTER_NN)
            cv.Line(a, (0, 512), (1024, 512), 255)
            cv.Line(a, (512,0), (512,1024), 255)
            x,y = [int(c) for c in pos]
            cv.Line(b, (0, y*16), (1024, y*16), 255)
            cv.Line(b, (x*16,0), (x*16,1024), 255)
            #self.snapL([a,b])
    def local_test_Haar(self):
        """Local-only Haar face detection test (needs OPENCV_ROOT and a local
        Stu.jpg; local_ prefix keeps it out of the suite)."""
        import os
        hcfile = os.environ['OPENCV_ROOT'] + '/share/opencv/haarcascades/haarcascade_frontalface_default.xml'
        hc = cv.Load(hcfile)
        img = cv.LoadImage('Stu.jpg', 0)
        faces = cv.HaarDetectObjects(img, hc, cv.CreateMemStorage())
        self.assert_(len(faces) > 0)
        for (x,y,w,h),n in faces:
            cv.Rectangle(img, (x,y), (x+w,y+h), 255)
        #self.snap(img)
def test_create(self):
#""" CvCreateImage, CvCreateMat and the header-only form """
for (w,h) in [ (320,400), (640,480), (1024, 768) ]:
data = "z" * (w * h)
im = cv.CreateImage((w,h), 8, 1)
cv.SetData(im, data, w)
im2 = cv.CreateImageHeader((w,h), 8, 1)
cv.SetData(im2, data, w)
self.assertSame(im, im2)
m = cv.CreateMat(h, w, cv.CV_8UC1)
cv.SetData(m, data, w)
m2 = cv.CreateMatHeader(h, w, cv.CV_8UC1)
cv.SetData(m2, data, w)
self.assertSame(m, m2)
self.assertSame(im, m)
self.assertSame(im2, m2)
    def test_casts(self):
        """GetImage/GetMat conversions must share data (checked via the
        buffer's refcount) and preserve pixel content."""
        im = cv.GetImage(self.get_sample("samples/c/lena.jpg", 0))
        data = im.tostring()
        cv.SetData(im, data, cv.GetSize(im)[0])
        start_count = sys.getrefcount(data)
        # Conversions should produce same data
        self.assertSame(im, cv.GetImage(im))
        m = cv.GetMat(im)
        self.assertSame(im, m)
        self.assertSame(m, cv.GetImage(m))
        im2 = cv.GetImage(m)
        self.assertSame(im, im2)
        # Each live view should hold exactly one extra reference to data,
        # released as the views are deleted.
        self.assertEqual(sys.getrefcount(data), start_count + 2)
        del im2
        self.assertEqual(sys.getrefcount(data), start_count + 1)
        del m
        self.assertEqual(sys.getrefcount(data), start_count)
        del im
        self.assertEqual(sys.getrefcount(data), start_count - 1)
    def test_morphological(self):
        """Dilate/Erode/MorphologyEx: default arguments must all be equivalent,
        the source must stay untouched, and every element shape must work."""
        im = cv.CreateImage((128, 128), cv.IPL_DEPTH_8U, 1)
        cv.Resize(cv.GetImage(self.get_sample("samples/c/lena.jpg", 0)), im)
        dst = cv.CloneImage(im)
        # Check defaults by asserting that all these operations produce the same image
        funs = [
            lambda: cv.Dilate(im, dst),
            lambda: cv.Dilate(im, dst, None),
            lambda: cv.Dilate(im, dst, iterations = 1),
            lambda: cv.Dilate(im, dst, element = None),
            lambda: cv.Dilate(im, dst, iterations = 1, element = None),
            lambda: cv.Dilate(im, dst, element = None, iterations = 1),
        ]
        src_h = self.hashimg(im)
        hashes = set()
        for f in funs:
            f()
            hashes.add(self.hashimg(dst))
            self.assertNotEqual(src_h, self.hashimg(dst))
        # Source image should be untouched
        self.assertEqual(self.hashimg(im), src_h)
        # All results should be same
        self.assertEqual(len(hashes), 1)
        # self.snap(dst)
        # Smoke-test every structuring-element shape and size, plus a custom one.
        shapes = [eval("cv.CV_SHAPE_%s" % s) for s in ['RECT', 'CROSS', 'ELLIPSE']]
        elements = [cv.CreateStructuringElementEx(sz, sz, sz / 2 + 1, sz / 2 + 1, shape) for sz in [3, 4, 7, 20] for shape in shapes]
        elements += [cv.CreateStructuringElementEx(7, 7, 3, 3, cv.CV_SHAPE_CUSTOM, [1] * 49)]
        for e in elements:
            for iter in [1, 2]:
                cv.Dilate(im, dst, e, iter)
                cv.Erode(im, dst, e, iter)
                temp = cv.CloneImage(im)
                for op in ["OPEN", "CLOSE", "GRADIENT", "TOPHAT", "BLACKHAT"]:
                    cv.MorphologyEx(im, dst, temp, e, eval("cv.CV_MOP_%s" % op), iter)
    def test_getmat_nd(self):
        """cv.GetMat(allowND=True) must flatten/preserve CvMatND dimensions."""
        # 1D CvMatND should yield (N,1) CvMat
        matnd = cv.CreateMatND([13], cv.CV_8UC1)
        self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (13, 1))
        # 2D CvMatND should yield 2D CvMat
        matnd = cv.CreateMatND([11, 12], cv.CV_8UC1)
        self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (11, 12))
        if 0: # XXX - ticket #149
            # 3D CvMatND should yield (N,1) CvMat
            matnd = cv.CreateMatND([7, 8, 9], cv.CV_8UC1)
            self.assertEqual(cv.GetDims(cv.GetMat(matnd, allowND = True)), (7 * 8 * 9, 1))
def test_clipline(self):
self.assert_(cv.ClipLine((100,100), (-100,0), (500,0)) == ((0,0), (99,0)))
self.assert_(cv.ClipLine((100,100), (-100,0), (-200,0)) == None)
    def test_smoke_image_processing(self):
        """Smoke-test Sobel/Laplace/corner detectors at each aperture size."""
        src = self.get_sample("samples/c/lena.jpg", cv.CV_LOAD_IMAGE_GRAYSCALE)
        #dst = cv.CloneImage(src)
        for aperture_size in [1, 3, 5, 7]:
            dst_16s = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_16S, 1)
            dst_32f = cv.CreateImage(cv.GetSize(src), cv.IPL_DEPTH_32F, 1)
            cv.Sobel(src, dst_16s, 1, 1, aperture_size)
            cv.Laplace(src, dst_16s, aperture_size)
            cv.PreCornerDetect(src, dst_32f)
            # CornerEigenValsAndVecs writes 6 floats per pixel, hence 6x width.
            eigendst = cv.CreateImage((6*cv.GetSize(src)[0], cv.GetSize(src)[1]), cv.IPL_DEPTH_32F, 1)
            cv.CornerEigenValsAndVecs(src, eigendst, 8, aperture_size)
            cv.CornerMinEigenVal(src, dst_32f, 8, aperture_size)
            cv.CornerHarris(src, dst_32f, 8, aperture_size)
            cv.CornerHarris(src, dst_32f, 8, aperture_size, 0.1)
        #self.snap(dst)
    def test_fitline(self):
        """cv.FitLine must accept 2-D points, 3-D points and a convex hull."""
        cv.FitLine([ (1,1), (10,10) ], cv.CV_DIST_L2, 0, 0.01, 0.01)
        cv.FitLine([ (1,1,1), (10,10,10) ], cv.CV_DIST_L2, 0, 0.01, 0.01)
        a = self.get_sample("samples/c/lena.jpg", 0)
        eig_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
        temp_image = cv.CreateImage(cv.GetSize(a), cv.IPL_DEPTH_32F, 1)
        pts = cv.GoodFeaturesToTrack(a, eig_image, temp_image, 100, 0.04, 2, useHarris=1)
        hull = cv.ConvexHull2(pts, cv.CreateMemStorage(), return_points = 1)
        cv.FitLine(hull, cv.CV_DIST_L2, 0, 0.01, 0.01)
def test_moments(self):
im = self.get_sample("samples/c/lena.jpg", 0)
mo = cv.Moments(im)
for fld in ["m00", "m10", "m01", "m20", "m11", "m02", "m30", "m21", "m12", "m03", "mu20", "mu11", "mu02", "mu30", "mu21", "mu12", "mu03", "inv_sqrt_m00"]:
self.assert_(isinstance(getattr(mo, fld), float))
x = getattr(mo, fld)
self.assert_(isinstance(x, float))
orders = []
for x_order in range(4):
for y_order in range(4 - x_order):
orders.append((x_order, y_order))
# Just a smoke test for these three functions
[ cv.GetSpatialMoment(mo, xo, yo) for (xo,yo) in orders ]
[ cv.GetCentralMoment(mo, xo, yo) for (xo,yo) in orders ]
[ cv.GetNormalizedCentralMoment(mo, xo, yo) for (xo,yo) in orders ]
# Hu Moments we can do slightly better. Check that the first
# six are invariant wrt image reflection, and that the 7th
# is negated.
hu0 = cv.GetHuMoments(cv.Moments(im))
cv.Flip(im, im, 1)
hu1 = cv.GetHuMoments(cv.Moments(im))
self.assert_(len(hu0) == 7)
self.assert_(len(hu1) == 7)
for i in range(5):
self.assert_(abs(hu0[i] - hu1[i]) < 1e-6)
self.assert_(abs(hu0[i] + hu1[i]) < 1e-6)
    def test_encode(self):
        """cv.EncodeImage/DecodeImage: JPEG quality sweep, default quality,
        and a size-preserving round trip."""
        im = self.get_sample("samples/c/lena.jpg", 1)
        jpeg = cv.EncodeImage(".jpeg", im)
        # Smoke jpeg compression at various qualities
        sizes = dict([(qual, cv.EncodeImage(".jpeg", im, [cv.CV_IMWRITE_JPEG_QUALITY, qual]).cols) for qual in range(5, 100, 5)])
        # Check that the default QUALITY is 95
        self.assertEqual(cv.EncodeImage(".jpeg", im).cols, sizes[95])
        # Check that the 'round-trip' gives an image of the same size
        round_trip = cv.DecodeImage(cv.EncodeImage(".jpeg", im, [cv.CV_IMWRITE_JPEG_QUALITY, 10]))
        self.assert_(cv.GetSize(round_trip) == cv.GetSize(im))
    def test_reduce(self):
        """cv.Reduce over a fixed 2x3 mat: exercise the dim argument, the op
        argument, and both together, checking exact reduced values."""
        srcmat = cv.CreateMat(2, 3, cv.CV_32FC1)
        # 0 1 2
        # 3 4 5
        srcmat[0,0] = 0
        srcmat[0,1] = 1
        srcmat[0,2] = 2
        srcmat[1,0] = 3
        srcmat[1,1] = 4
        srcmat[1,2] = 5
        def doreduce(siz, rfunc):
            # Run rfunc into a dst of shape siz and return dst as a flat list.
            dst = cv.CreateMat(siz[0], siz[1], cv.CV_32FC1)
            rfunc(dst)
            if siz[0] != 1:
                return [dst[i,0] for i in range(siz[0])]
            else:
                return [dst[0,i] for i in range(siz[1])]
        # exercise dim
        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst)), [3, 5, 7])
        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, -1)), [3, 5, 7])
        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, 0)), [3, 5, 7])
        self.assertEqual(doreduce((2,1), lambda dst: cv.Reduce(srcmat, dst, 1)), [3, 12])
        # exercise op
        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_SUM)), [3, 5, 7])
        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_AVG)), [1.5, 2.5, 3.5])
        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_MAX)), [3, 4, 5])
        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, op = cv.CV_REDUCE_MIN)), [0, 1, 2])
        # exercise both dim and op
        self.assertEqual(doreduce((1,3), lambda dst: cv.Reduce(srcmat, dst, 0, cv.CV_REDUCE_MAX)), [3, 4, 5])
        self.assertEqual(doreduce((2,1), lambda dst: cv.Reduce(srcmat, dst, 1, cv.CV_REDUCE_MAX)), [2, 5])
    def test_operations(self):
        """Cross-check cv arithmetic (Add/Sub/Mul/Div/Pow/Abs/ConvertScale)
        against Python scalar arithmetic via an operator-overloading wrapper."""
        class Im:
            # A 1x32 float mat with Python arithmetic operators mapped onto
            # the corresponding cv element-wise operations.
            def __init__(self, data = None):
                self.m = cv.CreateMat(1, 32, cv.CV_32FC1)
                if data:
                    cv.SetData(self.m, array.array('f', data), 128)
            def __add__(self, other):
                r = Im()
                if isinstance(other, Im):
                    cv.Add(self.m, other.m, r.m)
                else:
                    cv.AddS(self.m, (other,), r.m)
                return r
            def __sub__(self, other):
                r = Im()
                if isinstance(other, Im):
                    cv.Sub(self.m, other.m, r.m)
                else:
                    cv.SubS(self.m, (other,), r.m)
                return r
            def __rsub__(self, other):
                r = Im()
                cv.SubRS(self.m, (other,), r.m)
                return r
            def __mul__(self, other):
                r = Im()
                if isinstance(other, Im):
                    cv.Mul(self.m, other.m, r.m)
                else:
                    cv.ConvertScale(self.m, r.m, other)
                return r
            def __rmul__(self, other):
                r = Im()
                cv.ConvertScale(self.m, r.m, other)
                return r
            def __div__(self, other):
                r = Im()
                if isinstance(other, Im):
                    cv.Div(self.m, other.m, r.m)
                else:
                    cv.ConvertScale(self.m, r.m, 1.0 / other)
                return r
            def __pow__(self, other):
                r = Im()
                cv.Pow(self.m, r.m, other)
                return r
            def __abs__(self):
                r = Im()
                cv.Abs(self.m, r.m)
                return r
            def __getitem__(self, i):
                return self.m[0,i]
        def verify(op):
            # Apply op to the two Im wrappers and per-element to the scalars;
            # every element must agree to ~4 decimal places.
            r = op(a, b)
            for i in range(32):
                expected = op(a[i], b[i])
                self.assertAlmostEqual(expected, r[i], 4)
        a = Im([random.randrange(1, 256) for i in range(32)])
        b = Im([random.randrange(1, 256) for i in range(32)])
        # simple operations first
        verify(lambda x, y: x + y)
        verify(lambda x, y: x + 3)
        verify(lambda x, y: x + 0)
        verify(lambda x, y: x + -8)
        verify(lambda x, y: x - y)
        verify(lambda x, y: x - 1)
        verify(lambda x, y: 1 - x)
        verify(lambda x, y: abs(x))
        verify(lambda x, y: x * y)
        verify(lambda x, y: x * 3)
        verify(lambda x, y: x / y)
        verify(lambda x, y: x / 2)
        for p in [-2, -1, -0.5, -0.1, 0, 0.1, 0.5, 1, 2 ]:
            verify(lambda x, y: (x ** p) + (y ** p))
        # Combinations...
        verify(lambda x, y: x - 4 * abs(y))
        verify(lambda x, y: abs(y) / x)
        # a polynomial
        verify(lambda x, y: 2 * x + 3 * (y ** 0.5))
def temp_test(self):
cv.temp_test()
    def failing_test_rand_GetStarKeypoints(self):
        """Known-failing repro: GetStarKeypoints on a 64x64 2-channel float
        matrix, with the original crashing arguments recorded below."""
        # GetStarKeypoints [<cvmat(type=4242400d rows=64 cols=64 step=512 )>, <cv.cvmemstorage object at 0xb7cc40d0>, (45, 0.73705234376883488, 0.64282591451367344, 0.1567738743689836, 3)]
        print cv.CV_MAT_CN(0x4242400d)
        mat = cv.CreateMat( 64, 64, cv.CV_32FC2)
        cv.GetStarKeypoints(mat, cv.CreateMemStorage(), (45, 0.73705234376883488, 0.64282591451367344, 0.1567738743689836, 3))
        print mat
def test_rand_PutText(self):
#""" Test for bug 2829336 """
mat = cv.CreateMat( 64, 64, cv.CV_8UC1)
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1, 1)
cv.PutText(mat, chr(127), (20, 20), font, 255)
    def failing_test_rand_FindNearestPoint2D(self):
        """Known-failing: exercises Subdiv2D edge queries and
        FindNearestPoint2D after inserting a single point."""
        subdiv = cv.CreateSubdivDelaunay2D((0,0,100,100), cv.CreateMemStorage())
        cv.SubdivDelaunay2DInsert( subdiv, (50, 50))
        cv.CalcSubdivVoronoi2D(subdiv)
        print
        for e in subdiv.edges:
            # dump each edge with its origin, plus the rotated edge's endpoints
            print e,
            print " ", cv.Subdiv2DEdgeOrg(e)
            print " ", cv.Subdiv2DEdgeOrg(cv.Subdiv2DRotateEdge(e, 1)), cv.Subdiv2DEdgeDst(cv.Subdiv2DRotateEdge(e, 1))
        print "nearest", cv.FindNearestPoint2D(subdiv, (1.0, 1.0))
class DocumentFragmentTests(OpenCVTests):
    """ Test the fragments of code that are included in the documentation """
    def setUp(self):
        OpenCVTests.setUp(self)
        # documentation fragments are imported from the current directory
        sys.path.append(".")
    def test_precornerdetect(self):
        # the fragment must return two response maps the same size as input
        from precornerdetect import precornerdetect
        im = self.get_sample("samples/cpp/right01.jpg", 0)
        imf = cv.CreateMat(im.rows, im.cols, cv.CV_32FC1)
        cv.ConvertScale(im, imf)
        (r0,r1) = precornerdetect(imf)
        for r in (r0, r1):
            self.assertEqual(im.cols, r.cols)
            self.assertEqual(im.rows, r.rows)
    def test_findstereocorrespondence(self):
        from findstereocorrespondence import findstereocorrespondence
        (l,r) = [self.get_sample("samples/cpp/tsukuba_%s.png" % c, cv.CV_LOAD_IMAGE_GRAYSCALE) for c in "lr"]
        (disparity_left, disparity_right) = findstereocorrespondence(l, r)
        disparity_left_visual = cv.CreateMat(l.rows, l.cols, cv.CV_8U)
        cv.ConvertScale(disparity_left, disparity_left_visual, -16)
        # self.snap(disparity_left_visual)
    # Tests to run first; check the handful of basic operations that the later tests rely on
    def test_calchist(self):
        # a horizontally flipped image must hash equal (H-S histogram is
        # flip-invariant); a different image must not
        from calchist import hs_histogram
        i1 = self.get_sample("samples/c/lena.jpg")
        i2 = self.get_sample("samples/cpp/building.jpg")
        i3 = cv.CloneMat(i1)
        cv.Flip(i3, i3, 1)
        h1 = hs_histogram(i1)
        h2 = hs_histogram(i2)
        h3 = hs_histogram(i3)
        self.assertEqual(self.hashimg(h1), self.hashimg(h3))
        self.assertNotEqual(self.hashimg(h1), self.hashimg(h2))
class Hackathon244Tests(NewOpenCVTests):
    """Smoke tests for the numpy-based cv2 API."""
    def test_int_array(self):
        a = np.array([-1, 2, -3, 4, -5])
        absa0 = np.abs(a)
        self.assert_(cv2.norm(a, cv2.NORM_L1) == 15)
        # cv2.absdiff against 0 must agree with numpy's elementwise abs
        absa1 = cv2.absdiff(a, 0)
        self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0)
    def test_imencode(self):
        a = np.zeros((480, 640), dtype=np.uint8)
        flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
        self.assertEqual(flag, True)
        # result is an Nx1 uint8 column of encoded bytes
        self.assertEqual(ajpg.dtype, np.uint8)
        self.assertGreater(ajpg.shape[0], 1)
        self.assertEqual(ajpg.shape[1], 1)
    def test_projectPoints(self):
        objpt = np.float64([[1,2,3]])
        # empty distortion coefficients and None must behave identically
        imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
        imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
        self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
        self.assertEqual(imgpt1.shape, imgpt0.shape)
        self.assertEqual(jac0.shape, jac1.shape)
        self.assertEqual(jac0.shape[0], 2*objpt.shape[0])
    def test_estimateAffine3D(self):
        # mapping a point set onto itself must recover (near) identity
        pattern_size = (11, 8)
        pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
        pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
        pattern_points *= 10
        (retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points)
        self.assertEqual(retval, 1)
        if cv2.norm(out[2,:]) < 1e-3:
            out[2,2]=1
        self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
        self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
    def test_fast(self):
        fd = cv2.FastFeatureDetector(30, True)
        img = self.get_sample("samples/cpp/right02.jpg", 0)
        img = cv2.medianBlur(img, 3)
        imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        keypoints = fd.detect(img)
        # expected keypoint count for this sample image at threshold 30
        self.assert_(600 <= len(keypoints) <= 700)
        for kpt in keypoints:
            self.assertNotEqual(kpt.response, 0)
    def check_close_angles(self, a, b, angle_delta):
        # angles compare modulo 360 degrees
        self.assert_(abs(a - b) <= angle_delta or
                     abs(360 - abs(a - b)) <= angle_delta)
    def check_close_pairs(self, a, b, delta):
        self.assertLessEqual(abs(a[0] - b[0]), delta)
        self.assertLessEqual(abs(a[1] - b[1]), delta)
    def check_close_boxes(self, a, b, delta, angle_delta):
        # a rotated box is ((cx, cy), (w, h), angle)
        self.check_close_pairs(a[0], b[0], delta)
        self.check_close_pairs(a[1], b[1], delta)
        self.check_close_angles(a[2], b[2], angle_delta)
    def test_geometry(self):
        npt = 100
        np.random.seed(244)
        a = np.random.randn(npt,2).astype('float32')*50 + 150
        img = np.zeros((300, 300, 3), dtype='uint8')
        be = cv2.fitEllipse(a)
        br = cv2.minAreaRect(a)
        mc, mr = cv2.minEnclosingCircle(a)
        # reference values recorded from a known-good run with seed 244
        be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
        br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)
        mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977
        self.check_close_boxes(be, be0, 5, 15)
        self.check_close_boxes(br, br0, 5, 15)
        self.check_close_pairs(mc, mc0, 5)
        self.assertLessEqual(abs(mr - mr0), 5)
if __name__ == '__main__':
    # NOTE(review): two version banners are printed (cv and cv2) -- this
    # looks like a leftover from merging two test entry points; confirm
    # which one is intended before removing either.
    print "testing", cv.__version__
    print "Testing OpenCV", cv2.__version__
    # fixed seed keeps the randomized tests reproducible between runs
    random.seed(0)
    unittest.main()
#    optlist, args = getopt.getopt(sys.argv[1:], 'l:rd')
#    loops = 1
#    shuffle = 0
#    doc_frags = False
#    for o,a in optlist:
#        if o == '-l':
#            loops = int(a)
#        if o == '-r':
#            shuffle = 1
#        if o == '-d':
#            doc_frags = True
#
#    cases = [PreliminaryTests, FunctionTests, AreaTests]
#    if doc_frags:
#        cases += [DocumentFragmentTests]
#    everything = [(tc, t) for tc in cases for t in unittest.TestLoader().getTestCaseNames(tc) ]
#    if len(args) == 0:
#        # cases = [NewTests]
#        args = everything
#    else:
#        args = [(tc, t) for (tc, t) in everything if t in args]
#
#    suite = unittest.TestSuite()
#    for l in range(loops):
#        if shuffle:
#            random.shuffle(args)
#        for tc,t in args:
#            suite.addTest(tc(t))
#    unittest.TextTestRunner(verbosity=2).run(suite)
#!/usr/bin/env python
import unittest
import random
import time
import math
import sys
import array
import urllib
import tarfile
import hashlib
import os
import getopt
import operator
import functools
import numpy as np
import cv2
import cv2.cv as cv
class NewOpenCVTests(unittest.TestCase):
    # self.image_cache: filename -> decoded image, filled lazily by get_sample()
    def get_sample(self, filename, iscolor = cv.CV_LOAD_IMAGE_COLOR):
        """Fetch and decode a sample image from the OpenCV GitHub mirror,
        caching the result per filename for the lifetime of the test."""
        if not filename in self.image_cache:
            filedata = urllib.urlopen("https://raw.github.com/Itseez/opencv/master/" + filename).read()
            self.image_cache[filename] = cv2.imdecode(np.fromstring(filedata, dtype=np.uint8), iscolor)
        return self.image_cache[filename]
    def setUp(self):
        self.image_cache = {}
    def hashimg(self, im):
        """ Compute a hash for an image, useful for image comparisons """
        return hashlib.md5(im.tostring()).digest()
    # Python 2.6's unittest lacks these assertion helpers; backfill them
    # so the tests run on 2.6 as well.
    if sys.version_info[:2] == (2, 6):
        def assertLess(self, a, b, msg=None):
            if not a < b:
                self.fail('%s not less than %s' % (repr(a), repr(b)))
        def assertLessEqual(self, a, b, msg=None):
            if not a <= b:
                self.fail('%s not less than or equal to %s' % (repr(a), repr(b)))
        def assertGreater(self, a, b, msg=None):
            if not a > b:
                self.fail('%s not greater than %s' % (repr(a), repr(b)))
# Tests to run first; check the handful of basic operations that the later tests rely on
class Hackathon244Tests(NewOpenCVTests):
    """Smoke tests for the numpy-based cv2 API."""
    def test_int_array(self):
        a = np.array([-1, 2, -3, 4, -5])
        absa0 = np.abs(a)
        self.assert_(cv2.norm(a, cv2.NORM_L1) == 15)
        # cv2.absdiff against 0 must agree with numpy's elementwise abs
        absa1 = cv2.absdiff(a, 0)
        self.assertEqual(cv2.norm(absa1, absa0, cv2.NORM_INF), 0)
    def test_imencode(self):
        a = np.zeros((480, 640), dtype=np.uint8)
        flag, ajpg = cv2.imencode("img_q90.jpg", a, [cv2.IMWRITE_JPEG_QUALITY, 90])
        self.assertEqual(flag, True)
        # result is an Nx1 uint8 column of encoded bytes
        self.assertEqual(ajpg.dtype, np.uint8)
        self.assertGreater(ajpg.shape[0], 1)
        self.assertEqual(ajpg.shape[1], 1)
    def test_projectPoints(self):
        objpt = np.float64([[1,2,3]])
        # empty distortion coefficients and None must behave identically
        imgpt0, jac0 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), np.float64([]))
        imgpt1, jac1 = cv2.projectPoints(objpt, np.zeros(3), np.zeros(3), np.eye(3), None)
        self.assertEqual(imgpt0.shape, (objpt.shape[0], 1, 2))
        self.assertEqual(imgpt1.shape, imgpt0.shape)
        self.assertEqual(jac0.shape, jac1.shape)
        self.assertEqual(jac0.shape[0], 2*objpt.shape[0])
    def test_estimateAffine3D(self):
        # mapping a point set onto itself must recover (near) identity
        pattern_size = (11, 8)
        pattern_points = np.zeros((np.prod(pattern_size), 3), np.float32)
        pattern_points[:,:2] = np.indices(pattern_size).T.reshape(-1, 2)
        pattern_points *= 10
        (retval, out, inliers) = cv2.estimateAffine3D(pattern_points, pattern_points)
        self.assertEqual(retval, 1)
        if cv2.norm(out[2,:]) < 1e-3:
            out[2,2]=1
        self.assertLess(cv2.norm(out, np.float64([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]])), 1e-3)
        self.assertEqual(cv2.countNonZero(inliers), pattern_size[0]*pattern_size[1])
    def test_fast(self):
        fd = cv2.FastFeatureDetector(30, True)
        img = self.get_sample("samples/cpp/right02.jpg", 0)
        img = cv2.medianBlur(img, 3)
        imgc = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        keypoints = fd.detect(img)
        # expected keypoint count for this sample image at threshold 30
        self.assert_(600 <= len(keypoints) <= 700)
        for kpt in keypoints:
            self.assertNotEqual(kpt.response, 0)
    def check_close_angles(self, a, b, angle_delta):
        # angles compare modulo 360 degrees
        self.assert_(abs(a - b) <= angle_delta or
                     abs(360 - abs(a - b)) <= angle_delta)
    def check_close_pairs(self, a, b, delta):
        self.assertLessEqual(abs(a[0] - b[0]), delta)
        self.assertLessEqual(abs(a[1] - b[1]), delta)
    def check_close_boxes(self, a, b, delta, angle_delta):
        # a rotated box is ((cx, cy), (w, h), angle)
        self.check_close_pairs(a[0], b[0], delta)
        self.check_close_pairs(a[1], b[1], delta)
        self.check_close_angles(a[2], b[2], angle_delta)
    def test_geometry(self):
        npt = 100
        np.random.seed(244)
        a = np.random.randn(npt,2).astype('float32')*50 + 150
        img = np.zeros((300, 300, 3), dtype='uint8')
        be = cv2.fitEllipse(a)
        br = cv2.minAreaRect(a)
        mc, mr = cv2.minEnclosingCircle(a)
        # reference values recorded from a known-good run with seed 244
        be0 = ((150.2511749267578, 150.77322387695312), (158.024658203125, 197.57696533203125), 37.57804489135742)
        br0 = ((161.2974090576172, 154.41793823242188), (199.2301483154297, 207.7177734375), -9.164555549621582)
        mc0, mr0 = (160.41790771484375, 144.55152893066406), 136.713500977
        self.check_close_boxes(be, be0, 5, 15)
        self.check_close_boxes(br, br0, 5, 15)
        self.check_close_pairs(mc, mc0, 5)
        self.assertLessEqual(abs(mr - mr0), 5)
if __name__ == '__main__':
    # fixed seed keeps the randomized tests reproducible between runs
    print "testing", cv2.__version__
    random.seed(0)
    unittest.main()
#!/usr/bin/python
import cv2.cv as cv
import time
# Minimal webcam preview: show frames from camera 0 until ESC is pressed.
cv.NamedWindow("camera", 1)
capture = cv.CaptureFromCAM(0)
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("camera", img)
    if cv.WaitKey(10) == 27:  # 27 == ESC key code
        break
cv.DestroyAllWindows()
#!/usr/bin/env python
import cv2.cv as cv
def is_rect_nonzero(r):
    """Return True when rect r = (x, y, w, h) has positive width and height."""
    _, _, width, height = r
    return width > 0 and height > 0
class CamShiftDemo:
    """Interactive CAMSHIFT tracker: drag a rectangle over an object in
    the camera view to seed a hue histogram, then track it frame-to-frame."""
    def __init__(self):
        self.capture = cv.CaptureFromCAM(0)
        cv.NamedWindow( "CamShiftDemo", 1 )
        cv.NamedWindow( "Histogram", 1 )
        cv.SetMouseCallback( "CamShiftDemo", self.on_mouse)
        self.drag_start = None # Set to (x,y) when mouse starts drag
        self.track_window = None # Set to rect when the mouse drag finishes
        print( "Keys:\n"
            " ESC - quit the program\n"
            " b - switch to/from backprojection view\n"
            "To initialize tracking, drag across the object with the mouse\n" )
    def hue_histogram_as_image(self, hist):
        """ Returns a nice representation of a hue histogram """
        histimg_hsv = cv.CreateImage( (320,200), 8, 3)
        mybins = cv.CloneMatND(hist.bins)
        # log scale the bins, then normalize the maximum to 255
        cv.Log(mybins, mybins)
        (_, hi, _, _) = cv.MinMaxLoc(mybins)
        cv.ConvertScale(mybins, mybins, 255. / hi)
        w,h = cv.GetSize(histimg_hsv)
        hdims = cv.GetDims(mybins)[0]
        for x in range(w):
            xh = (180 * x) / (w - 1) # hue sweeps from 0-180 across the image
            val = int(mybins[int(hdims * x / w)] * h / 255)
            cv.Rectangle( histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1)
            cv.Rectangle( histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1)
        histimg = cv.CreateImage( (320,200), 8, 3)
        cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR)
        return histimg
    def on_mouse(self, event, x, y, flags, param):
        # Mouse drag selects the rectangle whose hue histogram seeds tracking.
        if event == cv.CV_EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        if event == cv.CV_EVENT_LBUTTONUP:
            self.drag_start = None
            self.track_window = self.selection
        if self.drag_start:
            xmin = min(x, self.drag_start[0])
            ymin = min(y, self.drag_start[1])
            xmax = max(x, self.drag_start[0])
            ymax = max(y, self.drag_start[1])
            self.selection = (xmin, ymin, xmax - xmin, ymax - ymin)
    def run(self):
        """Main capture loop; ESC quits, 'b' toggles the backprojection view."""
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
        backproject_mode = False
        while True:
            frame = cv.QueryFrame( self.capture )
            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)
            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)
            # Run the cam-shift
            cv.CalcArrBackProject( [self.hue], backproject, hist )
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect
            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram
            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                # dim the whole frame, restore the selection at full brightness
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x,y,w,h = self.selection
                cv.Rectangle(frame, (x,y), (x+w,y+h), (255,255,255))
                sel = cv.GetSubRect(self.hue, self.selection )
                cv.CalcArrHist( [sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                # NOTE(review): track_box is only bound by the CamShift branch
                # above -- presumably the same condition always ran first; verify.
                cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )
            if not backproject_mode:
                cv.ShowImage( "CamShiftDemo", frame )
            else:
                cv.ShowImage( "CamShiftDemo", backproject)
            cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist))
            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
if __name__=="__main__":
    # run the interactive demo until the user quits with ESC
    demo = CamShiftDemo()
    demo.run()
    cv.DestroyAllWindows()
#!/usr/bin/python
import cv2.cv as cv
import sys
import urllib2
if __name__ == "__main__":
    cv.NamedWindow("win")
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        im = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
        im3 = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_COLOR)
    else:
        try: # try opening local copy of image
            fileName = '../cpp/left01.jpg'
            im = cv.LoadImageM(fileName, False)
            im3 = cv.LoadImageM(fileName, True)
        except: # if local copy cannot be opened, try downloading it
            # NOTE(review): bare except also swallows KeyboardInterrupt etc.
            url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/cpp/left01.jpg'
            filedata = urllib2.urlopen(url).read()
            imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
            cv.SetData(imagefiledata, filedata, len(filedata))
            im = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)
            im3 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    # detection runs on the grayscale image; corners are drawn on the color one
    chessboard_dim = ( 9, 6 )
    found_all, corners = cv.FindChessboardCorners( im, chessboard_dim )
    print found_all, len(corners)
    cv.DrawChessboardCorners( im3, chessboard_dim, corners, found_all )
    cv.ShowImage("win", im3);
    cv.WaitKey()
    cv.DestroyAllWindows()
#! /usr/bin/env python
print "OpenCV Python version of contours"
# import the necessary things for OpenCV
import cv2.cv as cv
# some default constants
_SIZE = 500
_DEFAULT_LEVEL = 3
# definition of some colors (4-tuples are BGRA scalars)
_red = (0, 0, 255, 0);
_green = (0, 255, 0, 0);
_white = cv.RealScalar (255)
_black = cv.RealScalar (0)
# the callback on the trackbar, to set the level of contours we want
# to display
def on_trackbar (position):
    """Trackbar callback: redraw the contour window at the display level
    selected by the slider (slider value minus 3); reads the module-level
    'contours' produced by cv.FindContours in the main block."""
    # create the image for putting in it the founded contours
    contours_image = cv.CreateImage ( (_SIZE, _SIZE), 8, 3)
    # compute the real level of display, given the current position
    levels = position - 3
    # initialisation
    _contours = contours
    if levels <= 0:
        # zero or negative value
        # => get to the nearest face to make it look more funny
        _contours = contours.h_next().h_next().h_next()
    # first, clear the image where we will draw contours
    cv.SetZero (contours_image)
    # draw contours in red and green
    cv.DrawContours (contours_image, _contours,
                     _red, _green,
                     levels, 3, cv.CV_AA,
                     (0, 0))
    # finally, show the image
    cv.ShowImage ("contours", contours_image)
if __name__ == '__main__':
    # create the image where we want to display results
    image = cv.CreateImage ( (_SIZE, _SIZE), 8, 1)
    # start with an empty image
    cv.SetZero (image)
    # draw the original picture: each loop iteration draws one cartoon
    # face (head, eyes, pupils, nose, mouth, ears) at an offset (dx, dy)
    for i in range (6):
        dx = (i % 2) * 250 - 30
        dy = (i / 2) * 150
        cv.Ellipse (image,
                    (dx + 150, dy + 100),
                    (100, 70),
                    0, 0, 360, _white, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 115, dy + 70),
                    (30, 20),
                    0, 0, 360, _black, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 185, dy + 70),
                    (30, 20),
                    0, 0, 360, _black, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 115, dy + 70),
                    (15, 15),
                    0, 0, 360, _white, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 185, dy + 70),
                    (15, 15),
                    0, 0, 360, _white, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 115, dy + 70),
                    (5, 5),
                    0, 0, 360, _black, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 185, dy + 70),
                    (5, 5),
                    0, 0, 360, _black, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 150, dy + 100),
                    (10, 5),
                    0, 0, 360, _black, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 150, dy + 150),
                    (40, 10),
                    0, 0, 360, _black, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 27, dy + 100),
                    (20, 35),
                    0, 0, 360, _white, -1, 8, 0)
        cv.Ellipse (image,
                    (dx + 273, dy + 100),
                    (20, 35),
                    0, 0, 360, _white, -1, 8, 0)
    # create window and display the original picture in it
    cv.NamedWindow ("image", 1)
    cv.ShowImage ("image", image)
    # create the storage area
    storage = cv.CreateMemStorage (0)
    # find the contours
    contours = cv.FindContours(image,
                               storage,
                               cv.CV_RETR_TREE,
                               cv.CV_CHAIN_APPROX_SIMPLE,
                               (0,0))
    # comment this out if you do not want approximation
    contours = cv.ApproxPoly (contours,
                              storage,
                              cv.CV_POLY_APPROX_DP, 3, 1)
    # create the window for the contours
    cv.NamedWindow ("contours", 1)
    # create the trackbar, to enable the change of the displayed level
    cv.CreateTrackbar ("levels+3", "contours", 3, 7, on_trackbar)
    # call one time the callback, so we will have the 1st display done
    on_trackbar (_DEFAULT_LEVEL)
    # wait a key pressed to end
    cv.WaitKey (0)
    cv.DestroyAllWindows()
#! /usr/bin/env python
print "OpenCV Python version of convexhull"
# import the necessary things for OpenCV
import cv2.cv as cv
# to generate random values
import random
# how many points we want at max (each frame uses 1.._MAX_POINTS points)
_MAX_POINTS = 100
if __name__ == '__main__':
    # main object to get random values from
    my_random = random.Random ()
    # create the image where we want to display results
    image = cv.CreateImage ( (500, 500), 8, 3)
    # create the window to put the image in
    cv.NamedWindow ('hull', cv.CV_WINDOW_AUTOSIZE)
    while True:
        # do forever (until ESC)
        # get a random number of points
        count = my_random.randrange (0, _MAX_POINTS) + 1
        # initialisations
        points = []
        for i in range (count):
            # generate a random point within the central half of the image
            points.append ( (
                my_random.randrange (0, image.width / 2) + image.width / 4,
                my_random.randrange (0, image.width / 2) + image.width / 4
                ))
        # compute the convex hull
        storage = cv.CreateMemStorage(0)
        hull = cv.ConvexHull2 (points, storage, cv.CV_CLOCKWISE, 1)
        # start with an empty image
        cv.SetZero (image)
        # draw all the points as circles in red
        for i in range (count):
            cv.Circle (image, points [i], 2,
                       (0, 0, 255, 0),
                       cv.CV_FILLED, cv.CV_AA, 0)
        # Draw the convex hull as a closed polyline in green
        cv.PolyLine(image, [hull], 1, cv.RGB(0,255,0), 1, cv.CV_AA)
        # display the final image
        cv.ShowImage ('hull', image)
        # handle events, and wait a key pressed
        k = cv.WaitKey (0) % 0x100
        if k == 27:
            # user has press the ESC key, so exit
            break
    cv.DestroyAllWindows()
#!/usr/bin/python
"""
Find Squares in image by finding countours and filtering
"""
#Results slightly different from C version on same images, but is
#otherwise ok
import math
import cv2.cv as cv
def angle(pt1, pt2, pt0):
"calculate angle contained by 3 points(x, y)"
dx1 = pt1[0] - pt0[0]
dy1 = pt1[1] - pt0[1]
dx2 = pt2[0] - pt0[0]
dy2 = pt2[1] - pt0[1]
nom = dx1*dx2 + dy1*dy2
denom = math.sqrt( (dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10 )
ang = nom / denom
return ang
def is_square(contour):
    """
    Squareness checker.

    A square-like contour must:
      - have 4 vertices after approximation,
      - have a relatively large area (filters out noisy contours),
      - be convex,
      - have all corner angles close to 90 degrees (cos(ang) ~ 0).

    The absolute value of the area is used because the signed area depends
    on the contour orientation.
    """
    area = math.fabs(cv.ContourArea(contour))
    isconvex = cv.CheckContourConvexity(contour)
    if len(contour) == 4 and area > 1000 and isconvex:
        # track the maximum cosine over the joint edges (minimum angle)
        max_cosine = 0
        for i in range(1, 4):
            cosine = math.fabs(angle(contour[i - 2], contour[i], contour[i - 1]))
            max_cosine = max(max_cosine, cosine)
        # all cosines small => every corner is close to 90 degrees
        return max_cosine < 0.3
    return False
def find_squares_from_binary( gray ):
    """
    use contour search to find squares in binary image
    returns list of numpy arrays containing 4 points
    """
    squares = []
    storage = cv.CreateMemStorage(0)
    contours = cv.FindContours(gray, storage, cv.CV_RETR_TREE, cv.CV_CHAIN_APPROX_SIMPLE, (0,0))
    # fresh storage for the approximated polygons
    storage = cv.CreateMemStorage(0)
    while contours:
        #approximate contour with accuracy proportional to the contour perimeter
        arclength = cv.ArcLength(contours)
        polygon = cv.ApproxPoly( contours, storage, cv.CV_POLY_APPROX_DP, arclength * 0.02, 0)
        if is_square(polygon):
            # keep only the first four vertices of the accepted polygon
            squares.append(polygon[0:4])
        # advance to the next top-level contour
        contours = contours.h_next()
    return squares
def find_squares4(color_img):
    """
    Finds multiple squares in image
    Steps:
    -Use Canny edge to highlight contours, and dilation to connect
    the edge segments.
    -Threshold the result to binary edge tokens
    -Use cv.FindContours: returns a cv.CvSequence of cv.CvContours
    -Filter each candidate: use Approx poly, keep only contours with 4 vertices,
    enough area, and ~90deg angles.
    Return all squares contours in one flat list of arrays, 4 x,y points each.
    """
    #select even sizes only ('& -2' clears the lowest bit)
    width, height = (color_img.width & -2, color_img.height & -2 )
    timg = cv.CloneImage( color_img ) # make a copy of input image
    gray = cv.CreateImage( (width,height), 8, 1 )
    # select the maximum ROI in the image
    cv.SetImageROI( timg, (0, 0, width, height) )
    # down-scale and upscale the image to filter out the noise
    pyr = cv.CreateImage( (width/2, height/2), 8, 3 )
    cv.PyrDown( timg, pyr, 7 )
    cv.PyrUp( pyr, timg, 7 )
    tgray = cv.CreateImage( (width,height), 8, 1 )
    squares = []
    # Find squares in every color plane of the image
    # Two methods, we use both:
    # 1. Canny to catch squares with gradient shading. Use upper threshold
    # from slider, set the lower to 0 (which forces edges merging). Then
    # dilate canny output to remove potential holes between edge segments.
    # 2. Binary thresholding at multiple levels
    N = 11
    for c in [0, 1, 2]:
        #extract the c-th color plane
        cv.SetImageCOI( timg, c+1 );
        cv.Copy( timg, tgray, None );
        cv.Canny( tgray, gray, 0, 50, 5 )
        cv.Dilate( gray, gray)
        squares = squares + find_squares_from_binary( gray )
        # Look for more squares at several threshold levels
        for l in range(1, N):
            cv.Threshold( tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY )
            squares = squares + find_squares_from_binary( gray )
    return squares
# drawing colors, in OpenCV's BGR channel order
RED = (0,0,255)
GREEN = (0,255,0)
def draw_squares( color_img, squares ):
    """Overlay every 4-point square on color_img as a closed polygon,
    alternating RED and GREEN, then refresh the display window."""
    palette = (RED, GREEN)
    for idx, quad in enumerate(squares):
        # even-indexed squares are red, odd-indexed green
        cv.PolyLine(color_img, [quad], True, palette[idx % 2], 3, cv.CV_AA, 0)
    cv.ShowImage(WNDNAME, color_img)
# title of the shared display window
WNDNAME = "Squares Demo"
def main():
    """Open test color images, create display window, start the search"""
    cv.NamedWindow(WNDNAME, 1)
    for name in [ "../c/pic%d.png" % i for i in [1, 2, 3, 4, 5, 6] ]:
        img0 = cv.LoadImage(name, 1)
        try:
            img0
        except ValueError:
            print "Couldn't load %s\n" % name
            continue
        # NOTE(review): the try/except above can never fire -- evaluating
        # the name 'img0' does not raise ValueError; a failed load is
        # presumably signalled some other way. Verify against cv.LoadImage.
        # slider deleted from C version, same here and use fixed Canny param=50
        img = cv.CloneImage(img0)
        cv.ShowImage(WNDNAME, img)
        # force the image processing
        draw_squares( img, find_squares4( img ) )
        # wait for key.
        if cv.WaitKey(-1) % 0x100 == 27:
            break
if __name__ == "__main__":
    # run the demo over the bundled sample pictures, then clean up windows
    main()
    cv.DestroyAllWindows()
#!/usr/bin/python
import cv2.cv as cv
import urllib2
from sys import argv
def load_sample(name=None):
    """Load a color sample image.

    Priority: a path given on the command line; otherwise the local file
    *name*; if that cannot be opened, download the copy hosted at
    code.opencv.org.  Raises ValueError when neither a command-line
    argument nor *name* was supplied.
    """
    if len(argv) > 1:
        return cv.LoadImage(argv[1], cv.CV_LOAD_IMAGE_COLOR)
    if name is None:
        # BUGFIX: the old code fell through with img0 unbound and raised
        # NameError at the return; report the misuse explicitly instead.
        raise ValueError("no image name given and no command-line argument")
    try:
        return cv.LoadImage(name, cv.CV_LOAD_IMAGE_COLOR)
    except IOError:
        # local copy missing: fetch the raw bytes and decode them in memory
        urlbase = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/'
        basename = name.split('/')[-1]  # renamed from 'file' (shadowed builtin)
        filedata = urllib2.urlopen(urlbase + basename).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        return cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
#!/usr/bin/python
"""
the script demostrates iterative construction of
delaunay triangulation and voronoi tesselation
Original Author (C version): ?
Converted to Python by: Roman Stanchak
"""
import cv2.cv as cv
import random
def draw_subdiv_point( img, fp, color ):
    """Draw a filled 3-pixel circle at floating-point position fp."""
    center = (cv.Round(fp[0]), cv.Round(fp[1]))
    cv.Circle(img, center, 3, color, cv.CV_FILLED, 8, 0)
def draw_subdiv_edge( img, edge, color ):
    """Draw one subdivision edge as an anti-aliased line segment; edges
    with a missing endpoint are skipped."""
    org_pt = cv.Subdiv2DEdgeOrg(edge)
    dst_pt = cv.Subdiv2DEdgeDst(edge)
    if not (org_pt and dst_pt):
        return
    org = org_pt.pt
    dst = dst_pt.pt
    start = (cv.Round(org[0]), cv.Round(org[1]))
    end = (cv.Round(dst[0]), cv.Round(dst[1]))
    cv.Line(img, start, end, color, 1, cv.CV_AA, 0)
def draw_subdiv( img, subdiv, delaunay_color, voronoi_color ):
    """Render every edge of the subdivision: the rotated (Voronoi) edge
    first, then the Delaunay edge on top."""
    for e in subdiv.edges:
        draw_subdiv_edge(img, cv.Subdiv2DRotateEdge(e, 1), voronoi_color)
        draw_subdiv_edge(img, e, delaunay_color)
def locate_point( subdiv, fp, img, active_color ):
    """Highlight the facet containing point fp: walk its boundary edges,
    drawing each in active_color, then mark the point itself."""
    (res, e0) = cv.Subdiv2DLocate( subdiv, fp );
    if res in [ cv.CV_PTLOC_INSIDE, cv.CV_PTLOC_ON_EDGE ]:
        e = e0
        while True:
            draw_subdiv_edge( img, e, active_color );
            # step to the next edge around the left facet
            e = cv.Subdiv2DGetEdge(e, cv.CV_NEXT_AROUND_LEFT);
            if e == e0:
                break
    draw_subdiv_point( img, fp, active_color );
def draw_subdiv_facet( img, edge ):
    """Fill the facet to the left of 'edge' with a random color and
    outline it in black, marking the facet's site point."""
    t = edge;
    count = 0;
    # count number of edges in facet
    while count == 0 or t != edge:
        count+=1
        t = cv.Subdiv2DGetEdge( t, cv.CV_NEXT_AROUND_LEFT );
    buf = []
    # gather points
    t = edge;
    for i in range(count):
        # NOTE(review): edge handles <= 4 appear to mark invalid/boundary
        # edges here -- TODO confirm against the Subdiv2D representation
        assert t>4
        pt = cv.Subdiv2DEdgeOrg( t );
        if not pt:
            break;
        buf.append( ( cv.Round(pt.pt[0]), cv.Round(pt.pt[1]) ) );
        t = cv.Subdiv2DGetEdge( t, cv.CV_NEXT_AROUND_LEFT );
    # only draw when every boundary vertex was resolved
    if( len(buf)==count ):
        pt = cv.Subdiv2DEdgeDst( cv.Subdiv2DRotateEdge( edge, 1 ));
        cv.FillConvexPoly( img, buf, cv.RGB(random.randrange(256),random.randrange(256),random.randrange(256)), cv.CV_AA, 0 );
        cv.PolyLine( img, [buf], 1, cv.RGB(0,0,0), 1, cv.CV_AA, 0);
        draw_subdiv_point( img, pt.pt, cv.RGB(0,0,0));
def paint_voronoi( subdiv, img ):
    """Fill the Voronoi cells on both sides of every subdivision edge."""
    cv.CalcSubdivVoronoi2D(subdiv)
    for e in subdiv.edges:
        # left facet
        draw_subdiv_facet(img, cv.Subdiv2DRotateEdge(e, 1))
        # right facet
        draw_subdiv_facet(img, cv.Subdiv2DRotateEdge(e, 3))
if __name__ == '__main__':
    win = "source";
    rect = ( 0, 0, 600, 600 );
    active_facet_color = cv.RGB( 255, 0, 0 );
    delaunay_color = cv.RGB( 0,0,0);
    voronoi_color = cv.RGB(0, 180, 0);
    bkgnd_color = cv.RGB(255,255,255);
    img = cv.CreateImage( (rect[2],rect[3]), 8, 3 );
    cv.Set( img, bkgnd_color );
    cv.NamedWindow( win, 1 );
    storage = cv.CreateMemStorage(0);
    subdiv = cv.CreateSubdivDelaunay2D( rect, storage );
    print "Delaunay triangulation will be build now interactively."
    print "To stop the process, press any key\n";
    for i in range(200):
        # random point strictly inside the bounding rect (5 px margin)
        fp = ( random.random()*(rect[2]-10)+5, random.random()*(rect[3]-10)+5 )
        # highlight the facet containing the new point before inserting it
        locate_point( subdiv, fp, img, active_facet_color );
        cv.ShowImage( win, img );
        if( cv.WaitKey( 100 ) >= 0 ):
            break;
        cv.SubdivDelaunay2DInsert( subdiv, fp );
        cv.CalcSubdivVoronoi2D( subdiv );
        # redraw the whole triangulation after each insertion
        cv.Set( img, bkgnd_color );
        draw_subdiv( img, subdiv, delaunay_color, voronoi_color );
        cv.ShowImage( win, img );
        if( cv.WaitKey( 100 ) >= 0 ):
            break;
    # final view: the filled Voronoi tessellation
    cv.Set( img, bkgnd_color );
    paint_voronoi( subdiv, img );
    cv.ShowImage( win, img );
    cv.WaitKey(0);
    cv.DestroyWindow( win );
#!/usr/bin/python
import cv2.cv as cv
import sys
import urllib2
# grey-level histogram configuration: 64 bins over pixel range [0, 256)
hist_size = 64
range_0 = [0, 256]
ranges = [ range_0 ]
class DemHist:
    """Interactive brightness/contrast demo with a live grey-level
    histogram; both trackbars are centred at 100 (== no change)."""
    def __init__(self, src_image):
        self.src_image = src_image
        self.dst_image = cv.CloneMat(src_image)
        self.hist_image = cv.CreateImage((320, 200), 8, 1)
        self.hist = cv.CreateHist([hist_size], cv.CV_HIST_ARRAY, ranges, 1)
        self.brightness = 0
        self.contrast = 0
        cv.NamedWindow("image", 0)
        cv.NamedWindow("histogram", 0)
        cv.CreateTrackbar("brightness", "image", 100, 200, self.update_brightness)
        cv.CreateTrackbar("contrast", "image", 100, 200, self.update_contrast)
        self.update_brightcont()
    def update_brightness(self, val):
        # trackbar range [0, 200] maps to brightness [-100, 100]
        self.brightness = val - 100
        self.update_brightcont()
    def update_contrast(self, val):
        self.contrast = val - 100
        self.update_brightcont()
    def update_brightcont(self):
        """Re-apply brightness/contrast to the source and redraw both the
        image window and its histogram."""
        # The algorithm is by Werner D. Streidt
        # (http://visca.com/ffactory/archives/5-99/msg00021.html)
        if self.contrast > 0:
            delta = 127. * self.contrast / 100
            a = 255. / (255. - delta * 2)
            b = a * (self.brightness - delta)
        else:
            delta = -128. * self.contrast / 100
            a = (256. - delta * 2) / 255.
            b = a * self.brightness + delta
        cv.ConvertScale(self.src_image, self.dst_image, a, b)
        cv.ShowImage("image", self.dst_image)
        cv.CalcArrHist([self.dst_image], self.hist)
        (min_value, max_value, _, _) = cv.GetMinMaxHistValue(self.hist)
        # scale bins so the tallest bar fills the histogram image height
        cv.Scale(self.hist.bins, self.hist.bins, float(self.hist_image.height) / max_value, 0)
        cv.Set(self.hist_image, cv.ScalarAll(255))
        bin_w = round(float(self.hist_image.width) / hist_size)
        for i in range(hist_size):
            cv.Rectangle(self.hist_image, (int(i * bin_w), self.hist_image.height),
                         (int((i + 1) * bin_w), self.hist_image.height - cv.Round(self.hist.bins[i])),
                         cv.ScalarAll(0), -1, 8, 0)
        cv.ShowImage("histogram", self.hist_image)
if __name__ == "__main__":
    # Load the source image.
    if len(sys.argv) > 1:
        src_image = cv.GetMat(cv.LoadImage(sys.argv[1], 0))
    else:
        # no argument: fall back to downloading the stock baboon image
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/baboon.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        src_image = cv.DecodeImageM(imagefiledata, 0)
    dh = DemHist(src_image)
    cv.WaitKey(0)
    cv.DestroyAllWindows()
#!/usr/bin/python
import cv2.cv as cv
import sys
import urllib2
# Rearrange the quadrants of Fourier image so that the origin is at
# the image center
# src & dst arrays of equal size & type
def cvShiftDFT(src_arr, dst_arr):
    """Rearrange the quadrants of a Fourier image so the origin is at the center.

    src_arr and dst_arr must have equal size and type; they may be the
    same array, in which case the shift is done in place via a temporary
    quarter-size buffer.

    Raises ValueError on size or type mismatch.
    """
    size = cv.GetSize(src_arr)
    dst_size = cv.GetSize(dst_arr)

    if dst_size != size:
        # BUGFIX: the original passed the C macros __FILE__/__LINE__ to
        # cv.Error(); those names do not exist in Python, so the error path
        # itself raised NameError. Raise a normal Python exception instead.
        raise ValueError("cv.ShiftDFT: Source and Destination arrays must have equal sizes")

    if src_arr is dst_arr:
        tmp = cv.CreateMat(size[1]/2, size[0]/2, cv.GetElemType(src_arr))

    cx = size[0] / 2  # image center
    cy = size[1] / 2

    # Source quadrants (numbered clockwise from top-left).
    q1 = cv.GetSubRect( src_arr, (0,0,cx, cy) )
    q2 = cv.GetSubRect( src_arr, (cx,0,cx,cy) )
    q3 = cv.GetSubRect( src_arr, (cx,cy,cx,cy) )
    q4 = cv.GetSubRect( src_arr, (0,cy,cx,cy) )
    # BUGFIX: destination quadrants must be views of dst_arr. The original
    # took them from src_arr, so an out-of-place shift scrambled the source
    # in place and never wrote the destination at all.
    d1 = cv.GetSubRect( dst_arr, (0,0,cx,cy) )
    d2 = cv.GetSubRect( dst_arr, (cx,0,cx,cy) )
    d3 = cv.GetSubRect( dst_arr, (cx,cy,cx,cy) )
    d4 = cv.GetSubRect( dst_arr, (0,cy,cx,cy) )

    if src_arr is not dst_arr:
        if not cv.CV_ARE_TYPES_EQ(q1, d1):
            # BUGFIX: same __FILE__/__LINE__ NameError as above.
            raise ValueError("cv.ShiftDFT: Source and Destination arrays must have the same format")
        # Swap quadrants diagonally: 1<->3, 2<->4.
        cv.Copy(q3, d1)
        cv.Copy(q4, d2)
        cv.Copy(q1, d3)
        cv.Copy(q2, d4)
    else:
        # In-place: swap via the temporary quarter-size buffer.
        cv.Copy(q3, tmp)
        cv.Copy(q1, q3)
        cv.Copy(tmp, q1)
        cv.Copy(q4, tmp)
        cv.Copy(q2, q4)
        cv.Copy(tmp, q2)
if __name__ == "__main__":
    # Load a grayscale image from disk, or fetch the default one over HTTP.
    if len(sys.argv) > 1:
        im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/baboon.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        im = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)

    # Build a 2-channel (complex) input: real part = image, imaginary part = 0.
    realInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 1)
    imaginaryInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 1)
    complexInput = cv.CreateImage( cv.GetSize(im), cv.IPL_DEPTH_64F, 2)
    cv.Scale(im, realInput, 1.0, 0.0)
    cv.Zero(imaginaryInput)
    cv.Merge(realInput, imaginaryInput, None, None, complexInput)

    # Pad to a DFT-friendly size.
    dft_M = cv.GetOptimalDFTSize( im.height - 1 )
    dft_N = cv.GetOptimalDFTSize( im.width - 1 )
    dft_A = cv.CreateMat( dft_M, dft_N, cv.CV_64FC2 )
    image_Re = cv.CreateImage( (dft_N, dft_M), cv.IPL_DEPTH_64F, 1)
    image_Im = cv.CreateImage( (dft_N, dft_M), cv.IPL_DEPTH_64F, 1)

    # copy A to dft_A and pad dft_A with zeros
    tmp = cv.GetSubRect( dft_A, (0,0, im.width, im.height))
    cv.Copy( complexInput, tmp, None )
    if(dft_A.width > im.width):
        tmp = cv.GetSubRect( dft_A, (im.width,0, dft_N - im.width, im.height))
        cv.Zero( tmp )

    # no need to pad bottom part of dft_A with zeros because of
    # use of the nonzero_rows parameter in the cv.DFT() call below
    cv.DFT( dft_A, dft_A, cv.CV_DXT_FORWARD, complexInput.height )

    cv.NamedWindow("win", 0)
    cv.NamedWindow("magnitude", 0)
    cv.ShowImage("win", im)

    # Split Fourier in real and imaginary parts
    cv.Split( dft_A, image_Re, image_Im, None, None )

    # Compute the magnitude of the spectrum Mag = sqrt(Re^2 + Im^2)
    cv.Pow( image_Re, image_Re, 2.0)
    cv.Pow( image_Im, image_Im, 2.0)
    cv.Add( image_Re, image_Im, image_Re, None)
    cv.Pow( image_Re, image_Re, 0.5 )

    # Compute log(1 + Mag)
    cv.AddS( image_Re, cv.ScalarAll(1.0), image_Re, None ) # 1 + Mag
    cv.Log( image_Re, image_Re ) # log(1 + Mag)

    # Rearrange the quadrants of Fourier image so that the origin is at
    # the image center
    cvShiftDFT( image_Re, image_Re )

    # Normalize to [0, 1] for display.
    # NOTE(review): `min`/`max` shadow the builtins here (harmless in this scope).
    min, max, pt1, pt2 = cv.MinMaxLoc(image_Re)
    cv.Scale(image_Re, image_Re, 1.0/(max-min), 1.0*(-min)/(max-min))
    cv.ShowImage("magnitude", image_Re)

    cv.WaitKey(0)
    cv.DestroyAllWindows()
#!/usr/bin/python
# Distance-transform demo: threshold edges, distance-transform them and
# display a false-colour rendering controlled by a trackbar.
import sys
import cv2.cv as cv
import urllib2

wndname = "Distance transform"
tbarname = "Threshold"

# The output images (module globals, allocated in __main__ below).
dist = 0
dist8u1 = 0
dist8u2 = 0
dist8u = 0
dist32s = 0
gray = 0
edge = 0

# define a trackbar callback
def on_trackbar(edge_thresh):
    """Re-run the distance transform with the new threshold and redraw.

    Operates on the module-level image globals allocated in __main__.
    """
    cv.Threshold(gray, edge, float(edge_thresh), float(edge_thresh), cv.CV_THRESH_BINARY)
    # Distance transform
    cv.DistTransform(edge, dist, cv.CV_DIST_L2, cv.CV_DIST_MASK_5)
    # Stretch and gamma-compress distances for display.
    cv.ConvertScale(dist, dist, 5000.0, 0)
    cv.Pow(dist, dist, 0.5)
    cv.ConvertScale(dist, dist32s, 1.0, 0.5)
    cv.AndS(dist32s, cv.ScalarAll(255), dist32s, None)
    cv.ConvertScale(dist32s, dist8u1, 1, 0)
    # Second channel is the inverted distance.
    cv.ConvertScale(dist32s, dist32s, -1, 0)
    cv.AddS(dist32s, cv.ScalarAll(255), dist32s, None)
    cv.ConvertScale(dist32s, dist8u2, 1, 0)
    cv.Merge(dist8u1, dist8u2, dist8u2, None, dist8u)
    cv.ShowImage(wndname, dist8u)

if __name__ == "__main__":
    edge_thresh = 100
    if len(sys.argv) > 1:
        gray = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/stuff.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        gray = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)

    # Create the output image
    dist = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32F, 1)
    dist8u1 = cv.CloneImage(gray)
    dist8u2 = cv.CloneImage(gray)
    dist8u = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_8U, 3)
    dist32s = cv.CreateImage((gray.width, gray.height), cv.IPL_DEPTH_32S, 1)

    # Convert to grayscale
    edge = cv.CloneImage(gray)

    # Create a window
    cv.NamedWindow(wndname, 1)

    # create a toolbar
    cv.CreateTrackbar(tbarname, wndname, edge_thresh, 255, on_trackbar)

    # Show the image
    on_trackbar(edge_thresh)

    # Wait for a key stroke; the same function arranges events processing
    cv.WaitKey(0)
    cv.DestroyAllWindows()
#!/usr/bin/python
import cv2.cv as cv
import time
from pydmtx import DataMatrix
import numpy
import sys
import math
'''
Find 2-D barcodes (DataMatrix codes, in up to 3 channels) in an image.
'''
def absnorm8(im, im8):
    """Fill im8 with an 8-bit absolute-value rendering of im.

    im may be any single-channel image type; values are scaled so that the
    largest magnitude maps to 255. Returns im8 for convenience.
    """
    lo, hi, _, _ = cv.MinMaxLoc(im)
    peak = max(abs(lo), abs(hi))
    cv.ConvertScaleAbs(im, im8, 255 / peak, 0)
    return im8
# Font used for labelling decoded symbols later in the script.
font = cv.InitFont(cv.CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, thickness = 2, lineType = cv.CV_AA)
# Disabled one-shot decode-timing experiment.
# NOTE(review): `dm_write` and `bg` are defined further down in this script;
# enabling this branch as-is would raise NameError.
if 0:
    started = time.time()
    print dm_write.decode(bg.width, bg.height, buffer(bg.tostring()), max_count = 1, min_edge = 12, max_edge = 13, shape = DataMatrix.DmtxSymbol10x10) # , timeout = 10)
    print "took", time.time() - started
class DmtxFinder:
def __init__(self):
self.cache = {}
self.dm = DataMatrix()
def Cached(self, name, rows, cols, type):
key = (name, rows, cols)
if not key in self.cache:
self.cache[key] = cv.CreateMat(rows, cols, type)
return self.cache[key]
def find0(self, img):
started = time.time()
self.dm.decode(img.width,
img.height,
buffer(img.tostring()),
max_count = 4,
#min_edge = 6,
#max_edge = 19 # Units of 2 pixels
)
print "brute", time.time() - started
found = {}
for i in range(self.dm.count()):
stats = dm_read.stats(i + 1)
print stats
found[stats[0]] = stats[1]
return found
def find(self, img):
started = time.time()
gray = self.Cached('gray', img.height, img.width, cv.CV_8UC1)
cv.CvtColor(img, gray, cv.CV_BGR2GRAY)
sobel = self.Cached('sobel', img.height, img.width, cv.CV_16SC1)
sobely = self.Cached('sobely', img.height, img.width, cv.CV_16SC1)
cv.Sobel(gray, sobel, 1, 0)
cv.Sobel(gray, sobely, 0, 1)
cv.Add(sobel, sobely, sobel)
sobel8 = self.Cached('sobel8', sobel.height, sobel.width, cv.CV_8UC1)
absnorm8(sobel, sobel8)
cv.Threshold(sobel8, sobel8, 128.0, 255.0, cv.CV_THRESH_BINARY)
sobel_integral = self.Cached('sobel_integral', img.height + 1, img.width + 1, cv.CV_32SC1)
cv.Integral(sobel8, sobel_integral)
d = 16
_x1y1 = cv.GetSubRect(sobel_integral, (0, 0, sobel_integral.cols - d, sobel_integral.rows - d))
_x1y2 = cv.GetSubRect(sobel_integral, (0, d, sobel_integral.cols - d, sobel_integral.rows - d))
_x2y1 = cv.GetSubRect(sobel_integral, (d, 0, sobel_integral.cols - d, sobel_integral.rows - d))
_x2y2 = cv.GetSubRect(sobel_integral, (d, d, sobel_integral.cols - d, sobel_integral.rows - d))
summation = cv.CloneMat(_x2y2)
cv.Sub(summation, _x1y2, summation)
cv.Sub(summation, _x2y1, summation)
cv.Add(summation, _x1y1, summation)
sum8 = self.Cached('sum8', summation.height, summation.width, cv.CV_8UC1)
absnorm8(summation, sum8)
cv.Threshold(sum8, sum8, 32.0, 255.0, cv.CV_THRESH_BINARY)
cv.ShowImage("sum8", sum8)
seq = cv.FindContours(sum8, cv.CreateMemStorage(), cv.CV_RETR_EXTERNAL)
subimg = cv.GetSubRect(img, (d / 2, d / 2, sum8.cols, sum8.rows))
t_cull = time.time() - started
seqs = []
while seq:
seqs.append(seq)
seq = seq.h_next()
started = time.time()
found = {}
print 'seqs', len(seqs)
for seq in seqs:
area = cv.ContourArea(seq)
if area > 1000:
rect = cv.BoundingRect(seq)
edge = int((14 / 14.) * math.sqrt(area) / 2 + 0.5)
candidate = cv.GetSubRect(subimg, rect)
sym = self.dm.decode(candidate.width,
candidate.height,
buffer(candidate.tostring()),
max_count = 1,
#min_edge = 6,
#max_edge = int(edge) # Units of 2 pixels
)
if sym:
onscreen = [(d / 2 + rect[0] + x, d / 2 + rect[1] + y) for (x, y) in self.dm.stats(1)[1]]
found[sym] = onscreen
else:
print "FAILED"
t_brute = time.time() - started
print "cull took", t_cull, "brute", t_brute
return found
# Build a synthetic 1024x1024 test scene of rotated DataMatrix codes,
# run the finder on it, then (dead code) loop over a live camera.
bg = cv.CreateMat(1024, 1024, cv.CV_8UC3)
cv.Set(bg, cv.RGB(0, 0, 0))
df = DmtxFinder()
cv.NamedWindow("camera", 1)

def mkdmtx(msg):
    """Encode msg as a DataMatrix and return it as a 3-channel cv image."""
    dm_write = DataMatrix()
    dm_write.encode(msg)
    pi = dm_write.image # .resize((14, 14))
    cv_im = cv.CreateImageHeader(pi.size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(cv_im, pi.tostring())
    return cv_im

# test = [('WIL', (100,100))]: # , ('LOW', (250,100)), ('GAR', (300, 300)), ('AGE', (500, 300))]:
# Generate (message, position, angle, radius) tuples: 7 rows of 9 rotations.
test = []
y = 10
for j in range(7):
    r = 28 + j * 4
    mr = r * math.sqrt(2)
    y += mr * 1.8
    test += [(str(deg) + "abcdefgh"[j], (50 + deg * 11, y), math.pi * deg / 180, r) for deg in range(0, 90, 10)]

# Warp each code into the scene at its position/angle.
for (msg, (x, y), angle, r) in test:
    map = cv.CreateMat(2, 3, cv.CV_32FC1)
    # NOTE(review): the 4th angle is 3*pi/4, not the 3*pi/2 a square's corners
    # would need; it looks like a typo, but only corners[:3] are used below.
    corners = [(x + r * math.cos(angle + th), y + r * math.sin(angle + th)) for th in [0, math.pi / 2, math.pi, 3 * math.pi / 4]]
    src = mkdmtx(msg)
    (sx, sy) = cv.GetSize(src)
    cv.GetAffineTransform([(0,0), (sx, 0), (sx, sy)], corners[:3], map)
    temp = cv.CreateMat(bg.rows, bg.cols, cv.CV_8UC3)
    cv.Set(temp, cv.RGB(0, 0, 0))
    cv.WarpAffine(src, temp, map)
    cv.Or(temp, bg, bg)

cv.ShowImage("comp", bg)
scribble = cv.CloneMat(bg)

# Disabled warm-up / benchmarking loop.
if 0:
    for i in range(10):
        df.find(bg)

# Decode the synthetic scene and annotate each hit.
for (sym, coords) in df.find(bg).items():
    print sym
    cv.PolyLine(scribble, [coords], 1, cv.CV_RGB(255, 0,0), 1, lineType = cv.CV_AA)
    Xs = [x for (x, y) in coords]
    Ys = [y for (x, y) in coords]
    where = ((min(Xs) + max(Xs)) / 2, max(Ys) - 50)
    cv.PutText(scribble, sym, where, font, cv.RGB(0,255, 0))

cv.ShowImage("results", scribble)
cv.WaitKey()
cv.DestroyAllWindows()
sys.exit(0)

# NOTE(review): unreachable — sys.exit(0) above terminates the script before
# this live-camera loop can run.
capture = cv.CaptureFromCAM(0)
while True:
    img = cv.QueryFrame(capture)
    cv.ShowImage("capture", img)
    print df.find(img)
    cv.WaitKey(6)
#! /usr/bin/env python
# Drawing-primitives demo: random lines, shapes and text.
from random import Random
import colorsys

print "OpenCV Python version of drawing"

import cv2.cv as cv
def random_color(random):
    """Return a random cv.Scalar color.

    Draws one 24-bit integer from `random` and unpacks its three bytes
    into the scalar's channels.
    """
    icolor = random.randint(0, 0xFFFFFF)
    low = icolor & 0xff
    mid = (icolor >> 8) & 0xff
    high = (icolor >> 16) & 0xff
    return cv.Scalar(low, mid, high)
if __name__ == '__main__':
    # some "constants"
    width = 1000
    height = 700
    window_name = "Drawing Demo"
    number = 100
    delay = 5
    line_type = cv.CV_AA # change it to 8 to see non-antialiased graphics

    # create the source image
    image = cv.CreateImage( (width, height), 8, 3)

    # create window and display the original picture in it
    cv.NamedWindow(window_name, 1)
    cv.SetZero(image)
    cv.ShowImage(window_name, image)

    # create the random number generator
    random = Random()

    # draw some lines
    for i in range(number):
        pt1 = (random.randrange(-width, 2 * width),
               random.randrange(-height, 2 * height))
        pt2 = (random.randrange(-width, 2 * width),
               random.randrange(-height, 2 * height))
        cv.Line(image, pt1, pt2,
                random_color(random),
                random.randrange(0, 10),
                line_type, 0)
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # draw some rectangles
    for i in range(number):
        pt1 = (random.randrange(-width, 2 * width),
               random.randrange(-height, 2 * height))
        pt2 = (random.randrange(-width, 2 * width),
               random.randrange(-height, 2 * height))
        cv.Rectangle(image, pt1, pt2,
                     random_color(random),
                     random.randrange(-1, 9),
                     line_type, 0)
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # draw some ellipses
    for i in range(number):
        pt1 = (random.randrange(-width, 2 * width),
               random.randrange(-height, 2 * height))
        sz = (random.randrange(0, 200),
              random.randrange(0, 200))
        angle = random.randrange(0, 1000) * 0.180
        cv.Ellipse(image, pt1, sz, angle, angle - 100, angle + 200,
                   random_color(random),
                   random.randrange(-1, 9),
                   line_type, 0)
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # init the list of polylines
    nb_polylines = 2
    polylines_size = 3
    pt = [0,] * nb_polylines
    for a in range(nb_polylines):
        pt [a] = [0,] * polylines_size

    # draw some polylines
    for i in range(number):
        for a in range(nb_polylines):
            for b in range(polylines_size):
                pt [a][b] = (random.randrange(-width, 2 * width),
                             random.randrange(-height, 2 * height))
        cv.PolyLine(image, pt, 1,
                    random_color(random),
                    random.randrange(1, 9),
                    line_type, 0)
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # draw some filled polylines
    for i in range(number):
        for a in range(nb_polylines):
            for b in range(polylines_size):
                pt [a][b] = (random.randrange(-width, 2 * width),
                             random.randrange(-height, 2 * height))
        cv.FillPoly(image, pt,
                    random_color(random),
                    line_type, 0)
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # draw some circles
    for i in range(number):
        pt1 = (random.randrange(-width, 2 * width),
               random.randrange(-height, 2 * height))
        cv.Circle(image, pt1, random.randrange(0, 300),
                  random_color(random),
                  random.randrange(-1, 9),
                  line_type, 0)
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # draw some text
    for i in range(number):
        pt1 = (random.randrange(-width, 2 * width),
               random.randrange(-height, 2 * height))
        font = cv.InitFont(random.randrange(0, 8),
                           random.randrange(0, 100) * 0.05 + 0.01,
                           random.randrange(0, 100) * 0.05 + 0.01,
                           random.randrange(0, 5) * 0.1,
                           random.randrange(0, 10),
                           line_type)
        cv.PutText(image, "Testing text rendering!",
                   pt1, font,
                   random_color(random))
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # prepare a text, and get its properties
    font = cv.InitFont(cv.CV_FONT_HERSHEY_COMPLEX,
                       3, 3, 0.0, 5, line_type)
    text_size, ymin = cv.GetTextSize("OpenCV forever!", font)
    pt1 = ((width - text_size[0]) / 2, (height + text_size[1]) / 2)
    image2 = cv.CloneImage(image)

    # now, draw some OpenCV pub ;-)
    for i in range(0, 512, 2):
        cv.SubS(image2, cv.ScalarAll(i), image)
        (r, g, b) = colorsys.hsv_to_rgb((i % 100) / 100., 1, 1)
        cv.PutText(image, "OpenCV forever!",
                   pt1, font, cv.RGB(255 * r, 255 * g, 255 * b))
        cv.ShowImage(window_name, image)
        cv.WaitKey(delay)

    # wait some key to end
    cv.WaitKey(0)
    cv.DestroyAllWindows()
#! /usr/bin/env python
# Canny edge-detection demo with a trackbar-controlled threshold.
print "OpenCV Python version of edge"

import sys
import urllib2
import cv2.cv as cv

# some definitions
win_name = "Edge"
trackbar_name = "Threshold"

# the callback on the trackbar
def on_trackbar(position):
    """Re-run Canny with the new threshold and show the edges in colour.

    Uses the module-level images (gray, edge, col_edge, im) set up in __main__.
    """
    cv.Smooth(gray, edge, cv.CV_BLUR, 3, 3, 0)
    cv.Not(gray, edge)

    # run the edge detector on gray scale
    cv.Canny(gray, edge, position, position * 3, 3)

    # reset
    cv.SetZero(col_edge)

    # copy edge points
    cv.Copy(im, col_edge, edge)

    # show the im
    cv.ShowImage(win_name, col_edge)

if __name__ == '__main__':
    if len(sys.argv) > 1:
        im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    # create the output im
    col_edge = cv.CreateImage((im.width, im.height), 8, 3)

    # convert to grayscale
    gray = cv.CreateImage((im.width, im.height), 8, 1)
    edge = cv.CreateImage((im.width, im.height), 8, 1)
    cv.CvtColor(im, gray, cv.CV_BGR2GRAY)

    # create the window
    cv.NamedWindow(win_name, cv.CV_WINDOW_AUTOSIZE)

    # create the trackbar
    cv.CreateTrackbar(trackbar_name, win_name, 1, 100, on_trackbar)

    # show the im
    on_trackbar(0)

    # wait a key pressed to end
    cv.WaitKey(0)
    cv.DestroyAllWindows()
#!/usr/bin/python
"""
This program is a demonstration of face and object detection using haar-like features.
The program finds faces in a camera image or video stream and displays a red box around them.

Original C implementation by:  ?
Python implementation by: Roman Stanchak, James Bowman
"""
import sys
import cv2.cv as cv
from optparse import OptionParser

# Parameters for haar detection
# From the API:
# The default parameters (scale_factor=2, min_neighbors=3, flags=0) are tuned
# for accurate yet slow object detection. For a faster operation on real video
# images the settings are:
# scale_factor=1.2, min_neighbors=2, flags=CV_HAAR_DO_CANNY_PRUNING,
# min_size=<minimum possible face size
min_size = (20, 20)
image_scale = 2
haar_scale = 1.2
min_neighbors = 2
haar_flags = 0

def detect_and_draw(img, cascade):
    """Run the Haar cascade on a downscaled grayscale copy of img, draw a
    red rectangle around each detection, and show the result."""
    # allocate temporary images
    gray = cv.CreateImage((img.width,img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                   cv.Round (img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if(cascade):
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        print "detection time = %gms" % (t/(cv.GetTickFrequency()*1000.))
        if faces:
            for ((x, y, w, h), n) in faces:
                # the input to cv.HaarDetectObjects was resized, so scale the
                # bounding box of each face and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)

    cv.ShowImage("result", img)

if __name__ == '__main__':
    parser = OptionParser(usage = "usage: %prog [options] [filename|camera_index]")
    parser.add_option("-c", "--cascade", action="store", dest="cascade", type="str", help="Haar cascade file, default %default", default = "../data/haarcascades/haarcascade_frontalface_alt.xml")
    (options, args) = parser.parse_args()

    cascade = cv.Load(options.cascade)

    if len(args) != 1:
        parser.print_help()
        sys.exit(1)

    input_name = args[0]
    # A numeric argument selects a camera; anything else is an image file.
    if input_name.isdigit():
        capture = cv.CreateCameraCapture(int(input_name))
    else:
        capture = None

    cv.NamedWindow("result", 1)

    if capture:
        frame_copy = None
        while True:
            frame = cv.QueryFrame(capture)
            if not frame:
                cv.WaitKey(0)
                break
            if not frame_copy:
                frame_copy = cv.CreateImage((frame.width,frame.height),
                                            cv.IPL_DEPTH_8U, frame.nChannels)
            # Normalise to top-left origin (flip bottom-left-origin frames).
            if frame.origin == cv.IPL_ORIGIN_TL:
                cv.Copy(frame, frame_copy)
            else:
                cv.Flip(frame, frame_copy, 0)
            detect_and_draw(frame_copy, cascade)
            if cv.WaitKey(10) >= 0:
                break
    else:
        image = cv.LoadImage(input_name, 1)
        detect_and_draw(image, cascade)
        cv.WaitKey(0)

    cv.DestroyWindow("result")
#!/usr/bin/env python
from cv import *
class FBackDemo:
    """Live Farneback dense optical-flow demo using the first webcam.

    Names like CaptureFromCAM come from this module's `from cv import *`.
    """
    def __init__(self):
        self.capture = CaptureFromCAM(0)
        self.mv_step = 16            # sample the flow field every 16 pixels
        self.mv_scale = 1.5          # NOTE(review): never read in this class — confirm
        self.mv_color = (0, 255, 0)  # green vectors
        self.cflow = None
        self.flow = None

        NamedWindow( "Optical Flow", 1 )
        print( "Press ESC - quit the program\n" )

    def draw_flow(self, flow, prevgray):
        """Draw the sampled flow vectors over prevgray and display the result.

        (The original docstring described a hue histogram — a copy/paste error.)
        """
        CvtColor(prevgray, self.cflow, CV_GRAY2BGR)
        for y in range(0, flow.height, self.mv_step):
            for x in range(0, flow.width, self.mv_step):
                fx, fy = flow[y, x]
                Line(self.cflow, (x,y), (x+fx,y+fy), self.mv_color)
                Circle(self.cflow, (x,y), 2, self.mv_color, -1)
        ShowImage("Optical Flow", self.cflow)

    def run(self):
        """Grab frames forever, computing flow between consecutive gray frames."""
        first_frame = True
        while True:
            frame = QueryFrame( self.capture )
            if first_frame:
                # Allocate buffers once the frame size is known.
                gray = CreateImage(GetSize(frame), 8, 1)
                prev_gray = CreateImage(GetSize(frame), 8, 1)
                flow = CreateImage(GetSize(frame), 32, 2)
                self.cflow = CreateImage(GetSize(frame), 8, 3)
            CvtColor(frame, gray, CV_BGR2GRAY)
            if not first_frame:
                CalcOpticalFlowFarneback(prev_gray, gray, flow,
                    pyr_scale=0.5, levels=3, winsize=15,
                    iterations=3, poly_n=5, poly_sigma=1.2, flags=0)
                self.draw_flow(flow, prev_gray)
                c = WaitKey(7)
                if c in [27, ord('q'), ord('Q')]:
                    break
            # Ping-pong the two gray buffers for the next iteration.
            prev_gray, gray = gray, prev_gray
            first_frame = False
if __name__=="__main__":
    # Run the optical-flow demo until the user quits (ESC/q/Q).
    demo = FBackDemo()
    demo.run()
    # BUGFIX: this module uses `from cv import *` (no `cv` name is bound),
    # so the original `cv.DestroyAllWindows()` raised NameError on exit.
    DestroyAllWindows()
#!/usr/bin/python
import sys
import random
import urllib2
import cv2.cv as cv
# Module-level state shared between the mouse/trackbar callbacks and __main__.
im=None;             # original input image
mask=None;           # flood-fill mask (2 px larger than the image)
color_img=None;      # working colour copy
gray_img0 = None;    # pristine grayscale copy
gray_img = None;     # working grayscale copy
ffill_case = 1;      # 0 = null range, 1 = fixed range, 2 = floating range
lo_diff = 20         # lower colour-difference threshold
up_diff = 20;        # upper colour-difference threshold
connectivity = 4;    # 4- or 8-neighbour fill
is_color = 1;        # 1 = operate on the colour image, 0 = grayscale
is_mask = 0;         # 1 = show and use the mask window
new_mask_val = 255;  # value FloodFill writes into the mask
def update_lo( pos ):
    """Trackbar callback: set the global lower flood-fill threshold.

    BUGFIX: without the `global` statement the assignment bound a local
    variable, so moving the trackbar never affected the flood fill.
    """
    global lo_diff
    lo_diff = pos
def update_up( pos ):
    """Trackbar callback: set the global upper flood-fill threshold.

    BUGFIX: without the `global` statement the assignment bound a local
    variable, so moving the trackbar never affected the flood fill.
    """
    global up_diff
    up_diff = pos
def on_mouse( event, x, y, flags, param ):
    """Mouse callback: flood-fill from the clicked point with a random colour.

    Reads the module-level mode globals (ffill_case, lo_diff, up_diff,
    connectivity, is_color, is_mask, new_mask_val) set by __main__ and the
    other callbacks.
    """
    if( not color_img ):
        return;

    if event == cv.CV_EVENT_LBUTTONDOWN:
        my_mask = None
        seed = (x,y);
        if ffill_case==0:
            # Null-range fill: only exactly-equal neighbours are filled.
            lo = up = 0
            flags = connectivity + (new_mask_val << 8)
        else:
            lo = lo_diff;
            up = up_diff;
            flags = connectivity + (new_mask_val << 8) + cv.CV_FLOODFILL_FIXED_RANGE
        # Random fill colour.
        b = random.randint(0,255)
        g = random.randint(0,255)
        r = random.randint(0,255)
        if( is_mask ):
            my_mask = mask
            cv.Threshold( mask, mask, 1, 128, cv.CV_THRESH_BINARY );
        if( is_color ):
            color = cv.CV_RGB( r, g, b );
            comp = cv.FloodFill( color_img, seed, color, cv.CV_RGB( lo, lo, lo ),
                cv.CV_RGB( up, up, up ), flags, my_mask );
            cv.ShowImage( "image", color_img );
        else:
            # Grayscale: collapse the random RGB to a weighted brightness.
            brightness = cv.RealScalar((r*2 + g*7 + b + 5)/10);
            comp = cv.FloodFill( gray_img, seed, brightness, cv.RealScalar(lo),
                cv.RealScalar(up), flags, my_mask );
            cv.ShowImage( "image", gray_img );
        print "%g pixels were repainted" % comp[0]
        if( is_mask ):
            cv.ShowImage( "mask", mask );
if __name__ == "__main__":
    # Load the image from disk or fetch the default one over HTTP.
    if len(sys.argv) > 1:
        im = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        im = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    print "Hot keys:"
    print "\tESC - quit the program"
    print "\tc - switch color/grayscale mode"
    print "\tm - switch mask mode"
    print "\tr - restore the original image"
    print "\ts - use null-range floodfill"
    print "\tf - use gradient floodfill with fixed(absolute) range"
    print "\tg - use gradient floodfill with floating(relative) range"
    print "\t4 - use 4-connectivity mode"
    print "\t8 - use 8-connectivity mode"

    # Working copies; the mask must be 2 px larger than the image.
    color_img = cv.CloneImage( im );
    gray_img0 = cv.CreateImage( (color_img.width, color_img.height), 8, 1 );
    cv.CvtColor( color_img, gray_img0, cv.CV_BGR2GRAY );
    gray_img = cv.CloneImage( gray_img0 );
    mask = cv.CreateImage( (color_img.width + 2, color_img.height + 2), 8, 1 );

    cv.NamedWindow( "image", 1 );
    cv.CreateTrackbar( "lo_diff", "image", lo_diff, 255, update_lo);
    cv.CreateTrackbar( "up_diff", "image", up_diff, 255, update_up);

    cv.SetMouseCallback( "image", on_mouse );

    # Key-dispatch loop; exits only via sys.exit(0) on ESC.
    while True:
        if( is_color ):
            cv.ShowImage( "image", color_img );
        else:
            cv.ShowImage( "image", gray_img );

        c = cv.WaitKey(0) % 0x100
        if c == 27:
            print("Exiting ...");
            sys.exit(0)
        elif c == ord('c'):
            if( is_color ):
                print("Grayscale mode is set");
                cv.CvtColor( color_img, gray_img, cv.CV_BGR2GRAY );
                is_color = 0;
            else:
                print("Color mode is set");
                cv.Copy( im, color_img, None );
                cv.Zero( mask );
                is_color = 1;
        elif c == ord('m'):
            if( is_mask ):
                cv.DestroyWindow( "mask" );
                is_mask = 0;
            else:
                cv.NamedWindow( "mask", 0 );
                cv.Zero( mask );
                cv.ShowImage( "mask", mask );
                is_mask = 1;
        elif c == ord('r'):
            print("Original image is restored");
            cv.Copy( im, color_img, None );
            cv.Copy( gray_img0, gray_img, None );
            cv.Zero( mask );
        elif c == ord('s'):
            print("Simple floodfill mode is set");
            ffill_case = 0;
        elif c == ord('f'):
            print("Fixed Range floodfill mode is set");
            ffill_case = 1;
        elif c == ord('g'):
            print("Gradient (floating range) floodfill mode is set");
            ffill_case = 2;
        elif c == ord('4'):
            print("4-connectivity mode is set");
            connectivity = 4;
        elif c == ord('8'):
            print("8-connectivity mode is set");
            connectivity = 8;
    # NOTE(review): unreachable — the loop above only exits via sys.exit(0).
    cv.DestroyAllWindows()
#!/usr/bin/python
"""
This program is a demonstration of ellipse fitting.
Trackbar controls threshold parameter.
Gray lines are contours. Colored lines are fit ellipses.
Original C implementation by: Denis Burenkov.
Python implementation by: Roman Stanchak, James Bowman
"""
import sys
import urllib2
import random
import cv2.cv as cv
def contour_iterator(contour):
    """Yield each contour in a chain linked by h_next(), starting at `contour`.

    A falsy starting value yields nothing.
    """
    node = contour
    while node:
        yield node
        node = node.h_next()
class FitEllipse:
    """Interactive demo: threshold an image, find contours, fit ellipses.

    The threshold is driven by a trackbar in the pre-existing "Result" window.
    """
    def __init__(self, source_image, slider_pos):
        self.source_image = source_image
        cv.CreateTrackbar("Threshold", "Result", slider_pos, 255, self.process_image)
        self.process_image(slider_pos)

    def process_image(self, slider_pos):
        """
        This function finds contours, draws them and their approximation by ellipses.
        """
        stor = cv.CreateMemStorage()

        # Create the destination images
        image02 = cv.CloneImage(self.source_image)
        cv.Zero(image02)
        image04 = cv.CreateImage(cv.GetSize(self.source_image), cv.IPL_DEPTH_8U, 3)
        cv.Zero(image04)

        # Threshold the source image. This is needful for cv.FindContours().
        cv.Threshold(self.source_image, image02, slider_pos, 255, cv.CV_THRESH_BINARY)

        # Find all contours.
        cont = cv.FindContours(image02,
            stor,
            cv.CV_RETR_LIST,
            cv.CV_CHAIN_APPROX_NONE,
            (0, 0))

        for c in contour_iterator(cont):
            # Number of points must be more than or equal to 6 for cv.FitEllipse2
            if len(c) >= 6:
                # Copy the contour into an array of (x,y)s
                PointArray2D32f = cv.CreateMat(1, len(c), cv.CV_32FC2)
                for (i, (x, y)) in enumerate(c):
                    PointArray2D32f[0, i] = (x, y)

                # Draw the current contour in gray
                gray = cv.CV_RGB(100, 100, 100)
                cv.DrawContours(image04, c, gray, gray,0,1,8,(0,0))

                # Fits ellipse to current contour.
                (center, size, angle) = cv.FitEllipse2(PointArray2D32f)

                # Convert ellipse data from float to integer representation.
                center = (cv.Round(center[0]), cv.Round(center[1]))
                size = (cv.Round(size[0] * 0.5), cv.Round(size[1] * 0.5))

                # Draw ellipse in random color
                color = cv.CV_RGB(random.randrange(256),random.randrange(256),random.randrange(256))
                cv.Ellipse(image04, center, size,
                    angle, 0, 360,
                    color, 2, cv.CV_AA, 0)

        # Show image. HighGUI use.
        cv.ShowImage( "Result", image04 )
if __name__ == '__main__':
    # Load a grayscale image from disk, or fetch the default one over HTTP.
    if len(sys.argv) > 1:
        source_image = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/stuff.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        source_image = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)

    # Create windows.
    cv.NamedWindow("Source", 1)
    cv.NamedWindow("Result", 1)

    # Show the image.
    cv.ShowImage("Source", source_image)

    # Start with threshold 70; the trackbar re-runs the fit interactively.
    fe = FitEllipse(source_image, 70)

    print "Press any key to exit"
    cv.WaitKey(0)

    cv.DestroyWindow("Source")
    cv.DestroyWindow("Result")
#!/usr/bin/python
# This is a standalone program. Pass an image name as a first parameter of the program.
import sys
from math import sin, cos, sqrt, pi
import cv2.cv as cv
import urllib2
# toggle between CV_HOUGH_STANDARD and CV_HOUGH_PROBABILISTIC
USE_STANDARD = True

if __name__ == "__main__":
    # Load a grayscale image from disk, or fetch the default one over HTTP.
    if len(sys.argv) > 1:
        filename = sys.argv[1]
        src = cv.LoadImage(filename, cv.CV_LOAD_IMAGE_GRAYSCALE)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/doc/pics/building.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        src = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_GRAYSCALE)

    cv.NamedWindow("Source", 1)
    cv.NamedWindow("Hough", 1)

    # SPACE toggles standard/probabilistic transform; ESC quits.
    while True:
        dst = cv.CreateImage(cv.GetSize(src), 8, 1)
        color_dst = cv.CreateImage(cv.GetSize(src), 8, 3)
        storage = cv.CreateMemStorage(0)
        lines = 0
        cv.Canny(src, dst, 50, 200, 3)
        cv.CvtColor(dst, color_dst, cv.CV_GRAY2BGR)

        if USE_STANDARD:
            # Standard transform returns (rho, theta) pairs; convert each to
            # two far-apart points on the line for drawing.
            lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_STANDARD, 1, pi / 180, 100, 0, 0)
            for (rho, theta) in lines[:100]:
                a = cos(theta)
                b = sin(theta)
                x0 = a * rho
                y0 = b * rho
                pt1 = (cv.Round(x0 + 1000*(-b)), cv.Round(y0 + 1000*(a)))
                pt2 = (cv.Round(x0 - 1000*(-b)), cv.Round(y0 - 1000*(a)))
                cv.Line(color_dst, pt1, pt2, cv.RGB(255, 0, 0), 3, 8)
        else:
            # Probabilistic transform returns line segments directly.
            lines = cv.HoughLines2(dst, storage, cv.CV_HOUGH_PROBABILISTIC, 1, pi / 180, 50, 50, 10)
            for line in lines:
                cv.Line(color_dst, line[0], line[1], cv.CV_RGB(255, 0, 0), 3, 8)

        cv.ShowImage("Source", src)
        cv.ShowImage("Hough", color_dst)

        k = cv.WaitKey(0) % 0x100
        if k == ord(' '):
            USE_STANDARD = not USE_STANDARD
        if k == 27:
            break
    cv.DestroyAllWindows()
#!/usr/bin/python
import urllib2
import sys
import cv2.cv as cv
class Sketcher:
    """Freehand drawing tool for a HighGUI window.

    While the left button is held, mouse movement paints 5-px-wide white
    strokes onto every image in `dests` and refreshes the window.
    """
    def __init__(self, windowname, dests):
        self.prev_pt = None
        self.windowname = windowname
        self.dests = dests
        cv.SetMouseCallback(self.windowname, self.on_mouse)

    def on_mouse(self, event, x, y, flags, param):
        """HighGUI mouse callback: track the drag and paint line segments."""
        pt = (x, y)
        button_held = bool(flags & cv.CV_EVENT_FLAG_LBUTTON)
        if event == cv.CV_EVENT_LBUTTONUP or not button_held:
            # Drag ended (or button not held): forget the last point.
            self.prev_pt = None
        elif event == cv.CV_EVENT_LBUTTONDOWN:
            # Drag started: remember where.
            self.prev_pt = pt
        elif event == cv.CV_EVENT_MOUSEMOVE and button_held:
            if self.prev_pt:
                for canvas in self.dests:
                    cv.Line(canvas, self.prev_pt, pt, cv.ScalarAll(255), 5, 8, 0)
            self.prev_pt = pt
            # NOTE: `img` is a module-level global assigned in __main__.
            cv.ShowImage(self.windowname, img)
if __name__=="__main__":
    # Load the image from disk or fetch the default one over HTTP.
    if len(sys.argv) > 1:
        img0 = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)

    print "Hot keys:"
    print "\tESC - quit the program"
    print "\tr - restore the original image"
    print "\ti or ENTER - run inpainting algorithm"
    print "\t\t(before running it, paint something on the image)"

    cv.NamedWindow("image", 1)
    cv.NamedWindow("inpainted image", 1)

    # Sketching paints white onto both the display image and the mask, so
    # the painted pixels are exactly the ones Inpaint() reconstructs.
    img = cv.CloneImage(img0)
    inpainted = cv.CloneImage(img0)
    inpaint_mask = cv.CreateImage(cv.GetSize(img), 8, 1)

    cv.Zero(inpaint_mask)
    cv.Zero(inpainted)
    cv.ShowImage("image", img)
    cv.ShowImage("inpainted image", inpainted)

    sk = Sketcher("image", [img, inpaint_mask])
    while True:
        c = cv.WaitKey(0) % 0x100
        if c == 27 or c == ord('q'):
            break
        if c == ord('r'):
            # Restore the original image and clear the mask.
            cv.Zero(inpaint_mask)
            cv.Copy(img0, img)
            cv.ShowImage("image", img)
        if c == ord('i') or c == ord('\n'):
            cv.Inpaint(img, inpaint_mask, inpainted, 3, cv.CV_INPAINT_TELEA)
            cv.ShowImage("inpainted image", inpainted)
    cv.DestroyAllWindows()
#!/usr/bin/python
"""
Tracking of rotating point.
Rotation speed is constant.
Both state and measurements vectors are 1D (a point angle),
Measurement is the real point angle + gaussian noise.
The real and the estimated points are connected with yellow line segment,
the real and the measured points are connected with red line segment.
(if Kalman filter works correctly,
the yellow segment should be shorter than the red one).
Pressing any key (except ESC) will reset the tracking with a different speed.
Pressing ESC will stop the program.
"""
import urllib2
import cv2.cv as cv
from math import cos, sin, sqrt
import sys
if __name__ == "__main__":
    # Constant-velocity transition model (written out element-wise below).
    A = [ [1, 1], [0, 1] ]

    img = cv.CreateImage((500, 500), 8, 3)
    kalman = cv.CreateKalman(2, 1, 0)
    state = cv.CreateMat(2, 1, cv.CV_32FC1)  # (phi, delta_phi)
    process_noise = cv.CreateMat(2, 1, cv.CV_32FC1)
    measurement = cv.CreateMat(1, 1, cv.CV_32FC1)
    rng = cv.RNG(-1)
    code = -1L

    cv.Zero(measurement)
    cv.NamedWindow("Kalman", 1)

    # Outer loop: each pass re-initialises the filter with a random speed.
    while True:
        cv.RandArr(rng, state, cv.CV_RAND_NORMAL, cv.RealScalar(0), cv.RealScalar(0.1))

        kalman.transition_matrix[0,0] = 1
        kalman.transition_matrix[0,1] = 1
        kalman.transition_matrix[1,0] = 0
        kalman.transition_matrix[1,1] = 1

        cv.SetIdentity(kalman.measurement_matrix, cv.RealScalar(1))
        cv.SetIdentity(kalman.process_noise_cov, cv.RealScalar(1e-5))
        cv.SetIdentity(kalman.measurement_noise_cov, cv.RealScalar(1e-1))
        cv.SetIdentity(kalman.error_cov_post, cv.RealScalar(1))
        cv.RandArr(rng, kalman.state_post, cv.CV_RAND_NORMAL, cv.RealScalar(0), cv.RealScalar(0.1))

        # Inner loop: one predict/measure/correct cycle per displayed frame.
        while True:
            def calc_point(angle):
                # Point on a circle of radius width/3 around the image center.
                return (cv.Round(img.width/2 + img.width/3*cos(angle)),
                        cv.Round(img.height/2 - img.width/3*sin(angle)))

            state_angle = state[0,0]
            state_pt = calc_point(state_angle)

            prediction = cv.KalmanPredict(kalman)
            predict_angle = prediction[0, 0]
            predict_pt = calc_point(predict_angle)

            # Simulate a noisy sensor reading of the true angle.
            cv.RandArr(rng, measurement, cv.CV_RAND_NORMAL, cv.RealScalar(0),
                       cv.RealScalar(sqrt(kalman.measurement_noise_cov[0, 0])))

            # generate measurement
            cv.MatMulAdd(kalman.measurement_matrix, state, measurement, measurement)

            measurement_angle = measurement[0, 0]
            measurement_pt = calc_point(measurement_angle)

            # plot points
            def draw_cross(center, color, d):
                cv.Line(img, (center[0] - d, center[1] - d),
                        (center[0] + d, center[1] + d), color, 1, cv.CV_AA, 0)
                cv.Line(img, (center[0] + d, center[1] - d),
                        (center[0] - d, center[1] + d), color, 1, cv.CV_AA, 0)

            # White = true state, red = measurement, green = prediction.
            cv.Zero(img)
            draw_cross(state_pt, cv.CV_RGB(255, 255, 255), 3)
            draw_cross(measurement_pt, cv.CV_RGB(255, 0,0), 3)
            draw_cross(predict_pt, cv.CV_RGB(0, 255, 0), 3)
            cv.Line(img, state_pt, measurement_pt, cv.CV_RGB(255, 0,0), 3, cv. CV_AA, 0)
            cv.Line(img, state_pt, predict_pt, cv.CV_RGB(255, 255, 0), 3, cv. CV_AA, 0)

            cv.KalmanCorrect(kalman, measurement)

            # Advance the true state with process noise.
            cv.RandArr(rng, process_noise, cv.CV_RAND_NORMAL, cv.RealScalar(0),
                       cv.RealScalar(sqrt(kalman.process_noise_cov[0, 0])))
            cv.MatMulAdd(kalman.transition_matrix, state, process_noise, state)

            cv.ShowImage("Kalman", img)

            # Any key restarts the tracking; the key code is checked below.
            code = cv.WaitKey(100) % 0x100
            if code != -1:
                break

        if code in [27, ord('q'), ord('Q')]:
            break

    cv.DestroyWindow("Kalman")
#!/usr/bin/python
# K-means clustering demo: draws random gaussian clusters and colors the
# points by the cluster assignment returned by cv.KMeans2.
import urllib2
import cv2.cv as cv
from random import randint
MAX_CLUSTERS = 5  # one color per possible cluster below
if __name__ == "__main__":
    color_tab = [
        cv.CV_RGB(255, 0,0),
        cv.CV_RGB(0, 255, 0),
        cv.CV_RGB(100, 100, 255),
        cv.CV_RGB(255, 0,255),
        cv.CV_RGB(255, 255, 0)]
    img = cv.CreateImage((500, 500), 8, 3)
    rng = cv.RNG(-1)
    cv.NamedWindow("clusters", 1)
    while True:
        cluster_count = randint(2, MAX_CLUSTERS)
        sample_count = randint(1, 1000)
        points = cv.CreateMat(sample_count, 1, cv.CV_32FC2)
        clusters = cv.CreateMat(sample_count, 1, cv.CV_32SC1)
        # generate random sample from multigaussian distribution
        for k in range(cluster_count):
            center = (cv.RandInt(rng)%img.width, cv.RandInt(rng)%img.height)
            first = k*sample_count/cluster_count
            last = sample_count
            # NOTE(review): k is always < cluster_count here, so this branch
            # always fires; for the last k it yields sample_count anyway.
            if k != cluster_count:
                last = (k+1)*sample_count/cluster_count
            point_chunk = cv.GetRows(points, first, last)
            cv.RandArr(rng, point_chunk, cv.CV_RAND_NORMAL,
                       cv.Scalar(center[0], center[1], 0, 0),
                       cv.Scalar(img.width*0.1, img.height*0.1, 0, 0))
        # shuffle samples
        cv.RandShuffle(points, rng)
        cv.KMeans2(points, cluster_count, clusters,
                   (cv.CV_TERMCRIT_EPS + cv.CV_TERMCRIT_ITER, 10, 1.0))
        cv.Zero(img)
        for i in range(sample_count):
            cluster_idx = int(clusters[i, 0])
            pt = (cv.Round(points[i, 0][0]), cv.Round(points[i, 0][1]))
            cv.Circle(img, pt, 2, color_tab[cluster_idx], cv.CV_FILLED, cv.CV_AA, 0)
        cv.ShowImage("clusters", img)
        key = cv.WaitKey(0) % 0x100
        if key in [27, ord('q'), ord('Q')]:
            break
    cv.DestroyWindow("clusters")
#!/usr/bin/python
# Live Laplacian edge demo: applies cv.Laplace per color plane of each
# captured frame and shows the recombined result.
import urllib2
import cv2.cv as cv
import sys
if __name__ == "__main__":
    laplace = None
    colorlaplace = None
    planes = [ None, None, None ]  # one 8-bit plane per BGR channel
    capture = None
    # argv: none -> default camera, digit -> camera index, else -> video file
    if len(sys.argv) == 1:
        capture = cv.CreateCameraCapture(0)
    elif len(sys.argv) == 2 and sys.argv[1].isdigit():
        capture = cv.CreateCameraCapture(int(sys.argv[1]))
    elif len(sys.argv) == 2:
        capture = cv.CreateFileCapture(sys.argv[1])
    if not capture:
        print "Could not initialize capturing..."
        sys.exit(-1)
    cv.NamedWindow("Laplacian", 1)
    while True:
        frame = cv.QueryFrame(capture)
        if frame:
            if not laplace:
                # lazily allocate buffers once the frame size is known
                planes = [cv.CreateImage((frame.width, frame.height), 8, 1) for i in range(3)]
                laplace = cv.CreateImage((frame.width, frame.height), cv.IPL_DEPTH_16S, 1)
                colorlaplace = cv.CreateImage((frame.width, frame.height), 8, 3)
            cv.Split(frame, planes[0], planes[1], planes[2], None)
            for plane in planes:
                cv.Laplace(plane, laplace, 3)
                cv.ConvertScaleAbs(laplace, plane, 1, 0)
            cv.Merge(planes[0], planes[1], planes[2], None, colorlaplace)
            cv.ShowImage("Laplacian", colorlaplace)
        if cv.WaitKey(10) != -1:
            break
    cv.DestroyWindow("Laplacian")
#! /usr/bin/env python
print "OpenCV Python version of lkdemo"
import sys
# import the necessary things for OpenCV
import cv2.cv as cv
#############################################################################
# some "constants"
win_size = 10      # half-size of the corner-refinement search window
MAX_COUNT = 500    # maximum number of features to track
#############################################################################
# some "global" variables (shared between on_mouse and the main loop)
image = None           # working BGR frame (None until first frame is loaded)
pt = None              # candidate point clicked by the user
add_remove_pt = False  # True while a clicked point is pending insertion
flags = 0              # flags passed to CalcOpticalFlowPyrLK
night_mode = False     # when True, only the tracked points are drawn
need_to_init = False   # when True, re-detect features on the next frame
#############################################################################
# the mouse callback
# (registered on the 'LkDemo' window in the main section below)
def on_mouse (event, x, y, flags, param):
    """Mouse callback: remember a left-click as a candidate feature point.

    Stores the clicked position in the module globals ``pt`` /
    ``add_remove_pt``; the main loop decides whether to add or remove it.
    """
    global pt, add_remove_pt
    if image is None:
        return  # no frame loaded yet - nothing to do
    if image.origin != 0:
        # bottom-left-origin image: flip the y coordinate
        y = image.height - y
    if event != cv.CV_EVENT_LBUTTONDOWN:
        return
    pt = (x, y)
    add_remove_pt = True
#############################################################################
# so, here is the main part of the program
if __name__ == '__main__':
    frames = sys.argv[1:]
    if frames == []:
        print "usage lkdemo.py <image files>"
        sys.exit(1)
    # display a small howto use it
    print "Hot keys: \n" \
        "\tESC - quit the program\n" \
        "\tr - auto-initialize tracking\n" \
        "\tc - delete all the points\n" \
        "\tn - switch the \"night\" mode on/off\n" \
        "\tSPACE - next frame\n" \
        "To add/remove a feature point click it\n"
    # first, create the necessary windows
    cv.NamedWindow ('LkDemo', cv.CV_WINDOW_AUTOSIZE)
    # register the mouse callback
    cv.SetMouseCallback ('LkDemo', on_mouse, None)
    fc = 0  # index of the current frame file
    while 1:
        # do forever
        frame = cv.LoadImage(frames[fc])
        if image is None:
            # create the images we need (lazily, once the frame size is known)
            image = cv.CreateImage (cv.GetSize (frame), 8, 3)
            image.origin = frame.origin
            grey = cv.CreateImage (cv.GetSize (frame), 8, 1)
            prev_grey = cv.CreateImage (cv.GetSize (frame), 8, 1)
            pyramid = cv.CreateImage (cv.GetSize (frame), 8, 1)
            prev_pyramid = cv.CreateImage (cv.GetSize (frame), 8, 1)
            features = []
        # copy the frame, so we can draw on it
        cv.Copy (frame, image)
        # create a grey version of the image
        cv.CvtColor (image, grey, cv.CV_BGR2GRAY)
        if night_mode:
            # night mode: only display the points
            cv.SetZero (image)
        if need_to_init:
            # we want to search all the good points
            # create the wanted images
            eig = cv.CreateImage (cv.GetSize (grey), 32, 1)
            temp = cv.CreateImage (cv.GetSize (grey), 32, 1)
            # the default parameters
            quality = 0.01
            min_distance = 10
            # search the good points
            features = cv.GoodFeaturesToTrack (
                grey, eig, temp,
                MAX_COUNT,
                quality, min_distance, None, 3, 0, 0.04)
            # refine the corner locations
            features = cv.FindCornerSubPix (
                grey,
                features,
                (win_size, win_size), (-1, -1),
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS, 20, 0.03))
        elif features != []:
            # we have points, so display them
            # calculate the optical flow
            features, status, track_error = cv.CalcOpticalFlowPyrLK (
                prev_grey, grey, prev_pyramid, pyramid,
                features,
                (win_size, win_size), 3,
                (cv.CV_TERMCRIT_ITER|cv.CV_TERMCRIT_EPS, 20, 0.03),
                flags)
            # set back the points we keep (drop points LK lost)
            features = [ p for (st,p) in zip(status, features) if st]
            if add_remove_pt:
                # we have a point to add, so see if it is close to
                # another one. If yes, don't use it
                def ptptdist(p0, p1):
                    # squared euclidean distance between two points
                    dx = p0[0] - p1[0]
                    dy = p0[1] - p1[1]
                    return dx**2 + dy**2
                if min([ ptptdist(pt, p) for p in features ]) < 25:
                    # too close
                    add_remove_pt = 0
            # draw the points as green circles
            for the_point in features:
                cv.Circle (image, (int(the_point[0]), int(the_point[1])), 3, (0, 255, 0, 0), -1, 8, 0)
        if add_remove_pt:
            # we want to add a point
            # refine this corner location and append it to 'features'
            features += cv.FindCornerSubPix (
                grey,
                [pt],
                (win_size, win_size), (-1, -1),
                (cv.CV_TERMCRIT_ITER | cv.CV_TERMCRIT_EPS,
                 20, 0.03))
            # we are no longer in "add_remove_pt" mode
            add_remove_pt = False
        # swapping: current buffers become "previous" for the next frame
        prev_grey, grey = grey, prev_grey
        prev_pyramid, pyramid = pyramid, prev_pyramid
        need_to_init = False
        # we can now display the image
        cv.ShowImage ('LkDemo', image)
        # handle events
        c = cv.WaitKey(10) % 0x100
        if c == 27:
            # user has press the ESC key, so exit
            break
        # processing depending on the character
        if 32 <= c and c < 128:
            cc = chr(c).lower()
            if cc == 'r':
                need_to_init = True
            elif cc == 'c':
                features = []
            elif cc == 'n':
                night_mode = not night_mode
            elif cc == ' ':
                fc = (fc + 1) % len(frames)
    cv.DestroyAllWindows()
#!/usr/bin/python
# Log-polar transform demo: click a point to re-center the transform.
import sys
import urllib2
import cv2.cv as cv
src=None    # source image (set in the main section)
dst=None    # forward log-polar result
src2=None   # round-trip (inverse) result
def on_mouse(event, x, y, flags, param):
    """On left click, recompute the log-polar transform centered at (x, y)."""
    if not src:
        return
    if event==cv.CV_EVENT_LBUTTONDOWN:
        # forward transform, then invert it back for comparison
        cv.LogPolar(src, dst, (x, y), 40, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS)
        cv.LogPolar(dst, src2, (x, y), 40, cv.CV_INTER_LINEAR + cv.CV_WARP_FILL_OUTLIERS + cv.CV_WARP_INVERSE_MAP)
        cv.ShowImage("log-polar", dst)
        cv.ShowImage("inverse log-polar", src2)
if __name__ == "__main__":
    # load the source image from argv[1], or fetch the sample image
    if len(sys.argv) > 1:
        src = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        src = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    cv.NamedWindow("original", 1)
    cv.NamedWindow("log-polar", 1)
    cv.NamedWindow("inverse log-polar", 1)
    dst = cv.CreateImage((256, 256), 8, 3)
    src2 = cv.CreateImage(cv.GetSize(src), 8, 3)
    cv.SetMouseCallback("original", on_mouse)
    # seed the display with a transform centered on the image
    on_mouse(cv.CV_EVENT_LBUTTONDOWN, src.width/2, src.height/2, None, None)
    cv.ShowImage("original", src)
    cv.WaitKey()
    cv.DestroyAllWindows()
#!/usr/bin/python
# Minimum-area rectangle / minimum enclosing circle demo.
import urllib2
import cv2.cv as cv
from random import randint
def roundxy(pt):
    # round an (x, y) pair to integer pixel coordinates
    return (cv.Round(pt[0]), cv.Round(pt[1]))
def draw_common(points):
    """Draw the min enclosing circle and min-area rectangle of *points*.

    Draws onto the module-global ``img``.
    """
    success, center, radius = cv.MinEnclosingCircle(points)
    if success:
        cv.Circle(img, roundxy(center), cv.Round(radius), cv.CV_RGB(255, 255, 0), 1, cv. CV_AA, 0)
    box = cv.MinAreaRect2(points)
    box_vtx = [roundxy(p) for p in cv.BoxPoints(box)]
    cv.PolyLine(img, [box_vtx], 1, cv.CV_RGB(0, 255, 255), 1, cv. CV_AA)
def minarea_array(img, count):
    """Scatter *count* random points stored in a CvMat and draw their hulls."""
    pointMat = cv.CreateMat(count, 1, cv.CV_32SC2)
    for i in range(count):
        # points confined to the central half of the image
        pointMat[i, 0] = (randint(img.width/4, img.width*3/4),
                          randint(img.height/4, img.height*3/4))
    cv.Zero(img)
    for i in range(count):
        cv.Circle(img, roundxy(pointMat[i, 0]), 2, cv.CV_RGB(255, 0, 0), cv.CV_FILLED, cv. CV_AA, 0)
    draw_common(pointMat)
def minarea_seq(img, count, storage):
    """Scatter *count* random points stored in a plain list and draw their hulls.

    NOTE(review): the *storage* parameter is unused; kept for call-site symmetry.
    """
    points = [(randint(img.width/4, img.width*3/4), randint(img.height/4, img.height*3/4)) for i in range(count)]
    cv.Zero(img)
    for p in points:
        cv.Circle(img, roundxy(p), 2, cv.CV_RGB(255, 0, 0), cv.CV_FILLED, cv. CV_AA, 0)
    draw_common(points)
if __name__ == "__main__":
    img = cv.CreateImage((500, 500), 8, 3)
    storage = cv.CreateMemStorage()
    cv.NamedWindow("rect & circle", 1)
    use_seq = True  # alternate between the list-based and CvMat-based variants
    while True:
        count = randint(1, 100)
        if use_seq:
            minarea_seq(img, count, storage)
        else:
            minarea_array(img, count)
        cv.ShowImage("rect & circle", img)
        key = cv.WaitKey() % 0x100
        if key in [27, ord('q'), ord('Q')]:
            break
        use_seq = not use_seq
    cv.DestroyAllWindows()
#! /usr/bin/env python
# Minimal demo: grab the first frame of a video file and display it.
import cv2.cv as cv
cap = cv.CreateFileCapture("../c/tree.avi")
img = cv.QueryFrame(cap)
print "Got frame of dimensions (", img.width, " x ", img.height, ")"
cv.NamedWindow("win", cv.CV_WINDOW_AUTOSIZE)
cv.ShowImage("win", img)
cv.MoveWindow("win", 200, 200)
cv.WaitKey(0)
cv.DestroyAllWindows()
#!/usr/bin/python
# Morphology demo: trackbars drive erosion/dilation/opening/closing.
import sys
import urllib2
import cv2.cv as cv
src = 0    # source image (loaded in the main section)
image = 0  # intermediate buffer for the two-step open/close operations
dest = 0   # result buffer shown in the windows
element_shape = cv.CV_SHAPE_RECT  # structuring-element shape for all callbacks
def Opening(pos):
    """Trackbar callback: morphological opening (erode then dilate) of src."""
    size = pos*2 + 1
    kernel = cv.CreateStructuringElementEx(size, size, pos, pos, element_shape)
    cv.Erode(src, image, kernel, 1)
    cv.Dilate(image, dest, kernel, 1)
    cv.ShowImage("Opening & Closing", dest)
def Closing(pos):
    """Trackbar callback: morphological closing (dilate then erode) of src."""
    size = pos*2 + 1
    kernel = cv.CreateStructuringElementEx(size, size, pos, pos, element_shape)
    cv.Dilate(src, image, kernel, 1)
    cv.Erode(image, dest, kernel, 1)
    cv.ShowImage("Opening & Closing", dest)
def Erosion(pos):
    """Trackbar callback: erode src with a pos-sized structuring element."""
    size = pos*2 + 1
    kernel = cv.CreateStructuringElementEx(size, size, pos, pos, element_shape)
    cv.Erode(src, dest, kernel, 1)
    cv.ShowImage("Erosion & Dilation", dest)
def Dilation(pos):
    """Trackbar callback: dilate src with a pos-sized structuring element."""
    size = pos*2 + 1
    kernel = cv.CreateStructuringElementEx(size, size, pos, pos, element_shape)
    cv.Dilate(src, dest, kernel, 1)
    cv.ShowImage("Erosion & Dilation", dest)
if __name__ == "__main__":
    # load the source image from argv[1], or fetch the sample image
    if len(sys.argv) > 1:
        src = cv.LoadImage(sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        src = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    image = cv.CloneImage(src)
    dest = cv.CloneImage(src)
    cv.NamedWindow("Opening & Closing", 1)
    cv.NamedWindow("Erosion & Dilation", 1)
    cv.ShowImage("Opening & Closing", src)
    cv.ShowImage("Erosion & Dilation", src)
    # each trackbar re-runs the corresponding morphology callback
    cv.CreateTrackbar("Open", "Opening & Closing", 0, 10, Opening)
    cv.CreateTrackbar("Close", "Opening & Closing", 0, 10, Closing)
    cv.CreateTrackbar("Dilate", "Erosion & Dilation", 0, 10, Dilation)
    cv.CreateTrackbar("Erode", "Erosion & Dilation", 0, 10, Erosion)
    cv.WaitKey(0)
    cv.DestroyWindow("Opening & Closing")
    cv.DestroyWindow("Erosion & Dilation")
#!/usr/bin/python
# Motion templates demo: motion history image (MHI) based motion segmentation.
import urllib2
import sys
import time
from math import cos, sin
import cv2.cv as cv
CLOCKS_PER_SEC = 1.0
MHI_DURATION = 1        # seconds of motion history kept in the MHI
MAX_TIME_DELTA = 0.5
MIN_TIME_DELTA = 0.05
N = 4                   # number of cyclic frame buffers actually used
# NOTE(review): buf is created with 10 slots but only the first N are used.
buf = range(10)
last = 0                # index of the most recent frame in buf
mhi = None # MHI
orient = None # orientation
mask = None # valid orientation mask
segmask = None # motion segmentation map
storage = None # temporary storage
def update_mhi(img, dst, diff_threshold):
    """Update the motion history image from *img* and draw motion into *dst*.

    Frame-differences the current frame against the one N-1 frames back
    (cyclic buffer ``buf``), updates the MHI, segments it into motion
    components and draws an orientation marker for each large component.
    Mutates the module globals declared below.
    """
    global last
    global mhi
    global storage
    global mask
    global orient
    global segmask
    # NOTE(review): time.clock() is wall/CPU time depending on platform
    timestamp = time.clock() / CLOCKS_PER_SEC # get current time in seconds
    size = cv.GetSize(img) # get current frame size
    idx1 = last
    if not mhi or cv.GetSize(mhi) != size:
        # (re)allocate all buffers on first call or frame-size change
        for i in range(N):
            buf[i] = cv.CreateImage(size, cv.IPL_DEPTH_8U, 1)
            cv.Zero(buf[i])
        mhi = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
        cv.Zero(mhi) # clear MHI at the beginning
        orient = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
        segmask = cv.CreateImage(size,cv. IPL_DEPTH_32F, 1)
        mask = cv.CreateImage(size,cv. IPL_DEPTH_8U, 1)
    cv.CvtColor(img, buf[last], cv.CV_BGR2GRAY) # convert frame to grayscale
    idx2 = (last + 1) % N # index of (last - (N-1))th frame
    last = idx2
    silh = buf[idx2]
    cv.AbsDiff(buf[idx1], buf[idx2], silh) # get difference between frames
    cv.Threshold(silh, silh, diff_threshold, 1, cv.CV_THRESH_BINARY) # and threshold it
    cv.UpdateMotionHistory(silh, mhi, timestamp, MHI_DURATION) # update MHI
    # convert MHI to blue 8u image (newest motion brightest)
    cv.CvtScale(mhi, mask, 255./MHI_DURATION,
                (MHI_DURATION - timestamp)*255./MHI_DURATION)
    cv.Zero(dst)
    cv.Merge(mask, None, None, None, dst)
    cv.CalcMotionGradient(mhi, mask, orient, MAX_TIME_DELTA, MIN_TIME_DELTA, 3)
    if not storage:
        storage = cv.CreateMemStorage(0)
    seq = cv.SegmentMotion(mhi, segmask, storage, timestamp, MAX_TIME_DELTA)
    for (area, value, comp_rect) in seq:
        if comp_rect[2] + comp_rect[3] > 100: # reject very small components
            color = cv.CV_RGB(255, 0,0)
            silh_roi = cv.GetSubRect(silh, comp_rect)
            mhi_roi = cv.GetSubRect(mhi, comp_rect)
            orient_roi = cv.GetSubRect(orient, comp_rect)
            mask_roi = cv.GetSubRect(mask, comp_rect)
            angle = 360 - cv.CalcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
            count = cv.Norm(silh_roi, None, cv.CV_L1, None) # calculate number of points within silhouette ROI
            if count < (comp_rect[2] * comp_rect[3] * 0.05):
                continue
            # draw a circle with an orientation "clock hand"
            magnitude = 30.
            center = ((comp_rect[0] + comp_rect[2] / 2), (comp_rect[1] + comp_rect[3] / 2))
            cv.Circle(dst, center, cv.Round(magnitude*1.2), color, 3, cv.CV_AA, 0)
            cv.Line(dst,
                    center,
                    (cv.Round(center[0] + magnitude * cos(angle * cv.CV_PI / 180)),
                    cv.Round(center[1] - magnitude * sin(angle * cv.CV_PI / 180))),
                    color,
                    3,
                    cv.CV_AA,
                    0)
if __name__ == "__main__":
    motion = 0   # output image, allocated after the first frame arrives
    capture = 0
    # argv: none -> default camera, digit -> camera index, else -> video file
    if len(sys.argv)==1:
        capture = cv.CreateCameraCapture(0)
    elif len(sys.argv)==2 and sys.argv[1].isdigit():
        capture = cv.CreateCameraCapture(int(sys.argv[1]))
    elif len(sys.argv)==2:
        capture = cv.CreateFileCapture(sys.argv[1])
    if not capture:
        print "Could not initialize capturing..."
        sys.exit(-1)
    cv.NamedWindow("Motion", 1)
    while True:
        image = cv.QueryFrame(capture)
        if(image):
            if(not motion):
                motion = cv.CreateImage((image.width, image.height), 8, 3)
                cv.Zero(motion)
                #motion.origin = image.origin
            update_mhi(image, motion, 30)
            cv.ShowImage("Motion", motion)
            if(cv.WaitKey(10) != -1):
                break
        else:
            break
    cv.DestroyWindow("Motion")
#!/usr/bin/python
import urllib2
import sys
import cv2.cv as cv
import numpy
# SRGB-linear conversions using NumPy - see http://en.wikipedia.org/wiki/SRGB
def srgb2lin(x):
    """Convert sRGB-encoded values in [0, 1] to linear light (elementwise).

    Uses the standard sRGB piecewise transfer function: a linear segment
    below 0.04045 and a 2.4-exponent power law above it.
    """
    a = 0.055
    linear_part = x * (1.0 / 12.92)
    power_part = numpy.power((x + a) * (1.0 / (1 + a)), 2.4)
    return numpy.where(x <= 0.04045, linear_part, power_part)
def lin2srgb(x):
    """Convert linear-light values in [0, 1] to sRGB encoding (elementwise).

    Inverse of the sRGB transfer function: a linear segment below
    0.0031308 and a 1/2.4-exponent power law above it.
    """
    a = 0.055
    linear_part = x * 12.92
    power_part = (1 + a) * numpy.power(x, 1 / 2.4) - a
    return numpy.where(x <= 0.0031308, linear_part, power_part)
if __name__ == "__main__":
    # load the source image from argv[1], or fetch the sample image
    if len(sys.argv) > 1:
        img0 = cv.LoadImageM( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/lena.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        img0 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    cv.NamedWindow("original", 1)
    cv.ShowImage("original", img0)
    # Image was originally bytes in range 0-255. Turn it into an array of floats in range 0.0 - 1.0
    n = numpy.asarray(img0) / 255.0
    # Use NumPy to do some transformations on the image
    # Negate the image by subtracting it from 1.0
    cv.NamedWindow("negative")
    cv.ShowImage("negative", cv.fromarray(1.0 - n))
    # Assume the image was sRGB, and compute the linear version.
    cv.NamedWindow("linear")
    cv.ShowImage("linear", cv.fromarray(srgb2lin(n)))
    # Look at a subwindow
    cv.NamedWindow("subwindow")
    cv.ShowImage("subwindow", cv.fromarray(n[200:300,200:400]))
    # Compute the grayscale image
    cv.NamedWindow("monochrome")
    ln = srgb2lin(n)
    # luma weights applied in linear light, then re-encoded to sRGB
    red = ln[:,:,0]
    grn = ln[:,:,1]
    blu = ln[:,:,2]
    linear_mono = 0.3 * red + 0.59 * grn + 0.11 * blu
    cv.ShowImage("monochrome", cv.fromarray(lin2srgb(linear_mono)))
    # Apply a blur to the NumPy array using OpenCV
    cv.NamedWindow("gaussian")
    cv.Smooth(n, n, cv.CV_GAUSSIAN, 15, 15)
    cv.ShowImage("gaussian", cv.fromarray(n))
    cv.WaitKey(0)
    cv.DestroyAllWindows()
#!/usr/bin/python
# "Warhol" demo: shows four channel-remixed copies of the camera feed in a
# 2x2 mosaic, writing through NumPy views into sub-rects of one big CvMat.
import urllib2
import sys
import cv2.cv as cv
import numpy
if __name__ == "__main__":
    cv.NamedWindow("camera", 1)
    capture = cv.CaptureFromCAM(0)
    paste = cv.CreateMat(960, 1280, cv.CV_8UC3)
    # NumPy views aliasing the four quadrants of `paste` (no copies)
    topleft = numpy.asarray(cv.GetSubRect(paste, (0, 0, 640, 480)))
    topright = numpy.asarray(cv.GetSubRect(paste, (640, 0, 640, 480)))
    bottomleft = numpy.asarray(cv.GetSubRect(paste, (0, 480, 640, 480)))
    bottomright = numpy.asarray(cv.GetSubRect(paste, (640, 480, 640, 480)))
    while True:
        img = cv.GetMat(cv.QueryFrame(capture))
        n = (numpy.asarray(img)).astype(numpy.uint8)
        red = n[:,:,0]
        grn = n[:,:,1]
        blu = n[:,:,2]
        # each quadrant gets a different channel permutation/inversion
        topleft[:,:,0] = 255 - grn
        topleft[:,:,1] = red
        topleft[:,:,2] = blu
        topright[:,:,0] = blu
        topright[:,:,1] = 255 - red
        topright[:,:,2] = grn
        bottomright[:,:,0] = red
        bottomright[:,:,1] = grn
        bottomright[:,:,2] = 255 - blu
        # float intermediates avoid uint8 wrap-around in the difference
        fgrn = grn.astype(numpy.float32)
        fred = red.astype(numpy.float32)
        bottomleft[:,:,0] = blu
        bottomleft[:,:,1] = (abs(fgrn - fred)).astype(numpy.uint8)
        bottomleft[:,:,2] = red
        cv.ShowImage("camera", paste)
        if cv.WaitKey(6) == 27:
            break
    cv.DestroyAllWindows()
#!/usr/bin/python
import sys
from cv import *
def inside(r, q):
    """Return True if rectangle *r* lies strictly inside rectangle *q*.

    Rectangles are ((x, y), (w, h)) pairs; all four edges of r must be
    strictly within q.
    """
    (rx, ry), (rw, rh) = r
    (qx, qy), (qw, qh) = q
    if rx <= qx or ry <= qy:
        return False
    return rx + rw < qx + qw and ry + rh < qy + qh
try:
img = LoadImage(sys.argv[1])
except:
try:
f = open(sys.argv[1], "rt")
except:
print "cannot read " + sys.argv[1]
sys.exit(-1)
imglist = list(f.readlines())
else:
imglist = [sys.argv[1]]
NamedWindow("people detection demo", 1)
storage = CreateMemStorage(0)
for name in imglist:
n = name.strip()
print n
try:
img = LoadImage(n)
except:
continue
#ClearMemStorage(storage)
found = list(HOGDetectMultiScale(img, storage, win_stride=(8,8),
padding=(32,32), scale=1.05, group_threshold=2))
found_filtered = []
for r in found:
insidef = False
for q in found:
if inside(r, q):
insidef = True
break
if not insidef:
found_filtered.append(r)
for r in found_filtered:
(rx, ry), (rw, rh) = r
tl = (rx + int(rw*0.1), ry + int(rh*0.07))
br = (rx + int(rw*0.9), ry + int(rh*0.87))
Rectangle(img, tl, br, (0, 255, 0), 3)
ShowImage("people detection demo", img)
c = WaitKey(0)
if c == ord('q'):
break
cv.DestroyAllWindows()
#!/usr/bin/python
import cv2.cv as cv
class PyrSegmentation:
    """Interactive pyramid-segmentation demo.

    Two trackbars drive the link/cluster thresholds of cv.PyrSegmentation;
    the segmented image is redrawn on every change.
    """
    def __init__(self, img0):
        # default thresholds and pyramid depth
        self.thresh1 = 255
        self.thresh2 = 30
        self.level =4
        self.storage = cv.CreateMemStorage()
        cv.NamedWindow("Source", 0)
        cv.ShowImage("Source", img0)
        cv.NamedWindow("Segmentation", 0)
        cv.CreateTrackbar("Thresh1", "Segmentation", self.thresh1, 255, self.set_thresh1)
        cv.CreateTrackbar("Thresh2", "Segmentation", self.thresh2, 255, self.set_thresh2)
        self.image0 = cv.CloneImage(img0)  # pristine source
        self.image1 = cv.CloneImage(img0)  # segmentation output
        cv.ShowImage("Segmentation", self.image1)
    def set_thresh1(self, val):
        # trackbar callback: update threshold and re-segment
        self.thresh1 = val
        self.on_segment()
    def set_thresh2(self, val):
        # trackbar callback: update threshold and re-segment
        self.thresh2 = val
        self.on_segment()
    def on_segment(self):
        # +1 because PyrSegmentation requires strictly positive thresholds
        comp = cv.PyrSegmentation(self.image0, self.image1, self.storage, \
                            self.level, self.thresh1+1, self.thresh2+1)
        cv.ShowImage("Segmentation", self.image1)
    def run(self):
        self.on_segment()
        cv.WaitKey(0)
if __name__ == "__main__":
    img0 = cv.LoadImage("../c/fruits.jpg", 1)
    # segmentation of the color image
    PyrSegmentation(img0).run()
    cv.DestroyAllWindows()
#!/usr/bin/python
#
# The full "Square Detector" program.
# It loads several images subsequentally and tries to find squares in
# each image
#
import urllib2
from math import sqrt
import cv2.cv as cv
thresh = 50     # initial upper Canny threshold (driven by the trackbar)
img = None      # working copy of the current image
img0 = None     # original of the current image
storage = None  # shared CvMemStorage for contours/sequences
wndname = "Square Detection Demo"
def angle(pt1, pt2, pt0):
    """Cosine of the angle at vertex *pt0* between rays to *pt1* and *pt2*.

    Points are objects with .x/.y attributes; a tiny epsilon guards the
    division when either ray has zero length.
    """
    dx1, dy1 = pt1.x - pt0.x, pt1.y - pt0.y
    dx2, dy2 = pt2.x - pt0.x, pt2.y - pt0.y
    dot = dx1*dx2 + dy1*dy2
    norm_product = (dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2)
    return dot/sqrt(norm_product + 1e-10)
def findSquares4(img, storage):
    """Find square-like contours in *img* and return them as a point sequence.

    NOTE(review): this body still uses pre-cv2.cv SWIG-era names
    (sizeof_CvSeq, sizeof_CvPoint, CvSeq_CvPoint, sizeof_CvContour, cv.Rect)
    and attribute access on the tuple ``sz`` (sz.width) -- it cannot run
    unmodified against cv2.cv; kept as-is for reference.
    """
    N = 11  # number of threshold levels tried per color plane
    # width/height rounded down to even numbers
    sz = (img.width & -2, img.height & -2)
    timg = cv.CloneImage(img); # make a copy of input image
    gray = cv.CreateImage(sz, 8, 1)
    pyr = cv.CreateImage((sz.width/2, sz.height/2), 8, 3)
    # create empty sequence that will contain points -
    # 4 points per square (the square's vertices)
    squares = cv.CreateSeq(0, sizeof_CvSeq, sizeof_CvPoint, storage)
    squares = CvSeq_CvPoint.cast(squares)
    # select the maximum ROI in the image
    # with the width and height divisible by 2
    subimage = cv.GetSubRect(timg, cv.Rect(0, 0, sz.width, sz.height))
    # down-scale and upscale the image to filter out the noise
    cv.PyrDown(subimage, pyr, 7)
    cv.PyrUp(pyr, subimage, 7)
    tgray = cv.CreateImage(sz, 8, 1)
    # find squares in every color plane of the image
    for c in range(3):
        # extract the c-th color plane
        channels = [None, None, None]
        channels[c] = tgray
        cv.Split(subimage, channels[0], channels[1], channels[2], None)
        for l in range(N):
            # hack: use Canny instead of zero threshold level.
            # Canny helps to catch squares with gradient shading
            if(l == 0):
                # apply Canny. Take the upper threshold from slider
                # and set the lower to 0 (which forces edges merging)
                cv.Canny(tgray, gray, 0, thresh, 5)
                # dilate canny output to remove potential
                # holes between edge segments
                cv.Dilate(gray, gray, None, 1)
            else:
                # apply threshold if l!=0:
                # tgray(x, y) = gray(x, y) < (l+1)*255/N ? 255 : 0
                cv.Threshold(tgray, gray, (l+1)*255/N, 255, cv.CV_THRESH_BINARY)
            # find contours and store them all as a list
            count, contours = cv.FindContours(gray, storage, sizeof_CvContour,
                cv.CV_RETR_LIST, cv. CV_CHAIN_APPROX_SIMPLE, (0, 0))
            if not contours:
                continue
            # test each contour
            for contour in contours.hrange():
                # approximate contour with accuracy proportional
                # to the contour perimeter
                result = cv.ApproxPoly(contour, sizeof_CvContour, storage,
                    cv.CV_POLY_APPROX_DP, cv.ContourPerimeter(contours)*0.02, 0)
                # square contours should have 4 vertices after approximation
                # relatively large area (to filter out noisy contours)
                # and be convex.
                # Note: absolute value of an area is used because
                # area may be positive or negative - in accordance with the
                # contour orientation
                if(result.total == 4 and
                    abs(cv.ContourArea(result)) > 1000 and
                    cv.CheckContourConvexity(result)):
                    s = 0
                    for i in range(5):
                        # find minimum angle between joint
                        # edges (maximum of cosine)
                        if(i >= 2):
                            t = abs(angle(result[i], result[i-2], result[i-1]))
                            if s<t:
                                s=t
                    # if cosines of all angles are small
                    # (all angles are ~90 degree) then write quandrange
                    # vertices to resultant sequence
                    if(s < 0.3):
                        for i in range(4):
                            squares.append(result[i])
    return squares
# the function draws all the squares in the image
def drawSquares(img, squares):
    """Draw each group of 4 consecutive points in *squares* as a closed polygon."""
    cpy = cv.CloneImage(img)
    # read 4 sequence elements at a time (all vertices of a square)
    i=0
    while i<squares.total:
        pt = []
        # read 4 vertices
        pt.append(squares[i])
        pt.append(squares[i+1])
        pt.append(squares[i+2])
        pt.append(squares[i+3])
        # draw the square as a closed polyline
        cv.PolyLine(cpy, [pt], 1, cv.CV_RGB(0, 255, 0), 3, cv. CV_AA, 0)
        i+=4
    # show the resultant image
    cv.ShowImage(wndname, cpy)
def on_trackbar(a):
    """Canny-threshold trackbar callback: re-detect squares and redraw."""
    if not img:
        return
    drawSquares(img, findSquares4(img, storage))
names = ["../c/pic1.png", "../c/pic2.png", "../c/pic3.png",
"../c/pic4.png", "../c/pic5.png", "../c/pic6.png" ]
if __name__ == "__main__":
# create memory storage that will contain all the dynamic data
storage = cv.CreateMemStorage(0)
for name in names:
img0 = cv.LoadImage(name, 1)
if not img0:
print "Couldn't load %s" % name
continue
img = cv.CloneImage(img0)
# create window and a trackbar (slider) with parent "image" and set callback
# (the slider regulates upper threshold, passed to Canny edge detector)
cv.NamedWindow(wndname, 1)
cv.CreateTrackbar("canny thresh", wndname, thresh, 1000, on_trackbar)
# force the image processing
on_trackbar(0)
# wait for key.
# Also the function cv.WaitKey takes care of event processing
c = cv.WaitKey(0) % 0x100
# clear memory storage - reset free space position
cv.ClearMemStorage(storage)
if(c == '\x1b'):
break
cv.DestroyWindow(wndname)
#!/usr/bin/python
import urllib2
import sys
import cv2.cv as cv
class Sketcher:
    """Freehand drawing helper: drag with the left button to paint white
    strokes into every destination image in *dests*.

    NOTE(review): on_mouse refreshes the display using the module-global
    ``img``, not self.dests -- assumes img is one of the destinations.
    """
    def __init__(self, windowname, dests):
        self.prev_pt = None          # last point of the stroke in progress
        self.windowname = windowname
        self.dests = dests
        cv.SetMouseCallback(self.windowname, self.on_mouse)
    def on_mouse(self, event, x, y, flags, param):
        # draw a line segment from the previous point while dragging
        pt = (x, y)
        if event == cv.CV_EVENT_LBUTTONUP or not (flags & cv.CV_EVENT_FLAG_LBUTTON):
            self.prev_pt = None
        elif event == cv.CV_EVENT_LBUTTONDOWN:
            self.prev_pt = pt
        elif event == cv.CV_EVENT_MOUSEMOVE and (flags & cv.CV_EVENT_FLAG_LBUTTON) :
            if self.prev_pt:
                for dst in self.dests:
                    cv.Line(dst, self.prev_pt, pt, cv.ScalarAll(255), 5, 8, 0)
            self.prev_pt = pt
            cv.ShowImage(self.windowname, img)
if __name__ == "__main__":
    # load the source image from argv[1], or fetch the sample image
    if len(sys.argv) > 1:
        img0 = cv.LoadImage( sys.argv[1], cv.CV_LOAD_IMAGE_COLOR)
    else:
        url = 'http://code.opencv.org/projects/opencv/repository/revisions/master/raw/samples/c/fruits.jpg'
        filedata = urllib2.urlopen(url).read()
        imagefiledata = cv.CreateMatHeader(1, len(filedata), cv.CV_8UC1)
        cv.SetData(imagefiledata, filedata, len(filedata))
        img0 = cv.DecodeImage(imagefiledata, cv.CV_LOAD_IMAGE_COLOR)
    rng = cv.RNG(-1)
    print "Hot keys:"
    print "\tESC - quit the program"
    print "\tr - restore the original image"
    print "\tw - run watershed algorithm"
    print "\t  (before that, roughly outline several markers on the image)"
    cv.NamedWindow("image", 1)
    cv.NamedWindow("watershed transform", 1)
    img = cv.CloneImage(img0)
    img_gray = cv.CloneImage(img0)
    wshed = cv.CloneImage(img0)
    marker_mask = cv.CreateImage(cv.GetSize(img), 8, 1)   # user strokes
    markers = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_32S, 1)  # seed labels
    cv.CvtColor(img, marker_mask, cv.CV_BGR2GRAY)
    cv.CvtColor(marker_mask, img_gray, cv.CV_GRAY2BGR)
    cv.Zero(marker_mask)
    cv.Zero(wshed)
    cv.ShowImage("image", img)
    cv.ShowImage("watershed transform", wshed)
    # drawing on "image" paints both the display and the marker mask
    sk = Sketcher("image", [img, marker_mask])
    while True:
        c = cv.WaitKey(0) % 0x100
        if c == 27 or c == ord('q'):
            break
        if c == ord('r'):
            # reset strokes and restore the original image
            cv.Zero(marker_mask)
            cv.Copy(img0, img)
            cv.ShowImage("image", img)
        if c == ord('w'):
            storage = cv.CreateMemStorage(0)
            #cv.SaveImage("wshed_mask.png", marker_mask)
            #marker_mask = cv.LoadImage("wshed_mask.png", 0)
            # each user stroke becomes one labeled seed region
            contours = cv.FindContours(marker_mask, storage, cv.CV_RETR_CCOMP, cv.CV_CHAIN_APPROX_SIMPLE)
            def contour_iterator(contour):
                # walk the top-level contour list
                while contour:
                    yield contour
                    contour = contour.h_next()
            cv.Zero(markers)
            comp_count = 0
            for c in contour_iterator(contours):
                cv.DrawContours(markers,
                                c,
                                cv.ScalarAll(comp_count + 1),
                                cv.ScalarAll(comp_count + 1),
                                -1,
                                -1,
                                8)
                comp_count += 1
            cv.Watershed(img0, markers)
            cv.Set(wshed, cv.ScalarAll(255))
            # paint the watershed image: one random color per component
            color_tab = [(cv.RandInt(rng) % 180 + 50, cv.RandInt(rng) % 180 + 50, cv.RandInt(rng) % 180 + 50) for i in range(comp_count)]
            for j in range(markers.height):
                for i in range(markers.width):
                    idx = markers[j, i]
                    if idx != -1:  # -1 marks watershed boundary pixels
                        wshed[j, i] = color_tab[int(idx - 1)]
            cv.AddWeighted(wshed, 0.5, img_gray, 0.5, 0, wshed)
            cv.ShowImage("watershed transform", wshed)
    cv.DestroyAllWindows()
......@@ -24,8 +24,3 @@ if __name__ == '__main__':
r = 1.0 * len(cv2_used) / len(cv2_callable)
print '\ncv2 api coverage: %d / %d (%.1f%%)' % ( len(cv2_used), len(cv2_callable), r*100 )
print '\nold (cv) symbols:'
for s in found:
if s.startswith('cv.'):
print s
......@@ -37,7 +37,7 @@ if __name__ == '__main__':
img = np.zeros((sz, sz), np.uint8)
track = np.cumsum(np.random.rand(500000, 2)-0.5, axis=0)
track = np.int32(track*10 + (sz/2, sz/2))
cv2.polylines(img, [track], 0, 255, 1, cv2.CV_AA)
cv2.polylines(img, [track], 0, 255, 1, cv2.LINE_AA)
small = img
......
......@@ -71,8 +71,8 @@ def mtx2rvec(R):
return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.CV_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.CV_AA)
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.LINE_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
class Sketcher:
def __init__(self, windowname, dests, colors_func):
......
......@@ -53,7 +53,7 @@ if __name__ == '__main__':
vis = np.zeros((h, w, 3), np.uint8)
levels = levels - 3
cv2.drawContours( vis, contours, (-1, 3)[levels <= 0], (128,255,255),
3, cv2.CV_AA, hierarchy, abs(levels) )
3, cv2.LINE_AA, hierarchy, abs(levels) )
cv2.imshow('contours', vis)
update(3)
cv2.createTrackbar( "levels+3", "contours", 3, 7, update )
......
......@@ -57,7 +57,7 @@ def motion_kernel(angle, d, sz=65):
def defocus_kernel(d, sz=65):
kern = np.zeros((sz, sz), np.uint8)
cv2.circle(kern, (sz, sz), d, 255, -1, cv2.CV_AA, shift=1)
cv2.circle(kern, (sz, sz), d, 255, -1, cv2.LINE_AA, shift=1)
kern = np.float32(kern) / 255.0
return kern
......@@ -69,7 +69,7 @@ if __name__ == '__main__':
opts = dict(opts)
try:
fn = args[0]
except:
except:
fn = 'data/licenseplate_motion.jpg'
win = 'deconvolution'
......@@ -78,7 +78,7 @@ if __name__ == '__main__':
if img is None:
print 'Failed to load fn1:', fn1
sys.exit(1)
img = np.float32(img)/255.0
cv2.imshow('input', img)
......
......@@ -93,7 +93,7 @@ if __name__ == "__main__":
shift_dft(log_spectrum, log_spectrum)
# normalize and display the results as rgb
cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.cv.CV_MINMAX)
cv2.normalize(log_spectrum, log_spectrum, 0.0, 1.0, cv2.NORM_MINMAX)
cv2.imshow("magnitude", log_spectrum)
cv2.waitKey(0)
......
......@@ -14,7 +14,6 @@ Keys:
import numpy as np
import cv2
import cv2.cv as cv
from common import make_cmap
......@@ -30,7 +29,7 @@ if __name__ == '__main__':
if img is None:
print 'Failed to load fn:', fn
sys.exit(1)
cm = make_cmap('jet')
need_update = True
voronoi = False
......@@ -40,7 +39,7 @@ if __name__ == '__main__':
need_update = False
thrs = cv2.getTrackbarPos('threshold', 'distrans')
mark = cv2.Canny(img, thrs, 3*thrs)
dist, labels = cv2.distanceTransformWithLabels(~mark, cv.CV_DIST_L2, 5)
dist, labels = cv2.distanceTransformWithLabels(~mark, cv2.DIST_L2, 5)
if voronoi:
vis = cm[np.uint8(labels)]
else:
......
......@@ -2,7 +2,6 @@
import numpy as np
import cv2
import cv2.cv as cv
# local modules
from video import create_capture
......@@ -13,7 +12,7 @@ USAGE: facedetect.py [--cascade <cascade_fn>] [--nested-cascade <cascade_fn>] [<
'''
def detect(img, cascade):
rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags = cv.CV_HAAR_SCALE_IMAGE)
rects = cascade.detectMultiScale(img, scaleFactor=1.3, minNeighbors=4, minSize=(30, 30), flags = cv2.CASCADE_SCALE_IMAGE)
if len(rects) == 0:
return []
rects[:,2:] += rects[:,:2]
......
......@@ -42,7 +42,7 @@ def sample_line(p1, p2, n, noise=0.0):
t = np.random.rand(n,1)
return p1 + (p2-p1)*t + np.random.normal(size=(n, 2))*noise
dist_func_names = it.cycle('CV_DIST_L2 CV_DIST_L1 CV_DIST_L12 CV_DIST_FAIR CV_DIST_WELSCH CV_DIST_HUBER'.split())
dist_func_names = it.cycle('DIST_L2 DIST_L1 DIST_L12 DIST_FAIR DIST_WELSCH DIST_HUBER'.split())
cur_func_name = dist_func_names.next()
def update(_=None):
......@@ -63,7 +63,7 @@ def update(_=None):
cv2.circle(img, toint(p), 2, (255, 255, 255), -1)
for p in outliers:
cv2.circle(img, toint(p), 2, (64, 64, 255), -1)
func = getattr(cv2.cv, cur_func_name)
func = getattr(cv2, cur_func_name)
vx, vy, cx, cy = cv2.fitLine(np.float32(points), func, 0, 0.01, 0.01)
cv2.line(img, (int(cx-vx*w), int(cy-vy*w)), (int(cx+vx*w), int(cy+vy*w)), (0, 0, 255))
......
......@@ -23,7 +23,7 @@ def draw_gaussain(img, mean, cov, color):
w, u, vt = cv2.SVDecomp(cov)
ang = np.arctan2(u[1, 0], u[0, 0])*(180/np.pi)
s1, s2 = np.sqrt(w)*3.0
cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.CV_AA)
cv2.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv2.LINE_AA)
if __name__ == '__main__':
......
......@@ -22,11 +22,11 @@ img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img, 5)
cimg = src.copy() # numpy function
circles = cv2.HoughCircles(img, cv2.cv.CV_HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)
circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)
a, b, c = circles.shape
for i in range(b):
cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv2.cv.CV_AA)
cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.cv.CV_AA) # draw center of circle
cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv2.LINE_AA)
cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.LINE_AA) # draw center of circle
cv2.imshow("source", src)
cv2.imshow("detected circles", cimg)
......
......@@ -10,31 +10,31 @@ import sys
import math
try:
fn = sys.argv[1]
fn = sys.argv[1]
except:
fn = "../cpp/pic1.png"
fn = "../cpp/pic1.png"
print __doc__
src = cv2.imread(fn)
dst = cv2.Canny(src, 50, 200)
cdst = cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)
# HoughLines()
# lines = cv2.HoughLines(dst, 1, cv2.cv.CV_PI/180.0, 50, np.array([]), 0, 0)
# lines = cv2.HoughLines(dst, 1, math.pi/180.0, 50, np.array([]), 0, 0)
# a,b,c = lines.shape
# for i in range(b):
# rho = lines[0][i][0]
# theta = lines[0][i][1]
# a = math.cos(theta)
# b = math.sin(theta)
# x0, y0 = a*rho, b*rho
# pt1 = ( int(x0+1000*(-b)), int(y0+1000*(a)) )
# pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) )
# cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.cv.CV_AA)
# rho = lines[0][i][0]
# theta = lines[0][i][1]
# a = math.cos(theta)
# b = math.sin(theta)
# x0, y0 = a*rho, b*rho
# pt1 = ( int(x0+1000*(-b)), int(y0+1000*(a)) )
# pt2 = ( int(x0-1000*(-b)), int(y0-1000*(a)) )
# cv2.line(cdst, pt1, pt2, (0, 0, 255), 3, cv2.LINE_AA)
lines = cv2.HoughLinesP(dst, 1, cv2.cv.CV_PI/180.0, 50, np.array([]), 50, 10)
lines = cv2.HoughLinesP(dst, 1, math.pi/180.0, 50, np.array([]), 50, 10)
a,b,c = lines.shape
for i in range(b):
cv2.line(cdst, (lines[0][i][0], lines[0][i][1]), (lines[0][i][2], lines[0][i][3]), (0, 0, 255), 3, cv2.cv.CV_AA)
cv2.line(cdst, (lines[0][i][0], lines[0][i][1]), (lines[0][i][2], lines[0][i][3]), (0, 0, 255), 3, cv2.LINE_AA)
cv2.imshow("source", src)
cv2.imshow("detected lines", cdst)
......
......@@ -9,7 +9,6 @@ Demonstrate using a mouse to interact with an image:
ESC to exit
'''
import numpy as np
import cv2 as cv
# built-in modules
import os
......@@ -24,27 +23,27 @@ sel = (0,0,0,0)
def onmouse(event, x, y, flags, param):
global drag_start, sel
if event == cv.EVENT_LBUTTONDOWN:
if event == cv2.EVENT_LBUTTONDOWN:
drag_start = x, y
sel = 0,0,0,0
elif event == cv.EVENT_LBUTTONUP:
elif event == cv2.EVENT_LBUTTONUP:
if sel[2] > sel[0] and sel[3] > sel[1]:
patch = gray[sel[1]:sel[3],sel[0]:sel[2]]
result = cv.matchTemplate(gray,patch,cv.TM_CCOEFF_NORMED)
result = cv2.matchTemplate(gray,patch,cv2.TM_CCOEFF_NORMED)
result = np.abs(result)**3
val, result = cv.threshold(result, 0.01, 0, cv.THRESH_TOZERO)
result8 = cv.normalize(result,None,0,255,cv.NORM_MINMAX,cv.CV_8U)
cv.imshow("result", result8)
val, result = cv2.threshold(result, 0.01, 0, cv2.THRESH_TOZERO)
result8 = cv2.normalize(result,None,0,255,cv2.NORM_MINMAX,cv2.CV_8U)
cv2.imshow("result", result8)
drag_start = None
elif drag_start:
#print flags
if flags & cv.EVENT_FLAG_LBUTTON:
if flags & cv2.EVENT_FLAG_LBUTTON:
minpos = min(drag_start[0], x), min(drag_start[1], y)
maxpos = max(drag_start[0], x), max(drag_start[1], y)
sel = minpos[0], minpos[1], maxpos[0], maxpos[1]
img = cv.cvtColor(gray, cv.COLOR_GRAY2BGR)
cv.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1)
cv.imshow("gray", img)
img = cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
cv2.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1)
cv2.imshow("gray", img)
else:
print "selection is complete"
drag_start = None
......@@ -55,21 +54,21 @@ if __name__ == '__main__':
args = parser.parse_args()
path = args.input
cv.namedWindow("gray",1)
cv.setMouseCallback("gray", onmouse)
cv2.namedWindow("gray",1)
cv2.setMouseCallback("gray", onmouse)
'''Loop through all the images in the directory'''
for infile in glob.glob( os.path.join(path, '*.*') ):
ext = os.path.splitext(infile)[1][1:] #get the filename extenstion
if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
print infile
img=cv.imread(infile,1)
img=cv2.imread(infile,1)
if img == None:
continue
sel = (0,0,0,0)
drag_start = None
gray=cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow("gray",gray)
if (cv.waitKey() & 255) == 27:
gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow("gray",gray)
if (cv2.waitKey() & 255) == 27:
break
cv.destroyAllWindows()
cv2.destroyAllWindows()
......@@ -24,7 +24,7 @@ if __name__ == '__main__':
if img is None:
print 'Failed to load image file:', fn
sys.exit(1)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
h, w = img.shape[:2]
......@@ -38,7 +38,7 @@ if __name__ == '__main__':
points = np.dstack( np.mgrid[d/2:w:d, d/2:h:d] ).reshape(-1, 2)
for x, y in points:
vx, vy = np.int32(flow[y, x]*d)
cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.CV_AA)
cv2.line(vis, (x-vx, y-vy), (x+vx, y+vy), (0, 0, 0), 1, cv2.LINE_AA)
cv2.imshow('input', img)
cv2.imshow('flow', vis)
cv2.waitKey()
......@@ -9,7 +9,6 @@ Inspired by http://www.jonathanmccabe.com/Cyclic_Symmetric_Multi-Scale_Turing_Pa
import numpy as np
import cv2
import cv2.cv as cv
from common import draw_str
import getopt, sys
from itertools import count
......@@ -30,7 +29,7 @@ if __name__ == '__main__':
out = None
if '-o' in args:
fn = args['-o']
out = cv2.VideoWriter(args['-o'], cv.CV_FOURCC(*'DIB '), 30.0, (w, h), False)
out = cv2.VideoWriter(args['-o'], cv2.VideoWriter_fourcc(*'DIB '), 30.0, (w, h), False)
print 'writing %s ...' % fn
a = np.zeros((h, w), np.float32)
......
......@@ -105,7 +105,7 @@ class Chess(VideoSynthBase):
img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
img_quads.shape = quads.shape[:2] + (2,)
for q in img_quads:
cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.CV_AA, shift=2)
cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.LINE_AA, shift=2)
def render(self, dst):
t = self.t
......@@ -159,8 +159,8 @@ def create_capture(source = 0, fallback = presets['chess']):
cap = cv2.VideoCapture(source)
if 'size' in params:
w, h = map(int, params['size'].split('x'))
cap.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, w)
cap.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, h)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
if cap is None or not cap.isOpened():
print 'Warning: unable to open video source: ', source
if fallback is not None:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment