Commit 57ff3633 authored by Alexander Alekhin

python: 'cv2.' -> 'cv.' via 'import cv2 as cv'

parent 9a64c542
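For context, the change in this commit is purely mechanical: every Python sample and documentation snippet now binds the cv2 extension module to the conventional shorter alias `cv` at import time, and all call sites are renamed to match. A minimal sketch of the pattern (the image path is a placeholder):

import cv2 as cv  # bind the cv2 module object to the name 'cv'; nothing else changes

img = cv.imread('input.jpg', cv.IMREAD_COLOR)  # identical to cv2.imread(...)
if img is not None:
    cv.imshow('preview', img)
    cv.waitKey(0)
    cv.destroyAllWindows()

Because the import merely rebinds the module object, `cv.` and `cv2.` call sites behave identically; the rename is a readability convention, not an API change.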
import argparse
import cv2
import cv2 as cv
import glob
import numpy as np
import os
@@ -12,15 +12,15 @@ import time
ALGORITHMS_TO_EVALUATE = [
(cv2.bgsegm.createBackgroundSubtractorMOG, 'MOG', {}),
(cv2.bgsegm.createBackgroundSubtractorGMG, 'GMG', {}),
(cv2.bgsegm.createBackgroundSubtractorCNT, 'CNT', {}),
(cv2.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-vanilla', {'nSamples': 20, 'LSBPRadius': 4, 'Tlower': 2.0, 'Tupper': 200.0, 'Tinc': 1.0, 'Tdec': 0.05, 'Rscale': 5.0, 'Rincdec': 0.05, 'LSBPthreshold': 8}),
(cv2.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-speed', {'nSamples': 10, 'LSBPRadius': 16, 'Tlower': 2.0, 'Tupper': 32.0, 'Tinc': 1.0, 'Tdec': 0.05, 'Rscale': 10.0, 'Rincdec': 0.005, 'LSBPthreshold': 8}),
(cv2.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-quality', {'nSamples': 20, 'LSBPRadius': 16, 'Tlower': 2.0, 'Tupper': 32.0, 'Tinc': 1.0, 'Tdec': 0.05, 'Rscale': 10.0, 'Rincdec': 0.005, 'LSBPthreshold': 8}),
(cv2.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-camera-motion-compensation', {'mc': 1}),
(cv2.bgsegm.createBackgroundSubtractorGSOC, 'GSOC', {}),
(cv2.bgsegm.createBackgroundSubtractorGSOC, 'GSOC-camera-motion-compensation', {'mc': 1})
(cv.bgsegm.createBackgroundSubtractorMOG, 'MOG', {}),
(cv.bgsegm.createBackgroundSubtractorGMG, 'GMG', {}),
(cv.bgsegm.createBackgroundSubtractorCNT, 'CNT', {}),
(cv.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-vanilla', {'nSamples': 20, 'LSBPRadius': 4, 'Tlower': 2.0, 'Tupper': 200.0, 'Tinc': 1.0, 'Tdec': 0.05, 'Rscale': 5.0, 'Rincdec': 0.05, 'LSBPthreshold': 8}),
(cv.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-speed', {'nSamples': 10, 'LSBPRadius': 16, 'Tlower': 2.0, 'Tupper': 32.0, 'Tinc': 1.0, 'Tdec': 0.05, 'Rscale': 10.0, 'Rincdec': 0.005, 'LSBPthreshold': 8}),
(cv.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-quality', {'nSamples': 20, 'LSBPRadius': 16, 'Tlower': 2.0, 'Tupper': 32.0, 'Tinc': 1.0, 'Tdec': 0.05, 'Rscale': 10.0, 'Rincdec': 0.005, 'LSBPthreshold': 8}),
(cv.bgsegm.createBackgroundSubtractorLSBP, 'LSBP-camera-motion-compensation', {'mc': 1}),
(cv.bgsegm.createBackgroundSubtractorGSOC, 'GSOC', {}),
(cv.bgsegm.createBackgroundSubtractorGSOC, 'GSOC-camera-motion-compensation', {'mc': 1})
]
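Each entry above pairs a factory with a display name and a kwargs dict; given the evaluate_algorithm(gt, frames, algo, algo_arguments) signature in the next hunk, the subtractor is presumably built by expanding the dict into the factory call (the construction line itself is collapsed in this view). A runnable sketch of that pattern, trimmed to two entries:

import cv2 as cv

ALGORITHMS = [
    (cv.bgsegm.createBackgroundSubtractorMOG, 'MOG', {}),
    (cv.bgsegm.createBackgroundSubtractorLSBP, 'LSBP', {'nSamples': 20, 'LSBPRadius': 4}),
]

for factory, name, kwargs in ALGORITHMS:
    bgs = factory(**kwargs)  # e.g. createBackgroundSubtractorLSBP(nSamples=20, LSBPRadius=4)
    print(name, type(bgs).__name__)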
@@ -54,14 +54,14 @@ def evaluate_algorithm(gt, frames, algo, algo_arguments):
t_start = time.time()
for i in range(len(gt)):
frame = np.uint8(cv2.imread(frames[i], cv2.IMREAD_COLOR))
frame = np.uint8(cv.imread(frames[i], cv.IMREAD_COLOR))
mask.append(bgs.apply(frame))
average_duration = (time.time() - t_start) / len(gt)
average_precision, average_recall, average_f1, average_accuracy = [], [], [], []
for i in range(len(gt)):
gt_mask = np.uint8(cv2.imread(gt[i], cv2.IMREAD_GRAYSCALE))
gt_mask = np.uint8(cv.imread(gt[i], cv.IMREAD_GRAYSCALE))
roi = ((gt_mask == 255) | (gt_mask == 0))
if roi.sum() > 0:
gt_answer, answer = gt_mask[roi], mask[i][roi]
......
import numpy as np
import cv2
import cv2 as cv
import argparse
import os
@@ -17,22 +17,22 @@ def main():
f = map(lambda x: os.path.join(args.frames, x), os.listdir(args.frames))
f.sort()
gt = np.uint8(map(lambda x: cv2.imread(x, cv2.IMREAD_GRAYSCALE), gt))
f = np.uint8(map(lambda x: cv2.imread(x, cv2.IMREAD_COLOR), f))
gt = np.uint8(map(lambda x: cv.imread(x, cv.IMREAD_GRAYSCALE), gt))
f = np.uint8(map(lambda x: cv.imread(x, cv.IMREAD_COLOR), f))
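Aside: this script is Python 2 era — map() returns a list here, so f.sort() works, and xrange appears below. Under Python 3 the listing and loading steps would read, as a sketch (the directory comes from argparse in the real script):

import os
import cv2 as cv
import numpy as np

frames_dir = 'frames'  # placeholder for args.frames
f = sorted(os.path.join(frames_dir, x) for x in os.listdir(frames_dir))
f = np.uint8([cv.imread(x, cv.IMREAD_COLOR) for x in f])  # explicit list, not a map object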
if not args.lsbp:
bgs = cv2.bgsegm.createBackgroundSubtractorGSOC()
bgs = cv.bgsegm.createBackgroundSubtractorGSOC()
else:
bgs = cv2.bgsegm.createBackgroundSubtractorLSBP()
bgs = cv.bgsegm.createBackgroundSubtractorLSBP()
for i in xrange(f.shape[0]):
cv2.imshow('Frame', f[i])
cv2.imshow('Ground-truth', gt[i])
cv.imshow('Frame', f[i])
cv.imshow('Ground-truth', gt[i])
mask = bgs.apply(f[i])
bg = bgs.getBackgroundImage()
cv2.imshow('BG', bg)
cv2.imshow('Output mask', mask)
k = cv2.waitKey(0)
cv.imshow('BG', bg)
cv.imshow('Output mask', mask)
k = cv.waitKey(0)
if k == 27:
break
......
import cv2
import cv2 as cv
import argparse
@@ -9,15 +9,15 @@ def main():
argparser.add_argument('-o', '--obj', help='Object image. It must be strictly smaller than background.', required=True)
args = argparser.parse_args()
bg = cv2.imread(args.background)
obj = cv2.imread(args.obj)
generator = cv2.bgsegm.createSyntheticSequenceGenerator(bg, obj)
bg = cv.imread(args.background)
obj = cv.imread(args.obj)
generator = cv.bgsegm.createSyntheticSequenceGenerator(bg, obj)
while True:
frame, mask = generator.getNextFrame()
cv2.imshow('Generated frame', frame)
cv2.imshow('Generated mask', mask)
k = cv2.waitKey(int(1000.0 / 30))
cv.imshow('Generated frame', frame)
cv.imshow('Generated mask', mask)
k = cv.waitKey(int(1000.0 / 30))
if k == 27:
break
......
@@ -4,7 +4,7 @@ Bioinspired Module Retina Introduction {#bioinspired_retina}
Retina class overview
---------------------
@note Do not forget that the retina model is included in the cv::bioinspired namespace in C++ and in cv2.bioinspired in Python
@note Do not forget that the retina model is included in the cv::bioinspired namespace in C++ and in cv.bioinspired in Python
### Introduction
@@ -425,14 +425,14 @@ Python version
@code{.py}
#import OpenCV module
import cv2
import cv2 as cv
#setup webcam reader
videoHandler = cv2.VideoCapture(0)
videoHandler = cv.VideoCapture(0)
succeed, inputImage=videoHandler.read()
#allocate a retina instance with input size equal to the one of the loaded image
retina = cv2.bioinspired.createRetina((inputImage.shape[1], inputImage.shape[0]))
retina = cv.bioinspired.createRetina((inputImage.shape[1], inputImage.shape[0]))
#retina parameters management methods use sample
#-> save current (here default) retina parameters to a xml file (you may use it only one time to get the file and modify it)
@@ -446,7 +446,7 @@ while stillProcess is True:
#grab a new frame and display it
stillProcess, inputImage=videoHandler.read()
cv2.imshow('input frame', inputImage)
cv.imshow('input frame', inputImage)
#run retina on the input image
retina.run(inputImage)
@@ -456,11 +456,11 @@ while stillProcess is True:
retinaOut_magno=retina.getMagno()
#draw retina outputs
cv2.imshow('retina parvo out', retinaOut_parvo)
cv2.imshow('retina magno out', retinaOut_magno)
cv.imshow('retina parvo out', retinaOut_parvo)
cv.imshow('retina magno out', retinaOut_magno)
#wait a little to let the time for figures to be drawn
cv2.waitKey(2)
cv.waitKey(2)
@endcode
......
@@ -117,10 +117,10 @@ iterations. This is an arbitrary number that we found experimentally to be (more
enough
@code{.py}
import cv2
import cv2 as cv
inputImage = cv2.imread('checkershadow_illusion4med.jpg', 1)
retina = cv2.bioinspired.createRetina((inputImage.shape[1], inputImage.shape[0]))
inputImage = cv.imread('checkershadow_illusion4med.jpg', 1)
retina = cv.bioinspired.createRetina((inputImage.shape[1], inputImage.shape[0]))
# the retina object is created with default parameters. If you want to read
# the parameters from an external XML file, uncomment the next line
@@ -134,15 +134,15 @@ for i in range(20):
retinaOut_parvo = retina.getParvo()
# show both the original image and the processed one
cv2.imshow('image', inputImage)
cv2.imshow('retina parvo out', retinaOut_parvo)
cv.imshow('image', inputImage)
cv.imshow('retina parvo out', retinaOut_parvo)
# wait for a key to be pressed and exit
cv2.waitKey(0)
cv2.destroyAllWindows()
cv.waitKey(0)
cv.destroyAllWindows()
# write the output image on a file
cv2.imwrite('checkershadow_parvo.png', retinaOut_parvo)
cv.imwrite('checkershadow_parvo.png', retinaOut_parvo)
@endcode
Whatever method you used to process the image, you should end up
......
#!/usr/bin/env python
import numpy as np
import cv2
import cv2 as cv
MHI_DURATION = 0.5
DEFAULT_THRESHOLD = 32
@@ -12,12 +12,12 @@ def nothing(dummy):
pass
def draw_motion_comp(vis, (x, y, w, h), angle, color):
cv2.rectangle(vis, (x, y), (x+w, y+h), (0, 255, 0))
cv.rectangle(vis, (x, y), (x+w, y+h), (0, 255, 0))
r = min(w/2, h/2)
cx, cy = x+w/2, y+h/2
angle = angle*np.pi/180
cv2.circle(vis, (cx, cy), r, color, 3)
cv2.line(vis, (cx, cy), (int(cx+np.cos(angle)*r), int(cy+np.sin(angle)*r)), color, 3)
cv.circle(vis, (cx, cy), r, color, 3)
cv.line(vis, (cx, cy), (int(cx+np.cos(angle)*r), int(cy+np.sin(angle)*r)), color, 3)
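Note: draw_motion_comp uses a Python 2 tuple parameter in its signature, which PEP 3113 removed in Python 3, and relies on integer '/' division. A Python 3 sketch of the same function, assuming the module's import cv2 as cv and import numpy as np:

def draw_motion_comp(vis, rect, angle, color):
    x, y, w, h = rect                  # unpack in the body instead of the signature
    cv.rectangle(vis, (x, y), (x+w, y+h), (0, 255, 0))
    r = min(w // 2, h // 2)            # floor division keeps the radius an int
    cx, cy = x + w // 2, y + h // 2
    angle = angle * np.pi / 180
    cv.circle(vis, (cx, cy), r, color, 3)
    cv.line(vis, (cx, cy), (int(cx + np.cos(angle)*r), int(cy + np.sin(angle)*r)), color, 3)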
if __name__ == '__main__':
import sys
@@ -26,12 +26,12 @@ if __name__ == '__main__':
except:
video_src = 0
cv2.namedWindow('motempl')
cv.namedWindow('motempl')
visuals = ['input', 'frame_diff', 'motion_hist', 'grad_orient']
cv2.createTrackbar('visual', 'motempl', 2, len(visuals)-1, nothing)
cv2.createTrackbar('threshold', 'motempl', DEFAULT_THRESHOLD, 255, nothing)
cv.createTrackbar('visual', 'motempl', 2, len(visuals)-1, nothing)
cv.createTrackbar('threshold', 'motempl', DEFAULT_THRESHOLD, 255, nothing)
cam = cv2.VideoCapture(video_src)
cam = cv.VideoCapture(video_src)
if not cam.isOpened():
print("could not open video_src " + str(video_src) + " !\n")
sys.exit(1)
@@ -48,27 +48,27 @@ if __name__ == '__main__':
ret, frame = cam.read()
if ret == False:
break
frame_diff = cv2.absdiff(frame, prev_frame)
gray_diff = cv2.cvtColor(frame_diff, cv2.COLOR_BGR2GRAY)
thrs = cv2.getTrackbarPos('threshold', 'motempl')
ret, motion_mask = cv2.threshold(gray_diff, thrs, 1, cv2.THRESH_BINARY)
timestamp = cv2.getTickCount() / cv2.getTickFrequency()
cv2.motempl.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
mg_mask, mg_orient = cv2.motempl.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
seg_mask, seg_bounds = cv2.motempl.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)
frame_diff = cv.absdiff(frame, prev_frame)
gray_diff = cv.cvtColor(frame_diff, cv.COLOR_BGR2GRAY)
thrs = cv.getTrackbarPos('threshold', 'motempl')
ret, motion_mask = cv.threshold(gray_diff, thrs, 1, cv.THRESH_BINARY)
timestamp = cv.getTickCount() / cv.getTickFrequency()
cv.motempl.updateMotionHistory(motion_mask, motion_history, timestamp, MHI_DURATION)
mg_mask, mg_orient = cv.motempl.calcMotionGradient( motion_history, MAX_TIME_DELTA, MIN_TIME_DELTA, apertureSize=5 )
seg_mask, seg_bounds = cv.motempl.segmentMotion(motion_history, timestamp, MAX_TIME_DELTA)
visual_name = visuals[cv2.getTrackbarPos('visual', 'motempl')]
visual_name = visuals[cv.getTrackbarPos('visual', 'motempl')]
if visual_name == 'input':
vis = frame.copy()
elif visual_name == 'frame_diff':
vis = frame_diff.copy()
elif visual_name == 'motion_hist':
vis = np.uint8(np.clip((motion_history-(timestamp-MHI_DURATION)) / MHI_DURATION, 0, 1)*255)
vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
vis = cv.cvtColor(vis, cv.COLOR_GRAY2BGR)
elif visual_name == 'grad_orient':
hsv[:,:,0] = mg_orient/2
hsv[:,:,2] = mg_mask*255
vis = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
vis = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
for i, rect in enumerate([(0, 0, w, h)] + list(seg_bounds)):
x, y, rw, rh = rect
@@ -79,16 +79,16 @@ if __name__ == '__main__':
orient_roi = mg_orient [y:y+rh,x:x+rw]
mask_roi = mg_mask [y:y+rh,x:x+rw]
mhi_roi = motion_history[y:y+rh,x:x+rw]
if cv2.norm(silh_roi, cv2.NORM_L1) < area*0.05:
if cv.norm(silh_roi, cv.NORM_L1) < area*0.05:
continue
angle = cv2.motempl.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
angle = cv.motempl.calcGlobalOrientation(orient_roi, mask_roi, mhi_roi, timestamp, MHI_DURATION)
color = ((255, 0, 0), (0, 0, 255))[i == 0]
draw_motion_comp(vis, rect, angle, color)
cv2.putText(vis, visual_name, (20, 20), cv2.FONT_HERSHEY_PLAIN, 1.0, (200,0,0))
cv2.imshow('motempl', vis)
cv.putText(vis, visual_name, (20, 20), cv.FONT_HERSHEY_PLAIN, 1.0, (200,0,0))
cv.imshow('motempl', vis)
prev_frame = frame.copy()
if 0xFF & cv2.waitKey(5) == 27:
if 0xFF & cv.waitKey(5) == 27:
break
cv2.destroyAllWindows()
cv.destroyAllWindows()
@@ -3,7 +3,7 @@
import os
import sys
import numpy as np
import cv2
import cv2 as cv
import struct
import argparse
from math import sqrt
@@ -73,7 +73,7 @@ def load_flo(flo):
def get_w(m):
s = m.shape
w = cv2.dct(m)
w = cv.dct(m)
w *= 2.0 / sqrt(s[0] * s[1])
#w[0,0] *= 0.5
w[:, 0] *= sqrt(0.5)
......
import cv2
import numpy as np
import cv2 as cv
# aruco
adict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_50)
cv2.imshow("marker", cv2.aruco.drawMarker(adict, 0, 400))
adict = cv.aruco.Dictionary_get(cv.aruco.DICT_4X4_50)
cv.imshow("marker", cv.aruco.drawMarker(adict, 0, 400))
# random calibration data. your mileage may vary.
imsize = (800, 600)
K = cv2.getDefaultNewCameraMatrix(np.diag([800, 800, 1]), imsize, True)
K = cv.getDefaultNewCameraMatrix(np.diag([800, 800, 1]), imsize, True)
# AR scene
cv2.ovis.addResourceLocation("packs/Sinbad.zip") # shipped with Ogre
cv.ovis.addResourceLocation("packs/Sinbad.zip") # shipped with Ogre
win = cv2.ovis.createWindow("arucoAR", imsize, flags=0)
win = cv.ovis.createWindow("arucoAR", imsize, flags=0)
win.createEntity("figure", "Sinbad.mesh", (0, 0, -5), (-1.57, 0, 0))
win.createLightEntity("sun", (0, 0, -100))
# video capture
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, imsize[0])
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, imsize[1])
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_FRAME_WIDTH, imsize[0])
cap.set(cv.CAP_PROP_FRAME_HEIGHT, imsize[1])
while cv2.ovis.renderOneFrame():
while cv.ovis.renderOneFrame():
img = cap.read()[1]
win.setBackground(img)
corners, ids = cv2.aruco.detectMarkers(img, adict)[:2]
corners, ids = cv.aruco.detectMarkers(img, adict)[:2]
cv2.waitKey(1)
cv.waitKey(1)
if ids is None:
continue
rvecs, tvecs = cv2.aruco.estimatePoseSingleMarkers(corners, 5, K, None)[:2]
rvecs, tvecs = cv.aruco.estimatePoseSingleMarkers(corners, 5, K, None)[:2]
win.setCameraPose(tvecs[0].ravel(), rvecs[0].ravel(), invert=True)
import cv2
import numpy as np
import cv2 as cv
# add some external resources
cv2.ovis.addResourceLocation("packs/Sinbad.zip")
cv.ovis.addResourceLocation("packs/Sinbad.zip")
# camera intrinsics
imsize = (800, 600)
@@ -10,19 +10,19 @@ K = np.diag([800, 800, 1])
K[:2, 2] = (400, 100) # offset pp
# observer scene
owin = cv2.ovis.createWindow("VR", imsize)
cv2.ovis.createGridMesh("ground", (10, 10), (10, 10))
owin = cv.ovis.createWindow("VR", imsize)
cv.ovis.createGridMesh("ground", (10, 10), (10, 10))
owin.createEntity("ground", "ground", rot=(1.57, 0, 0))
owin.createCameraEntity("cam", K, imsize, 5)
owin.createEntity("figure", "Sinbad.mesh", (0, -5, 0)) # externally defined mesh
owin.createLightEntity("sun", (0, 0, -100))
# interaction scene
iwin = cv2.ovis.createWindow("AR", imsize, cv2.ovis.SCENE_SEPERATE | cv2.ovis.SCENE_INTERACTIVE)
iwin = cv.ovis.createWindow("AR", imsize, cv.ovis.SCENE_SEPERATE | cv.ovis.SCENE_INTERACTIVE)
iwin.createEntity("figure", "Sinbad.mesh", (0, -5, 0))
iwin.createLightEntity("sun", (0, 0, -100))
iwin.setCameraIntrinsics(K, imsize)
while cv2.ovis.renderOneFrame():
while cv.ovis.renderOneFrame():
R, t = iwin.getCameraPose()
owin.setEntityPose("cam", t, R)
#!/usr/bin/python
import cv2
import cv2 as cv
import numpy as np
import sys
img1 = cv2.imread(sys.argv[1])
img1 = cv.imread(sys.argv[1])
img1 = img1.astype(np.float32)
shift = np.array([5., 5.])
mapTest = cv2.reg.MapShift(shift)
mapTest = cv.reg.MapShift(shift)
img2 = mapTest.warp(img1)
mapper = cv2.reg.MapperGradShift()
mappPyr = cv2.reg.MapperPyramid(mapper)
mapper = cv.reg.MapperGradShift()
mappPyr = cv.reg.MapperPyramid(mapper)
resMap = mappPyr.calculate(img1, img2)
mapShift = cv2.reg.MapTypeCaster_toShift(resMap)
mapShift = cv.reg.MapTypeCaster_toShift(resMap)
print(mapShift.getShift())
import cv2
import cv2 as cv
import numpy as np
def rotation(theta):
@@ -42,7 +42,7 @@ Rt = np.vstack((
np.array([0, 0, 0, 1])
)).astype(np.float32)
icp = cv2.ppf_match_3d_ICP(100)
icp = cv.ppf_match_3d_ICP(100)
I = np.eye(4)
print("Unaligned error:\t%.6f" % np.linalg.norm(I - Rt))
......
@@ -2,7 +2,7 @@
#!/usr/bin/python
import sys
import os
import cv2
import cv2 as cv
import numpy as np
def main():
@@ -19,8 +19,8 @@ def main():
print " See the documentation of text::TextDetectorCNN class to get download links."
quit()
img = cv2.imread(str(sys.argv[1]))
textSpotter = cv2.text.TextDetectorCNN_create("textbox.prototxt", "TextBoxes_icdar13.caffemodel")
img = cv.imread(str(sys.argv[1]))
textSpotter = cv.text.TextDetectorCNN_create("textbox.prototxt", "TextBoxes_icdar13.caffemodel")
rects, outProbs = textSpotter.detect(img);
vis = img.copy()
thres = 0.6
@@ -28,10 +28,10 @@ def main():
for r in range(np.shape(rects)[0]):
if outProbs[r] > thres:
rect = rects[r]
cv2.rectangle(vis, (rect[0],rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (255, 0, 0), 2)
cv.rectangle(vis, (rect[0],rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (255, 0, 0), 2)
cv2.imshow("Text detection result", vis)
cv2.waitKey()
cv.imshow("Text detection result", vis)
cv.waitKey()
if __name__ == "__main__":
main()
@@ -3,7 +3,7 @@
import sys
import os
import cv2
import cv2 as cv
import numpy as np
print('\ndetect_er_chars.py')
@@ -17,22 +17,22 @@ if (len(sys.argv) < 2):
pathname = os.path.dirname(sys.argv[0])
img = cv2.imread(str(sys.argv[1]))
gray = cv2.imread(str(sys.argv[1]),0)
img = cv.imread(str(sys.argv[1]))
gray = cv.imread(str(sys.argv[1]),0)
erc1 = cv2.text.loadClassifierNM1(pathname+'/trained_classifierNM1.xml')
er1 = cv2.text.createERFilterNM1(erc1)
erc1 = cv.text.loadClassifierNM1(pathname+'/trained_classifierNM1.xml')
er1 = cv.text.createERFilterNM1(erc1)
erc2 = cv2.text.loadClassifierNM2(pathname+'/trained_classifierNM2.xml')
er2 = cv2.text.createERFilterNM2(erc2)
erc2 = cv.text.loadClassifierNM2(pathname+'/trained_classifierNM2.xml')
er2 = cv.text.createERFilterNM2(erc2)
regions = cv2.text.detectRegions(gray,er1,er2)
regions = cv.text.detectRegions(gray,er1,er2)
#Visualization
rects = [cv2.boundingRect(p.reshape(-1, 1, 2)) for p in regions]
rects = [cv.boundingRect(p.reshape(-1, 1, 2)) for p in regions]
for rect in rects:
cv2.rectangle(img, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]), (0, 0, 0), 2)
cv.rectangle(img, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]), (0, 0, 0), 2)
for rect in rects:
cv2.rectangle(img, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]), (255, 255, 255), 1)
cv2.imshow("Text detection result", img)
cv2.waitKey(0)
cv.rectangle(img, rect[0:2], (rect[0]+rect[2],rect[1]+rect[3]), (255, 255, 255), 1)
cv.imshow("Text detection result", img)
cv.waitKey(0)
@@ -3,7 +3,7 @@
import sys
import os
import cv2
import cv2 as cv
import numpy as np
print('\ntextdetection.py')
@@ -18,13 +18,13 @@ if (len(sys.argv) < 2):
pathname = os.path.dirname(sys.argv[0])
img = cv2.imread(str(sys.argv[1]))
img = cv.imread(str(sys.argv[1]))
# for visualization
vis = img.copy()
# Extract channels to be processed individually
channels = cv2.text.computeNMChannels(img)
channels = cv.text.computeNMChannels(img)
# Append negative channels to detect ER- (bright regions over dark background)
cn = len(channels)-1
for c in range(0,cn):
@@ -35,24 +35,24 @@ print("Extracting Class Specific Extremal Regions from "+str(len(channels))+" ch
print(" (...) this may take a while (...)")
for channel in channels:
erc1 = cv2.text.loadClassifierNM1(pathname+'/trained_classifierNM1.xml')
er1 = cv2.text.createERFilterNM1(erc1,16,0.00015,0.13,0.2,True,0.1)
erc1 = cv.text.loadClassifierNM1(pathname+'/trained_classifierNM1.xml')
er1 = cv.text.createERFilterNM1(erc1,16,0.00015,0.13,0.2,True,0.1)
erc2 = cv2.text.loadClassifierNM2(pathname+'/trained_classifierNM2.xml')
er2 = cv2.text.createERFilterNM2(erc2,0.5)
erc2 = cv.text.loadClassifierNM2(pathname+'/trained_classifierNM2.xml')
er2 = cv.text.createERFilterNM2(erc2,0.5)
regions = cv2.text.detectRegions(channel,er1,er2)
regions = cv.text.detectRegions(channel,er1,er2)
rects = cv2.text.erGrouping(img,channel,[r.tolist() for r in regions])
#rects = cv2.text.erGrouping(img,channel,[x.tolist() for x in regions], cv2.text.ERGROUPING_ORIENTATION_ANY,'../../GSoC2014/opencv_contrib/modules/text/samples/trained_classifier_erGrouping.xml',0.5)
rects = cv.text.erGrouping(img,channel,[r.tolist() for r in regions])
#rects = cv.text.erGrouping(img,channel,[x.tolist() for x in regions], cv.text.ERGROUPING_ORIENTATION_ANY,'../../GSoC2014/opencv_contrib/modules/text/samples/trained_classifier_erGrouping.xml',0.5)
#Visualization
for r in range(0,np.shape(rects)[0]):
rect = rects[r]
cv2.rectangle(vis, (rect[0],rect[1]), (rect[0]+rect[2],rect[1]+rect[3]), (0, 0, 0), 2)
cv2.rectangle(vis, (rect[0],rect[1]), (rect[0]+rect[2],rect[1]+rect[3]), (255, 255, 255), 1)
cv.rectangle(vis, (rect[0],rect[1]), (rect[0]+rect[2],rect[1]+rect[3]), (0, 0, 0), 2)
cv.rectangle(vis, (rect[0],rect[1]), (rect[0]+rect[2],rect[1]+rect[3]), (255, 255, 255), 1)
#Visualization
cv2.imshow("Text detection result", vis)
cv2.waitKey(0)
cv.imshow("Text detection result", vis)
cv.waitKey(0)
import numpy as np
import cv2
import cv2 as cv
import sys
if len(sys.argv) != 2:
@@ -8,9 +8,9 @@ if len(sys.argv) != 2:
print('Select 3 tracking targets')
cv2.namedWindow("tracking")
camera = cv2.VideoCapture(sys.argv[1])
tracker = cv2.MultiTracker_create()
cv.namedWindow("tracking")
camera = cv.VideoCapture(sys.argv[1])
tracker = cv.MultiTracker_create()
init_once = False
ok, image=camera.read()
@@ -18,9 +18,9 @@ if not ok:
print('Failed to read video')
exit()
bbox1 = cv2.selectROI('tracking', image)
bbox2 = cv2.selectROI('tracking', image)
bbox3 = cv2.selectROI('tracking', image)
bbox1 = cv.selectROI('tracking', image)
bbox2 = cv.selectROI('tracking', image)
bbox3 = cv.selectROI('tracking', image)
while camera.isOpened():
ok, image=camera.read()
@@ -29,9 +29,9 @@ while camera.isOpened():
break
if not init_once:
ok = tracker.add(cv2.TrackerMIL_create(), image, bbox1)
ok = tracker.add(cv2.TrackerMIL_create(), image, bbox2)
ok = tracker.add(cv2.TrackerMIL_create(), image, bbox3)
ok = tracker.add(cv.TrackerMIL_create(), image, bbox1)
ok = tracker.add(cv.TrackerMIL_create(), image, bbox2)
ok = tracker.add(cv.TrackerMIL_create(), image, bbox3)
init_once = True
ok, boxes = tracker.update(image)
@@ -40,8 +40,8 @@ while camera.isOpened():
for newbox in boxes:
p1 = (int(newbox[0]), int(newbox[1]))
p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
cv2.rectangle(image, p1, p2, (200,0,0))
cv.rectangle(image, p1, p2, (200,0,0))
cv2.imshow('tracking', image)
k = cv2.waitKey(1)
cv.imshow('tracking', image)
k = cv.waitKey(1)
if k == 27 : break # esc pressed
import numpy as np
import cv2
import cv2 as cv
import sys
if len(sys.argv) != 2:
print('Input video name is missing')
exit()
cv2.namedWindow("tracking")
camera = cv2.VideoCapture(sys.argv[1])
cv.namedWindow("tracking")
camera = cv.VideoCapture(sys.argv[1])
ok, image=camera.read()
if not ok:
print('Failed to read video')
exit()
bbox = cv2.selectROI("tracking", image)
tracker = cv2.TrackerMIL_create()
bbox = cv.selectROI("tracking", image)
tracker = cv.TrackerMIL_create()
init_once = False
while camera.isOpened():
@@ -32,8 +32,8 @@ while camera.isOpened():
if ok:
p1 = (int(newbox[0]), int(newbox[1]))
p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
cv2.rectangle(image, p1, p2, (200,0,0))
cv.rectangle(image, p1, p2, (200,0,0))
cv2.imshow("tracking", image)
k = cv2.waitKey(1) & 0xff
cv.imshow("tracking", image)
k = cv.waitKey(1) & 0xff
if k == 27 : break # esc pressed
@@ -7,7 +7,7 @@ Usage:
edgeboxes_demo.py [<model>] [<input_image>]
'''
import cv2
import cv2 as cv
import numpy as np
import sys
@@ -15,24 +15,24 @@ if __name__ == '__main__':
print(__doc__)
model = sys.argv[1]
im = cv2.imread(sys.argv[2])
im = cv.imread(sys.argv[2])
edge_detection = cv2.ximgproc.createStructuredEdgeDetection(model)
rgb_im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
edge_detection = cv.ximgproc.createStructuredEdgeDetection(model)
rgb_im = cv.cvtColor(im, cv.COLOR_BGR2RGB)
edges = edge_detection.detectEdges(np.float32(rgb_im) / 255.0)
orimap = edge_detection.computeOrientation(edges)
edges = edge_detection.edgesNms(edges, orimap)
edge_boxes = cv2.ximgproc.createEdgeBoxes()
edge_boxes = cv.ximgproc.createEdgeBoxes()
edge_boxes.setMaxBoxes(30)
boxes = edge_boxes.getBoundingBoxes(edges, orimap)
for b in boxes:
x, y, w, h = b
cv2.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 1, cv2.LINE_AA)
cv.rectangle(im, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
cv2.imshow("edges", edges)
cv2.imshow("edgeboxes", im)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv.imshow("edges", edges)
cv.imshow("edgeboxes", im)
cv.waitKey(0)
cv.destroyAllWindows()
@@ -4,7 +4,7 @@
import numpy as np
import argparse
import cv2
import cv2 as cv
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
@@ -12,18 +12,18 @@ ap.add_argument("-i", "--image", help = "path to the image file")
args = vars(ap.parse_args())
# load the image
image = cv2.imread(args["image"])
image = cv.imread(args["image"])
lower = np.array([20,0,155])
upper = np.array([255,120,250])
shapeMask = cv2.inRange(image, lower, upper)
shapeMask = cv.inRange(image, lower, upper)
# find the contours in the mask
(cnts, _) = cv2.findContours(shapeMask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cv2.imshow("Mask", shapeMask)
(cnts, _) = cv.findContours(shapeMask.copy(), cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)
cv.imshow("Mask", shapeMask)
# loop over the contours
for c in cnts:
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
cv2.imshow("Image", image)
cv2.waitKey(0)
\ No newline at end of file
cv.drawContours(image, [c], -1, (0, 255, 0), 2)
cv.imshow("Image", image)
cv.waitKey(0)
\ No newline at end of file
@@ -10,16 +10,16 @@ Usage:
Use 'a' to display fewer rects, 'd' to display more rects, 'q' to quit.
'''
import cv2
import cv2 as cv
import sys
if __name__ == '__main__':
img = cv2.imread(sys.argv[1])
img = cv.imread(sys.argv[1])
cv2.setUseOptimized(True)
cv2.setNumThreads(8)
cv.setUseOptimized(True)
cv.setNumThreads(8)
gs = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
gs = cv.ximgproc.segmentation.createSelectiveSearchSegmentation()
gs.setBaseImage(img)
if (sys.argv[2][0] == 's'):
@@ -43,10 +43,10 @@ if __name__ == '__main__':
for i in range(len(rects)):
if (i < nb_rects):
x, y, w, h = rects[i]
cv2.rectangle(wimg, (x, y), (x+w, y+h), (0, 255, 0), 1, cv2.LINE_AA)
cv.rectangle(wimg, (x, y), (x+w, y+h), (0, 255, 0), 1, cv.LINE_AA)
cv2.imshow("Output", wimg);
c = cv2.waitKey()
cv.imshow("Output", wimg);
c = cv.waitKey()
if (c == 100):
nb_rects += 10
@@ -57,4 +57,4 @@ if __name__ == '__main__':
elif (c == 113):
break
cv2.destroyAllWindows()
cv.destroyAllWindows()
@@ -3,7 +3,7 @@ from __future__ import print_function
import os, sys, argparse, json
import numpy as np
import scipy.io
import cv2
import cv2 as cv
import timeit
from learn_color_balance import load_ground_truth
@@ -44,7 +44,7 @@ def evaluate(im, algo, gt_illuminant, i, range_thresh, bin_num, dst_folder, mode
new_im = None
start_time = timeit.default_timer()
if algo=="grayworld":
inst = cv2.xphoto.createGrayworldWB()
inst = cv.xphoto.createGrayworldWB()
inst.setSaturationThreshold(0.95)
new_im = inst.balanceWhite(im)
elif algo=="nothing":
@@ -53,7 +53,7 @@ def evaluate(im, algo, gt_illuminant, i, range_thresh, bin_num, dst_folder, mode
model_path = ""
if len(algo.split(":"))>1:
model_path = os.path.join(model_folder, algo.split(":")[1])
inst = cv2.xphoto.createLearningBasedWB(model_path)
inst = cv.xphoto.createLearningBasedWB(model_path)
inst.setRangeMaxVal(range_thresh)
inst.setSaturationThreshold(0.98)
inst.setHistBinNum(bin_num)
@@ -63,14 +63,14 @@ def evaluate(im, algo, gt_illuminant, i, range_thresh, bin_num, dst_folder, mode
g1 = float(1.0 / gains[2])
g2 = float(1.0 / gains[1])
g3 = float(1.0 / gains[0])
new_im = cv2.xphoto.applyChannelGains(im, g1, g2, g3)
new_im = cv.xphoto.applyChannelGains(im, g1, g2, g3)
time = 1000*(timeit.default_timer() - start_time) #time in ms
if len(dst_folder)>0:
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
im_name = ("%04d_" % i) + algo.replace(":","_") + ".jpg"
cv2.imwrite(os.path.join(dst_folder, im_name), stretch_to_8bit(new_im))
cv.imwrite(os.path.join(dst_folder, im_name), stretch_to_8bit(new_im))
#recover the illuminant from the color balancing result, assuming the standard model:
estimated_illuminant = [0, 0, 0]
@@ -248,7 +248,7 @@ if __name__ == '__main__':
if file not in state[algorithm].keys() and\
((i>=img_range[0] and i<img_range[1]) or img_range[0]==img_range[1]==0):
cur_path = os.path.join(args.input_folder, file)
im = cv2.imread(cur_path, -1).astype(np.float32)
im = cv.imread(cur_path, -1).astype(np.float32)
im -= black_levels[i]
range_thresh = 255
if len(args.input_bit_depth)>0:
......
@@ -4,7 +4,7 @@ import os, sys, argparse
import numpy as np
import scipy.io
from sklearn.tree import DecisionTreeRegressor
import cv2
import cv2 as cv
import random
@@ -96,7 +96,7 @@ def generate_code(model, input_params, use_YML, out_file):
thresh_vals += local_thresh_vals
leaf_vals += local_leaf_vals
if use_YML:
fs = cv2.FileStorage(out_file, 1)
fs = cv.FileStorage(out_file, 1)
fs.write("num_trees", len(model))
fs.write("num_tree_nodes", 2**depth)
fs.write("feature_idx", np.array(feature_idx).astype(np.uint8))
@@ -246,14 +246,14 @@ if __name__ == '__main__':
i=0
sz = len(img_files)
random.seed(1234)
inst = cv2.xphoto.createLearningBasedWB()
inst = cv.xphoto.createLearningBasedWB()
inst.setRangeMaxVal(255)
inst.setSaturationThreshold(0.98)
inst.setHistBinNum(hist_bin_num)
for file in img_files:
if (i>=img_range[0] and i<img_range[1]) or (img_range[0]==img_range[1]==0):
cur_path = os.path.join(args.input_folder,file)
im = cv2.imread(cur_path, -1).astype(np.float32)
im = cv.imread(cur_path, -1).astype(np.float32)
im -= black_levels[i]
im_8bit = convert_to_8bit(im)
cur_img_features = inst.extractSimpleFeatures(im_8bit, None)
......
@@ -5,7 +5,7 @@ This module contains some common routines used by other samples.
'''
import numpy as np
import cv2
import cv2 as cv
# built-in modules
import os
@@ -63,7 +63,7 @@ def lookat(eye, target, up = (0, 0, 1)):
return R, tvec
def mtx2rvec(R):
w, u, vt = cv2.SVDecomp(R - np.eye(3))
w, u, vt = cv.SVDecomp(R - np.eye(3))
p = vt[0] + u[:,0]*w[0] # same as np.dot(R, vt[0])
c = np.dot(vt[0], p)
s = np.dot(vt[1], p)
@@ -71,8 +71,8 @@ def mtx2rvec(R):
return axis * np.arctan2(s, c)
def draw_str(dst, (x, y), s):
cv2.putText(dst, s, (x+1, y+1), cv2.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv2.LINE_AA)
cv2.putText(dst, s, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv2.LINE_AA)
cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness = 2, lineType=cv.LINE_AA)
cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)
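The same Python 2 tuple-parameter caveat applies to draw_str above; a Python 3 compatible sketch:

def draw_str(dst, target, s):
    x, y = target  # Python 3: tuple parameters are gone (PEP 3113)
    cv.putText(dst, s, (x+1, y+1), cv.FONT_HERSHEY_PLAIN, 1.0, (0, 0, 0), thickness=2, lineType=cv.LINE_AA)
    cv.putText(dst, s, (x, y), cv.FONT_HERSHEY_PLAIN, 1.0, (255, 255, 255), lineType=cv.LINE_AA)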
class Sketcher:
def __init__(self, windowname, dests, colors_func):
@@ -82,21 +82,21 @@ class Sketcher:
self.colors_func = colors_func
self.dirty = False
self.show()
cv2.setMouseCallback(self.windowname, self.on_mouse)
cv.setMouseCallback(self.windowname, self.on_mouse)
def show(self):
cv2.imshow(self.windowname, self.dests[0])
cv.imshow(self.windowname, self.dests[0])
def on_mouse(self, event, x, y, flags, param):
pt = (x, y)
if event == cv2.EVENT_LBUTTONDOWN:
if event == cv.EVENT_LBUTTONDOWN:
self.prev_pt = pt
elif event == cv2.EVENT_LBUTTONUP:
elif event == cv.EVENT_LBUTTONUP:
self.prev_pt = None
if self.prev_pt and flags & cv2.EVENT_FLAG_LBUTTON:
if self.prev_pt and flags & cv.EVENT_FLAG_LBUTTON:
for dst, color in zip(self.dests, self.colors_func()):
cv2.line(dst, self.prev_pt, pt, color, 5)
cv.line(dst, self.prev_pt, pt, color, 5)
self.dirty = True
self.prev_pt = pt
self.show()
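As a usage sketch for Sketcher (hypothetical driver code; it assumes this module — the samples' common.py, per its docstring — is importable as 'common'): constructing the object shows the window and installs the mouse callback, colors_func supplies one color per destination image on each stroke, and cv.waitKey pumps the GUI event loop so strokes render.

import numpy as np
import cv2 as cv
from common import Sketcher  # assumption: this file is on the path as common.py

canvas = np.full((480, 640, 3), 255, np.uint8)        # white canvas to draw on
sketch = Sketcher('draw', [canvas], lambda: ((0, 128, 0),))
while cv.waitKey(30) & 0xff != 27:                    # ESC quits
    pass
cv.destroyAllWindows()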
@@ -131,7 +131,7 @@ def nothing(*arg, **kw):
pass
def clock():
return cv2.getTickCount() / cv2.getTickFrequency()
return cv.getTickCount() / cv.getTickFrequency()
@contextmanager
def Timer(msg):
@@ -157,15 +157,15 @@ class RectSelector:
def __init__(self, win, callback):
self.win = win
self.callback = callback
cv2.setMouseCallback(win, self.onmouse)
cv.setMouseCallback(win, self.onmouse)
self.drag_start = None
self.drag_rect = None
def onmouse(self, event, x, y, flags, param):
x, y = np.int16([x, y]) # BUG
if event == cv2.EVENT_LBUTTONDOWN:
if event == cv.EVENT_LBUTTONDOWN:
self.drag_start = (x, y)
if self.drag_start:
if flags & cv2.EVENT_FLAG_LBUTTON:
if flags & cv.EVENT_FLAG_LBUTTON:
xo, yo = self.drag_start
x0, y0 = np.minimum([xo, yo], [x, y])
x1, y1 = np.maximum([xo, yo], [x, y])
@@ -182,7 +182,7 @@ class RectSelector:
if not self.drag_rect:
return False
x0, y0, x1, y1 = self.drag_rect
cv2.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
cv.rectangle(vis, (x0, y0), (x1, y1), (0, 255, 0), 2)
return True
@property
def dragging(self):
@@ -217,4 +217,4 @@ def mdot(*args):
def draw_keypoints(vis, keypoints, color = (0, 255, 255)):
for kp in keypoints:
x, y = kp.pt
cv2.circle(vis, (int(x), int(y)), 2, color)
cv.circle(vis, (int(x), int(y)), 2, color)
@@ -17,7 +17,7 @@ ESC - exit
from __future__ import print_function
import numpy as np
import cv2
import cv2 as cv
import video
@@ -27,10 +27,10 @@ def draw_flow(img, flow, step=16):
fx, fy = flow[y,x].T
lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
lines = np.int32(lines + 0.5)
vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
cv2.polylines(vis, lines, 0, (0, 255, 0))
vis = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
cv.polylines(vis, lines, 0, (0, 255, 0))
for (x1, y1), (x2, y2) in lines:
cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
cv.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
return vis
@@ -43,7 +43,7 @@ def draw_hsv(flow):
hsv[...,0] = ang*(180/np.pi/2)
hsv[...,1] = 255
hsv[...,2] = np.minimum(v*4, 255)
bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
bgr = cv.cvtColor(hsv, cv.COLOR_HSV2BGR)
return bgr
@@ -52,7 +52,7 @@ def warp_flow(img, flow):
flow = -flow
flow[:,:,0] += np.arange(w)
flow[:,:,1] += np.arange(h)[:,np.newaxis]
res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
res = cv.remap(img, flow, None, cv.INTER_LINEAR)
return res
@@ -66,19 +66,19 @@ if __name__ == '__main__':
cam = video.create_capture(fn)
ret, prev = cam.read()
prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)
show_hsv = False
show_glitch = False
use_spatial_propagation = False
use_temporal_propagation = True
cur_glitch = prev.copy()
inst = cv2.optflow.createOptFlow_DIS(cv2.optflow.DISOPTICAL_FLOW_PRESET_MEDIUM)
inst = cv.optflow.createOptFlow_DIS(cv.optflow.DISOPTICAL_FLOW_PRESET_MEDIUM)
inst.setUseSpatialPropagation(use_spatial_propagation)
flow = None
while True:
ret, img = cam.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
if flow is not None and use_temporal_propagation:
#warp previous flow to get an initial approximation for the current flow:
flow = inst.calc(prevgray, gray, warp_flow(flow,flow))
@@ -86,14 +86,14 @@ if __name__ == '__main__':
flow = inst.calc(prevgray, gray, None)
prevgray = gray
cv2.imshow('flow', draw_flow(gray, flow))
cv.imshow('flow', draw_flow(gray, flow))
if show_hsv:
cv2.imshow('flow HSV', draw_hsv(flow))
cv.imshow('flow HSV', draw_hsv(flow))
if show_glitch:
cur_glitch = warp_flow(cur_glitch, flow)
cv2.imshow('glitch', cur_glitch)
cv.imshow('glitch', cur_glitch)
ch = 0xFF & cv2.waitKey(5)
ch = 0xFF & cv.waitKey(5)
if ch == 27:
break
if ch == ord('1'):
@@ -111,4 +111,4 @@ if __name__ == '__main__':
if ch == ord('4'):
use_temporal_propagation = not use_temporal_propagation
print('temporal propagation is', ['off', 'on'][use_temporal_propagation])
cv2.destroyAllWindows()
cv.destroyAllWindows()
@@ -10,7 +10,7 @@ Usage:
'''
import numpy as np
import cv2
import cv2 as cv
# relative module
import video
@@ -30,9 +30,9 @@ if __name__ == '__main__':
def nothing(*arg):
pass
cv2.namedWindow('SEEDS')
cv2.createTrackbar('Number of Superpixels', 'SEEDS', 400, 1000, nothing)
cv2.createTrackbar('Iterations', 'SEEDS', 4, 12, nothing)
cv.namedWindow('SEEDS')
cv.createTrackbar('Number of Superpixels', 'SEEDS', 400, 1000, nothing)
cv.createTrackbar('Iterations', 'SEEDS', 4, 12, nothing)
seeds = None
display_mode = 0
@@ -44,14 +44,14 @@ if __name__ == '__main__':
cap = video.create_capture(fn)
while True:
flag, img = cap.read()
converted_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
converted_img = cv.cvtColor(img, cv.COLOR_BGR2HSV)
height,width,channels = converted_img.shape
num_superpixels_new = cv2.getTrackbarPos('Number of Superpixels', 'SEEDS')
num_iterations = cv2.getTrackbarPos('Iterations', 'SEEDS')
num_superpixels_new = cv.getTrackbarPos('Number of Superpixels', 'SEEDS')
num_iterations = cv.getTrackbarPos('Iterations', 'SEEDS')
if not seeds or num_superpixels_new != num_superpixels:
num_superpixels = num_superpixels_new
seeds = cv2.ximgproc.createSuperpixelSEEDS(width, height, channels,
seeds = cv.ximgproc.createSuperpixelSEEDS(width, height, channels,
num_superpixels, num_levels, prior, num_histogram_bins)
color_img = np.zeros((height,width,3), np.uint8)
color_img[:] = (0, 0, 255)
@@ -71,21 +71,21 @@ if __name__ == '__main__':
mask = seeds.getLabelContourMask(False)
# stitch foreground & background together
mask_inv = cv2.bitwise_not(mask)
result_bg = cv2.bitwise_and(img, img, mask=mask_inv)
result_fg = cv2.bitwise_and(color_img, color_img, mask=mask)
result = cv2.add(result_bg, result_fg)
mask_inv = cv.bitwise_not(mask)
result_bg = cv.bitwise_and(img, img, mask=mask_inv)
result_fg = cv.bitwise_and(color_img, color_img, mask=mask)
result = cv.add(result_bg, result_fg)
if display_mode == 0:
cv2.imshow('SEEDS', result)
cv.imshow('SEEDS', result)
elif display_mode == 1:
cv2.imshow('SEEDS', mask)
cv.imshow('SEEDS', mask)
else:
cv2.imshow('SEEDS', labels)
cv.imshow('SEEDS', labels)
ch = cv2.waitKey(1)
ch = cv.waitKey(1)
if ch == 27:
break
elif ch & 0xff == ord(' '):
display_mode = (display_mode + 1) % 3
cv2.destroyAllWindows()
cv.destroyAllWindows()
@@ -32,7 +32,7 @@ Keys:
import numpy as np
from numpy import pi, sin, cos
import cv2
import cv2 as cv
# built-in modules
from time import clock
@@ -45,14 +45,14 @@ class VideoSynthBase(object):
self.bg = None
self.frame_size = (640, 480)
if bg is not None:
self.bg = cv2.imread(bg, 1)
self.bg = cv.imread(bg, 1)
h, w = self.bg.shape[:2]
self.frame_size = (w, h)
if size is not None:
w, h = map(int, size.split('x'))
self.frame_size = (w, h)
self.bg = cv2.resize(self.bg, self.frame_size)
self.bg = cv.resize(self.bg, self.frame_size)
self.noise = float(noise)
@@ -71,8 +71,8 @@ class VideoSynthBase(object):
if self.noise > 0.0:
noise = np.zeros((h, w, 3), np.int8)
cv2.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
buf = cv2.add(buf, noise, dtype=cv2.CV_8UC3)
cv.randn(noise, np.zeros(3), np.ones(3)*255*self.noise)
buf = cv.add(buf, noise, dtype=cv.CV_8UC3)
return True, buf
def isOpened(self):
@@ -102,10 +102,10 @@ class Chess(VideoSynthBase):
self.t = 0
def draw_quads(self, img, quads, color = (0, 255, 0)):
img_quads = cv2.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
img_quads = cv.projectPoints(quads.reshape(-1, 3), self.rvec, self.tvec, self.K, self.dist_coef) [0]
img_quads.shape = quads.shape[:2] + (2,)
for q in img_quads:
cv2.fillConvexPoly(img, np.int32(q*4), color, cv2.LINE_AA, shift=2)
cv.fillConvexPoly(img, np.int32(q*4), color, cv.LINE_AA, shift=2)
def render(self, dst):
t = self.t
@@ -156,11 +156,11 @@ def create_capture(source = 0, fallback = presets['chess']):
try: cap = Class(**params)
except: pass
else:
cap = cv2.VideoCapture(source)
cap = cv.VideoCapture(source)
if 'size' in params:
w, h = map(int, params['size'].split('x'))
cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
cap.set(cv.CAP_PROP_FRAME_WIDTH, w)
cap.set(cv.CAP_PROP_FRAME_HEIGHT, h)
if cap is None or not cap.isOpened():
print 'Warning: unable to open video source: ', source
if fallback is not None:
@@ -186,14 +186,14 @@ if __name__ == '__main__':
for i, cap in enumerate(caps):
ret, img = cap.read()
imgs.append(img)
cv2.imshow('capture %d' % i, img)
ch = 0xFF & cv2.waitKey(1)
cv.imshow('capture %d' % i, img)
ch = 0xFF & cv.waitKey(1)
if ch == 27:
break
if ch == ord(' '):
for i, img in enumerate(imgs):
fn = '%s/shot_%d_%03d.bmp' % (shotdir, i, shot_idx)
cv2.imwrite(fn, img)
cv.imwrite(fn, img)
print fn, 'saved'
shot_idx += 1
cv2.destroyAllWindows()
cv.destroyAllWindows()
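Aside: this last file (the samples' video.py, judging by the 'import video' and video.create_capture calls earlier) is likewise Python 2 era: print statements above, and 'from time import clock', which Python 3.3 deprecated and 3.8 removed. A drop-in shim for newer interpreters, as a sketch:

try:
    from time import clock                   # Python 2 / Python < 3.8
except ImportError:
    from time import perf_counter as clock  # Python 3.8+ replacement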