Commit 25b4d8a1 authored by Vitaly Tuzov

Added images necessary for tests

parent aaa30dc5
@@ -21,9 +21,9 @@ class calibration_test(NewOpenCVTests):
         img_names = []
         for i in range(1, 15):
             if i < 10:
-                img_names.append('samples/cpp/left0{}.jpg'.format(str(i)))
+                img_names.append('samples/data/left0{}.jpg'.format(str(i)))
             elif i != 10:
-                img_names.append('samples/cpp/left{}.jpg'.format(str(i)))
+                img_names.append('samples/data/left{}.jpg'.format(str(i)))
 
         square_size = 1.0
         pattern_size = (9, 6)
@@ -39,7 +39,7 @@ class camshift_test(NewOpenCVTests):
 
     def prepareRender(self):
 
-        self.render = TestSceneRender(self.get_sample('samples/python2/data/pca_test1.jpg'), deformation = True)
+        self.render = TestSceneRender(self.get_sample('samples/data/pca_test1.jpg'), deformation = True)
 
     def runTracker(self):
 
@@ -16,7 +16,7 @@ from tests_common import NewOpenCVTests
 class dft_test(NewOpenCVTests):
 
     def test_dft(self):
-        img = self.get_sample('samples/gpu/rubberwhale1.png', 0)
+        img = self.get_sample('samples/data/rubberwhale1.png', 0)
         eps = 0.001
 
         #test direct transform
@@ -36,7 +36,7 @@ from numpy.linalg import norm
 
 SZ = 20 # size of each digit is SZ x SZ
 CLASS_N = 10
-DIGITS_FN = 'samples/python2/data/digits.png'
+DIGITS_FN = 'samples/data/digits.png'
 
 def split2d(img, cell_size, flatten=True):
     h, w = img.shape[:2]
@@ -31,7 +31,7 @@ class facedetect_test(NewOpenCVTests):
         cascade = cv2.CascadeClassifier(cascade_fn)
         nested = cv2.CascadeClassifier(nested_fn)
 
-        samples = ['samples/c/lena.jpg', 'cv/cascadeandhog/images/mona-lisa.png']
+        samples = ['samples/data/lena.jpg', 'cv/cascadeandhog/images/mona-lisa.png']
 
         faces = []
         eyes = []
@@ -42,8 +42,8 @@ class feature_homography_test(NewOpenCVTests):
 
     def test_feature_homography(self):
 
-        self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'),
-                                      self.get_sample('samples/c/box.png'), noise = 0.4, speed = 0.5)
+        self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'),
+                                      self.get_sample('samples/data/box.png'), noise = 0.4, speed = 0.5)
         self.frame = self.render.getNextFrame()
         self.tracker = PlaneTracker()
         self.tracker.clear()
@@ -39,7 +39,7 @@ class houghcircles_test(NewOpenCVTests):
 
     def test_houghcircles(self):
 
-        fn = "samples/cpp/board.jpg"
+        fn = "samples/data/board.jpg"
 
         src = self.get_sample(fn, 1)
         img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
@@ -26,7 +26,7 @@ class houghlines_test(NewOpenCVTests):
 
     def test_houghlines(self):
 
-        fn = "/samples/cpp/pic1.png"
+        fn = "/samples/data/pic1.png"
 
         src = self.get_sample(fn)
         dst = cv2.Canny(src, 50, 200)
@@ -150,7 +150,7 @@ class letter_recog_test(NewOpenCVTests):
             Model = models[model]
             classifier = Model()
 
-            samples, responses = load_base(self.repoPath + '/samples/cpp/letter-recognition.data')
+            samples, responses = load_base(self.repoPath + '/samples/data/letter-recognition.data')
 
             train_n = int(len(samples)*classifier.train_ratio)
             classifier.train(samples[:train_n], responses[:train_n])
@@ -44,8 +44,8 @@ class lk_homography_test(NewOpenCVTests):
     numFeaturesInRectOnStart = 0
 
     def test_lk_homography(self):
-        self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'),
-                                      self.get_sample('samples/c/box.png'), noise = 0.1, speed = 1.0)
+        self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'),
+                                      self.get_sample('samples/data/box.png'), noise = 0.1, speed = 1.0)
 
         frame = self.render.getNextFrame()
         frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
@@ -50,7 +50,7 @@ class lk_track_test(NewOpenCVTests):
 
     def test_lk_track(self):
 
-        self.render = TestSceneRender(self.get_sample('samples/python2/data/graf1.png'), self.get_sample('samples/c/box.png'))
+        self.render = TestSceneRender(self.get_sample('samples/data/graf1.png'), self.get_sample('samples/data/box.png'))
         self.runTracker()
 
     def runTracker(self):
@@ -18,7 +18,7 @@ class morphology_test(NewOpenCVTests):
 
     def test_morphology(self):
 
-        fn = 'samples/gpu/rubberwhale1.png'
+        fn = 'samples/data/rubberwhale1.png'
         img = self.get_sample(fn)
 
         modes = ['erode/dilate', 'open/close', 'blackhat/tophat', 'gradient']
@@ -24,7 +24,7 @@ class peopledetect_test(NewOpenCVTests):
 
         hog = cv2.HOGDescriptor()
         hog.setSVMDetector( cv2.HOGDescriptor_getDefaultPeopleDetector() )
-        dirPath = 'samples/gpu/'
+        dirPath = 'samples/data/'
        samples = ['basketball1.png', 'basketball2.png']
 
        testPeople = [
@@ -61,7 +61,7 @@ class squares_test(NewOpenCVTests):
 
     def test_squares(self):
 
-        img = self.get_sample('samples/cpp/pic1.png')
+        img = self.get_sample('samples/data/pic1.png')
         squares = find_squares(img)
 
         testSquares = [
@@ -21,7 +21,7 @@ class texture_flow_test(NewOpenCVTests):
 
     def test_texture_flow(self):
 
-        img = self.get_sample('samples/cpp/pic6.png')
+        img = self.get_sample('samples/data/pic6.png')
 
         gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
         h, w = img.shape[:2]
@@ -100,20 +100,3 @@ class TestSceneRender():
 
     def resetTime(self):
         self.time = 0.0
-
-if __name__ == '__main__':
-
-    backGr = cv2.imread('../../../samples/data/lena.jpg')
-
-    render = TestSceneRender(backGr, noise = 0.5)
-
-    while True:
-
-        img = render.getNextFrame()
-        cv2.imshow('img', img)
-
-        ch = 0xFF & cv2.waitKey(3)
-        if ch == 27:
-            break
-
-    cv2.destroyAllWindows()
\ No newline at end of file