Commit 17e9fde7, authored Oct 17, 2019 by Alexander Alekhin

Merge pull request #15718 from alalek:pylint_warnings

Parents: 692e1ecc 0e40c8a0

Showing 26 changed files with 40 additions and 39 deletions (+40 -39)
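The edits below repeat one small cleanup across all 26 files: results a sample never uses again are rebound to underscore-prefixed names so pylint's unused-variable warning stays quiet while the unpacking still documents what a call returns, redundant trailing semicolons are removed, and fitline.py gains a missing global declaration. A minimal sketch of the renaming/semicolon pattern follows; it is not taken from any file in this commit, and the helper name show_first_frames, the camera index 0, and the frame count are assumptions made only for illustration:

import cv2 as cv

def show_first_frames(n=10):
    # Hypothetical helper (not part of the commit) showing the cleanup pattern.
    cap = cv.VideoCapture(0)        # device index 0 is an assumption for this sketch
    for _i in range(n):             # '_i' instead of 'i': the loop counter itself is never used
        _ret, frame = cap.read()    # '_ret' instead of 'ret': the status flag is ignored here
        cv.imshow('frame', frame)   # no trailing semicolon (pylint: unnecessary-semicolon)
        cv.waitKey(1)
    cap.release()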
modules/python/test/test_misc.py  +1 -1
samples/dnn/fast_neural_style.py  +1 -1
samples/dnn/mobilenet_ssd_accuracy.py  +1 -1
samples/dnn/text_detection.py  +1 -1
samples/dnn/tf_text_graph_common.py  +1 -1
samples/dnn/tf_text_graph_ssd.py  +1 -1
samples/python/browse.py  +1 -1
samples/python/calibrate.py  +2 -2
samples/python/camera_calibration_show_extrinsics.py  +1 -1
samples/python/color_histogram.py  +1 -1
samples/python/edge.py  +1 -1
samples/python/facedetect.py  +1 -1
samples/python/fitline.py  +1 -0
samples/python/houghcircles.py  +1 -1
samples/python/houghlines.py  +2 -2
samples/python/kmeans.py  +1 -1
samples/python/lappyr.py  +1 -1
samples/python/opt_flow.py  +2 -2
samples/python/peopledetect.py  +1 -1
samples/python/stereo_match.py  +2 -2
samples/python/turing.py  +1 -1
samples/python/tutorial_code/core/mat_operations/mat_operations.py  +9 -9
samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py  +2 -2
samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py  +2 -2
samples/python/video_threaded.py  +1 -1
samples/python/video_v4l2.py  +1 -1
modules/python/test/test_misc.py
@@ -96,7 +96,7 @@ class SamplesFindFile(NewOpenCVTests):
     def test_MissingFileException(self):
         try:
-            res = cv.samples.findFile('non_existed.file', True)
+            _res = cv.samples.findFile('non_existed.file', True)
             self.assertEqual("Dead code", 0)
         except cv.error as _e:
             pass
samples/dnn/fast_neural_style.py
@@ -14,7 +14,7 @@ parser.add_argument('--median_filter', default=0, type=int, help='Kernel size of
 args = parser.parse_args()
 net = cv.dnn.readNetFromTorch(cv.samples.findFile(args.model))
-net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV);
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
 if args.input:
     cap = cv.VideoCapture(args.input)
samples/dnn/mobilenet_ssd_accuracy.py
@@ -27,7 +27,7 @@ args = parser.parse_args()
 ### Get OpenCV predictions #####################################################
 net = cv.dnn.readNetFromTensorflow(cv.samples.findFile(args.weights), cv.samples.findFile(args.prototxt))
-net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV);
+net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
 detections = []
 for imgName in os.listdir(args.images):
samples/dnn/text_detection.py
@@ -134,7 +134,7 @@ def main():
             for j in range(4):
                 p1 = (vertices[j][0], vertices[j][1])
                 p2 = (vertices[(j + 1) % 4][0], vertices[(j + 1) % 4][1])
-                cv.line(frame, p1, p2, (0, 255, 0), 1);
+                cv.line(frame, p1, p2, (0, 255, 0), 1)
         # Put efficiency information
         cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
samples/dnn/tf_text_graph_common.py
@@ -21,7 +21,7 @@ def tokenize(s):
             elif token:
                 tokens.append(token)
                 token = ""
-            isString = (symbol == '\"' or symbol == '\'') ^ isString;
+            isString = (symbol == '\"' or symbol == '\'') ^ isString
         elif symbol == '{' or symbol == '}' or symbol == '[' or symbol == ']':
             if token:
samples/dnn/tf_text_graph_ssd.py
@@ -122,7 +122,7 @@ def createSSDGraph(modelPath, configPath, outputPath):
     print('Input image size: %dx%d' % (image_width, image_height))
     # Read the graph.
-    inpNames = ['image_tensor']
+    _inpNames = ['image_tensor']
     outNames = ['num_detections', 'detection_scores', 'detection_boxes', 'detection_classes']
     writeTextGraph(modelPath, outputPath, outNames)
samples/python/browse.py
@@ -45,7 +45,7 @@ def main():
     small = img
-    for i in xrange(3):
+    for _i in xrange(3):
         small = cv.pyrDown(small)
     def onmouse(event, x, y, flags, param):
samples/python/calibrate.py
@@ -97,7 +97,7 @@ def main():
     obj_points.append(pattern_points)
     # calculate camera distortion
-    rms, camera_matrix, dist_coefs, rvecs, tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None)
+    rms, camera_matrix, dist_coefs, _rvecs, _tvecs = cv.calibrateCamera(obj_points, img_points, (w, h), None, None)
     print("\nRMS:", rms)
     print("camera matrix:\n", camera_matrix)
@@ -106,7 +106,7 @@ def main():
     # undistort the image with the calibration
     print('')
     for fn in img_names if debug_dir else []:
-        path, name, ext = splitfn(fn)
+        _path, name, _ext = splitfn(fn)
         img_found = os.path.join(debug_dir, name + '_chess.png')
         outfile = os.path.join(debug_dir, name + '_undistorted.png')
samples/python/camera_calibration_show_extrinsics.py
@@ -184,7 +184,7 @@ def main():
     extrinsics = fs.getNode('extrinsic_parameters').mat()
     import matplotlib.pyplot as plt
-    from mpl_toolkits.mplot3d import Axes3D
+    from mpl_toolkits.mplot3d import Axes3D  # pylint: disable=unused-variable
     fig = plt.figure()
     ax = fig.gca(projection='3d')
samples/python/color_histogram.py
@@ -46,7 +46,7 @@ class App():
         cam = video.create_capture(fn, fallback='synth:bg=baboon.jpg:class=chess:noise=0.05')
         while True:
-            flag, frame = cam.read()
+            _flag, frame = cam.read()
             cv.imshow('camera', frame)
             small = cv.pyrDown(frame)
samples/python/edge.py
@@ -38,7 +38,7 @@ def main():
     cap = video.create_capture(fn)
     while True:
-        flag, img = cap.read()
+        _flag, img = cap.read()
         gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
         thrs1 = cv.getTrackbarPos('thrs1', 'edge')
         thrs2 = cv.getTrackbarPos('thrs2', 'edge')
samples/python/facedetect.py
@@ -48,7 +48,7 @@ def main():
     cam = create_capture(video_src, fallback='synth:bg={}:noise=0.05'.format(cv.samples.findFile('samples/data/lena.jpg')))
     while True:
-        ret, img = cam.read()
+        _ret, img = cam.read()
         gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
         gray = cv.equalizeHist(gray)
samples/python/fitline.py
@@ -88,6 +88,7 @@ def main():
         update()
         ch = cv.waitKey(0)
         if ch == ord('f'):
+            global cur_func_name
             if PY3:
                 cur_func_name = next(dist_func_names)
             else:
samples/python/houghcircles.py
@@ -30,7 +30,7 @@ def main():
     circles = cv.HoughCircles(img, cv.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)
     if circles is not None: # Check if circles have been found and only then iterate over these and add them to the image
-        a, b, c = circles.shape
+        _a, b, _c = circles.shape
         for i in range(b):
             cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv.LINE_AA)
             cv.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv.LINE_AA)  # draw center of circle
...
samples/python/houghlines.py
View file @
17e9fde7
...
@@ -29,14 +29,14 @@ def main():
...
@@ -29,14 +29,14 @@ def main():
if
True
:
# HoughLinesP
if
True
:
# HoughLinesP
lines
=
cv
.
HoughLinesP
(
dst
,
1
,
math
.
pi
/
180.0
,
40
,
np
.
array
([]),
50
,
10
)
lines
=
cv
.
HoughLinesP
(
dst
,
1
,
math
.
pi
/
180.0
,
40
,
np
.
array
([]),
50
,
10
)
a
,
b
,
c
=
lines
.
shape
a
,
b
,
_
c
=
lines
.
shape
for
i
in
range
(
a
):
for
i
in
range
(
a
):
cv
.
line
(
cdst
,
(
lines
[
i
][
0
][
0
],
lines
[
i
][
0
][
1
]),
(
lines
[
i
][
0
][
2
],
lines
[
i
][
0
][
3
]),
(
0
,
0
,
255
),
3
,
cv
.
LINE_AA
)
cv
.
line
(
cdst
,
(
lines
[
i
][
0
][
0
],
lines
[
i
][
0
][
1
]),
(
lines
[
i
][
0
][
2
],
lines
[
i
][
0
][
3
]),
(
0
,
0
,
255
),
3
,
cv
.
LINE_AA
)
else
:
# HoughLines
else
:
# HoughLines
lines
=
cv
.
HoughLines
(
dst
,
1
,
math
.
pi
/
180.0
,
50
,
np
.
array
([]),
0
,
0
)
lines
=
cv
.
HoughLines
(
dst
,
1
,
math
.
pi
/
180.0
,
50
,
np
.
array
([]),
0
,
0
)
if
lines
is
not
None
:
if
lines
is
not
None
:
a
,
b
,
c
=
lines
.
shape
a
,
b
,
_
c
=
lines
.
shape
for
i
in
range
(
a
):
for
i
in
range
(
a
):
rho
=
lines
[
i
][
0
][
0
]
rho
=
lines
[
i
][
0
][
0
]
theta
=
lines
[
i
][
0
][
1
]
theta
=
lines
[
i
][
0
][
1
]
...
...
samples/python/kmeans.py
@@ -33,7 +33,7 @@ def main():
         points, _ = make_gaussians(cluster_n, img_size)
         term_crit = (cv.TERM_CRITERIA_EPS, 30, 0.1)
-        ret, labels, centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0)
+        _ret, labels, _centers = cv.kmeans(points, cluster_n, None, term_crit, 10, 0)
         img = np.zeros((img_size, img_size, 3), np.uint8)
         for (x, y), label in zip(np.int32(points), labels.ravel()):
samples/python/lappyr.py
@@ -60,7 +60,7 @@ def main():
         cv.createTrackbar('%d' % i, 'level control', 5, 50, nothing)
     while True:
-        ret, frame = cap.read()
+        _ret, frame = cap.read()
         pyr = build_lappyr(frame, leveln)
         for i in xrange(leveln):
samples/python/opt_flow.py
@@ -64,14 +64,14 @@ def main():
         fn = 0
     cam = video.create_capture(fn)
-    ret, prev = cam.read()
+    _ret, prev = cam.read()
     prevgray = cv.cvtColor(prev, cv.COLOR_BGR2GRAY)
     show_hsv = False
     show_glitch = False
     cur_glitch = prev.copy()
     while True:
-        ret, img = cam.read()
+        _ret, img = cam.read()
         gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
         flow = cv.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
         prevgray = gray
samples/python/peopledetect.py
@@ -51,7 +51,7 @@ def main():
             print('loading error')
             continue
-        found, w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05)
+        found, _w = hog.detectMultiScale(img, winStride=(8,8), padding=(32,32), scale=1.05)
         found_filtered = []
         for ri, r in enumerate(found):
             for qi, q in enumerate(found):
samples/python/stereo_match.py
@@ -69,8 +69,8 @@ def main():
     out_points = points[mask]
     out_colors = colors[mask]
     out_fn = 'out.ply'
-    write_ply('out.ply', out_points, out_colors)
-    print('%s saved' % 'out.ply')
+    write_ply(out_fn, out_points, out_colors)
+    print('%s saved' % out_fn)
     cv.imshow('left', imgL)
     cv.imshow('disparity', (disp - min_disp) / num_disp)
samples/python/turing.py
@@ -32,7 +32,7 @@ def main():
     w, h = 512, 512
-    args, args_list = getopt.getopt(sys.argv[1:], 'o:', [])
+    args, _args_list = getopt.getopt(sys.argv[1:], 'o:', [])
     args = dict(args)
     out = None
     if '-o' in args:
samples/python/tutorial_code/core/mat_operations/mat_operations.py
@@ -25,13 +25,13 @@ def access_pixel():
     y = 0
     x = 0
     ## [Pixel access 1]
-    intensity = img[y, x]
+    _intensity = img[y, x]
     ## [Pixel access 1]
     ## [Pixel access 3]
-    blue = img[y, x, 0]
-    green = img[y, x, 1]
-    red = img[y, x, 2]
+    _blue = img[y, x, 0]
+    _green = img[y, x, 1]
+    _red = img[y, x, 2]
     ## [Pixel access 3]
     ## [Pixel access 5]
@@ -42,12 +42,12 @@ def reference_counting():
     # Memory management and reference counting
     ## [Reference counting 2]
     img = cv.imread('image.jpg')
-    img1 = np.copy(img)
+    _img1 = np.copy(img)
     ## [Reference counting 2]
     ## [Reference counting 3]
     img = cv.imread('image.jpg')
-    sobelx = cv.Sobel(img, cv.CV_32F, 1, 0);
+    _sobelx = cv.Sobel(img, cv.CV_32F, 1, 0)
     ## [Reference counting 3]
 def primitive_operations():
@@ -57,17 +57,17 @@ def primitive_operations():
     ## [Set image to black]
     ## [Select ROI]
-    smallImg = img[10:110, 10:110]
+    _smallImg = img[10:110, 10:110]
     ## [Select ROI]
     ## [BGR to Gray]
     img = cv.imread('image.jpg')
-    grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
+    _grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
     ## [BGR to Gray]
     src = np.ones((4, 4), np.uint8)
     ## [Convert to CV_32F]
-    dst = src.astype(np.float32)
+    _dst = src.astype(np.float32)
     ## [Convert to CV_32F]
 def visualize_images():
samples/python/tutorial_code/imgProc/changing_contrast_brightness_image/changing_contrast_brightness_image.py
@@ -25,8 +25,8 @@ def gammaCorrection():
     res = cv.LUT(img_original, lookUpTable)
     ## [changing-contrast-brightness-gamma-correction]
-    img_gamma_corrected = cv.hconcat([img_original, res]);
-    cv.imshow("Gamma correction", img_gamma_corrected);
+    img_gamma_corrected = cv.hconcat([img_original, res])
+    cv.imshow("Gamma correction", img_gamma_corrected)
 def on_linear_transform_alpha_trackbar(val):
     global alpha
samples/python/tutorial_code/ml/introduction_to_pca/introduction_to_pca.py
@@ -85,13 +85,13 @@ _, contours, _ = cv.findContours(bw, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)
 for i, c in enumerate(contours):
     # Calculate the area of each contour
-    area = cv.contourArea(c);
+    area = cv.contourArea(c)
     # Ignore contours that are too small or too large
     if area < 1e2 or 1e5 < area:
         continue
     # Draw each contour only for visualisation purposes
-    cv.drawContours(src, contours, i, (0, 0, 255), 2);
+    cv.drawContours(src, contours, i, (0, 0, 255), 2)
     # Find the orientation of each shape
     getOrientation(c, src)
 ## [contours]
samples/python/video_threaded.py
@@ -70,7 +70,7 @@ def main():
             draw_str(res, (20, 60), "frame interval : %.1f ms" % (frame_interval.value * 1000))
             cv.imshow('threaded video', res)
         if len(pending) < threadn:
-            ret, frame = cap.read()
+            _ret, frame = cap.read()
             t = clock()
             frame_interval.update(t - last_frame_time)
             last_frame_time = t
samples/python/video_v4l2.py
@@ -42,7 +42,7 @@ def main():
     cv.createTrackbar("Focus", "Video", focus, 100, lambda v: cap.set(cv.CAP_PROP_FOCUS, v / 100))
     while True:
-        status, img = cap.read()
+        _status, img = cap.read()
         fourcc = decode_fourcc(cap.get(cv.CAP_PROP_FOURCC))