Commit 8c25a8eb authored by Alexander Alekhin's avatar Alexander Alekhin

Merge remote-tracking branch 'upstream/3.4' into merge-3.4

parents f147ba16 b82ecaf4
......@@ -715,7 +715,7 @@ if(UNIX)
CHECK_INCLUDE_FILE(pthread.h HAVE_PTHREAD)
if(ANDROID)
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} dl m log)
elseif(${CMAKE_SYSTEM_NAME} MATCHES "FreeBSD|NetBSD|DragonFly|OpenBSD|Haiku")
elseif(CMAKE_SYSTEM_NAME MATCHES "FreeBSD|NetBSD|DragonFly|OpenBSD|Haiku")
set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} m pthread)
elseif(EMSCRIPTEN)
# no need to link to system libs with emscripten
......
......@@ -296,11 +296,18 @@ endif()
# workaround gcc bug for aligned ld/st
# https://github.com/opencv/opencv/issues/13211
if((PPC64LE AND NOT CMAKE_CROSSCOMPILING) OR OPENCV_FORCE_COMPILER_CHECK_VSX_ALIGNED)
ocv_check_runtime_flag("${CPU_BASELINE_FLAGS}" "OPENCV_CHECK_VSX_ALIGNED" "${OpenCV_SOURCE_DIR}/cmake/checks/runtime/cpu_vsx_aligned.cpp")
ocv_check_runtime_flag("${CPU_BASELINE_FLAGS}" OPENCV_CHECK_VSX_ALIGNED "${OpenCV_SOURCE_DIR}/cmake/checks/runtime/cpu_vsx_aligned.cpp")
if(NOT OPENCV_CHECK_VSX_ALIGNED)
add_extra_compiler_option_force(-DCV_COMPILER_VSX_BROKEN_ALIGNED)
endif()
endif()
# validate inline asm with fixed register numbers and the constraints wa, wd, wf
if(PPC64LE)
ocv_check_compiler_flag(CXX "${CPU_BASELINE_FLAGS}" OPENCV_CHECK_VSX_ASM "${OpenCV_SOURCE_DIR}/cmake/checks/cpu_vsx_asm.cpp")
if(NOT OPENCV_CHECK_VSX_ASM)
add_extra_compiler_option_force(-DCV_COMPILER_VSX_BROKEN_ASM)
endif()
endif()
# combine all "extra" options
if(NOT OPENCV_SKIP_EXTRA_COMPILER_FLAGS)
......
# The script detects Intel(R) Inference Engine installation
#
# Cache variables:
# INF_ENGINE_OMP_DIR - directory with OpenMP library to link with (needed by some versions of IE)
# INF_ENGINE_RELEASE - a number reflecting IE source interface (linked with OpenVINO release)
#
# Detect parameters:
......@@ -11,8 +10,8 @@
# - INF_ENGINE_INCLUDE_DIRS - headers search location
# - INF_ENGINE_LIB_DIRS - library search location
# 3. OpenVINO location:
# - environment variable INTEL_CVSDK_DIR is set to location of OpenVINO installation dir
# - INF_ENGINE_PLATFORM - part of name of library directory representing its platform (default ubuntu_16.04)
# - environment variable INTEL_OPENVINO_DIR is set to location of OpenVINO installation dir
# - INF_ENGINE_PLATFORM - part of name of library directory representing its platform
#
# Result:
# INF_ENGINE_TARGET - set to name of imported library target representing InferenceEngine
......@@ -36,11 +35,13 @@ function(add_custom_ie_build _inc _lib _lib_rel _lib_dbg _msg)
IMPORTED_IMPLIB_DEBUG "${_lib_dbg}"
INTERFACE_INCLUDE_DIRECTORIES "${_inc}"
)
find_library(omp_lib iomp5 PATHS "${INF_ENGINE_OMP_DIR}" NO_DEFAULT_PATH)
if(NOT omp_lib)
message(WARNING "OpenMP for IE have not been found. Set INF_ENGINE_OMP_DIR variable if you experience build errors.")
else()
set_target_properties(inference_engine PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "${omp_lib}")
if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
find_library(INF_ENGINE_OMP_LIBRARY iomp5 PATHS "${INF_ENGINE_OMP_DIR}" NO_DEFAULT_PATH)
if(NOT INF_ENGINE_OMP_LIBRARY)
message(WARNING "OpenMP for IE have not been found. Set INF_ENGINE_OMP_DIR variable if you experience build errors.")
else()
set_target_properties(inference_engine PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "${INF_ENGINE_OMP_LIBRARY}")
endif()
endif()
set(INF_ENGINE_VERSION "Unknown" CACHE STRING "")
set(INF_ENGINE_TARGET inference_engine PARENT_SCOPE)
......@@ -64,9 +65,17 @@ if(NOT INF_ENGINE_TARGET AND INF_ENGINE_LIB_DIRS AND INF_ENGINE_INCLUDE_DIRS)
add_custom_ie_build("${ie_custom_inc}" "${ie_custom_lib}" "${ie_custom_lib_rel}" "${ie_custom_lib_dbg}" "INF_ENGINE_{INCLUDE,LIB}_DIRS")
endif()
set(_loc "$ENV{INTEL_CVSDK_DIR}")
set(_loc "$ENV{INTEL_OPENVINO_DIR}")
if(NOT _loc AND DEFINED ENV{INTEL_CVSDK_DIR})
set(_loc "$ENV{INTEL_CVSDK_DIR}") # OpenVINO 2018.x
endif()
if(NOT INF_ENGINE_TARGET AND _loc)
set(INF_ENGINE_PLATFORM "ubuntu_16.04" CACHE STRING "InferenceEngine platform (library dir)")
if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
set(INF_ENGINE_PLATFORM_DEFAULT "ubuntu_16.04")
else()
set(INF_ENGINE_PLATFORM_DEFAULT "")
endif()
set(INF_ENGINE_PLATFORM "${INF_ENGINE_PLATFORM_DEFAULT}" CACHE STRING "InferenceEngine platform (library dir)")
find_path(ie_custom_env_inc "inference_engine.hpp" PATHS "${_loc}/deployment_tools/inference_engine/include" NO_DEFAULT_PATH)
find_library(ie_custom_env_lib "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
find_library(ie_custom_env_lib_rel "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Release" NO_DEFAULT_PATH)
......
......@@ -822,6 +822,11 @@ function(ocv_output_status msg)
message(STATUS "${msg}")
string(REPLACE "\\" "\\\\" msg "${msg}")
string(REPLACE "\"" "\\\"" msg "${msg}")
string(REGEX REPLACE "^\n+|\n+$" "" msg "${msg}")
if(msg MATCHES "\n")
message(WARNING "String to be inserted to version_string.inc has an unexpected line break: '${msg}'")
string(REPLACE "\n" "\\n" msg "${msg}")
endif()
set(OPENCV_BUILD_INFO_STR "${OPENCV_BUILD_INFO_STR}\"${msg}\\n\"\n" CACHE INTERNAL "")
endfunction()
......
#if defined(__VSX__)
#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__)
#include <altivec.h>
#else
#error "OpenCV only supports little-endian mode"
#endif
#else
#error "VSX is not supported"
#endif
/*
 * xlc and wide versions of clang don't support %x<n> in the inline asm template which fixes register number
 * when using any of the register constraints wa, wd, wf
 */
/* Compile-time probe: succeeds only when the compiler accepts the %x operand
 * modifier together with the VSX register constraints used below. The build
 * system checks whether this file compiles; the program is never executed. */
int main()
{
__vector float vf;
__vector signed int vi;
/* xvcvsxwsp converts signed words to single-precision floats; %x0/%x1 request
 * full VSX register numbering, which some compilers reject — that rejection is
 * exactly what this check detects. */
__asm__ __volatile__ ("xvcvsxwsp %x0,%x1" : "=wf" (vf) : "wa" (vi));
return 0;
}
\ No newline at end of file
......@@ -2,6 +2,7 @@
// https://github.com/opencv/opencv/issues/13211
#include <altivec.h>
#undef bool
#define vsx_ld vec_vsx_ld
#define vsx_st vec_vsx_st
......
Anisotropic image segmentation by a gradient structure tensor {#tutorial_anisotropic_image_segmentation_by_a_gst}
==========================
@prev_tutorial{tutorial_motion_deblur_filter}
@next_tutorial{tutorial_periodic_noise_removing_filter}
Goal
----
......@@ -45,28 +48,65 @@ The orientation of an anisotropic image:
Coherency:
\f[C = \frac{\lambda_1 - \lambda_2}{\lambda_1 + \lambda_2}\f]
The coherency ranges from 0 to 1. For ideal local orientation (\f$\lambda_2\f$ = 0, \f$\lambda_1\f$ > 0) it is one, for an isotropic gray value structure (\f$\lambda_1\f$ = \f$\lambda_2\f$ > 0) it is zero.
The coherency ranges from 0 to 1. For ideal local orientation (\f$\lambda_2\f$ = 0, \f$\lambda_1\f$ > 0) it is one, for an isotropic gray value structure (\f$\lambda_1\f$ = \f$\lambda_2\f$ \> 0) it is zero.
Source code
-----------
You can find source code in the `samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp` of the OpenCV source code library.
@include cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp
@add_toggle_cpp
@include cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp
@end_toggle
@add_toggle_python
@include samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py
@end_toggle
Explanation
-----------
An anisotropic image segmentation algorithm consists of a gradient structure tensor calculation, an orientation calculation, a coherency calculation and an orientation and coherency thresholding:
@snippet samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp main
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp main
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py main
@end_toggle
A function calcGST() calculates orientation and coherency by using a gradient structure tensor. An input parameter w defines a window size:
@snippet samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp calcGST
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp calcGST
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py calcGST
@end_toggle
The code below applies thresholds LowThr and HighThr to the image orientation and a threshold C_Thr to the image coherency calculated by the previous function. LowThr and HighThr define the orientation range:
@snippet samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp thresholding
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp thresholding
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py thresholding
@end_toggle
And finally we combine thresholding results:
@snippet samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp combining
@add_toggle_cpp
@snippet samples/cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp combining
@end_toggle
@add_toggle_python
@snippet samples/python/tutorial_code/imgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.py combining
@end_toggle
Result
------
......
Motion Deblur Filter {#tutorial_motion_deblur_filter}
==========================
@prev_tutorial{tutorial_out_of_focus_deblur_filter}
@next_tutorial{tutorial_anisotropic_image_segmentation_by_a_gst}
Goal
----
......
......@@ -2,6 +2,7 @@ Out-of-focus Deblur Filter {#tutorial_out_of_focus_deblur_filter}
==========================
@prev_tutorial{tutorial_distance_transform}
@next_tutorial{tutorial_motion_deblur_filter}
Goal
----
......
Periodic Noise Removing Filter {#tutorial_periodic_noise_removing_filter}
==========================
@prev_tutorial{tutorial_anisotropic_image_segmentation_by_a_gst}
Goal
----
......
......@@ -177,8 +177,6 @@ pattern (every view is described by several 3D-2D point correspondences).
opencv_source_code/samples/cpp/calibration.cpp
- A calibration sample in order to do 3D reconstruction can be found at
opencv_source_code/samples/cpp/build3dmodel.cpp
- A calibration sample of an artificially generated camera and chessboard patterns can be
found at opencv_source_code/samples/cpp/calibration_artificial.cpp
- A calibration example on stereo calibration can be found at
opencv_source_code/samples/cpp/stereo_calib.cpp
- A calibration example on stereo matching can be found at
......
......@@ -3,7 +3,7 @@ set(the_description "The Core Functionality")
ocv_add_dispatched_file(mathfuncs_core SSE2 AVX AVX2)
ocv_add_dispatched_file(stat SSE4_2 AVX2)
ocv_add_dispatched_file(arithm SSE2 SSE4_1 AVX2 VSX3)
ocv_add_dispatched_file(convert SSE2 AVX2)
ocv_add_dispatched_file(convert SSE2 AVX2 VSX3)
ocv_add_dispatched_file(convert_scale SSE2 AVX2)
ocv_add_dispatched_file(count_non_zero SSE2 AVX2)
ocv_add_dispatched_file(matmul SSE2 AVX2)
......
......@@ -291,6 +291,8 @@ VSX_IMPL_1RG(vec_udword2, wi, vec_float4, wf, xvcvspuxds, vec_ctulo)
*
* So we're not able to use inline asm and only use built-in functions that CLANG supports
* and use __builtin_convertvector if clang is missing any of the vector conversion built-in functions
*
* todo: clang asm template bug is fixed, need to reconsider the current workarounds.
*/
// convert vector helper
......
......@@ -322,6 +322,8 @@ private:
// Fortran subroutine in EISPACK.
// Initialize
const int max_iters_count = 1000 * this->n;
int nn = this->n;
int n1 = nn - 1;
int low = 0;
......@@ -487,7 +489,9 @@ private:
}
}
iter = iter + 1; // (Could check iteration count here.)
iter = iter + 1;
if (iter > max_iters_count)
CV_Error(Error::StsNoConv, "Algorithm doesn't converge (complex eigen values?)");
// Look for two consecutive small sub-diagonal elements
int m = n1 - 2;
......
This diff is collapsed.
......@@ -519,4 +519,27 @@ TEST_P(Core_EigenZero, double)
}
INSTANTIATE_TEST_CASE_P(/**/, Core_EigenZero, testing::Values(2, 3, 5));
// Regression test: eigenNonSymmetric() must not loop forever on a matrix whose
// eigenvalues are complex; the solver is expected to raise StsNoConv once its
// iteration limit is exceeded. (Tied to the max_iters_count guard in the
// EISPACK-based implementation.)
TEST(Core_EigenNonSymmetric, convergence)
{
Matx33d m(
0, -1, 0,
1, 0, 1,
0, -1, 0);
Mat eigenvalues, eigenvectors;
// eigen values are complex, algorithm doesn't converge
try
{
cv::eigenNonSymmetric(m, eigenvalues, eigenvectors);
// If the call unexpectedly succeeds, print the result for diagnosis.
std::cout << Mat(eigenvalues.t()) << std::endl;
}
catch (const cv::Exception& e)
{
// Expected path: non-convergence must be reported with this exact code.
EXPECT_EQ(Error::StsNoConv, e.code) << e.what();
}
catch (...)
{
// Any other exception type is a failure of the error-reporting contract.
FAIL() << "Unknown exception has been raised";
}
}
}} // namespace
......@@ -276,17 +276,15 @@ public:
public:
KMeansDistanceComputer(Distance _distance, const Matrix<ElementType>& _dataset,
const int _branching, const int* _indices, const Matrix<double>& _dcenters, const size_t _veclen,
int* _count, int* _belongs_to, std::vector<DistanceType>& _radiuses, bool& _converged)
std::vector<int> &_new_centroids, std::vector<DistanceType> &_sq_dists)
: distance(_distance)
, dataset(_dataset)
, branching(_branching)
, indices(_indices)
, dcenters(_dcenters)
, veclen(_veclen)
, count(_count)
, belongs_to(_belongs_to)
, radiuses(_radiuses)
, converged(_converged)
, new_centroids(_new_centroids)
, sq_dists(_sq_dists)
{
}
......@@ -297,8 +295,8 @@ public:
for( int i = begin; i<end; ++i)
{
DistanceType sq_dist = distance(dataset[indices[i]], dcenters[0], veclen);
int new_centroid = 0;
DistanceType sq_dist(distance(dataset[indices[i]], dcenters[0], veclen));
int new_centroid(0);
for (int j=1; j<branching; ++j) {
DistanceType new_sq_dist = distance(dataset[indices[i]], dcenters[j], veclen);
if (sq_dist>new_sq_dist) {
......@@ -306,15 +304,8 @@ public:
sq_dist = new_sq_dist;
}
}
if (sq_dist > radiuses[new_centroid]) {
radiuses[new_centroid] = sq_dist;
}
if (new_centroid != belongs_to[i]) {
CV_XADD(&count[belongs_to[i]], -1);
CV_XADD(&count[new_centroid], 1);
belongs_to[i] = new_centroid;
converged = false;
}
sq_dists[i] = sq_dist;
new_centroids[i] = new_centroid;
}
}
......@@ -325,10 +316,8 @@ public:
const int* indices;
const Matrix<double>& dcenters;
const size_t veclen;
int* count;
int* belongs_to;
std::vector<DistanceType>& radiuses;
bool& converged;
std::vector<int> &new_centroids;
std::vector<DistanceType> &sq_dists;
KMeansDistanceComputer& operator=( const KMeansDistanceComputer & ) { return *this; }
};
......@@ -796,10 +785,27 @@ private:
}
}
std::vector<int> new_centroids(indices_length);
std::vector<DistanceType> sq_dists(indices_length);
// reassign points to clusters
KMeansDistanceComputer invoker(distance_, dataset_, branching, indices, dcenters, veclen_, count, belongs_to, radiuses, converged);
KMeansDistanceComputer invoker(distance_, dataset_, branching, indices, dcenters, veclen_, new_centroids, sq_dists);
parallel_for_(cv::Range(0, (int)indices_length), invoker);
for (int i=0; i < (int)indices_length; ++i) {
DistanceType sq_dist(sq_dists[i]);
int new_centroid(new_centroids[i]);
if (sq_dist > radiuses[new_centroid]) {
radiuses[new_centroid] = sq_dist;
}
if (new_centroid != belongs_to[i]) {
count[belongs_to[i]]--;
count[new_centroid]++;
belongs_to[i] = new_centroid;
converged = false;
}
}
for (int i=0; i<branching; ++i) {
// if one cluster converges to an empty cluster,
// move an element into that cluster
......
......@@ -106,9 +106,8 @@ def affine_detect(detector, img, mask=None, pool=None):
print()
return keypoints, np.array(descrs)
if __name__ == '__main__':
print(__doc__)
def main():
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
opts = dict(opts)
......@@ -160,4 +159,10 @@ if __name__ == '__main__':
match_and_draw('affine find_obj')
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -26,11 +26,7 @@ import cv2 as cv
# built-in modules
import sys
if __name__ == '__main__':
print('This sample shows how to implement a simple hi resolution image navigation.')
print('USAGE: browse.py [image filename]')
print()
def main():
if len(sys.argv) > 1:
fn = cv.samples.findFile(sys.argv[1])
print('loading %s ...' % fn)
......@@ -62,4 +58,10 @@ if __name__ == '__main__':
cv.imshow('preview', small)
cv.setMouseCallback('preview', onmouse)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -25,7 +25,7 @@ from common import splitfn
# built-in modules
import os
if __name__ == '__main__':
def main():
import sys
import getopt
from glob import glob
......@@ -126,4 +126,10 @@ if __name__ == '__main__':
print('Undistorted image written to: %s' % outfile)
cv.imwrite(outfile, dst)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
from matplotlib import cm
from numpy import linspace
import argparse
import cv2 as cv
from numpy import linspace
def inverse_homogeneoux_matrix(M):
R = M[0:3, 0:3]
T = M[0:3, 3]
......@@ -119,6 +119,8 @@ def create_board_model(extrinsics, board_width, board_height, square_size, draw_
def draw_camera_boards(ax, camera_matrix, cam_width, cam_height, scale_focal,
extrinsics, board_width, board_height, square_size,
patternCentric):
from matplotlib import cm
min_values = np.zeros((3,1))
min_values = np.inf
max_values = np.zeros((3,1))
......@@ -158,6 +160,8 @@ def draw_camera_boards(ax, camera_matrix, cam_width, cam_height, scale_focal,
return min_values, max_values
def main():
import argparse
parser = argparse.ArgumentParser(description='Plot camera calibration extrinsics.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--calibration', type=str, default='left_intrinsics.yml',
......@@ -179,6 +183,9 @@ def main():
camera_matrix = fs.getNode('camera_matrix').mat()
extrinsics = fs.getNode('extrinsic_parameters').mat()
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.set_aspect("equal")
......@@ -211,6 +218,10 @@ def main():
ax.set_title('Extrinsic Parameters Visualization')
plt.show()
print('Done')
if __name__ == "__main__":
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -119,10 +119,10 @@ class App(object):
if __name__ == '__main__':
print(__doc__)
import sys
try:
video_src = sys.argv[1]
except:
video_src = 0
print(__doc__)
App(video_src).run()
......@@ -46,7 +46,7 @@ def coherence_filter(img, sigma = 11, str_sigma = 11, blend = 0.5, iter_n = 4):
return img
if __name__ == '__main__':
def main():
import sys
try:
fn = sys.argv[1]
......@@ -82,4 +82,11 @@ if __name__ == '__main__':
update()
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -8,6 +8,9 @@ Keys:
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
......@@ -17,46 +20,54 @@ import sys
# local modules
import video
if __name__ == '__main__':
class App():
def set_scale(self, val):
self.hist_scale = val
def run(self):
hsv_map = np.zeros((180, 256, 3), np.uint8)
h, s = np.indices(hsv_map.shape[:2])
hsv_map[:,:,0] = h
hsv_map[:,:,1] = s
hsv_map[:,:,2] = 255
hsv_map = cv.cvtColor(hsv_map, cv.COLOR_HSV2BGR)
cv.imshow('hsv_map', hsv_map)
cv.namedWindow('hist', 0)
self.hist_scale = 10
cv.createTrackbar('scale', 'hist', self.hist_scale, 32, self.set_scale)
try:
fn = sys.argv[1]
except:
fn = 0
cam = video.create_capture(fn, fallback='synth:bg=baboon.jpg:class=chess:noise=0.05')
while True:
flag, frame = cam.read()
cv.imshow('camera', frame)
hsv_map = np.zeros((180, 256, 3), np.uint8)
h, s = np.indices(hsv_map.shape[:2])
hsv_map[:,:,0] = h
hsv_map[:,:,1] = s
hsv_map[:,:,2] = 255
hsv_map = cv.cvtColor(hsv_map, cv.COLOR_HSV2BGR)
cv.imshow('hsv_map', hsv_map)
cv.namedWindow('hist', 0)
hist_scale = 10
def set_scale(val):
global hist_scale
hist_scale = val
cv.createTrackbar('scale', 'hist', hist_scale, 32, set_scale)
try:
fn = sys.argv[1]
except:
fn = 0
cam = video.create_capture(fn, fallback='synth:bg=baboon.jpg:class=chess:noise=0.05')
while True:
flag, frame = cam.read()
cv.imshow('camera', frame)
small = cv.pyrDown(frame)
hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV)
dark = hsv[...,2] < 32
hsv[dark] = 0
h = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
h = np.clip(h*0.005*hist_scale, 0, 1)
vis = hsv_map*h[:,:,np.newaxis] / 255.0
cv.imshow('hist', vis)
ch = cv.waitKey(1)
if ch == 27:
break
small = cv.pyrDown(frame)
hsv = cv.cvtColor(small, cv.COLOR_BGR2HSV)
dark = hsv[...,2] < 32
hsv[dark] = 0
h = cv.calcHist([hsv], [0, 1], None, [180, 256], [0, 180, 0, 256])
h = np.clip(h*0.005*self.hist_scale, 0, 1)
vis = hsv_map*h[:,:,np.newaxis] / 255.0
cv.imshow('hist', vis)
ch = cv.waitKey(1)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
App().run()
cv.destroyAllWindows()
......@@ -48,9 +48,7 @@ def make_image():
cv.ellipse( img, (dx+273, dy+100), (20,35), 0, 0, 360, white, -1 )
return img
if __name__ == '__main__':
print(__doc__)
def main():
img = make_image()
h, w = img.shape[:2]
......@@ -67,4 +65,10 @@ if __name__ == '__main__':
cv.createTrackbar( "levels+3", "contours", 3, 7, update )
cv.imshow('image', img)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -65,8 +65,7 @@ def defocus_kernel(d, sz=65):
return kern
if __name__ == '__main__':
print(__doc__)
def main():
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['circle', 'angle=', 'd=', 'snr='])
opts = dict(opts)
......@@ -128,3 +127,11 @@ if __name__ == '__main__':
if ch == ord(' '):
defocus = not defocus
update(None)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -11,8 +11,9 @@ USAGE:
# Python 2/3 compatibility
from __future__ import print_function
import cv2 as cv
import numpy as np
import cv2 as cv
import sys
......@@ -62,8 +63,8 @@ def shift_dft(src, dst=None):
return dst
if __name__ == "__main__":
def main():
if len(sys.argv) > 1:
fname = sys.argv[1]
else:
......@@ -110,4 +111,10 @@ if __name__ == "__main__":
cv.imshow("magnitude", log_spectrum)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -27,12 +27,12 @@ Usage:
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
# built-in modules
from multiprocessing.pool import ThreadPool
import cv2 as cv
import numpy as np
from numpy.linalg import norm
# local modules
......
......@@ -23,6 +23,7 @@ if PY3:
import numpy as np
import cv2 as cv
from multiprocessing.pool import ThreadPool
from digits import *
......
......@@ -96,6 +96,10 @@ def main():
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -19,13 +19,12 @@ import cv2 as cv
from common import make_cmap
if __name__ == '__main__':
def main():
import sys
try:
fn = sys.argv[1]
except:
fn = 'fruits.jpg'
print(__doc__)
fn = cv.samples.findFile(fn)
img = cv.imread(fn, cv.IMREAD_GRAYSCALE)
......@@ -69,4 +68,11 @@ if __name__ == '__main__':
update()
if need_update:
update()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -23,9 +23,7 @@ import video
import sys
if __name__ == '__main__':
print(__doc__)
def main():
try:
fn = sys.argv[1]
except:
......@@ -52,4 +50,11 @@ if __name__ == '__main__':
ch = cv.waitKey(5)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -30,9 +30,8 @@ def draw_rects(img, rects, color):
for x1, y1, x2, y2 in rects:
cv.rectangle(img, (x1, y1), (x2, y2), color, 2)
if __name__ == '__main__':
def main():
import sys, getopt
print(__doc__)
args, video_src = getopt.getopt(sys.argv[1:], '', ['cascade=', 'nested-cascade='])
try:
......@@ -70,4 +69,11 @@ if __name__ == '__main__':
if cv.waitKey(5) == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -19,6 +19,7 @@ from __future__ import print_function
import numpy as np
import cv2 as cv
from common import anorm, getsize
FLANN_INDEX_KDTREE = 1 # bug: flann enums are missing
......@@ -137,9 +138,7 @@ def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
return vis
if __name__ == '__main__':
print(__doc__)
def main():
import sys, getopt
opts, args = getopt.getopt(sys.argv[1:], '', ['feature='])
opts = dict(opts)
......@@ -187,4 +186,11 @@ if __name__ == '__main__':
match_and_draw('find_obj')
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -79,9 +79,7 @@ def update(_=None):
draw_str(img, (20, 20), cur_func_name)
cv.imshow('fit line', img)
if __name__ == '__main__':
print(__doc__)
def main():
cv.namedWindow('fit line')
cv.createTrackbar('noise', 'fit line', 3, 50, update)
cv.createTrackbar('point n', 'fit line', 100, 500, update)
......@@ -96,3 +94,11 @@ if __name__ == '__main__':
cur_func_name = dist_func_names.next()
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -20,61 +20,69 @@ from __future__ import print_function
import numpy as np
import cv2 as cv
if __name__ == '__main__':
import sys
try:
fn = sys.argv[1]
except:
fn = 'fruits.jpg'
print(__doc__)
import sys
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
sys.exit(1)
class App():
h, w = img.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
seed_pt = None
fixed_range = True
connectivity = 4
def update(dummy=None):
if seed_pt is None:
cv.imshow('floodfill', img)
def update(self, dummy=None):
if self.seed_pt is None:
cv.imshow('floodfill', self.img)
return
flooded = img.copy()
mask[:] = 0
flooded = self.img.copy()
self.mask[:] = 0
lo = cv.getTrackbarPos('lo', 'floodfill')
hi = cv.getTrackbarPos('hi', 'floodfill')
flags = connectivity
if fixed_range:
flags = self.connectivity
if self.fixed_range:
flags |= cv.FLOODFILL_FIXED_RANGE
cv.floodFill(flooded, mask, seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags)
cv.circle(flooded, seed_pt, 2, (0, 0, 255), -1)
cv.floodFill(flooded, self.mask, self.seed_pt, (255, 255, 255), (lo,)*3, (hi,)*3, flags)
cv.circle(flooded, self.seed_pt, 2, (0, 0, 255), -1)
cv.imshow('floodfill', flooded)
def onmouse(event, x, y, flags, param):
global seed_pt
def onmouse(self, event, x, y, flags, param):
if flags & cv.EVENT_FLAG_LBUTTON:
seed_pt = x, y
update()
update()
cv.setMouseCallback('floodfill', onmouse)
cv.createTrackbar('lo', 'floodfill', 20, 255, update)
cv.createTrackbar('hi', 'floodfill', 20, 255, update)
while True:
ch = cv.waitKey()
if ch == 27:
break
if ch == ord('f'):
fixed_range = not fixed_range
print('using %s range' % ('floating', 'fixed')[fixed_range])
update()
if ch == ord('c'):
connectivity = 12-connectivity
print('connectivity =', connectivity)
update()
self.seed_pt = x, y
self.update()
def run(self):
try:
fn = sys.argv[1]
except:
fn = 'fruits.jpg'
self.img = cv.imread(cv.samples.findFile(fn))
if self.img is None:
print('Failed to load image file:', fn)
sys.exit(1)
h, w = self.img.shape[:2]
self.mask = np.zeros((h+2, w+2), np.uint8)
self.seed_pt = None
self.fixed_range = True
self.connectivity = 4
self.update()
cv.setMouseCallback('floodfill', self.onmouse)
cv.createTrackbar('lo', 'floodfill', 20, 255, self.update)
cv.createTrackbar('hi', 'floodfill', 20, 255, self.update)
while True:
ch = cv.waitKey()
if ch == 27:
break
if ch == ord('f'):
self.fixed_range = not self.fixed_range
print('using %s range' % ('floating', 'fixed')[self.fixed_range])
self.update()
if ch == ord('c'):
self.connectivity = 12-self.connectivity
print('connectivity =', self.connectivity)
self.update()
print('Done')
if __name__ == '__main__':
print(__doc__)
App().run()
cv.destroyAllWindows()
......@@ -19,6 +19,7 @@ from __future__ import print_function
import numpy as np
import cv2 as cv
from multiprocessing.pool import ThreadPool
......@@ -47,11 +48,10 @@ def process_threaded(img, filters, threadn = 8):
np.maximum(accum, fimg, accum)
return accum
if __name__ == '__main__':
def main():
import sys
from common import Timer
print(__doc__)
try:
img_fn = sys.argv[1]
except:
......@@ -73,4 +73,10 @@ if __name__ == '__main__':
cv.imshow('img', img)
cv.imshow('result', res2)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -9,9 +9,10 @@ if PY3:
xrange = range
import numpy as np
from numpy import random
import cv2 as cv
from numpy import random
def make_gaussians(cluster_n, img_size):
points = []
ref_distrs = []
......@@ -34,7 +35,7 @@ def draw_gaussain(img, mean, cov, color):
cv.ellipse(img, (x, y), (s1, s2), ang, 0, 360, color, 1, cv.LINE_AA)
if __name__ == '__main__':
def main():
cluster_n = 5
img_size = 512
......@@ -66,4 +67,11 @@ if __name__ == '__main__':
ch = cv.waitKey(0)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
This diff is collapsed.
......@@ -18,8 +18,8 @@ Abid Rahman 3/14/12 debug Gary Bradski
# Python 2/3 compatibility
from __future__ import print_function
import cv2 as cv
import numpy as np
import cv2 as cv
bins = np.arange(256).reshape(256,1)
......@@ -53,8 +53,7 @@ def hist_lines(im):
return y
if __name__ == '__main__':
def main():
import sys
if len(sys.argv)>1:
......@@ -116,4 +115,11 @@ if __name__ == '__main__':
print('ESC')
cv.destroyAllWindows()
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -11,13 +11,12 @@ Usage:
# Python 2/3 compatibility
from __future__ import print_function
import cv2 as cv
import numpy as np
import sys
import cv2 as cv
if __name__ == '__main__':
print(__doc__)
import sys
def main():
try:
fn = sys.argv[1]
except IndexError:
......@@ -40,3 +39,10 @@ if __name__ == '__main__':
cv.imshow("source", src)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -13,12 +13,11 @@ from __future__ import print_function
import cv2 as cv
import numpy as np
import sys
import math
if __name__ == '__main__':
print(__doc__)
def main():
try:
fn = sys.argv[1]
except IndexError:
......@@ -52,3 +51,10 @@ if __name__ == '__main__':
cv.imshow("source", src)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -20,17 +20,16 @@ from __future__ import print_function
import numpy as np
import cv2 as cv
from common import Sketcher
if __name__ == '__main__':
def main():
import sys
try:
fn = sys.argv[1]
except:
fn = 'fruits.jpg'
print(__doc__)
img = cv.imread(cv.samples.findFile(fn))
if img is None:
print('Failed to load image file:', fn)
......@@ -51,4 +50,11 @@ if __name__ == '__main__':
img_mark[:] = img
mark[:] = 0
sketch.show()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -18,12 +18,13 @@ PY3 = sys.version_info[0] == 3
if PY3:
long = int
import numpy as np
import cv2 as cv
from math import cos, sin, sqrt
import numpy as np
if __name__ == "__main__":
def main():
img_height = 500
img_width = 500
kalman = cv.KalmanFilter(2, 1, 0)
......@@ -93,4 +94,10 @@ if __name__ == "__main__":
if code in [27, ord('q'), ord('Q')]:
break
cv.destroyWindow("Kalman")
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -18,12 +18,10 @@ import cv2 as cv
from gaussian_mix import make_gaussians
if __name__ == '__main__':
def main():
cluster_n = 5
img_size = 512
print(__doc__)
# generating bright palette
colors = np.zeros((1, cluster_n, 3), np.uint8)
colors[0,:] = 255
......@@ -43,8 +41,15 @@ if __name__ == '__main__':
cv.circle(img, (x, y), 1, c, -1)
cv.imshow('gaussian mixture', img)
cv.imshow('kmeans', img)
ch = cv.waitKey(0)
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -22,6 +22,7 @@ if PY3:
import numpy as np
import cv2 as cv
import video
from common import nothing, getsize
......@@ -44,9 +45,8 @@ def merge_lappyr(levels):
return np.uint8(np.clip(img, 0, 255))
if __name__ == '__main__':
def main():
import sys
print(__doc__)
try:
fn = sys.argv[1]
......@@ -72,3 +72,11 @@ if __name__ == '__main__':
if cv.waitKey(1) == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -145,12 +145,10 @@ class MLP(LetterStatModel):
if __name__ == '__main__':
def main():
import getopt
import sys
print(__doc__)
models = [RTrees, KNearest, Boost, SVM, MLP] # NBayes
models = dict( [(cls.__name__.lower(), cls) for cls in models] )
......@@ -186,4 +184,11 @@ if __name__ == '__main__':
fn = args['--save']
print('saving model to %s ...' % fn)
model.save(fn)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -25,6 +25,7 @@ from __future__ import print_function
import numpy as np
import cv2 as cv
import video
from common import draw_str
from video import presets
......@@ -112,9 +113,11 @@ def main():
except:
video_src = 0
print(__doc__)
App(video_src).run()
cv.destroyAllWindows()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -23,6 +23,7 @@ from __future__ import print_function
import numpy as np
import cv2 as cv
import video
from common import anorm2, draw_str
from time import clock
......@@ -96,9 +97,11 @@ def main():
except:
video_src = 0
print(__doc__)
App(video_src).run()
cv.destroyAllWindows()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -13,11 +13,10 @@ Keys:
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
if __name__ == '__main__':
print(__doc__)
def main():
import sys
try:
fn = sys.argv[1]
......@@ -37,3 +36,10 @@ if __name__ == '__main__':
cv.imshow('linearpolar', img3)
cv.waitKey(0)
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -21,9 +21,7 @@ import numpy as np
import cv2 as cv
if __name__ == '__main__':
print(__doc__)
def main():
import sys
from itertools import cycle
from common import draw_str
......@@ -93,4 +91,11 @@ if __name__ == '__main__':
else:
cur_str_mode = str_modes.next()
update()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -25,59 +25,64 @@ import argparse
from math import *
drag_start = None
sel = (0,0,0,0)
class App():
drag_start = None
sel = (0,0,0,0)
def onmouse(event, x, y, flags, param):
global drag_start, sel
if event == cv.EVENT_LBUTTONDOWN:
drag_start = x, y
sel = 0,0,0,0
elif event == cv.EVENT_LBUTTONUP:
if sel[2] > sel[0] and sel[3] > sel[1]:
patch = gray[sel[1]:sel[3],sel[0]:sel[2]]
result = cv.matchTemplate(gray,patch,cv.TM_CCOEFF_NORMED)
result = np.abs(result)**3
_val, result = cv.threshold(result, 0.01, 0, cv.THRESH_TOZERO)
result8 = cv.normalize(result,None,0,255,cv.NORM_MINMAX,cv.CV_8U)
cv.imshow("result", result8)
drag_start = None
elif drag_start:
#print flags
if flags & cv.EVENT_FLAG_LBUTTON:
minpos = min(drag_start[0], x), min(drag_start[1], y)
maxpos = max(drag_start[0], x), max(drag_start[1], y)
sel = minpos[0], minpos[1], maxpos[0], maxpos[1]
img = cv.cvtColor(gray, cv.COLOR_GRAY2BGR)
cv.rectangle(img, (sel[0], sel[1]), (sel[2], sel[3]), (0,255,255), 1)
cv.imshow("gray", img)
else:
print("selection is complete")
drag_start = None
def onmouse(self, event, x, y, flags, param):
    """Mouse callback for the "gray" window: drag to select a rectangle,
    release to run template matching of the selection against the image.

    Reads/writes instance state: self.drag_start (origin of the current
    drag or None), self.sel (x0, y0, x1, y1 selection rectangle) and
    self.gray (the grayscale source image shown in the window).
    """
    if event == cv.EVENT_LBUTTONDOWN:
        # Begin a new drag: remember the anchor point, clear the old selection.
        self.drag_start = x, y
        self.sel = (0,0,0,0)
    elif event == cv.EVENT_LBUTTONUP:
        # Drag finished: if the selection has positive area, use it as a
        # template and show the (thresholded, emphasised) match map.
        if self.sel[2] > self.sel[0] and self.sel[3] > self.sel[1]:
            patch = self.gray[self.sel[1]:self.sel[3], self.sel[0]:self.sel[2]]
            result = cv.matchTemplate(self.gray, patch, cv.TM_CCOEFF_NORMED)
            # Cubing the absolute score sharpens strong matches relative to weak ones.
            result = np.abs(result)**3
            _val, result = cv.threshold(result, 0.01, 0, cv.THRESH_TOZERO)
            # Stretch to 8-bit for display.
            result8 = cv.normalize(result, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)
            cv.imshow("result", result8)
        self.drag_start = None
    elif self.drag_start:
        #print flags
        if flags & cv.EVENT_FLAG_LBUTTON:
            # Drag in progress with the button held: normalise the corner
            # order and redraw the rubber-band rectangle over the image.
            minpos = min(self.drag_start[0], x), min(self.drag_start[1], y)
            maxpos = max(self.drag_start[0], x), max(self.drag_start[1], y)
            self.sel = (minpos[0], minpos[1], maxpos[0], maxpos[1])
            img = cv.cvtColor(self.gray, cv.COLOR_GRAY2BGR)
            cv.rectangle(img, (self.sel[0], self.sel[1]), (self.sel[2], self.sel[3]), (0,255,255), 1)
            cv.imshow("gray", img)
        else:
            # Button no longer reported as held — treat the drag as finished.
            print("selection is complete")
            self.drag_start = None
if __name__ == '__main__':
print(__doc__)
def run(self):
parser = argparse.ArgumentParser(description='Demonstrate mouse interaction with images')
parser.add_argument("-i","--input", default='../data/', help="Input directory.")
args = parser.parse_args()
path = args.input
cv.namedWindow("gray",1)
cv.setMouseCallback("gray", self.onmouse)
'''Loop through all the images in the directory'''
for infile in glob.glob( os.path.join(path, '*.*') ):
ext = os.path.splitext(infile)[1][1:] #get the filename extension
if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
print(infile)
parser = argparse.ArgumentParser(description='Demonstrate mouse interaction with images')
parser.add_argument("-i","--input", default='../data/', help="Input directory.")
args = parser.parse_args()
path = args.input
img = cv.imread(infile,1)
if img is None:
continue
self.sel = (0,0,0,0)
self.drag_start = None
self.gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow("gray", self.gray)
if cv.waitKey() == 27:
break
cv.namedWindow("gray",1)
cv.setMouseCallback("gray", onmouse)
'''Loop through all the images in the directory'''
for infile in glob.glob( os.path.join(path, '*.*') ):
ext = os.path.splitext(infile)[1][1:] #get the filename extension
if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
print(infile)
print('Done')
img=cv.imread(infile,1)
if img is None:
continue
sel = (0,0,0,0)
drag_start = None
gray=cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow("gray",gray)
if cv.waitKey() == 27:
break
if __name__ == '__main__':
print(__doc__)
App().run()
cv.destroyAllWindows()
......@@ -14,12 +14,16 @@ Keys:
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import video
import sys
if __name__ == '__main__':
def main():
try:
video_src = sys.argv[1]
except:
......@@ -42,4 +46,11 @@ if __name__ == '__main__':
cv.imshow('img', vis)
if cv.waitKey(5) == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -13,11 +13,11 @@ Usage:
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
if __name__ == '__main__':
def main():
import sys
print(__doc__)
try:
param = sys.argv[1]
......@@ -31,3 +31,11 @@ if __name__ == '__main__':
print("\t--help\n\t\tprint this help")
else:
print("Welcome to OpenCV")
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -18,6 +18,7 @@ from __future__ import print_function
import numpy as np
import cv2 as cv
import video
......@@ -55,9 +56,8 @@ def warp_flow(img, flow):
res = cv.remap(img, flow, None, cv.INTER_LINEAR)
return res
if __name__ == '__main__':
def main():
import sys
print(__doc__)
try:
fn = sys.argv[1]
except IndexError:
......@@ -94,4 +94,11 @@ if __name__ == '__main__':
if show_glitch:
cur_glitch = img.copy()
print('glitch is', ['off', 'on'][show_glitch])
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -30,13 +30,11 @@ def draw_detections(img, rects, thickness = 1):
cv.rectangle(img, (x+pad_w, y+pad_h), (x+w-pad_w, y+h-pad_h), (0, 255, 0), thickness)
if __name__ == '__main__':
def main():
import sys
from glob import glob
import itertools as it
print(__doc__)
hog = cv.HOGDescriptor()
hog.setSVMDetector( cv.HOGDescriptor_getDefaultPeopleDetector() )
......@@ -68,4 +66,11 @@ if __name__ == '__main__':
ch = cv.waitKey()
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -7,6 +7,7 @@ Loads several images sequentially and tries to find squares in each image.
'''
# Python 2/3 compatibility
from __future__ import print_function
import sys
PY3 = sys.version_info[0] == 3
......@@ -42,7 +43,7 @@ def find_squares(img):
squares.append(cnt)
return squares
if __name__ == '__main__':
def main():
from glob import glob
for fn in glob('../data/pic*.png'):
img = cv.imread(fn)
......@@ -52,4 +53,11 @@ if __name__ == '__main__':
ch = cv.waitKey()
if ch == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -33,7 +33,7 @@ def write_ply(fn, verts, colors):
np.savetxt(f, verts, fmt='%f %f %f %d %d %d ')
if __name__ == '__main__':
def main():
print('loading images...')
imgL = cv.pyrDown(cv.imread(cv.samples.findFile('aloeL.jpg'))) # downscale images for faster processing
imgR = cv.pyrDown(cv.imread(cv.samples.findFile('aloeR.jpg')))
......@@ -75,4 +75,11 @@ if __name__ == '__main__':
cv.imshow('left', imgL)
cv.imshow('disparity', (disp-min_disp)/num_disp)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -16,7 +16,7 @@ from __future__ import print_function
import numpy as np
import cv2 as cv
if __name__ == '__main__':
def main():
import sys
try:
fn = sys.argv[1]
......@@ -45,3 +45,11 @@ if __name__ == '__main__':
cv.imshow('input', img)
cv.imshow('flow', vis)
cv.waitKey()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -5,9 +5,10 @@
from __future__ import print_function
import numpy as np
import cv2 as cv
from numpy import pi, sin, cos
import cv2 as cv
defaultSize = 512
......@@ -86,7 +87,7 @@ class TestSceneRender():
else:
self.currentRect = self.initialRect + np.int( 30*cos(self.time*self.speed) + 50*sin(self.time*self.speed))
if self.deformation:
self.currentRect[1:3] += self.h/20*cos(self.time)
self.currentRect[1:3] += int(self.h/20*cos(self.time))
cv.fillConvexPoly(img, self.currentRect, (0, 0, 255))
self.time += self.timeStep
......@@ -96,8 +97,7 @@ class TestSceneRender():
self.time = 0.0
if __name__ == '__main__':
def main():
backGr = cv.imread(cv.samples.findFile('graf1.png'))
fgr = cv.imread(cv.samples.findFile('box.png'))
......@@ -111,6 +111,11 @@ if __name__ == '__main__':
ch = cv.waitKey(3)
if ch == 27:
break
#import os
#print (os.environ['PYTHONPATH'])
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -27,7 +27,7 @@ USAGE: turing.py [-o <output.avi>]
Press ESC to stop.
'''
if __name__ == '__main__':
def main():
print(help_message)
w, h = 512, 512
......@@ -71,4 +71,11 @@ if __name__ == '__main__':
cv.imshow('a', vis)
if cv.waitKey(5) == 27:
break
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
import cv2 as cv
import numpy as np
import argparse
W = 52 # window size is WxW
C_Thr = 0.43 # threshold for coherency
LowThr = 35 # threshold1 for orientation, it ranges from 0 to 180
HighThr = 57 # threshold2 for orientation, it ranges from 0 to 180
## [calcGST]
## [calcJ_header]
## [calcGST_proto]
def calcGST(inputIMG, w):
## [calcGST_proto]
    """Compute coherency and orientation maps from the Gradient Structure Tensor.

    Parameters:
        inputIMG -- single-channel image; converted to float32 internally
        w        -- side length of the square window used to average the
                    tensor components locally

    Returns:
        (coherency, orientation) -- float32 images: anisotropy degree and
        local orientation angle in degrees.
    """
    fimg = inputIMG.astype(np.float32)

    # Structure tensor J = (J11 J12; J12 J22) built from Sobel gradients.
    gradX = cv.Sobel(fimg, cv.CV_32F, 1, 0, 3)
    gradY = cv.Sobel(fimg, cv.CV_32F, 0, 1, 3)
    prodXY = cv.multiply(gradX, gradY)
    ## [calcJ_header]
    prodXX = cv.multiply(gradX, gradX)
    prodYY = cv.multiply(gradY, gradY)

    # Average each component over a w x w box window.
    J11 = cv.boxFilter(prodXX, cv.CV_32F, (w,w))
    J22 = cv.boxFilter(prodYY, cv.CV_32F, (w,w))
    J12 = cv.boxFilter(prodXY, cv.CV_32F, (w,w))

    # Eigenvalues (up to a common factor, which cancels in the coherency ratio):
    #   lambda1 = J11 + J22 + sqrt((J11-J22)^2 + 4*J12^2)
    #   lambda2 = J11 + J22 - sqrt((J11-J22)^2 + 4*J12^2)
    trace = J11 + J22
    delta = J11 - J22
    discr = np.sqrt(cv.multiply(delta, delta) + 4.0 * cv.multiply(J12, J12))
    lambda1 = trace + discr  # biggest eigenvalue
    lambda2 = trace - discr  # smallest eigenvalue

    # Coherency = (lambda1 - lambda2) / (lambda1 + lambda2):
    # measure of anisotropy / consistency of the local orientation.
    imgCoherencyOut = cv.divide(lambda1 - lambda2, lambda1 + lambda2)

    # Orientation angle: Alpha = 0.5 * atan2(2*J12, J22 - J11), in degrees.
    imgOrientationOut = 0.5 * cv.phase(J22 - J11, 2.0 * J12, angleInDegrees = True)
    return imgCoherencyOut, imgOrientationOut
## [calcGST]
# Command-line interface: the tutorial takes a single required input image path.
parser = argparse.ArgumentParser(description='Code for Anisotropic image segmentation tutorial.')
parser.add_argument('-i', '--input', help='Path to input image.', required=True)
args = parser.parse_args()

# Load the image as grayscale; the structure tensor works on one channel.
imgIn = cv.imread(args.input, cv.IMREAD_GRAYSCALE)
if imgIn is None:
    print('Could not open or find the image: {}'.format(args.input))
    exit(0)
## [main_extra]
## [main]
# Coherency (anisotropy degree) and orientation maps over W x W windows.
imgCoherency, imgOrientation = calcGST(imgIn, W)
## [thresholding]
# Keep only strongly anisotropic pixels (coherency above C_Thr) and pixels
# whose orientation falls inside the [LowThr, HighThr] band.
_, imgCoherencyBin = cv.threshold(imgCoherency, C_Thr, 255, cv.THRESH_BINARY)
_, imgOrientationBin = cv.threshold(imgOrientation, LowThr, HighThr, cv.THRESH_BINARY)
## [thresholding]
## [combining]
# A pixel belongs to the segmented region only if both criteria hold.
imgBin = cv.bitwise_and(imgCoherencyBin, imgOrientationBin)
## [combining]
## [main]
# Normalize the float maps to [0, 1] for display purposes only.
imgCoherency = cv.normalize(imgCoherency, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
imgOrientation = cv.normalize(imgOrientation, None, alpha=0, beta=1, norm_type=cv.NORM_MINMAX, dtype=cv.CV_32F)
# Overlay: average of the source and the binary mask highlights the segmentation.
cv.imshow('result.jpg', np.uint8(0.5*(imgIn + imgBin)))
cv.imshow('Coherency.jpg', imgCoherency)
cv.imshow('Orientation.jpg', imgOrientation)
cv.waitKey(0)
## [main_extra]
......@@ -32,13 +32,13 @@ Keys:
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
import re
import numpy as np
from numpy import pi, sin, cos
import cv2 as cv
# built-in modules
from time import clock
......
......@@ -36,11 +36,9 @@ class DummyTask:
def get(self):
return self.data
if __name__ == '__main__':
def main():
import sys
print(__doc__)
try:
fn = sys.argv[1]
except:
......@@ -86,4 +84,11 @@ if __name__ == '__main__':
threaded_mode = not threaded_mode
if ch == 27:
break
cv.destroyAllWindows()
print('Done')
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -17,51 +17,62 @@ Keys:
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
def decode_fourcc(v):
    """Turn a numeric FOURCC code (as returned by VideoCapture.get, possibly
    a float) into its 4-character string form, least significant byte first."""
    code = int(v)
    chars = []
    for shift in (0, 8, 16, 24):
        chars.append(chr((code >> shift) & 0xFF))
    return "".join(chars)
def main():
font = cv.FONT_HERSHEY_SIMPLEX
color = (0, 255, 0)
def decode_fourcc(v):
v = int(v)
return "".join([chr((v >> 8 * i) & 0xFF) for i in range(4)])
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_AUTOFOCUS, False) # Known bug: https://github.com/opencv/opencv/pull/5474
font = cv.FONT_HERSHEY_SIMPLEX
color = (0, 255, 0)
cv.namedWindow("Video")
cap = cv.VideoCapture(0)
cap.set(cv.CAP_PROP_AUTOFOCUS, False) # Known bug: https://github.com/opencv/opencv/pull/5474
convert_rgb = True
fps = int(cap.get(cv.CAP_PROP_FPS))
focus = int(min(cap.get(cv.CAP_PROP_FOCUS) * 100, 2**31-1)) # ceil focus to C_LONG as Python3 int can go to +inf
cv.namedWindow("Video")
cv.createTrackbar("FPS", "Video", fps, 30, lambda v: cap.set(cv.CAP_PROP_FPS, v))
cv.createTrackbar("Focus", "Video", focus, 100, lambda v: cap.set(cv.CAP_PROP_FOCUS, v / 100))
convert_rgb = True
fps = int(cap.get(cv.CAP_PROP_FPS))
focus = int(min(cap.get(cv.CAP_PROP_FOCUS) * 100, 2**31-1)) # ceil focus to C_LONG as Python3 int can go to +inf
while True:
status, img = cap.read()
cv.createTrackbar("FPS", "Video", fps, 30, lambda v: cap.set(cv.CAP_PROP_FPS, v))
cv.createTrackbar("Focus", "Video", focus, 100, lambda v: cap.set(cv.CAP_PROP_FOCUS, v / 100))
fourcc = decode_fourcc(cap.get(cv.CAP_PROP_FOURCC))
while True:
status, img = cap.read()
fps = cap.get(cv.CAP_PROP_FPS)
fourcc = decode_fourcc(cap.get(cv.CAP_PROP_FOURCC))
if not bool(cap.get(cv.CAP_PROP_CONVERT_RGB)):
if fourcc == "MJPG":
img = cv.imdecode(img, cv.IMREAD_GRAYSCALE)
elif fourcc == "YUYV":
img = cv.cvtColor(img, cv.COLOR_YUV2GRAY_YUYV)
else:
print("unsupported format")
fps = cap.get(cv.CAP_PROP_FPS)
if not bool(cap.get(cv.CAP_PROP_CONVERT_RGB)):
if fourcc == "MJPG":
img = cv.imdecode(img, cv.IMREAD_GRAYSCALE)
elif fourcc == "YUYV":
img = cv.cvtColor(img, cv.COLOR_YUV2GRAY_YUYV)
else:
print("unsupported format")
break
cv.putText(img, "Mode: {}".format(fourcc), (15, 40), font, 1.0, color)
cv.putText(img, "FPS: {}".format(fps), (15, 80), font, 1.0, color)
cv.imshow("Video", img)
k = cv.waitKey(1)
if k == 27:
break
elif k == ord('g'):
convert_rgb = not convert_rgb
cap.set(cv.CAP_PROP_CONVERT_RGB, convert_rgb)
cv.putText(img, "Mode: {}".format(fourcc), (15, 40), font, 1.0, color)
cv.putText(img, "FPS: {}".format(fps), (15, 80), font, 1.0, color)
cv.imshow("Video", img)
print('Done')
k = cv.waitKey(1)
if k == 27:
break
elif k == ord('g'):
convert_rgb = not convert_rgb
cap.set(cv.CAP_PROP_CONVERT_RGB, convert_rgb)
if __name__ == '__main__':
print(__doc__)
main()
cv.destroyAllWindows()
......@@ -76,10 +76,10 @@ class App:
if __name__ == '__main__':
print(__doc__)
import sys
try:
fn = sys.argv[1]
except:
fn = 'fruits.jpg'
print(__doc__)
App(cv.samples.findFile(fn)).run()
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment