Commit f666bd22 authored by Andrey Kamaev

Merged the trunk r8595:8668 (except iOS and new gpu functionality)

parent bd0e0b58
......@@ -36,13 +36,13 @@
# ANDROID_NDK=/opt/android-ndk - path to the NDK root.
# Can be set as environment variable. Can be set only at first cmake run.
#
# ANDROID_STANDALONE_TOOLCHAIN=/opt/android-toolchain - path to the
# standalone toolchain. This option is not used if full NDK is found
# (ignored if ANDROID_NDK is set).
# Can be set as environment variable. Can be set only at first cmake run.
#
# ANDROID_ABI=armeabi-v7a - specifies the target Application Binary
# Interface (ABI). This option nearly matches the APP_ABI variable
# used by the ndk-build tool from the Android NDK.
# Possible values are:
# "armeabi" - matches to the NDK ABI with the same name.
......@@ -94,10 +94,10 @@
# The flags will be prepopulated with critical flags, so don't lose them.
# Also be aware that the toolchain sets configuration-specific compiler
# and linker flags on its own.
#
# ANDROID and BUILD_ANDROID will be set to true; you may test either of these
# variables to make any necessary Android-specific configuration changes.
#
# Also exactly one of ARMEABI, ARMEABI_V7A or X86 will be set to true.
# The NEON option will be set to true if VFP is set to NEON.
#
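#
# For example, a project's CMakeLists.txt can branch on these flags
# (a minimal sketch, assuming the toolchain file has already been loaded;
# the MY_PROJECT_USE_NEON define is hypothetical):
#
#   if(ANDROID)
#     if(ARMEABI_V7A AND NEON)
#       add_definitions(-DMY_PROJECT_USE_NEON)  # enable NEON code paths
#     endif()
#   endif()
#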
......@@ -131,7 +131,7 @@
# cmake pass
# [~] toolchain exits with error if ARM_TARGET is not recognized
# - modified June 2011
# [~] default NDK path is updated for version r5c
# [+] variable CMAKE_SYSTEM_PROCESSOR is set based on ARM_TARGET
# [~] toolchain install directory is added to linker paths
# [-] removed SWIG-related stuff from toolchain
......@@ -705,12 +705,14 @@ set( ANDROID_SYSTEM_LIB_DIRS "" )
set( LIBRARY_OUTPUT_PATH_ROOT ${CMAKE_SOURCE_DIR} CACHE PATH "root for library output, set this to change where android libs are installed to" )
set( CMAKE_INSTALL_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/user" CACHE STRING "path for installing" )
if( EXISTS "${CMAKE_SOURCE_DIR}/jni/CMakeLists.txt" )
set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin/${ANDROID_NDK_ABI_NAME}" CACHE PATH "Output directory for applications" )
else()
set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin" CACHE PATH "Output directory for applications" )
if(NOT _CMAKE_IN_TRY_COMPILE)
if( EXISTS "${CMAKE_SOURCE_DIR}/jni/CMakeLists.txt" )
set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin/${ANDROID_NDK_ABI_NAME}" CACHE PATH "Output directory for applications" )
else()
set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin" CACHE PATH "Output directory for applications" )
endif()
set( LIBRARY_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME}" CACHE PATH "path for android libs" )
endif()
set( LIBRARY_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME}" CACHE PATH "path for android libs" )
#includes
list( APPEND ANDROID_SYSTEM_INCLUDE_DIRS "${ANDROID_SYSROOT}/usr/include" )
......@@ -960,7 +962,7 @@ endif()
set( ANDROID True )
set( BUILD_ANDROID True )
# where is the target environment
set( CMAKE_FIND_ROOT_PATH "${ANDROID_TOOLCHAIN_ROOT}/bin" "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}" "${ANDROID_SYSROOT}" "${CMAKE_INSTALL_PREFIX}" "${CMAKE_INSTALL_PREFIX}/share" )
# only search for libraries and includes in the ndk toolchain
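# (in stock CMake cross-compiling setups this is expressed with the standard
# mode switches; the lines below are a sketch of what such settings look
# like, not necessarily the exact lines used by this toolchain)
#
#   set( CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY )
#   set( CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY )
#   set( CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY )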
......@@ -1053,7 +1055,7 @@ endif()
# ANDROID_ABI : "armeabi-v7a" (default), "armeabi", "armeabi-v7a with NEON", "armeabi-v7a with VFPV3", "armeabi-v6 with VFP", "x86", "mips"
# ANDROID_NATIVE_API_LEVEL : 3,4,5,8,9,14 (depends on NDK version)
# ANDROID_SET_OBSOLETE_VARIABLES : ON/OFF
# ANDROID_USE_STLPORT : OFF/ON - EXPERIMENTAL!!!
# ANDROID_FORBID_SYGWIN : ON/OFF
# ANDROID_NO_UNDEFINED : ON/OFF
# ANDROID_SO_UNDEFINED : OFF/ON (default depends on NDK version)
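#
# On the command line these options map to ordinary -D cache definitions,
# e.g. (an illustrative sketch; pick values that your NDK release supports):
#
#   cmake -DCMAKE_TOOLCHAIN_FILE=android.toolchain.cmake \
#         -DANDROID_ABI=armeabi-v7a \
#         -DANDROID_NATIVE_API_LEVEL=8 \
#         -DANDROID_USE_STLPORT=ON <path to the source tree>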
......
......@@ -6,53 +6,37 @@ Installation in iOS
Required packages
==================
* GCC 4.x or later
* CMake 2.8 or higher
* Xcode 4.0 or higher
* CMake 2.8.8 or higher
* Xcode 4.3 or higher
Getting the cutting-edge OpenCV from SourceForge SVN repository
-----------------------------------------------------------------
Launch SVN client and checkout either
a. the current OpenCV snapshot from here: http://code.opencv.org/svn/opencv/trunk
#. or the latest tested OpenCV snapshot from here: http://code.opencv.org/svn/opencv/tags/latest_tested_snapshot
Launch SVN client and checkout the current OpenCV snapshot from here: http://code.opencv.org/svn/opencv/trunk/opencv
On Mac OS X it can be done using the following command in Terminal:
.. code-block:: bash
cd ~/<my_working_directory>
svn co http://code.opencv.org/svn/opencv/trunk
svn co http://code.opencv.org/svn/opencv/trunk/opencv
Building OpenCV from source using CMake, using the command line
================================================================
#. Create a temporary directory, which we denote as <cmake_binary_dir>, where you want to put the generated Makefiles and project files, as well as the object files and output binaries
#. Enter the <cmake_binary_dir> and type
.. code-block:: bash
cmake [<some optional parameters>] <path to the OpenCV source directory>
For example
#. Make symbolic link for Xcode to let OpenCV build scripts find the compiler, header files etc.
.. code-block:: bash
cd /
sudo ln -s /Applications/Xcode.app/Contents/Developer Developer
cd ~/opencv
cd ..
mkdir release
cd release
cmake -GXcode -DCMAKE_TOOLCHAIN_FILE=../opencv/ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake -DCMAKE_INSTALL_PREFIX=../OpenCV_iPhoneOS -DCMAKE_BUILD_TYPE=RELEASE ../opencv
#. Enter the created temporary directory (<cmake_binary_dir>) and proceed with:
.. code-block:: bash
xcodebuild -sdk iphoneos -configuration Release -target ALL_BUILD
xcodebuild -sdk iphoneos -configuration Release -target install install
#. Build OpenCV framework
.. code-block:: bash
cd ~/<my_working_directory>
python opencv/ios/build_framework.py ios
If everything's fine, after a few minutes you will get ~/<my_working_directory>/ios/opencv2.framework. You can add this framework to your Xcode projects.
......@@ -16,7 +16,7 @@ Installation by using the pre-built libraries
1. Open up a web browser and go to: http://sourceforge.net/projects/opencvlibrary/files/opencv-win/
#. Open the folder for the latest version (currently this is 2.4).
#. Open the folder for the latest version (currently this is 2.4.1).
#. Choose a build you want to use and download it. The naming conventions used will show what kind of support they offer. For example:
......@@ -60,10 +60,10 @@ If you are building your own libraries you can take either the source files from
.. container:: enumeratevisibleitemswithsquare
+ stable and tested build - http://code.opencv.org/svn/opencv/branches/2.4 (the number at the end will change with every new release, so change it to that)
+ stable and tested build - http://code.opencv.org/svn/opencv/tags/2.4.1 (the number at the end will change with every new release, so change it to that)
+ development build - http://code.opencv.org/svn/opencv/trunk/
While the latter one may contain a couple of new and experimental algorithms, performance increases and interface improvements, be aware, that it may also contain many-many bugs. Using the first one is recommended in most of the cases. That is unless you are extending the OpenCV library itself or really need the most up to date version of it.
While the latter one may contain a couple of new and experimental algorithms, performance increases and interface improvements, be aware that it may also contain some bugs. Using the first one is recommended in most cases, unless you are extending the OpenCV library itself or really need the most up-to-date version of it.
Building the OpenCV library from scratch requires a couple of tools installed beforehand:
......@@ -287,11 +287,11 @@ Building the library
+ *BUILD_DOCS* -> It creates two projects for building the documentation of OpenCV (there will be a separate project for building the HTML and the PDF files). Note that these aren't built together with the solution. You need to issue an explicit build command on these projects to do so.
+ *BUILD_EXAMPLES* -> OpenCV comes with many example applications from which you may learn most of the library's capabilities. These also come in handy when you want to verify that OpenCV is fully functional on your computer.
+ *BUILD_JAVA_SUPPORT* -> At the moment this has no real meaning on the Windows platform. Ignore it.
+ *BUILD_NEW_PYTHON_SUPPORT* -> Self-explanatory. Create the binaries to use OpenCV from the Python language.
+ *BUILD_PACKAGE* -> Prior to version 2.3 this built a project that creates an OpenCV installer, with which you can easily install your OpenCV flavor on other systems. For the latest OpenCV sources it instead generates a project that simply creates a zip archive with the OpenCV sources.
+ *BUILD_SHARED_LIBS* -> With this you control whether to build DLL files (when turned on) or static library files (\*.lib) otherwise.
+ *BUILD_TESTS* -> Each module of OpenCV has a test project assigned to it. Building these test projects is also a good way to verify that the modules work as expected on your system.
+ *BUILD_PERF_TESTS* -> There are also performance tests for many OpenCV functions. If you're concerned about performance, build them and run.
+ *BUILD_opencv_python* -> Self-explanatory. Create the binaries to use OpenCV from the Python language.
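If you prefer scripting the configuration over clicking through the GUI, the same switches can be passed as ordinary CMake cache definitions on the command line (an illustrative sketch; the generator name depends on your Visual Studio version):

.. code-block:: bash

   cmake -G "Visual Studio 10" -DBUILD_EXAMPLES=ON -DBUILD_PERF_TESTS=OFF -DBUILD_SHARED_LIBS=ON <path to the OpenCV source directory>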
Press the *Configure* button again and ensure no errors are reported. If this is the case, you can tell CMake to create the project files by pushing the *Generate* button. Go to the build directory and open the created **OpenCV** solution.
Depending on how many of the above options you have selected, the solution may contain quite a lot of projects, so be patient while the IDE loads them at startup.
......
......@@ -393,7 +393,7 @@ bool computeKsi( int transformType,
const Mat& image0, const Mat& cloud0,
const Mat& image1, const Mat& dI_dx1, const Mat& dI_dy1,
const Mat& corresps, int correspsCount,
double fx, double fy, double sobelScale, double normScale, double determinantThreshold,
double fx, double fy, double sobelScale, double determinantThreshold,
Mat& ksi )
{
int Cwidth = -1;
......@@ -419,6 +419,7 @@ bool computeKsi( int transformType,
Mat C( correspsCount, Cwidth, CV_64FC1 );
Mat dI_dt( correspsCount, 1, CV_64FC1 );
double sigma = 0;
int pointCount = 0;
for( int v0 = 0; v0 < corresps.rows; v0++ )
{
......@@ -428,14 +429,36 @@ bool computeKsi( int transformType,
{
int u1, v1;
get2shorts( corresps.at<int>(v0,u0), u1, v1 );
double diff = static_cast<double>(image1.at<uchar>(v1,u1)) -
static_cast<double>(image0.at<uchar>(v0,u0));
sigma += diff * diff;
pointCount++;
}
}
}
sigma = std::sqrt(sigma/pointCount);
pointCount = 0;
for( int v0 = 0; v0 < corresps.rows; v0++ )
{
for( int u0 = 0; u0 < corresps.cols; u0++ )
{
if( corresps.at<int>(v0,u0) != -1 )
{
int u1, v1;
get2shorts( corresps.at<int>(v0,u0), u1, v1 );
double diff = static_cast<double>(image1.at<uchar>(v1,u1)) -
static_cast<double>(image0.at<uchar>(v0,u0));
double w = sigma + std::abs(diff);
w = w > DBL_EPSILON ? 1./w : 1.;
(*computeCFuncPtr)( (double*)C.ptr(pointCount),
normScale * sobelScale * dI_dx1.at<short int>(v1,u1),
normScale * sobelScale * dI_dy1.at<short int>(v1,u1),
w * sobelScale * dI_dx1.at<short int>(v1,u1),
w * sobelScale * dI_dy1.at<short int>(v1,u1),
cloud0.at<Point3f>(v0,u0), fx, fy);
dI_dt.at<double>(pointCount) = normScale * (static_cast<double>(image1.at<uchar>(v1,u1)) -
static_cast<double>(image0.at<uchar>(v0,u0)));
dI_dt.at<double>(pointCount) = w * diff;
pointCount++;
}
}
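In effect, this two-pass update replaces the removed fixed normScale factor with a per-correspondence robust weight: the first loop estimates the spread of the photometric error over the N valid correspondences,

    \sigma = \sqrt{ \frac{1}{N} \sum d^2 }, \quad d = I_1(u_1,v_1) - I_0(u_0,v_0),

and the second loop scales each equation by

    w = \begin{cases} 1/(\sigma + |d|) & \text{if } \sigma + |d| > \mathrm{DBL\_EPSILON} \\ 1 & \text{otherwise,} \end{cases}

so correspondences with a large intensity difference contribute less to the least-squares system solved for ksi.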
......@@ -556,8 +579,6 @@ bool cv::RGBDOdometry( cv::Mat& Rt, const Mat& initRt,
const double fx = levelCameraMatrix.at<double>(0,0);
const double fy = levelCameraMatrix.at<double>(1,1);
const double avgf = 0.5 *(fx + fy);
const double normScale = 1./(255*avgf);
const double determinantThreshold = 1e-6;
Mat corresps( levelImage0.size(), levelImage0.type(), CV_32SC1 );
......@@ -576,7 +597,7 @@ bool cv::RGBDOdometry( cv::Mat& Rt, const Mat& initRt,
levelImage0, levelCloud0,
levelImage1, level_dI_dx1, level_dI_dy1,
corresps, correspsCount,
fx, fy, sobelScale, normScale, determinantThreshold,
fx, fy, sobelScale, determinantThreshold,
ksi );
if( !solutionExist )
......
......@@ -90,9 +90,9 @@ public:
Distance d = Distance()) :
dataset_(input_data), index_params_(params), distance_(d)
{
table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);
table_number_ = get_param<int>(index_params_,"table_number",12);
key_size_ = get_param<int>(index_params_,"key_size",20);
multi_probe_level_ = get_param<int>(index_params_,"multi_probe_level",2);
feature_size_ = (unsigned)dataset_.cols;
fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
......
......@@ -1622,16 +1622,16 @@ NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
continue;
}
NcvSize32s srcRoi, srcIIRoi, scaledIIRoi, searchRoi;
NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi;
srcRoi.width = d_srcImg.width();
srcRoi.height = d_srcImg.height();
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRoi.width = srcRoi.width + 1;
srcIIRoi.height = srcRoi.height + 1;
srcIIRo_i.width = srcRoi_.width + 1;
srcIIRo_i.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
scaledIIRoi.width = srcIIRo_i.width / scale;
scaledIIRoi.height = srcIIRo_i.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
......@@ -1659,12 +1659,12 @@ NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
{
Ncv32u scale = scalesVector[i];
NcvSize32u srcRoi, scaledIIRoi, searchRoi;
NcvSize32u srcRoi_, scaledIIRoi, searchRoi;
NcvSize32u srcIIRoi;
srcRoi.width = d_srcImg.width();
srcRoi.height = d_srcImg.height();
srcIIRoi.width = srcRoi.width + 1;
srcIIRoi.height = srcRoi.height + 1;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRoi.width = srcRoi_.width + 1;
srcIIRoi.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
......
......@@ -1414,17 +1414,17 @@ NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
//calculate hierarchical partial sums
for (Ncv32u i=1; i<partSumNums.size()-1; i++)
{
dim3 grid(partSumNums[i+1]);
if (grid.x > 65535)
dim3 grid_partial(partSumNums[i+1]);
if (grid_partial.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
grid_partial.y = (grid_partial.x + 65534) / 65535;
grid_partial.x = 65535;
}
if (grid.x != 1)
if (grid_partial.x != 1)
{
removePass1Scan
<false, true>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
d_hierSums.ptr() + partSumOffsets[i+1],
......@@ -1434,7 +1434,7 @@ NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
{
removePass1Scan
<false, false>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
NULL,
......
......@@ -723,16 +723,16 @@ static NCVStatus drawRectsWrapperHost(T *h_dst,
if (rect.x < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
{
h_dst[i*dstStride+rect.x] = color;
h_dst[each*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
{
h_dst[i*dstStride+rect.x+rect.width-1] = color;
h_dst[each*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)
......
......@@ -623,11 +623,11 @@ class NCVVectorAlloc : public NCVVector<T>
{
NCVVectorAlloc();
NCVVectorAlloc(const NCVVectorAlloc &);
NCVVectorAlloc& operator=(const NCVVectorAlloc<T>&);
public:
NCVVectorAlloc(INCVMemAllocator &allocator_, Ncv32u length)
NCVVectorAlloc(INCVMemAllocator &allocator_, Ncv32u length_)
:
allocator(allocator_)
{
......@@ -636,11 +636,11 @@ public:
this->clear();
this->allocatedMem.clear();
ncvStat = allocator.alloc(this->allocatedMem, length * sizeof(T));
ncvStat = allocator.alloc(this->allocatedMem, length_ * sizeof(T));
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "NCVVectorAlloc ctor:: alloc failed", );
this->_ptr = (T *)this->allocatedMem.begin.ptr;
this->_length = length;
this->_length = length_;
this->_memtype = this->allocatedMem.begin.memtype;
}
......@@ -698,15 +698,15 @@ public:
this->bReused = true;
}
NCVVectorReuse(const NCVMemSegment &memSegment, Ncv32u length)
NCVVectorReuse(const NCVMemSegment &memSegment, Ncv32u length_)
{
this->bReused = false;
this->clear();
ncvAssertPrintReturn(length * sizeof(T) <= memSegment.size, \
ncvAssertPrintReturn(length_ * sizeof(T) <= memSegment.size, \
"NCVVectorReuse ctor:: memory binding failed due to size mismatch", );
this->_length = length;
this->_length = length_;
this->_ptr = (T *)memSegment.begin.ptr;
this->_memtype = memSegment.begin.memtype;
......@@ -841,34 +841,34 @@ class NCVMatrixAlloc : public NCVMatrix<T>
NCVMatrixAlloc& operator=(const NCVMatrixAlloc &);
public:
NCVMatrixAlloc(INCVMemAllocator &allocator, Ncv32u width, Ncv32u height, Ncv32u _pitch=0)
NCVMatrixAlloc(INCVMemAllocator &allocator_, Ncv32u width_, Ncv32u height_, Ncv32u pitch_=0)
:
allocator(allocator)
allocator(allocator_)
{
NCVStatus ncvStat;
this->clear();
this->allocatedMem.clear();
Ncv32u widthBytes = width * sizeof(T);
Ncv32u widthBytes = width_ * sizeof(T);
Ncv32u pitchBytes = alignUp(widthBytes, allocator.alignment());
if (_pitch != 0)
if (pitch_ != 0)
{
ncvAssertPrintReturn(_pitch >= pitchBytes &&
(_pitch & (allocator.alignment() - 1)) == 0,
ncvAssertPrintReturn(pitch_ >= pitchBytes &&
(pitch_ & (allocator.alignment() - 1)) == 0,
"NCVMatrixAlloc ctor:: incorrect pitch passed", );
pitchBytes = _pitch;
pitchBytes = pitch_;
}
Ncv32u requiredAllocSize = pitchBytes * height;
Ncv32u requiredAllocSize = pitchBytes * height_;
ncvStat = allocator.alloc(this->allocatedMem, requiredAllocSize);
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "NCVMatrixAlloc ctor:: alloc failed", );
this->_ptr = (T *)this->allocatedMem.begin.ptr;
this->_width = width;
this->_height = height;
this->_width = width_;
this->_height = height_;
this->_pitch = pitchBytes;
this->_memtype = this->allocatedMem.begin.memtype;
}
......@@ -916,34 +916,34 @@ class NCVMatrixReuse : public NCVMatrix<T>
public:
NCVMatrixReuse(const NCVMemSegment &memSegment, Ncv32u alignment, Ncv32u width, Ncv32u height, Ncv32u pitch=0, NcvBool bSkipPitchCheck=false)
NCVMatrixReuse(const NCVMemSegment &memSegment, Ncv32u alignment, Ncv32u width_, Ncv32u height_, Ncv32u pitch_=0, NcvBool bSkipPitchCheck=false)
{
this->bReused = false;
this->clear();
Ncv32u widthBytes = width * sizeof(T);
Ncv32u widthBytes = width_ * sizeof(T);
Ncv32u pitchBytes = alignUp(widthBytes, alignment);
if (pitch != 0)
if (pitch_ != 0)
{
if (!bSkipPitchCheck)
{
ncvAssertPrintReturn(pitch >= pitchBytes &&
(pitch & (alignment - 1)) == 0,
ncvAssertPrintReturn(pitch_ >= pitchBytes &&
(pitch_ & (alignment - 1)) == 0,
"NCVMatrixReuse ctor:: incorrect pitch passed", );
}
else
{
ncvAssertPrintReturn(pitch >= widthBytes, "NCVMatrixReuse ctor:: incorrect pitch passed", );
ncvAssertPrintReturn(pitch_ >= widthBytes, "NCVMatrixReuse ctor:: incorrect pitch passed", );
}
pitchBytes = pitch;
pitchBytes = pitch_;
}
ncvAssertPrintReturn(pitchBytes * height <= memSegment.size, \
ncvAssertPrintReturn(pitchBytes * height_ <= memSegment.size, \
"NCVMatrixReuse ctor:: memory binding failed due to size mismatch", );
this->_width = width;
this->_height = height;
this->_width = width_;
this->_height = height_;
this->_pitch = pitchBytes;
this->_ptr = (T *)memSegment.begin.ptr;
this->_memtype = memSegment.begin.memtype;
......
......@@ -188,7 +188,7 @@ elseif(APPLE)
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
else()
list(APPEND highgui_srcs src/cap_qtkit.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore")
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
endif()
endif()
......
......@@ -445,12 +445,12 @@ class videoDevice{
int nFramesForReconnect;
unsigned long nFramesRunning;
int connection;
int storeConn;
int myID;
long requestedFrameTime; //ie fps
char nDeviceName[255];
WCHAR wDeviceName[255];
unsigned char * pixels;
char * pBuffer;
......@@ -643,7 +643,7 @@ public:
bufferSetup = false;
newFrame = false;
latestBufferLength = 0;
hEvent = CreateEvent(NULL, true, false, NULL);
}
......@@ -655,7 +655,7 @@ public:
DeleteCriticalSection(&critSection);
CloseHandle(hEvent);
if(bufferSetup){
delete pixels;
delete[] pixels;
}
}
......@@ -665,11 +665,11 @@ public:
if(bufferSetup){
return false;
}else{
numBytes = numBytesIn;
pixels = new unsigned char[numBytes];
bufferSetup = true;
newFrame = false;
latestBufferLength = 0;
}
return true;
}
......@@ -796,12 +796,12 @@ void videoDevice::setSize(int w, int h){
}
else
{
width = w;
height = h;
videoSize = w*h*3;
sizeSet = true;
pixels = new unsigned char[videoSize];
pBuffer = new char[videoSize];
memset(pixels, 0 , videoSize);
sgCallback->setupBuffer(videoSize);
......
......@@ -657,7 +657,7 @@ Applies a fixed-level threshold to each array element.
.. ocv:pyoldfunction:: cv.Threshold(src, dst, threshold, maxValue, thresholdType)-> None
:param src: Source array (single-channel, 8-bit of 32-bit floating point).
:param src: Source array (single-channel, 8-bit or 32-bit floating point).
:param dst: Destination array of the same size and type as ``src`` .
......
......@@ -2839,6 +2839,11 @@ void cv::warpAffine( InputArray _src, OutputArray _dst,
CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 2 && M0.cols == 3 );
M0.convertTo(matM, matM.type());
#ifdef HAVE_TEGRA_OPTIMIZATION
if( tegra::warpAffine(src, dst, M, flags, borderType, borderValue) )
return;
#endif
if( !(flags & WARP_INVERSE_MAP) )
{
double D = M[0]*M[4] - M[1]*M[3];
......@@ -2851,22 +2856,6 @@ void cv::warpAffine( InputArray _src, OutputArray _dst,
M[2] = b1; M[5] = b2;
}
#ifdef HAVE_TEGRA_OPTIMIZATION
if (borderType == BORDER_REPLICATE)
{
if( tegra::warpAffine(src, dst, M, interpolation, borderType, borderValue) )
return;
}
else
{
double warp_mat[6];
Mat warp_m(2, 3, CV_64F, warp_mat);
M0.convertTo(warp_m, warp_m.type());
if( tegra::warpAffine(src, dst, warp_mat, interpolation, borderType, borderValue) )
return;
}
#endif
int x, y, x1, y1, width = dst.cols, height = dst.rows;
AutoBuffer<int> _abdelta(width*2);
int* adelta = &_abdelta[0], *bdelta = adelta + width;
......@@ -2995,14 +2984,14 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0,
CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
M0.convertTo(matM, matM.type());
if( !(flags & WARP_INVERSE_MAP) )
invert(matM, matM);
#ifdef HAVE_TEGRA_OPTIMIZATION
if( tegra::warpPerspective(src, dst, M, interpolation, borderType, borderValue) )
if( tegra::warpPerspective(src, dst, M, flags, borderType, borderValue) )
return;
#endif
if( !(flags & WARP_INVERSE_MAP) )
invert(matM, matM);
int x, y, x1, y1, width = dst.cols, height = dst.rows;
int bh0 = std::min(BLOCK_SZ/2, height);
......
......@@ -488,7 +488,7 @@ public:
bool balanced=false );
virtual float predict( const CvMat* sample, bool returnDFVal=false ) const;
virtual float predict( const CvMat* samples, CvMat* results ) const;
virtual float predict( const CvMat* samples, CV_OUT CvMat* results ) const;
#ifndef SWIG
CV_WRAP CvSVM( const cv::Mat& trainData, const cv::Mat& responses,
......@@ -510,6 +510,7 @@ public:
CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE),
bool balanced=false);
CV_WRAP virtual float predict( const cv::Mat& sample, bool returnDFVal=false ) const;
CV_WRAP_AS(predict_all) void predict( cv::InputArray samples, cv::OutputArray results ) const;
#endif
CV_WRAP virtual int get_support_vector_count() const;
......
......@@ -1250,7 +1250,7 @@ CvBoost::update_weights( CvBoostTree* tree )
if( have_subsample )
{
float* values = (float*)cur_buf_pos;
cur_buf_pos = (uchar*)(values + data->buf->step);
cur_buf_pos = (uchar*)(values + data->buf->cols);
uchar* missing = cur_buf_pos;
cur_buf_pos = missing + data->buf->step;
CvMat _sample, _mask;
......
......@@ -2124,6 +2124,12 @@ float CvSVM::predict(const CvMat* samples, CV_OUT CvMat* results) const
return result;
}
void CvSVM::predict( cv::InputArray _samples, cv::OutputArray _results ) const
{
_results.create(_samples.size().height, 1, CV_32F);
CvMat samples = _samples.getMat(), results = _results.getMat();
predict(&samples, &results);
}
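For reference, the newly added overload can be invoked from C++ as sketched below (the query matrix shape and the CV_32F element type are assumptions carried over from the existing single-sample predict(); nQueries and nFeatures are hypothetical):

    CvSVM svm;
    // ... train svm or load a previously saved model here ...
    cv::Mat samples(nQueries, nFeatures, CV_32FC1);  // one query sample per row
    cv::Mat results;
    svm.predict(samples, results);  // fills results (nQueries x 1, CV_32F), one response per sample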
CvSVM::CvSVM( const Mat& _train_data, const Mat& _responses,
const Mat& _var_idx, const Mat& _sample_idx, CvSVMParams _params )
......
......@@ -60,7 +60,7 @@ add_custom_command(
DEPENDS ${opencv_hdrs})
add_library(${the_module} SHARED src2/cv2.cpp ${CMAKE_CURRENT_BINARY_DIR}/generated0.i ${cv2_generated_hdrs} src2/cv2.cv.hpp)
if(PYTHON_DEBUG_LIBRARIES)
if(PYTHON_DEBUG_LIBRARIES AND NOT PYTHON_LIBRARIES MATCHES "optimized.*debug")
target_link_libraries(${the_module} debug ${PYTHON_DEBUG_LIBRARIES} optimized ${PYTHON_LIBRARIES})
else()
target_link_libraries(${the_module} ${PYTHON_LIBRARIES})
......
......@@ -13,7 +13,7 @@ else
endif
LOCAL_SRC_FILES := DetectionBasedTracker_jni.cpp
LOCAL_C_INCLUDES := $(LOCAL_PATH)
LOCAL_C_INCLUDES += $(LOCAL_PATH)
LOCAL_LDLIBS += -llog -ldl
LOCAL_MODULE := detection_based_tacker
......
......@@ -437,13 +437,20 @@ int build_mlp_classifier( char* data_filename,
cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
mlp.create( &layer_sizes );
printf( "Training the classifier (may take a few minutes)...\n");
mlp.train( &train_data, new_responses, 0, 0,
CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,300,0.01),
#if 1
CvANN_MLP_TrainParams::BACKPROP,0.001));
int method = CvANN_MLP_TrainParams::BACKPROP;
double method_param = 0.001;
int max_iter = 300;
#else
CvANN_MLP_TrainParams::RPROP,0.05));
int method = CvANN_MLP_TrainParams::RPROP;
double method_param = 0.1;
int max_iter = 1000;
#endif
mlp.train( &train_data, new_responses, 0, 0,
CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,max_iter,0.01),
method, method_param));
cvReleaseMat( &new_responses );
printf("\n");
}
......
......@@ -86,7 +86,7 @@ void detectAndDisplay( Mat frame )
for( int j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[i].height)*0.25 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
}
}
......
......@@ -190,3 +190,7 @@ def mosaic(w, imgs):
    imgs = it.chain([img0], imgs)
    rows = grouper(w, imgs, pad)
    return np.vstack(map(np.hstack, rows))

def getsize(img):
    h, w = img.shape[:2]
    return w, h
......@@ -76,8 +76,8 @@ if __name__ == '__main__':
img2 = cv2.imread(fn2, 0)
surf = cv2.SURF(1000)
kp1, desc1 = surf.detect(img1, None, False)
kp2, desc2 = surf.detect(img2, None, False)
kp1, desc1 = surf.detectAndCompute(img1, None)
kp2, desc2 = surf.detectAndCompute(img2, None)
desc1.shape = (-1, surf.descriptorSize())
desc2.shape = (-1, surf.descriptorSize())
print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))
......
''' An example of Laplacian Pyramid construction and merging.
Level : Intermediate
Usage : python lappyr.py [<video source>]
References:
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.54.299
Alexander Mordvintsev 6/10/12
'''
import numpy as np
import cv2
import video
from common import nothing, getsize
def build_lappyr(img, leveln=6, dtype=np.int16):
    img = dtype(img)
    levels = []
    for i in xrange(leveln-1):
        next_img = cv2.pyrDown(img)
        img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
        levels.append(img-img1)
        img = next_img
    levels.append(img)
    return levels

def merge_lappyr(levels):
    img = levels[-1]
    for lev_img in levels[-2::-1]:
        img = cv2.pyrUp(img, dstsize=getsize(lev_img))
        img += lev_img
    return np.uint8(np.clip(img, 0, 255))

if __name__ == '__main__':
    import sys
    print __doc__

    try: fn = sys.argv[1]
    except: fn = 0
    cap = video.create_capture(fn)

    leveln = 6
    cv2.namedWindow('level control')
    for i in xrange(leveln):
        cv2.createTrackbar('%d'%i, 'level control', 5, 50, nothing)

    while True:
        ret, frame = cap.read()
        pyr = build_lappyr(frame, leveln)
        for i in xrange(leveln):
            v = cv2.getTrackbarPos('%d'%i, 'level control') / 5
            pyr[i] *= v
        res = merge_lappyr(pyr)
        cv2.imshow('laplacian pyramid filter', res)
        if cv2.waitKey(1) == 27:
            break
......@@ -88,7 +88,7 @@ class SVM(LetterStatModel):
self.model.train(samples, responses, params = params)
def predict(self, samples):
return np.float32( [self.model.predict(s) for s in samples] )
return self.model.predict_all(samples).ravel()
class MLP(LetterStatModel):
......