Commit 891d7da6 authored by Andrey Kamaev

Merge branch '2.4'

parents c5e979ce 2be893a2
.git* export-ignore
* text=auto
* whitespace=!indent,trail,space
*.py text whitespace=tab-in-indent,trail,space,fix
*.cpp text whitespace=tab-in-indent,trail,space,fix
*.hpp text whitespace=tab-in-indent,trail,space,fix
*.cxx text whitespace=tab-in-indent,trail,space,fix
*.hxx text whitespace=tab-in-indent,trail,space,fix
*.mm text whitespace=tab-in-indent,trail,space,fix
*.c text whitespace=tab-in-indent,trail,space,fix
*.h text whitespace=tab-in-indent,trail,space,fix
*.i text whitespace=tab-in-indent,trail,space,fix
*.java text whitespace=tab-in-indent,trail,space,fix
*.cu text whitespace=tab-in-indent,trail,space,fix
*.cl text whitespace=tab-in-indent,trail,space,fix
*.cmake text whitespace=tab-in-indent,trail,space,fix
*.cmakein text whitespace=tab-in-indent,trail,space,fix
*.in text whitespace=tab-in-indent,trail,space,fix
CMakeLists.txt text whitespace=tab-in-indent,trail,space,fix
*.rst text whitespace=tab-in-indent,trail,space,fix
* text=auto whitespace=trailing-space,space-before-tab,-indent-with-non-tab,tab-in-indent,tabwidth=4
*.py text
*.cpp text
*.hpp text
*.cxx text
*.hxx text
*.mm text
*.c text
*.h text
*.i text
*.js text
*.java text
*.scala text
*.cu text
*.cl text
*.css_t text
*.qrc text
*.qss text
*.S text
*.rst text
*.tex text
*.sty text
*.aidl text
*.mk text
*.cmake text whitespace=tabwidth=2
*.cmakein text whitespace=tabwidth=2
*.in text whitespace=tabwidth=2
CMakeLists.txt text whitespace=tabwidth=2
*.png binary
*.jpeg binary
......@@ -32,22 +41,21 @@ CMakeLists.txt text whitespace=tab-in-indent,trail,space,fix
*.a binary
*.so binary
*.dll binary
*.jar binary
*.pdf binary
*.pbxproj binary
*.vec binary
*.doc binary
*.css_t text
*.qrc text
*.qss text
*.S text
*.xml -text
*.yml -text
*.xml -text whitespace=cr-at-eol
*.yml -text whitespace=cr-at-eol
.project -text whitespace=cr-at-eol merge=union
.classpath -text whitespace=cr-at-eol merge=union
.cproject -text whitespace=cr-at-eol merge=union
org.eclipse.jdt.core.prefs -text whitespace=cr-at-eol merge=union
*.vcproj text eol=crlf merge=union
*.cproject text eol=crlf merge=union
*.bat text eol=crlf
*.cmd text eol=crlf
*.cmd.tmpl text eol=crlf
......
......@@ -15,7 +15,7 @@ using namespace android;
const int OpenCVEngine::Platform = DetectKnownPlatforms();
const int OpenCVEngine::CpuID = GetCpuID();
const int OpenCVEngine::KnownVersions[] = {2040000, 2040100, 2040200, 2040300, 2040301, 2040302};
const int OpenCVEngine::KnownVersions[] = {2040000, 2040100, 2040200, 2040300, 2040301, 2040302, 2040400};
bool OpenCVEngine::ValidateVersion(int version)
{
......
......@@ -218,6 +218,66 @@ TEST(OpenCVEngineTest, InstallAndGetVersion)
#endif
#endif
}
TEST(OpenCVEngineTest, GetPathFor2_4_2)
{
sp<IOpenCVEngine> Engine = InitConnect();
Starter.PackageManager->InstalledPackages.clear();
Starter.PackageManager->InstallVersion(2040200, PLATFORM_UNKNOWN, ARCH_ARMv7);
EXPECT_FALSE(NULL == Engine.get());
String16 result = Engine->GetLibPathByVersion(String16("2.4.2"));
EXPECT_STREQ("/data/data/org.opencv.lib_v24_armv7a/lib", String8(result).string());
}
TEST(OpenCVEngineTest, GetPathFor2_4_3)
{
sp<IOpenCVEngine> Engine = InitConnect();
Starter.PackageManager->InstalledPackages.clear();
Starter.PackageManager->InstallVersion(2040300, PLATFORM_UNKNOWN, ARCH_ARMv7);
EXPECT_FALSE(NULL == Engine.get());
String16 result = Engine->GetLibPathByVersion(String16("2.4.3"));
EXPECT_STREQ("/data/data/org.opencv.lib_v24_armv7a/lib", String8(result).string());
}
TEST(OpenCVEngineTest, GetPathFor2_4_3_1)
{
sp<IOpenCVEngine> Engine = InitConnect();
Starter.PackageManager->InstalledPackages.clear();
Starter.PackageManager->InstallVersion(2040301, PLATFORM_UNKNOWN, ARCH_ARMv7);
EXPECT_FALSE(NULL == Engine.get());
String16 result = Engine->GetLibPathByVersion(String16("2.4.3.1"));
EXPECT_STREQ("/data/data/org.opencv.lib_v24_armv7a/lib", String8(result).string());
}
TEST(OpenCVEngineTest, GetPathFor2_4_3_2)
{
sp<IOpenCVEngine> Engine = InitConnect();
Starter.PackageManager->InstalledPackages.clear();
Starter.PackageManager->InstallVersion(2040302, PLATFORM_UNKNOWN, ARCH_ARMv7);
EXPECT_FALSE(NULL == Engine.get());
String16 result = Engine->GetLibPathByVersion(String16("2.4.3.2"));
EXPECT_STREQ("/data/data/org.opencv.lib_v24_armv7a/lib", String8(result).string());
}
TEST(OpenCVEngineTest, GetPathFor2_4_4)
{
sp<IOpenCVEngine> Engine = InitConnect();
Starter.PackageManager->InstalledPackages.clear();
Starter.PackageManager->InstallVersion(2040400, PLATFORM_UNKNOWN, ARCH_ARMv7);
EXPECT_FALSE(NULL == Engine.get());
String16 result = Engine->GetLibPathByVersion(String16("2.4.4"));
EXPECT_STREQ("/data/data/org.opencv.lib_v24_armv7a/lib", String8(result).string());
}
TEST(OpenCVEngineTest, GetPathFor2_4_5)
{
sp<IOpenCVEngine> Engine = InitConnect();
Starter.PackageManager->InstalledPackages.clear();
Starter.PackageManager->InstallVersion(2040500, PLATFORM_UNKNOWN, ARCH_ARMv7);
EXPECT_FALSE(NULL == Engine.get());
String16 result = Engine->GetLibPathByVersion(String16("2.4.5"));
EXPECT_EQ(0, result.size()); // 2.4.5 is not published yet
}
#endif
#ifndef __i386__
......
......@@ -33,8 +33,48 @@ if(CUDA_FOUND)
message(STATUS "CUDA detected: " ${CUDA_VERSION})
set(CUDA_ARCH_BIN "1.1 1.2 1.3 2.0 2.1(2.0) 3.0" CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
set(CUDA_ARCH_PTX "2.0 3.0" CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")
set(_generations "Fermi" "Kepler")
if(NOT CMAKE_CROSSCOMPILING)
list(APPEND _generations "Auto")
endif()
set(CUDA_GENERATION "" CACHE STRING "Build CUDA device code only for specific GPU architecture. Leave empty to build for all architectures.")
if( CMAKE_VERSION VERSION_GREATER "2.8" )
set_property( CACHE CUDA_GENERATION PROPERTY STRINGS "" ${_generations} )
endif()
if(CUDA_GENERATION)
if(NOT ";${_generations};" MATCHES ";${CUDA_GENERATION};")
string(REPLACE ";" ", " _generations "${_generations}")
message(FATAL_ERROR "ERROR: Only the following generations are supported: ${_generations}")
endif()
unset(CUDA_ARCH_BIN CACHE)
unset(CUDA_ARCH_PTX CACHE)
endif()
set(__cuda_arch_ptx "")
if(CUDA_GENERATION STREQUAL "Fermi")
set(__cuda_arch_bin "2.0 2.1(2.0)")
elseif(CUDA_GENERATION STREQUAL "Kepler")
set(__cuda_arch_bin "3.0")
elseif(CUDA_GENERATION STREQUAL "Auto")
execute_process( COMMAND "${CUDA_NVCC_EXECUTABLE}" "${OpenCV_SOURCE_DIR}/cmake/OpenCVDetectCudaArch.cu" "--run"
WORKING_DIRECTORY "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/"
RESULT_VARIABLE _nvcc_res OUTPUT_VARIABLE _nvcc_out
ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT _nvcc_res EQUAL 0)
message(STATUS "Automatic detection of CUDA generation failed. Going to build for all known architectures.")
else()
set(__cuda_arch_bin "${_nvcc_out}")
endif()
endif()
if(NOT DEFINED __cuda_arch_bin)
set(__cuda_arch_bin "1.1 1.2 1.3 2.0 2.1(2.0) 3.0")
set(__cuda_arch_ptx "2.0 3.0")
endif()
set(CUDA_ARCH_BIN ${__cuda_arch_bin} CACHE STRING "Specify 'real' GPU architectures to build binaries for, BIN(PTX) format is supported")
set(CUDA_ARCH_PTX ${__cuda_arch_ptx} CACHE STRING "Specify 'virtual' PTX architectures to build PTX intermediate code for")
string(REGEX REPLACE "\\." "" ARCH_BIN_NO_POINTS "${CUDA_ARCH_BIN}")
string(REGEX REPLACE "\\." "" ARCH_PTX_NO_POINTS "${CUDA_ARCH_PTX}")
......
#include <stdio.h>
int main()
{
int count = 0;
if (cudaSuccess != cudaGetDeviceCount(&count)){return -1;}
if (count == 0) {return -1;}
for (int device = 0; device < count; ++device)
{
cudaDeviceProp prop;
if (cudaSuccess != cudaGetDeviceProperties(&prop, device)){ continue;}
printf("%d.%d ", prop.major, prop.minor);
}
return 0;
}
\ No newline at end of file
......@@ -42,7 +42,7 @@ else
OPENCV_EXTRA_COMPONENTS:=@OPENCV_EXTRA_COMPONENTS_CONFIGMAKE@
endif
ifeq ($(TARGET_ARCH_ABI),mips)
OPENCV_3RDPARTY_COMPONENTS:=@OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE_NO_TBB@
OPENCV_3RDPARTY_COMPONENTS:=@OPENCV_3RDPARTY_COMPONENTS_CONFIGMAKE@
OPENCV_EXTRA_COMPONENTS:=@OPENCV_EXTRA_COMPONENTS_CONFIGMAKE@
endif
endif
......
......@@ -48,10 +48,10 @@ The structure of package contents looks as follows:
::
OpenCV-2.4.3-android-sdk
OpenCV-2.4.4-android-sdk
|_ apk
| |_ OpenCV_2.4.3_binary_pack_armv7a.apk
| |_ OpenCV_2.4.3_Manager_2.0_XXX.apk
| |_ OpenCV_2.4.4_binary_pack_armv7a.apk
| |_ OpenCV_2.4.4_Manager_2.6_XXX.apk
|
|_ doc
|_ samples
......@@ -157,10 +157,10 @@ Get the OpenCV4Android SDK
.. code-block:: bash
unzip ~/Downloads/OpenCV-2.4.3-android-sdk.zip
unzip ~/Downloads/OpenCV-2.4.4-android-sdk.zip
.. |opencv_android_bin_pack| replace:: OpenCV-2.4.3.2-android-sdk.zip
.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.3/OpenCV-2.4.3.2-android-sdk.zip/download
.. |opencv_android_bin_pack| replace:: OpenCV-2.4.4-android-sdk.zip
.. _opencv_android_bin_pack_url: http://sourceforge.net/projects/opencvlibrary/files/opencv-android/2.4.4/OpenCV-2.4.4-android-sdk.zip/download
.. |opencv_android_bin_pack_url| replace:: |opencv_android_bin_pack|
.. |seven_zip| replace:: 7-Zip
.. _seven_zip: http://www.7-zip.org/
......@@ -295,7 +295,7 @@ Well, running samples from Eclipse is very simple:
.. code-block:: sh
:linenos:
<Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.3_Manager_armv7a-neon.apk
<Android SDK path>/platform-tools/adb install <OpenCV4Android SDK path>/apk/OpenCV_2.4.4_Manager_armv7a-neon.apk
.. note:: ``armeabi``, ``armv7a-neon``, ``arm7a-neon-android8``, ``mips`` and ``x86`` stand for
platform targets:
......
......@@ -55,14 +55,14 @@ Manager to access OpenCV libraries externally installed in the target system.
:guilabel:`File -> Import -> Existing project in your workspace`.
Press :guilabel:`Browse` button and locate OpenCV4Android SDK
(:file:`OpenCV-2.4.3-android-sdk/sdk`).
(:file:`OpenCV-2.4.4-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png
:alt: Add dependency from OpenCV library
:align: center
#. In application project add a reference to the OpenCV Java SDK in
:guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.3``.
:guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.4``.
.. image:: images/eclipse_opencv_dependency1.png
:alt: Add dependency from OpenCV library
......@@ -128,27 +128,27 @@ described above.
#. Add the OpenCV library project to your workspace the same way as for the async initialization
above. Use menu :guilabel:`File -> Import -> Existing project in your workspace`,
press :guilabel:`Browse` button and select OpenCV SDK path
(:file:`OpenCV-2.4.3-android-sdk/sdk`).
(:file:`OpenCV-2.4.4-android-sdk/sdk`).
.. image:: images/eclipse_opencv_dependency0.png
:alt: Add dependency from OpenCV library
:align: center
#. In the application project add a reference to the OpenCV4Android SDK in
:guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.3``;
:guilabel:`Project -> Properties -> Android -> Library -> Add` select ``OpenCV Library - 2.4.4``;
.. image:: images/eclipse_opencv_dependency1.png
:alt: Add dependency from OpenCV library
:align: center
#. If your application project **doesn't have a JNI part**, just copy the corresponding OpenCV
native libs from :file:`<OpenCV-2.4.3-android-sdk>/sdk/native/libs/<target_arch>` to your
native libs from :file:`<OpenCV-2.4.4-android-sdk>/sdk/native/libs/<target_arch>` to your
project directory, into the :file:`libs/<target_arch>` folder.
In the case of an application project **with a JNI part**, instead of manually copying the libraries you
need to modify your ``Android.mk`` file:
add the following two code lines after the ``"include $(CLEAR_VARS)"`` and before
``"include path_to_OpenCV-2.4.3-android-sdk/sdk/native/jni/OpenCV.mk"``
``"include path_to_OpenCV-2.4.4-android-sdk/sdk/native/jni/OpenCV.mk"``
.. code-block:: make
:linenos:
......@@ -221,7 +221,7 @@ taken:
.. code-block:: make
include C:\Work\OpenCV4Android\OpenCV-2.4.3-android-sdk\sdk\native\jni\OpenCV.mk
include C:\Work\OpenCV4Android\OpenCV-2.4.4-android-sdk\sdk\native\jni\OpenCV.mk
Should be inserted into the :file:`jni/Android.mk` file **after** this line:
......@@ -382,7 +382,7 @@ result.
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
}
#. Defines that your activity implements CvViewFrameListener interface and fix activity related
#. Declare that your activity implements the ``CvViewFrameListener2`` interface and fix the activity-related
errors by defining the missing methods. For this activity define ``onCreate``, ``onDestroy`` and
``onPause`` and implement them according to the code snippet below. Fix errors by adding the required
imports.
......@@ -423,8 +423,8 @@ result.
public void onCameraViewStopped() {
}
public Mat onCameraFrame(Mat inputFrame) {
return inputFrame;
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
return inputFrame.rgba();
}
#. Run your application on device or emulator.
......@@ -432,7 +432,7 @@ result.
Let's discuss the most important steps. Every Android application with a UI must implement an Activity
and a View. In the first steps we create a blank activity and a default view layout. The simplest
OpenCV-centric application must implement OpenCV initialization, create its own view to show the
preview from camera and implements ``CvViewFrameListener`` interface to get frames from camera and
camera preview, and implement the ``CvViewFrameListener2`` interface to get frames from the camera and
process them.
First of all we create our application view using xml layout. Our layout consists of the only
......@@ -448,8 +448,13 @@ After creating layout we need to implement ``Activity`` class. OpenCV initializa
been already discussed above. In this sample we use asynchronous initialization. Implementation of
``CvCameraViewListener`` interface allows you to add processing steps after frame grabbing from
camera and before its rendering on screen. The most important function is ``onCameraFrame``. It is
callback function and it is called on retrieving frame from camera. The callback input is frame
from camera. RGBA format is used by default. You can change this behavior by ``SetCaptureFormat``
method of ``View`` class. ``Highgui.CV_CAP_ANDROID_COLOR_FRAME_RGBA`` and
``Highgui.CV_CAP_ANDROID_GREY_FRAME`` are supported. It expects that function returns RGBA frame
that will be drawn on the screen.
a callback function and it is called when a frame is retrieved from the camera. The callback input is an
object of the ``CvCameraViewFrame`` class that represents the frame coming from the camera.
.. note::
   Do not save or use a ``CvCameraViewFrame`` object outside of the ``onCameraFrame`` callback. This object
   does not have its own state and its behavior outside the callback is unpredictable!
It provides ``rgba()`` and ``gray()`` methods that return the frame as an RGBA or a single-channel gray-scale
``Mat`` respectively. The ``onCameraFrame`` function is expected to return the RGBA frame that will be
drawn on the screen.
......@@ -4458,6 +4458,26 @@ public:
Ptr<Algorithm> (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const Ptr<Algorithm>&)=0,
const std::string& help=std::string());
void addParam(Algorithm& algo, const char* name,
float& value, bool readOnly=false,
float (Algorithm::*getter)()=0,
void (Algorithm::*setter)(float)=0,
const std::string& help=std::string());
void addParam(Algorithm& algo, const char* name,
unsigned int& value, bool readOnly=false,
unsigned int (Algorithm::*getter)()=0,
void (Algorithm::*setter)(unsigned int)=0,
const std::string& help=std::string());
void addParam(Algorithm& algo, const char* name,
uint64& value, bool readOnly=false,
uint64 (Algorithm::*getter)()=0,
void (Algorithm::*setter)(uint64)=0,
const std::string& help=std::string());
void addParam(Algorithm& algo, const char* name,
uchar& value, bool readOnly=false,
uchar (Algorithm::*getter)()=0,
void (Algorithm::*setter)(uchar)=0,
const std::string& help=std::string());
template<typename _Tp, typename _Base> void addParam(Algorithm& algo, const char* name,
Ptr<_Tp>& value, bool readOnly=false,
Ptr<_Tp> (Algorithm::*getter)()=0,
......@@ -4477,7 +4497,7 @@ protected:
struct CV_EXPORTS Param
{
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9 };
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
Param();
Param(int _type, bool _readonly, int _offset,
......@@ -4572,12 +4592,19 @@ template<> struct ParamType<uint64>
enum { type = Param::UINT64 };
};
template<> struct ParamType<uchar>
{
typedef uchar const_param_type;
typedef uchar member_type;
enum { type = Param::UCHAR };
};
// The CommandLineParser class is designed for command line arguments parsing
class CV_EXPORTS CommandLineParser
{
public:
public:
CommandLineParser(int argc, const char* const argv[], const std::string& keys);
CommandLineParser(const CommandLineParser& parser);
CommandLineParser& operator = (const CommandLineParser& parser);
......@@ -4586,11 +4613,11 @@ public:
template <typename T>
T get(const std::string& name, bool space_delete = true) const
{
{
T val = T();
getByName(name, space_delete, ParamType<T>::type, (void*)&val);
return val;
}
}
template <typename T>
T get(int index, bool space_delete = true) const
......
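A minimal sketch (not part of the commit) of what the new uchar plumbing above enables, assuming the collapsed core sources in this merge wire Param::UCHAR through the generic Algorithm get/set machinery. SimpleBlobDetector's uchar field blobColor is registered via the new addParam overload further down in this diff, so it becomes readable through the reflection interface:
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <cstdio>
int main()
{
    // SimpleBlobDetector::Params::blobColor is a uchar; the features2d init code in this
    // commit registers it with info()->addParam(obj, "blobColor", obj.params.blobColor),
    // which now resolves to the uchar overload and stores it as Param::UCHAR.
    cv::SimpleBlobDetector detector;
    // ParamType<uchar>::type == Param::UCHAR, so the typed getter matches exactly.
    uchar color = detector.get<uchar>("blobColor");
    std::printf("blobColor = %d\n", (int)color);
    return 0;
}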
......@@ -42,6 +42,10 @@
#include "precomp.hpp"
#if defined _M_IX86 && defined _MSC_VER && _MSC_VER < 1700
#pragma float_control(precise, on)
#endif
namespace cv
{
......@@ -1095,6 +1099,7 @@ double cv::invert( InputArray _src, OutputArray _dst, int method )
if( type == CV_32FC1 )
{
double d = det3(Sf);
if( d != 0. )
{
double t[12];
......
......@@ -419,7 +419,9 @@ static void fixCCS( Mat& mat, int cols, int flags )
}
}
#if defined _MSC_VER && _MSC_VER >= 1700
#pragma optimize("", off)
#endif
static void mulComplex( const Mat& src1, const Mat& src2, Mat& dst, int flags )
{
dst.create(src1.rows, src1.cols, src1.type());
......@@ -439,8 +441,8 @@ static void mulComplex( const Mat& src1, const Mat& src2, Mat& dst, int flags )
if( !(flags & CV_DXT_MUL_CONJ) )
for( j = 0; j < cols; j += 2 )
{
double re = (double)a[j]*b[j] - (double)a[j+1]*b[j+1];
double im = (double)a[j+1]*b[j] + (double)a[j]*b[j+1];
double re = (double)a[j]*(double)b[j] - (double)a[j+1]*(double)b[j+1];
double im = (double)a[j+1]*(double)b[j] + (double)a[j]*(double)b[j+1];
c[j] = (float)re;
c[j+1] = (float)im;
......@@ -448,8 +450,8 @@ static void mulComplex( const Mat& src1, const Mat& src2, Mat& dst, int flags )
else
for( j = 0; j < cols; j += 2 )
{
double re = (double)a[j]*b[j] + (double)a[j+1]*b[j+1];
double im = (double)a[j+1]*b[j] - (double)a[j]*b[j+1];
double re = (double)a[j]*(double)b[j] + (double)a[j+1]*(double)b[j+1];
double im = (double)a[j+1]*(double)b[j] - (double)a[j]*(double)b[j+1];
c[j] = (float)re;
c[j+1] = (float)im;
......@@ -482,6 +484,9 @@ static void mulComplex( const Mat& src1, const Mat& src2, Mat& dst, int flags )
}
}
}
#if defined _MSC_VER && _MSC_VER >= 1700
#pragma optimize("", on)
#endif
}
......
......@@ -659,6 +659,7 @@ protected:
virtual void findBlobs(const Mat &image, const Mat &binaryImage, std::vector<Center> &centers) const;
Params params;
AlgorithmInfo* info() const;
};
......
......@@ -125,6 +125,26 @@ CV_INIT_ALGORITHM(GFTTDetector, "Feature2D.GFTT",
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(SimpleBlobDetector, "Feature2D.SimpleBlob",
obj.info()->addParam(obj, "thresholdStep", obj.params.thresholdStep);
obj.info()->addParam(obj, "minThreshold", obj.params.minThreshold);
obj.info()->addParam(obj, "maxThreshold", obj.params.maxThreshold);
obj.info()->addParam_(obj, "minRepeatability", (sizeof(size_t) == sizeof(uint64))?Param::UINT64 : Param::UNSIGNED_INT, &obj.params.minRepeatability, false, 0, 0);
obj.info()->addParam(obj, "minDistBetweenBlobs", obj.params.minDistBetweenBlobs);
obj.info()->addParam(obj, "filterByColor", obj.params.filterByColor);
obj.info()->addParam(obj, "blobColor", obj.params.blobColor);
obj.info()->addParam(obj, "filterByArea", obj.params.filterByArea);
obj.info()->addParam(obj, "maxArea", obj.params.maxArea);
obj.info()->addParam(obj, "filterByCircularity", obj.params.filterByCircularity);
obj.info()->addParam(obj, "maxCircularity", obj.params.maxCircularity);
obj.info()->addParam(obj, "filterByInertia", obj.params.filterByInertia);
obj.info()->addParam(obj, "maxInertiaRatio", obj.params.maxInertiaRatio);
obj.info()->addParam(obj, "filterByConvexity", obj.params.filterByConvexity);
obj.info()->addParam(obj, "maxConvexity", obj.params.maxConvexity);
);
///////////////////////////////////////////////////////////////////////////////////////////////////////////
class CV_EXPORTS HarrisDetector : public GFTTDetector
{
public:
......
......@@ -130,7 +130,7 @@ GPU_TEST_P(HoughCircles, Accuracy)
const bool useRoi = GET_PARAM(2);
const float dp = 2.0f;
const float minDist = 10.0f;
const float minDist = 0.0f;
const int minRadius = 10;
const int maxRadius = 20;
const int cannyThreshold = 100;
......@@ -163,7 +163,7 @@ GPU_TEST_P(HoughCircles, Accuracy)
{
cv::Vec3f gold = circles_gold[j];
if (std::fabs(cur[0] - gold[0]) < minDist && std::fabs(cur[1] - gold[1]) < minDist && std::fabs(cur[2] - gold[2]) < minDist)
if (std::fabs(cur[0] - gold[0]) < 5 && std::fabs(cur[1] - gold[1]) < 5 && std::fabs(cur[2] - gold[2]) < 5)
{
found = true;
break;
......
<?xml version="1.0" encoding="UTF-8"?>
<lint>
<issue id="NewApi">
<ignore path="src\org\opencv\android\JavaCameraView.java" />
</issue>
</lint>
\ No newline at end of file
......@@ -156,9 +156,21 @@ public abstract class CameraBridgeViewBase extends SurfaceView implements Surfac
private CvCameraViewListener mOldStyleListener;
};
/**
* This interface is an abstract representation of a single frame from the camera, passed to the onCameraFrame callback.
* Attention: do not use objects implementing this interface outside of the onCameraFrame callback!
*/
public interface CvCameraViewFrame {
public abstract Mat rgba();
public abstract Mat gray();
/**
* This method returns an RGBA Mat with the frame
*/
public Mat rgba();
/**
* This method returns a single-channel gray-scale Mat with the frame
*/
public Mat gray();
};
public void surfaceChanged(SurfaceHolder arg0, int arg1, int arg2, int arg3) {
......
......@@ -2,7 +2,6 @@ package org.opencv.android;
import java.util.List;
import android.annotation.TargetApi;
import android.content.Context;
import android.graphics.ImageFormat;
import android.graphics.SurfaceTexture;
......@@ -11,7 +10,6 @@ import android.hardware.Camera.PreviewCallback;
import android.os.Build;
import android.util.AttributeSet;
import android.util.Log;
import android.view.SurfaceHolder;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
......@@ -64,7 +62,6 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
Log.d(TAG, "Java camera view ctor");
}
@TargetApi(11)
protected boolean initializeCamera(int width, int height) {
Log.d(TAG, "Initialize java camera");
boolean result = true;
......@@ -154,7 +151,6 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
mSurfaceTexture = new SurfaceTexture(MAGIC_TEXTURE_ID);
getHolder().setType(SurfaceHolder.SURFACE_TYPE_PUSH_BUFFERS);
mCamera.setPreviewTexture(mSurfaceTexture);
} else
mCamera.setPreviewDisplay(null);
......@@ -234,7 +230,6 @@ public class JavaCameraView extends CameraBridgeViewBase implements PreviewCallb
releaseCamera();
}
@TargetApi(Build.VERSION_CODES.FROYO)
public void onPreviewFrame(byte[] frame, Camera arg1) {
Log.i(TAG, "Preview Frame received. Need to create MAT and deliver it to clients");
Log.i(TAG, "Frame size is " + frame.length);
......
......@@ -65,6 +65,32 @@ namespace cv
//CVCL_DEVICE_TYPE_CUSTOM = (1 << 4)
CVCL_DEVICE_TYPE_ALL = 0xFFFFFFFF
};
enum DevMemRW
{
DEVICE_MEM_R_W = 0,
DEVICE_MEM_R_ONLY,
DEVICE_MEM_W_ONLY
};
enum DevMemType
{
DEVICE_MEM_DEFAULT = 0,
DEVICE_MEM_AHP, //alloc host pointer
DEVICE_MEM_UHP, //use host pointer
DEVICE_MEM_CHP, //copy host pointer
DEVICE_MEM_PM //persistent memory
};
//Get the global device memory and read/write type
//return 1 if unified memory system supported, otherwise return 0
CV_EXPORTS int getDevMemType(DevMemRW& rw_type, DevMemType& mem_type);
//Set the global device memory and read/write type,
//the newly generated oclMat will all use this type
//return -1 if the target type is unsupported, otherwise return 0
CV_EXPORTS int setDevMemType(DevMemRW rw_type = DEVICE_MEM_R_W, DevMemType mem_type = DEVICE_MEM_DEFAULT);
//this class contains ocl runtime information
class CV_EXPORTS Info
{
......@@ -227,6 +253,11 @@ namespace cv
// previous data is unreferenced if needed.
void create(int rows, int cols, int type);
void create(Size size, int type);
//! allocates new oclMatrix with specified device memory type.
void createEx(int rows, int cols, int type, DevMemRW rw_type, DevMemType mem_type);
void createEx(Size size, int type, DevMemRW rw_type, DevMemType mem_type);
//! decreases reference counter;
// deallocate the data when reference counter reaches 0.
void release();
......@@ -878,7 +909,7 @@ namespace cv
CV_EXPORTS void HoughCircles(const oclMat& src, oclMat& circles, HoughCirclesBuf& buf, int method, float dp, float minDist, int cannyThreshold, int votesThreshold, int minRadius, int maxRadius, int maxCircles = 4096);
CV_EXPORTS void HoughCirclesDownload(const oclMat& d_circles, OutputArray h_circles);
///////////////////////////////////////// clAmdFft related /////////////////////////////////////////
//! Performs a forward or inverse discrete Fourier transform (1D or 2D) of floating point matrix.
//! Param dft_size is the size of DFT transform.
......@@ -1788,6 +1819,8 @@ namespace cv
const oclMat &bu, const oclMat &bv,
float pos, oclMat &newFrame, oclMat &buf);
//! computes moments of the rasterized shape or a vector of points
CV_EXPORTS Moments ocl_moments(InputArray _array, bool binaryImage);
}
}
#if defined _MSC_VER && _MSC_VER >= 1200
......
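A minimal usage sketch for the device memory controls declared above (getDevMemType, setDevMemType and oclMat::createEx). It assumes an OpenCL device is initialized through the usual cv::ocl::getDevice call; the sizes, the CV_8UC1 type and the choice of DEVICE_MEM_AHP are arbitrary illustrations, not recommendations:
#include <opencv2/ocl/ocl.hpp>
#include <vector>
#include <cstdio>
int main()
{
    using namespace cv::ocl;
    std::vector<Info> infos;
    if (getDevice(infos) == 0)              // assumed to return the number of usable OpenCL devices
        return 0;
    DevMemRW rw; DevMemType mem;
    int unified = getDevMemType(rw, mem);   // 1 if the device shares (unified) memory with the host
    std::printf("unified memory: %d\n", unified);
    // Request host-pointer backed allocations (DEVICE_MEM_AHP, "alloc host pointer");
    // setDevMemType returns -1 if the requested type is unsupported on this device.
    if (setDevMemType(DEVICE_MEM_R_W, DEVICE_MEM_AHP) == 0)
    {
        oclMat a(480, 640, CV_8UC1);                                          // uses the new global defaults
        oclMat b;
        b.createEx(480, 640, CV_8UC1, DEVICE_MEM_R_ONLY, DEVICE_MEM_DEFAULT); // per-matrix override
    }
    return 0;
}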
......@@ -4317,11 +4317,11 @@ INSTANTIATE_TEST_CASE_P(Arithm, Lut, Combine(
Values(false))); // Values(false) is the reserved parameter
INSTANTIATE_TEST_CASE_P(Arithm, Exp, Combine(
Values(CV_32FC1, CV_64FC1),
Values(CV_32FC1, CV_32FC1),
Values(false))); // Values(false) is the reserved parameter
INSTANTIATE_TEST_CASE_P(Arithm, Log, Combine(
Values(CV_32FC1, CV_64FC1),
Values(CV_32FC1, CV_32FC1),
Values(false))); // Values(false) is the reserved parameter
INSTANTIATE_TEST_CASE_P(Arithm, Add, Combine(
......
......@@ -53,6 +53,10 @@ void cv::ocl::dft(const oclMat&, oclMat&, Size, int)
{
CV_Error(CV_StsNotImplemented, "OpenCL DFT is not implemented");
}
namespace cv { namespace ocl {
void fft_teardown();
}}
void cv::ocl::fft_teardown(){}
#else
#include "clAmdFft.h"
namespace cv
......
......@@ -55,13 +55,16 @@ using namespace cv::ocl;
//#define PRINT_KERNEL_RUN_TIME
#define RUN_TIMES 100
#ifndef CL_MEM_USE_PERSISTENT_MEM_AMD
#define CL_MEM_USE_PERSISTENT_MEM_AMD 0
#endif
//#define AMD_DOUBLE_DIFFER
namespace cv
{
namespace ocl
{
extern void fft_teardown();
/*
* The binary caching system to eliminate redundant program source compilation.
* Strictly, this is not a cache because we do not implement evictions right now.
......@@ -69,6 +72,15 @@ namespace cv
*/
std::auto_ptr<ProgramCache> ProgramCache::programCache;
ProgramCache *programCache = NULL;
DevMemType gDeviceMemType = DEVICE_MEM_DEFAULT;
DevMemRW gDeviceMemRW = DEVICE_MEM_R_W;
int gDevMemTypeValueMap[5] = {0,
CL_MEM_ALLOC_HOST_PTR,
CL_MEM_USE_HOST_PTR,
CL_MEM_COPY_HOST_PTR,
CL_MEM_USE_PERSISTENT_MEM_AMD};
int gDevMemRWValueMap[3] = {CL_MEM_READ_WRITE, CL_MEM_READ_ONLY, CL_MEM_WRITE_ONLY};
ProgramCache::ProgramCache()
{
codeCache.clear();
......@@ -110,30 +122,25 @@ namespace cv
}
////////////////////////Common OpenCL specific calls///////////////
//Info::Info()
//{
// oclplatform = 0;
// oclcontext = 0;
// devnum = 0;
//}
//Info::~Info()
//{
// release();
//}
//void Info::release()
//{
// if(oclplatform)
// {
// oclplatform = 0;
// }
// if(oclcontext)
// {
// openCLSafeCall(clReleaseContext(oclcontext));
// }
// devices.empty();
// devName.empty();
//}
struct Info::Impl
int getDevMemType(DevMemRW& rw_type, DevMemType& mem_type)
{
rw_type = gDeviceMemRW;
mem_type = gDeviceMemType;
return Context::getContext()->impl->unified_memory;
}
int setDevMemType(DevMemRW rw_type, DevMemType mem_type)
{
if( (mem_type == DEVICE_MEM_PM && Context::getContext()->impl->unified_memory == 0) ||
mem_type == DEVICE_MEM_UHP ||
mem_type == DEVICE_MEM_CHP )
return -1;
gDeviceMemRW = rw_type;
gDeviceMemType = mem_type;
return 0;
}
struct Info::Impl
{
cl_platform_id oclplatform;
std::vector<cl_device_id> devices;
......@@ -287,11 +294,8 @@ namespace cv
}
void *getoclContext()
{
return &(Context::getContext()->impl->clContext);
}
void *getoclCommandQueue()
......@@ -316,10 +320,16 @@ namespace cv
void openCLMallocPitch(Context *clCxt, void **dev_ptr, size_t *pitch,
size_t widthInBytes, size_t height)
{
openCLMallocPitchEx(clCxt, dev_ptr, pitch, widthInBytes, height, gDeviceMemRW, gDeviceMemType);
}
void openCLMallocPitchEx(Context *clCxt, void **dev_ptr, size_t *pitch,
size_t widthInBytes, size_t height, DevMemRW rw_type, DevMemType mem_type)
{
cl_int status;
*dev_ptr = clCreateBuffer(clCxt->impl->clContext, CL_MEM_READ_WRITE,
*dev_ptr = clCreateBuffer(clCxt->impl->clContext, gDevMemRWValueMap[rw_type]|gDevMemTypeValueMap[mem_type],
widthInBytes * height, 0, &status);
openCLVerifyCall(status);
*pitch = widthInBytes;
......@@ -834,6 +844,11 @@ namespace cv
clcxt->impl->double_support = oclinfo.impl->double_support;
//extra options to recognize compiler options
memcpy(clcxt->impl->extra_options, oclinfo.impl->extra_options, 512);
cl_bool unfymem = false;
openCLSafeCall(clGetDeviceInfo(clcxt->impl->devices, CL_DEVICE_HOST_UNIFIED_MEMORY,
sizeof(cl_bool), (void *)&unfymem, NULL));
if(unfymem)
clcxt->impl->unified_memory = 1;
}
Context::Context()
{
......@@ -850,6 +865,7 @@ namespace cv
impl->double_support = 0;
//extra options to recognize vendor specific fp64 extensions
memset(impl->extra_options, 0, 512);
impl->unified_memory = 0;
programCache = ProgramCache::getProgramCache();
}
......@@ -874,6 +890,7 @@ namespace cv
}
void Info::release()
{
fft_teardown();
if(impl->oclplatform)
{
impl->oclplatform = 0;
......
......@@ -45,6 +45,7 @@
#include <iomanip>
#include "precomp.hpp"
#include "mcwutil.hpp"
using namespace cv;
using namespace cv::ocl;
......@@ -230,73 +231,10 @@ void interpolate::blendFrames(const oclMat &frame0, const oclMat &/*frame1*/, co
void interpolate::bindImgTex(const oclMat &img, cl_mem &texture)
{
cl_image_format format;
int err;
int depth = img.depth();
int channels = img.channels();
switch(depth)
{
case CV_8U:
format.image_channel_data_type = CL_UNSIGNED_INT8;
break;
case CV_32S:
format.image_channel_data_type = CL_UNSIGNED_INT32;
break;
case CV_32F:
format.image_channel_data_type = CL_FLOAT;
break;
default:
throw std::exception();
break;
}
switch(channels)
{
case 1:
format.image_channel_order = CL_R;
break;
case 3:
format.image_channel_order = CL_RGB;
break;
case 4:
format.image_channel_order = CL_RGBA;
break;
default:
throw std::exception();
break;
}
if(texture)
{
openCLFree(texture);
}
#ifdef CL_VERSION_1_2
cl_image_desc desc;
desc.image_type = CL_MEM_OBJECT_IMAGE2D;
desc.image_width = img.step / img.elemSize();
desc.image_height = img.rows;
desc.image_depth = 0;
desc.image_array_size = 1;
desc.image_row_pitch = 0;
desc.image_slice_pitch = 0;
desc.buffer = NULL;
desc.num_mip_levels = 0;
desc.num_samples = 0;
texture = clCreateImage(Context::getContext()->impl->clContext, CL_MEM_READ_WRITE, &format, &desc, NULL, &err);
#else
texture = clCreateImage2D(
Context::getContext()->impl->clContext,
CL_MEM_READ_WRITE,
&format,
img.step / img.elemSize(),
img.rows,
0,
NULL,
&err);
#endif
size_t origin[] = { 0, 0, 0 };
size_t region[] = { img.step / img.elemSize(), img.rows, 1 };
clEnqueueCopyBufferToImage(img.clCxt->impl->clCmdQueue, (cl_mem)img.data, texture, 0, origin, region, 0, NULL, 0);
openCLSafeCall(err);
texture = bindTexture(img);
}
......@@ -203,8 +203,8 @@ __kernel void YUV2RGB(int cols,int rows,int src_step,int dst_step,int channels,
__constant int ITUR_BT_601_CY = 1220542;
__constant int ITUR_BT_601_CUB = 2116026;
__constant int ITUR_BT_601_CUG = -409993;
__constant int ITUR_BT_601_CVG = -852492;
__constant int ITUR_BT_601_CUG = 409993;
__constant int ITUR_BT_601_CVG = 852492;
__constant int ITUR_BT_601_CVR = 1673527;
__constant int ITUR_BT_601_SHIFT = 20;
......@@ -229,7 +229,7 @@ __kernel void YUV2RGBA_NV12(int cols,int rows,int src_step,int dst_step,
int V = usrc[1] - 128;
int ruv = (1 << (ITUR_BT_601_SHIFT - 1)) + ITUR_BT_601_CVR * V;
int guv = (1 << (ITUR_BT_601_SHIFT - 1)) + ITUR_BT_601_CVG * V + ITUR_BT_601_CUG * U;
int guv = (1 << (ITUR_BT_601_SHIFT - 1)) - ITUR_BT_601_CVG * V - ITUR_BT_601_CUG * U;
int buv = (1 << (ITUR_BT_601_SHIFT - 1)) + ITUR_BT_601_CUB * U;
Y1 = max(0, Y1 - 16) * ITUR_BT_601_CY;
......
......@@ -68,6 +68,8 @@ namespace cv
extern const char *operator_setTo;
extern const char *operator_setToM;
extern const char *convertC3C4;
extern DevMemType gDeviceMemType;
extern DevMemRW gDeviceMemRW;
}
}
......@@ -911,7 +913,17 @@ oclMat cv::ocl::oclMat::reshape(int new_cn, int new_rows) const
}
void cv::ocl::oclMat::createEx(Size size, int type, DevMemRW rw_type, DevMemType mem_type)
{
createEx(size.height, size.width, type, rw_type, mem_type);
}
void cv::ocl::oclMat::create(int _rows, int _cols, int _type)
{
createEx(_rows, _cols, _type, gDeviceMemRW, gDeviceMemType);
}
void cv::ocl::oclMat::createEx(int _rows, int _cols, int _type, DevMemRW rw_type, DevMemType mem_type)
{
clCxt = Context::getContext();
/* core logic */
......@@ -936,7 +948,7 @@ void cv::ocl::oclMat::create(int _rows, int _cols, int _type)
size_t esz = elemSize();
void *dev_ptr;
openCLMallocPitch(clCxt, &dev_ptr, &step, GPU_MATRIX_MALLOC_STEP(esz * cols), rows);
openCLMallocPitchEx(clCxt, &dev_ptr, &step, GPU_MATRIX_MALLOC_STEP(esz * cols), rows, rw_type, mem_type);
//openCLMallocPitch(clCxt,&dev_ptr, &step, esz * cols, rows);
if (esz * cols == step)
......
......@@ -217,6 +217,36 @@ namespace cv
{
openCLFree(texture);
}
bool support_image2d(Context *clCxt)
{
static const char * _kernel_string = "__kernel void test_func(image2d_t img) {}";
static bool _isTested = false;
static bool _support = false;
if(_isTested)
{
return _support;
}
try
{
cv::ocl::openCLGetKernelFromSource(clCxt, &_kernel_string, "test_func");
_support = true;
}
catch (const cv::Exception& e)
{
if(e.code == -217)
{
_support = false;
}
else
{
// throw e once again
throw e;
}
}
_isTested = true;
return _support;
}
}//namespace ocl
}//namespace cv
......
......@@ -69,6 +69,10 @@ namespace cv
// 2. for faster clamping, there is no buffer padding for the constructed texture
cl_mem bindTexture(const oclMat &mat);
void releaseTexture(cl_mem& texture);
// returns whether the current context supports image2d_t format or not
bool support_image2d(Context *clCxt = Context::getContext());
}//namespace ocl
}//namespace cv
......
......@@ -93,6 +93,8 @@ namespace cv
///////////////////////////OpenCL call wrappers////////////////////////////
void openCLMallocPitch(Context *clCxt, void **dev_ptr, size_t *pitch,
size_t widthInBytes, size_t height);
void openCLMallocPitchEx(Context *clCxt, void **dev_ptr, size_t *pitch,
size_t widthInBytes, size_t height, DevMemRW rw_type, DevMemType mem_type);
void openCLMemcpy2D(Context *clCxt, void *dst, size_t dpitch,
const void *src, size_t spitch,
size_t width, size_t height, enum openCLMemcpyKind kind, int channels = -1);
......@@ -141,6 +143,7 @@ namespace cv
//extra options to recognize vendor specific fp64 extensions
char extra_options[512];
std::string Binpath;
int unified_memory; //1 means integrated GPU, otherwise this value is 0
};
}
}
......
......@@ -573,8 +573,9 @@ static void lkSparse_run(oclMat &I, oclMat &J,
Context *clCxt = I.clCxt;
int elemCntPerRow = I.step / I.elemSize();
std::string kernelName = "lkSparse";
size_t localThreads[3] = { 8, 8, 1 };
size_t globalThreads[3] = { 8 * ptcount, 8, 1};
bool isImageSupported = support_image2d();
size_t localThreads[3] = { 8, isImageSupported ? 8 : 32, 1 };
size_t globalThreads[3] = { 8 * ptcount, isImageSupported ? 8 : 32, 1};
int cn = I.oclchannels();
char calcErr;
if (level == 0)
......@@ -587,8 +588,9 @@ static void lkSparse_run(oclMat &I, oclMat &J,
}
std::vector<std::pair<size_t , const void *> > args;
cl_mem ITex = bindTexture(I);
cl_mem JTex = bindTexture(J);
cl_mem ITex = isImageSupported ? bindTexture(I) : (cl_mem)I.data;
cl_mem JTex = isImageSupported ? bindTexture(J) : (cl_mem)J.data;
args.push_back( std::make_pair( sizeof(cl_mem), (void *)&ITex ));
args.push_back( std::make_pair( sizeof(cl_mem), (void *)&JTex ));
......@@ -601,6 +603,8 @@ static void lkSparse_run(oclMat &I, oclMat &J,
args.push_back( std::make_pair( sizeof(cl_int), (void *)&level ));
args.push_back( std::make_pair( sizeof(cl_int), (void *)&I.rows ));
args.push_back( std::make_pair( sizeof(cl_int), (void *)&I.cols ));
if (!isImageSupported)
args.push_back( std::make_pair( sizeof(cl_int), (void *)&elemCntPerRow ) );
args.push_back( std::make_pair( sizeof(cl_int), (void *)&patch.x ));
args.push_back( std::make_pair( sizeof(cl_int), (void *)&patch.y ));
args.push_back( std::make_pair( sizeof(cl_int), (void *)&cn ));
......@@ -609,19 +613,14 @@ static void lkSparse_run(oclMat &I, oclMat &J,
args.push_back( std::make_pair( sizeof(cl_int), (void *)&iters ));
args.push_back( std::make_pair( sizeof(cl_char), (void *)&calcErr ));
try
if(isImageSupported)
{
openCLExecuteKernel2(clCxt, &pyrlk, kernelName, globalThreads, localThreads, args, I.oclchannels(), I.depth(), CLFLUSH);
}
catch(Exception&)
{
printf("Warning: The image2d_t is not supported by the device. Using alternative method!\n");
releaseTexture(ITex);
releaseTexture(JTex);
ITex = (cl_mem)I.data;
JTex = (cl_mem)J.data;
localThreads[1] = globalThreads[1] = 32;
args.insert( args.begin()+11, std::make_pair( sizeof(cl_int), (void *)&elemCntPerRow ) );
}
else
{
openCLExecuteKernel2(clCxt, &pyrlk_no_image, kernelName, globalThreads, localThreads, args, I.oclchannels(), I.depth(), CLFLUSH);
}
}
......@@ -723,7 +722,7 @@ static void lkDense_run(oclMat &I, oclMat &J, oclMat &u, oclMat &v,
oclMat &prevU, oclMat &prevV, oclMat *err, Size winSize, int iters)
{
Context *clCxt = I.clCxt;
bool isImageSupported = clCxt->impl->devName.find("Intel(R) HD Graphics") == std::string::npos;
bool isImageSupported = support_image2d();
int elemCntPerRow = I.step / I.elemSize();
std::string kernelName = "lkDense";
......
#include "precomp.hpp"
#include <iomanip>
#include "opencv2/imgproc/imgproc_c.h"
#ifdef HAVE_OPENCL
using namespace cv;
using namespace cv::ocl;
using namespace cvtest;
using namespace testing;
using namespace std;
extern string workdir;
PARAM_TEST_CASE(MomentsTestBase, MatType, bool)
{
int type;
cv::Mat mat1;
bool test_contours;
virtual void SetUp()
{
type = GET_PARAM(0);
test_contours = GET_PARAM(1);
cv::RNG &rng = TS::ptr()->get_rng();
cv::Size size(10*MWIDTH, 10*MHEIGHT);
mat1 = randomMat(rng, size, type, 5, 16, false);
}
void Compare(Moments& cpu, Moments& gpu)
{
Mat gpu_dst, cpu_dst;
HuMoments(cpu, cpu_dst);
HuMoments(gpu, gpu_dst);
EXPECT_MAT_NEAR(gpu_dst,cpu_dst, .5, "");
}
};
struct ocl_Moments : MomentsTestBase {};
TEST_P(ocl_Moments, Mat)
{
bool binaryImage = 0;
SetUp();
for(int j = 0; j < LOOP_TIMES; j++)
{
if(test_contours)
{
Mat src = imread( workdir + "../cpp/pic3.png", 1 );
Mat src_gray, canny_output;
cvtColor( src, src_gray, CV_BGR2GRAY );
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Canny( src_gray, canny_output, 100, 200, 3 );
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
for( size_t i = 0; i < contours.size(); i++ )
{
Moments m = moments( contours[i], false );
Moments dm = ocl::ocl_moments( contours[i], false );
Compare(m, dm);
}
}
cv::_InputArray _array(mat1);
cv::Moments CvMom = cv::moments(_array, binaryImage);
cv::Moments oclMom = cv::ocl::ocl_moments(_array, binaryImage);
Compare(CvMom, oclMom);
}
}
INSTANTIATE_TEST_CASE_P(Moments, ocl_Moments, Combine(
Values(CV_8UC1, CV_16UC1, CV_16SC1, CV_64FC1), Values(true,false)));
#endif // HAVE_OPENCL
......@@ -37,10 +37,12 @@ if __name__ == "__main__":
parser.add_option("", "--module", dest="module", default=None, metavar="NAME", help="module prefix for test names")
parser.add_option("", "--columns", dest="columns", default=None, metavar="NAMES", help="comma-separated list of column aliases")
parser.add_option("", "--no-relatives", action="store_false", dest="calc_relatives", default=True, help="do not output relative values")
parser.add_option("", "--with-cycles-reduction", action="store_true", dest="calc_cr", default=False, help="alos output cycle reduction percentages")
parser.add_option("", "--with-cycles-reduction", action="store_true", dest="calc_cr", default=False, help="output cycle reduction percentages")
parser.add_option("", "--with-score", action="store_true", dest="calc_score", default=False, help="output automatic classification of speedups")
parser.add_option("", "--show-all", action="store_true", dest="showall", default=False, help="also include empty and \"notrun\" lines")
parser.add_option("", "--match", dest="match", default=None)
parser.add_option("", "--match-replace", dest="match_replace", default="")
parser.add_option("", "--regressions-only", dest="regressionsOnly", default=None, metavar="X-FACTOR", help="show only tests with performance regressions not")
(options, args) = parser.parse_args()
options.generateHtml = detectHtmlOutputType(options.format)
......@@ -106,6 +108,7 @@ if __name__ == "__main__":
# build table
getter = metrix_table[options.metric][1]
getter_score = metrix_table["score"][1]
if options.calc_relatives:
getter_p = metrix_table[options.metric + "%"][1]
if options.calc_cr:
......@@ -129,6 +132,11 @@ if __name__ == "__main__":
for set in metric_sets:
tbl.newColumn(str(i) + "%", getSetName(set, i, options.columns) + "\nvs\n" + getSetName(test_sets[0], 0, options.columns) + "\n(x-factor)", align = "center", cssclass = "col_rel")
i += 1
if options.calc_score:
i = 1
for set in metric_sets:
tbl.newColumn(str(i) + "S", getSetName(set, i, options.columns) + "\nvs\n" + getSetName(test_sets[0], 0, options.columns) + "\n(score)", align = "center", cssclass = "col_name")
i += 1
# rows
prevGroupName = None
......@@ -157,6 +165,8 @@ if __name__ == "__main__":
tbl.newCell(str(i) + "%", "-")
if options.calc_cr and i > 0:
tbl.newCell(str(i) + "$", "-")
if options.calc_score and i > 0:
tbl.newCell(str(i) + "$", "-")
else:
status = case.get("status")
if status != "run":
......@@ -167,6 +177,8 @@ if __name__ == "__main__":
tbl.newCell(str(i) + "%", "-", color = "red")
if options.calc_cr and i > 0:
tbl.newCell(str(i) + "$", "-", color = "red")
if options.calc_score and i > 0:
tbl.newCell(str(i) + "S", "-", color = "red")
else:
val = getter(case, cases[0], options.units)
if options.calc_relatives and i > 0 and val:
......@@ -177,6 +189,10 @@ if __name__ == "__main__":
valcr = getter_cr(case, cases[0], options.units)
else:
valcr = None
if options.calc_score and i > 0 and val:
val_score = getter_score(case, cases[0], options.units)
else:
val_score = None
if not valp or i == 0:
color = None
elif valp > 1.05:
......@@ -192,9 +208,23 @@ if __name__ == "__main__":
tbl.newCell(str(i) + "%", formatValue(valp, "%"), valp, color = color, bold = color)
if options.calc_cr and i > 0:
tbl.newCell(str(i) + "$", formatValue(valcr, "$"), valcr, color = color, bold = color)
if options.calc_score and i > 0:
tbl.newCell(str(i) + "S", formatValue(val_score, "S"), val_score, color = color, bold = color)
if not needNewRow:
tbl.trimLastRow()
if options.regressionsOnly:
for r in reversed(range(len(tbl.rows))):
delete = True
i = 1
for set in metric_sets:
val = tbl.rows[r].cells[len(tbl.rows[r].cells)-i].value
if val is not None and val < float(options.regressionsOnly):
delete = False
i += 1
if (delete):
tbl.rows.pop(r)
# output table
if options.generateHtml:
if options.format == "moinwiki":
......@@ -205,3 +235,6 @@ if __name__ == "__main__":
htmlPrintFooter(sys.stdout)
else:
tbl.consolePrintTable(sys.stdout)
if options.regressionsOnly:
sys.exit(len(tbl.rows))
#!/usr/bin/env python
import sys, re, os.path, cgi, stat
import sys, re, os.path, cgi, stat, math
from optparse import OptionParser
from color import getColorizer
......@@ -627,6 +627,21 @@ def getCycleReduction(test, test0, metric):
return None
return (1.0-float(val)/val0)*100
def getScore(test, test0, metric):
if not test or not test0:
return None
m0 = float(test.get("gmean", None))
m1 = float(test0.get("gmean", None))
if m0 == 0 or m1 == 0:
return None
s0 = float(test.get("gstddev", None))
s1 = float(test0.get("gstddev", None))
s = math.sqrt(s0*s0 + s1*s1)
m0 = math.log(m0)
m1 = math.log(m1)
if s == 0:
return None
return (m0-m1)/s
metrix_table = \
{
......@@ -655,6 +670,8 @@ metrix_table = \
"median$": ("Median (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "median")),
"stddev$": ("Standard deviation (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "stddev")),
"gstddev$": ("Standard deviation of Ln(time) (cycle reduction)", lambda test,test0,units: getCycleReduction(test, test0, "gstddev")),
"score": ("SCORE", lambda test,test0,units: getScore(test, test0, "gstddev")),
}
def formatValue(val, metric, units = None):
......@@ -664,6 +681,18 @@ def formatValue(val, metric, units = None):
return "%.2f" % val
if metric.endswith("$"):
return "%.2f%%" % val
if metric.endswith("S"):
if val > 3.5:
return "SLOWER"
if val < -3.5:
return "FASTER"
if val > -1.5 and val < 1.5:
return " "
if val < 0:
return "faster"
if val > 0:
return "slower"
#return "%.4f" % val
return "%.3f %s" % (val, units)
if __name__ == "__main__":
......
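For reference, the getScore metric added above is a normalized difference of log geometric means: with test as the candidate column and test0 as the reference column,

score = \frac{\ln(\mathrm{gmean}) - \ln(\mathrm{gmean}_0)}{\sqrt{\mathrm{gstddev}^2 + \mathrm{gstddev}_0^2}}

so positive values mean the candidate is slower. With made-up numbers, gmean = 12 ms against gmean_0 = 10 ms and gstddev = gstddev_0 = 0.05 gives ln(1.2) / sqrt(0.005) ≈ 0.182 / 0.071 ≈ 2.6, which the "S" formatter above renders as "slower" (between the 1.5 and 3.5 thresholds).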
......@@ -68,7 +68,6 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
private float mBuff[];
private Mat mRgbaInnerWindow;
private Mat mGrayInnerWindow;
private Mat mBlurWindow;
private Mat mZoomWindow;
private Mat mZoomCorner;
private Mat mSepiaKernel;
......@@ -220,9 +219,6 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
if (mGrayInnerWindow == null && !mGray.empty())
mGrayInnerWindow = mGray.submat(top, top + height, left, left + width);
if (mBlurWindow == null)
mBlurWindow = mRgba.submat(0, rows, cols / 3, cols * 2 / 3);
if (mZoomCorner == null)
mZoomCorner = mRgba.submat(0, rows / 2 - rows / 10, 0, cols / 2 - cols / 10);
......@@ -236,8 +232,6 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
mZoomWindow.release();
if (mZoomCorner != null)
mZoomCorner.release();
if (mBlurWindow != null)
mBlurWindow.release();
if (mGrayInnerWindow != null)
mGrayInnerWindow.release();
if (mRgbaInnerWindow != null)
......@@ -254,7 +248,6 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
mIntermediateMat = null;
mRgbaInnerWindow = null;
mGrayInnerWindow = null;
mBlurWindow = null;
mZoomCorner = null;
mZoomWindow = null;
}
......@@ -327,7 +320,9 @@ public class ImageManipulationsActivity extends Activity implements CvCameraView
break;
case ImageManipulationsActivity.VIEW_MODE_SEPIA:
Core.transform(mRgba, mRgba, mSepiaKernel);
if ((mRgbaInnerWindow == null) || (mRgba.cols() != mSizeRgba.width) || (mRgba.height() != mSizeRgba.height))
CreateAuxiliaryMats();
Core.transform(mRgbaInnerWindow, mRgbaInnerWindow, mSepiaKernel);
break;
case ImageManipulationsActivity.VIEW_MODE_ZOOM:
......