Merged the trunk r8595:8668 (except iOS and new gpu functionality)
@@ -36,13 +36,13 @@
# ANDROID_NDK=/opt/android-ndk - path to the NDK root.
# Can be set as environment variable. Can be set only at first cmake run.
#
# ANDROID_STANDALONE_TOOLCHAIN=/opt/android-toolchain - path to the
# ANDROID_STANDALONE_TOOLCHAIN=/opt/android-toolchain - path to the
# standalone toolchain. This option is not used if full NDK is found
# (ignored if ANDROID_NDK is set).
# Can be set as environment variable. Can be set only at first cmake run.
#
# ANDROID_ABI=armeabi-v7a - specifies the target Application Binary
# Interface (ABI). This option nearly matches to the APP_ABI variable
# Interface (ABI). This option nearly matches to the APP_ABI variable
# used by ndk-build tool from Android NDK.
# Possible values are:
# "armeabi" - matches to the NDK ABI with the same name.

@@ -94,10 +94,10 @@
# The flags will be prepopulated with critical flags, so don't loose them.
# Also be aware that toolchain also sets configuration-specific compiler
# flags and linker flags.
#
# ANDROID and BUILD_ANDROID will be set to true, you may test any of these
#
# ANDROID and BUILD_ANDROID will be set to true, you may test any of these
# variables to make necessary Android-specific configuration changes.
#
#
# Also ARMEABI or ARMEABI_V7A or X86 will be set true, mutually exclusive.
# NEON option will be set true if VFP is set to NEON.
#

@@ -131,7 +131,7 @@
# cmake pass
# [~] toolchain exits with error if ARM_TARGET is not recognized
# - modified June 2011
# [~] default NDK path is updated for version r5c
# [~] default NDK path is updated for version r5c
# [+] variable CMAKE_SYSTEM_PROCESSOR is set based on ARM_TARGET
# [~] toolchain install directory is added to linker paths
# [-] removed SWIG-related stuff from toolchain

@@ -705,12 +705,14 @@ set( ANDROID_SYSTEM_LIB_DIRS "" )
set( LIBRARY_OUTPUT_PATH_ROOT ${CMAKE_SOURCE_DIR} CACHE PATH "root for library output, set this to change where android libs are installed to" )
set( CMAKE_INSTALL_PREFIX "${ANDROID_TOOLCHAIN_ROOT}/user" CACHE STRING "path for installing" )

if( EXISTS "${CMAKE_SOURCE_DIR}/jni/CMakeLists.txt" )
set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin/${ANDROID_NDK_ABI_NAME}" CACHE PATH "Output directory for applications" )
else()
set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin" CACHE PATH "Output directory for applications" )
if(NOT _CMAKE_IN_TRY_COMPILE)
if( EXISTS "${CMAKE_SOURCE_DIR}/jni/CMakeLists.txt" )
set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin/${ANDROID_NDK_ABI_NAME}" CACHE PATH "Output directory for applications" )
else()
set( EXECUTABLE_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/bin" CACHE PATH "Output directory for applications" )
endif()
set( LIBRARY_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME}" CACHE PATH "path for android libs" )
endif()
set( LIBRARY_OUTPUT_PATH "${LIBRARY_OUTPUT_PATH_ROOT}/libs/${ANDROID_NDK_ABI_NAME}" CACHE PATH "path for android libs" )

#includes
list( APPEND ANDROID_SYSTEM_INCLUDE_DIRS "${ANDROID_SYSROOT}/usr/include" )

@@ -960,7 +962,7 @@ endif()
set( ANDROID True )
set( BUILD_ANDROID True )

# where is the target environment
# where is the target environment
set( CMAKE_FIND_ROOT_PATH "${ANDROID_TOOLCHAIN_ROOT}/bin" "${ANDROID_TOOLCHAIN_ROOT}/${ANDROID_TOOLCHAIN_MACHINE_NAME}" "${ANDROID_SYSROOT}" "${CMAKE_INSTALL_PREFIX}" "${CMAKE_INSTALL_PREFIX}/share" )

# only search for libraries and includes in the ndk toolchain

@@ -1053,7 +1055,7 @@ endif()
# ANDROID_ABI : "armeabi-v7a" (default), "armeabi", "armeabi-v7a with NEON", "armeabi-v7a with VFPV3", "armeabi-v6 with VFP", "x86", "mips"
# ANDROID_NATIVE_API_LEVEL : 3,4,5,8,9,14 (depends on NDK version)
# ANDROID_SET_OBSOLETE_VARIABLES : ON/OFF
# ANDROID_USE_STLPORT : OFF/ON - EXPERIMENTAL!!!
# ANDROID_USE_STLPORT : OFF/ON - EXPERIMENTAL!!!
# ANDROID_FORBID_SYGWIN : ON/OFF
# ANDROID_NO_UNDEFINED : ON/OFF
# ANDROID_SO_UNDEFINED : OFF/ON (default depends on NDK version)
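For reference (an illustrative sketch, not part of the diff): these options are normally passed on the cmake command line together with the toolchain file at the first cmake run, or set as environment variables. The paths below are placeholders and only variables documented above are used:

.. code-block:: bash

   cd <my_project>
   mkdir build && cd build
   cmake -DCMAKE_TOOLCHAIN_FILE=<path_to>/android.toolchain.cmake \
         -DANDROID_NDK=/opt/android-ndk \
         -DANDROID_ABI="armeabi-v7a with NEON" \
         -DANDROID_NATIVE_API_LEVEL=8 ..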
@@ -5,14 +5,15 @@
Using Android binary package with Eclipse
*****************************************

This tutorial was tested using Ubuntu 10.04 and Windows 7 SP1 operating systems. Nevertheless, it should also work on any other **OS**\ es supported by Android SDK (including Mac OS X). If you encounter errors after following the steps described here, feel free to contact us via *android-opencv* discussion group https://groups.google.com/group/android-opencv/ and we will try to help you.
This tutorial was tested using Ubuntu 10.04 and Windows 7 SP1 operating systems.
Nevertheless, it should also work on any other **OS**\ es supported by Android SDK (including Mac OS X).
If you encounter errors after following the steps described here, feel free to contact us via `android-opencv <https://groups.google.com/group/android-opencv/>`_ discussion group and we will try to help you.

Quick environment setup for Android development
===============================================

If you are making a clean environment installation then you can try **T**\ egra **A**\ ndroid **D**\ evelopment **P**\ ack (*TADP*) released by *NVIDIA*:

http://developer.nvidia.com/tegra-android-development-pack
If you are making a clean environment installation then you can try `Tegra Android Development Pack <http://developer.nvidia.com/tegra-android-development-pack>`_
(**TADP**) released by **NVIDIA**:

It will cover all of the environment set up automatically and you can go to the next step :ref:`Get_the_OpenCV_package_for_Android_development` right after automatic setup.

@@ -45,9 +46,10 @@ You need the following tools to be installed:

#. **Sun JDK 6**

Visit http://www.oracle.com/technetwork/java/javase/downloads/index.html and download installer for your OS.
Visit `Java SE Downloads page <http://www.oracle.com/technetwork/java/javase/downloads/>`_ and download installer for your OS.

Here is a detailed :abbr:`JDK (Java Development Kit)` installation guide for Ubuntu and Mac OS: http://source.android.com/source/initializing.html#installing-the-jdk (only JDK sections are applicable for OpenCV)
Here is a detailed :abbr:`JDK (Java Development Kit)` `installation guide <http://source.android.com/source/initializing.html#installing-the-jdk>`_
for Ubuntu and Mac OS (only JDK sections are applicable for OpenCV)

.. note:: OpenJDK is not usable for Android development because Android SDK supports only Sun JDK.
If you use Ubuntu, after installation of Sun JDK you should run the following command to set Sun java environment:

@@ -58,9 +60,9 @@ You need the following tools to be installed:

#. **Android SDK**

Get the latest Android SDK from http://developer.android.com/sdk/index.html
Get the latest ``Android SDK`` from http://developer.android.com/sdk/index.html

Here is Google's install guide for SDK http://developer.android.com/sdk/installing.html
Here is Google's `install guide <http://developer.android.com/sdk/installing.html>`_ for SDK.

.. note:: If you choose SDK packed into Windows installer, then you should have 32-bit JRE installed. It is not needed for Android development, but installer is x86 application and requires 32-bit Java runtime.

@@ -80,36 +82,34 @@ You need the following tools to be installed:

You need the following SDK components to be installed:

* *Android SDK Tools, revision12* or newer
* *Android SDK Tools, revision14* or newer

Older revisions should also work, but they are not recommended.

* *SDK Platform Android 2.2, API 8, revision 2* (also known as *android-8*)

This is minimal platform supported by OpenCV Java API. And it is set as default for OpenCV distribution. It is possible to use newer platform with OpenCV package, but it requires to edit OpenCV project settings.
* *SDK Platform Android 3.0, API 11* (also known as *android-11*)

The minimal platform supported by OpenCV Java API is **Android 2.2** (API 8). This is also the minimum API Level required for the provided samples to run.
See the ``<uses-sdk android:minSdkVersion="8"/>`` tag in their **AndroidManifest.xml** files.
But for successful compilation of some samples the **target** platform should be set to Android 3.0 (API 11) or higher. It will not block them from running on Android 2.2+.

.. image:: images/android_sdk_and_avd_manager.png
:height: 400px
:alt: Android SDK and AVD manager
:height: 500px
:alt: Android SDK Manager
:align: center

See `Adding SDK Components
<http://developer.android.com/sdk/adding-components.html>`_ for help with installing/updating SDK components.
See `Adding SDK Components <http://developer.android.com/sdk/adding-components.html>`_ for help with installing/updating SDK components.

#. **Eclipse IDE**

Check the `Android SDK System Requirements
<http://developer.android.com/sdk/requirements.html>`_ document for a list of Eclipse versions that are compatible with the Android SDK.
For OpenCV 2.4.0 we recommend Eclipse 3.6 (Helios) or later versions. They work well for OpenCV under both Windows and Linux.
Check the `Android SDK System Requirements <http://developer.android.com/sdk/requirements.html>`_ document for a list of Eclipse versions that are compatible with the Android SDK.
For OpenCV 2.4.0 we recommend Eclipse 3.7 (Indigo) or later versions. They work well for OpenCV under both Windows and Linux.

If you have no Eclipse installed, you can download it from this location:

http://www.eclipse.org/downloads/
If you have no Eclipse installed, you can get it from the `download page <http://www.eclipse.org/downloads/>`_.

#. **ADT plugin for Eclipse**

This instruction is copied from http://developer.android.com/sdk/eclipse-adt.html#downloading
. Please, visit that page if you have any troubles with ADT plugin installation.
This instruction is copied from `Android Developers site <http://developer.android.com/sdk/eclipse-adt.html>`_.
Please, visit `that page <http://developer.android.com/sdk/eclipse-adt.html#downloading>`_ if you have any troubles with :abbr:`ADT(Android Development Tools)` plugin installation.

Assuming that you have Eclipse IDE installed, as described above, follow these steps to download and install the ADT plugin:

@@ -136,7 +136,7 @@ You need the following tools to be installed:
Get the OpenCV package for Android development
==============================================

#. Go to the http://sourceforge.net/projects/opencvlibrary/files/opencv-android/ and download the latest available version. Currently it is |opencv_android_bin_pack_url|_
#. Go to the `OpenCV download page on SourceForge <http://sourceforge.net/projects/opencvlibrary/files/opencv-android/>`_ and download the latest available version. Currently it is |opencv_android_bin_pack_url|_

#. Create new folder for Android+OpenCV development.

@@ -175,23 +175,29 @@ Open OpenCV library and samples in Eclipse
:alt: Choosing C:\Work\android-opencv\ as workspace location
:align: center

#. Configure your ADT plugin
#. Configure your ADT plugin (if needed)

.. important:: ADT plugin settings are workspace-dependent. So you have to repeat this step each time when you create a new workspace.
.. important:: In most cases the ADT plugin finds Android SDK automatically, but sometimes it fails and shows the following prompt:

Once you have created a new workspace, you have to point the ADT plugin to the Android SDK directory. This setting is stored in workspace metadata, as result this step is required each time when you are creating new workspace for Android development. See `Configuring the ADT Plugin
<http://developer.android.com/sdk/eclipse-adt.html#configuring>`_ document for the original instructions from *Google*.
.. image:: images/eclipse_1a_locate_sdk.png
:alt: Locating Android SDK
:align: center

Select :guilabel:`Use existing SDKs` option, browse for Android SDK folder and click :guilabel:`Finish`.

To make sure the SDK folder is set correctly do the following step taken from `Configuring the ADT Plugin <http://developer.android.com/sdk/eclipse-adt.html#configuring>`_ document from *Google*:

* Select :menuselection:`Window --> Preferences...` to open the Preferences panel (Mac OS X: :menuselection:`Eclipse --> Preferences`):

.. image:: images/eclipse_2_window_preferences.png
:height: 400px
:alt: Select Window > Preferences...
:align: center

* Select :guilabel:`Android` from the left panel.

You may see a dialog asking whether you want to send usage statistics to *Google*. If so, make your choice and click :guilabel:`Proceed`. You cannot continue with this procedure until you click :guilabel:`Proceed`.
You may see a dialog asking whether you want to send usage statistics to *Google*. If so, make your choice and click :guilabel:`Proceed`. You cannot continue with this procedure until you click :guilabel:`Proceed`.

If the SDK folder isn't set you'll see the following:

.. image:: images/eclipse_3_preferences_android.png
:alt: Select Android from the left panel

@@ -199,7 +205,9 @@ Open OpenCV library and samples in Eclipse

* For the SDK Location in the main panel, click :guilabel:`Browse...` and locate your Android SDK directory.

* Click :guilabel:`Apply` button at the bottom right corner of main panel:
* Click :guilabel:`Apply` button at the bottom right corner of main panel.

If the SDK folder is already set correctly you'll see something like this:

.. image:: images/eclipse_4_locate_sdk.png
:alt: Locate Android SDK

@@ -245,6 +253,20 @@ Open OpenCV library and samples in Eclipse

To help Eclipse to understand that there are no any errors choose OpenCV library in :guilabel:`Package Explorer` (left mouse click) and press :kbd:`F5` button on your keyboard. Then choose any sample (except first samples in *Tutorial Base* and *Tutorial Advanced*) and also press :kbd:`F5`.

Sometimes more advanced manipulations are needed:

* The provided projects are configured for `android-11` target that can be missing platform in your Android SDK. After right click on any project select :guilabel:`Properties` and then :guilabel:`Android` on the left pane. Click some target with `API Level` 11 or higher:

.. image:: images/eclipse_8a_target.png
:alt: Updating target
:align: center

* Sometimes a project needs fixing its project properties. After right click on any project select :guilabel:`Android Tools` and then :guilabel:`Fix Project Properties` in sub-menu:

.. image:: images/eclipse_8b_fix_props.png
:alt: Fixing project properties
:align: center

After this manipulation Eclipse will rebuild your workspace and error icons will disappear one after another:

.. image:: images/eclipse_9_errors_dissapearing.png

@@ -257,16 +279,14 @@ Open OpenCV library and samples in Eclipse
:alt: OpenCV package imported into Eclipse
:align: center

.. note:: If you are importing only OpenCV library without samples then instead of second refresh command (:kbd:`F5`) you might need to make :menuselection:`Android Tools --> Fix Project Properties` from project context menu.

Running OpenCV Samples
======================

At this point you should be able to build and run all samples except two from Advanced tutorial (these samples require Android NDK to build working applications, see the next tutorial :ref:`Android_Binary_Package_with_NDK` to learn how to compile them).

Also I want to note that only ``Tutorial 1 Basic - 0. Android Camera`` and ``Tutorial 1 Basic - 1. Add OpenCV`` samples are able to run on Emulator from Android SDK. Other samples are using OpenCV Native Camera which does not work with emulator.
Also I want to note that only ``Tutorial 0 - Android Camera`` and ``Tutorial 1 - Add OpenCV`` samples are able to run on Emulator from Android SDK. Other samples are using OpenCV Native Camera which does not work with emulator.

.. note:: Latest *Android SDK tools, revision 12* can run ARM v7 OS images but *Google* does not provide such images with SDK.
.. note:: Latest *Android SDK tools, revision 19* can run ARM v7a OS images but *Google* provides such image for Android 4.x only.

Well, running samples from Eclipse is very simple:

@@ -290,7 +310,7 @@ Well, running samples from Eclipse is very simple:

* Select the :guilabel:`Android Application` option and click :guilabel:`OK` button. Eclipse will install and run the sample.

Here is ``Tutorial 1 Basic - 1. Add OpenCV`` sample detecting edges using Canny algorithm from OpenCV:
Here is ``Tutorial 1 - Add OpenCV`` sample detecting edges using Canny algorithm from OpenCV:

.. image:: images/emulator_canny.png
:height: 600px
@@ -17,17 +17,18 @@ This tutorial describes a fast way how to create and build Android applications
Please note that before starting this tutorial you should fulfill all the steps, described in the tutorial :ref:`Android_Binary_Package`.

This tutorial was tested using Ubuntu 10.04 and Windows 7 SP1 operating systems.
Nevertheless, it should also work on Mac OS X. If you encounter errors after following the steps described here, feel free to contact us via *android-opencv* discussion group https://groups.google.com/group/android-opencv/ and we will try to help you.
Nevertheless, it should also work on Mac OS X. If you encounter errors after following the steps described here, feel free to contact us via
`android-opencv <https://groups.google.com/group/android-opencv/>`_ discussion group and we will try to help you.


Prerequisites: Setup NDK
========================

To compile C++ code for Android platform you need Android **N**\ ative **D**\ evelopment **K**\ it (*NDK*).
To compile C++ code for Android platform you need ``Android Native Development Kit`` (*NDK*).

You can get the latest version of NDK from the page http://developer.android.com/sdk/ndk/index.html .
You can get the latest version of NDK from the `download page <http://developer.android.com/sdk/ndk/index.html>`_.

To install Android NDK just extract the archive to some folder on your computer. (Here is installation instructions on the NDK home page: http://developer.android.com/sdk/ndk/index.html#installing)
To install Android NDK just extract the archive to some folder on your computer. (Here are the `installation instructions <http://developer.android.com/sdk/ndk/index.html#installing>`_ on the NDK home page.)

.. note:: Before start you can read official Android NDK documentation which is in the Android NDK archive, in the folder :file:`docs/`.

@@ -53,7 +54,7 @@ Usually code of an Android application has the following structure:

- :file:`AndroidManifest.xml`

- :file:`default.properties`
- :file:`project.properties`

- :file:`... other files ...`

@@ -77,11 +78,13 @@ Also the root folder should contain the following files

It can be created using Eclipse wizard or :command:`android` tool from Android SDK

* :file:`default.properties` is a text file containing information about target Android platform and other build details.
* :file:`project.properties` is a text file containing information about target Android platform and other build details.

This file is generated by Eclipse or can be created with :command:`android` tool from Android SDK

.. note:: Both files (:file:`AndroidManifest.xml` and :file:`default.properties`) are required to compile the C++ part of the application (NDK build system uses information from these files). If any of these files does not exist, compile the Java part of the project before the C++ part.
.. note:: Both files (:file:`AndroidManifest.xml` and :file:`project.properties`) are required to compile the C++ part of the application (NDK build system uses information from these files). If any of these files does not exist, compile the Java part of the project before the C++ part.

.. _NDK_build_cli:


Theory: How to build Android application having C++ native part (from command line)

@@ -103,11 +106,11 @@ Here is the standard way to compile C++ part of an Android application:

<path_where_NDK_is_placed>/ndk-build

.. note:: If you are working in *cygwin* shell and encounter an error saying that NDK does not find some *cygwin*\ 's path then you might need to define the following environment variable:
.. note:: On Windows we recommend use of ``ndk-build.cmd`` in standard Windows console (``cmd.exe``) rather than the similar ``bash`` script in ``Cygwin`` shell.

.. code-block:: bash

export NDK_USE_CYGPATH=1
.. image:: images/ndk_build.png
:alt: NDK build
:align: center

#. After executing this command the C++ part of the source code is compiled.
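For reference (an illustrative sketch, not part of the diff): the whole native build boils down to running the ``ndk-build`` script (or ``ndk-build.cmd`` on Windows) from the folder that contains the :file:`jni` subfolder. The paths below are placeholders, and the optional ``-B`` flag forces a full rebuild:

.. code-block:: bash

   cd <path_to_your_project>                 # the folder containing jni/
   <path_where_NDK_is_placed>/ndk-build -B   # omit -B for an incremental build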
@@ -134,95 +137,66 @@ After that the Java part of the application can be (re)compiled (using either *E
Theory: How to build Android application having C++ native part (from *Eclipse*)
================================================================================

There are several possible ways to integrate compilation of C++ code by Android NDK into Eclipse compilation process. We recommend the approach taken from this site: http://mobilepearls.com/labs/ndk-builder-in-eclipse/
There are several possible ways to integrate compilation of C++ code by Android NDK into Eclipse compilation process.
We recommend the approach based on Eclipse :abbr:`CDT(C/C++ Development Tooling)` Builder.

.. important:: This instructions should be applied for each Android project in *Eclipse* workspace. So if you have 3 projects having C++ part then you need to configure 3 builders.
.. important:: Make sure your Eclipse IDE has the :abbr:`CDT(C/C++ Development Tooling)` plugin installed. (Menu ``Help`` -> ``About Eclipse SDK`` -> ``Installation Details``)

Below is an adapted version of this guide:
.. image:: images/eclipse_inst_details.png
:alt: Configure builders
:align: center

#. Navigate to :guilabel:`Package Explorer` window and expand your project having JNI resources.
To install the `CDT plugin <http://eclipse.org/cdt/>`_ use menu ``Help`` -> ``Install New Software...``,
then paste the CDT 8.0 repository URL http://download.eclipse.org/tools/cdt/releases/indigo as shown on the picture below and click :guilabel:`Add...`, name it *CDT* and click :guilabel:`OK`.

.. image:: images/eclipse_inst_cdt.png
:alt: Configure builders
:align: center

If you can not see :file:`libs` folder under this project then you need to create it manually. (It will be required on step 7, but you need to create it before you open project properties.)
``CDT Main Features`` should be enough:

.. image:: images/eclipse_inst_cdt_2.png
:alt: Configure builders
:align: center
.. important:: These instructions should be applied for each Android project in *Eclipse* workspace having native (JNI) part to build.

#. Right click on your project in :guilabel:`Package Explorer` window and select :guilabel:`Properties`.
#. Right click on your project in :guilabel:`Package Explorer` window and select :guilabel:`New` -> :guilabel:`Other`:

#. In the :guilabel:`Properties` dialog select :guilabel:`Builders` menu and press the :guilabel:`New...` button:

.. image:: images/eclipse_builders.png
:alt: Configure builders
.. image:: images/eclipse_cdt_cfg1.png
:alt: Configure CDT
:align: center

#. In the resulting dialog select the :guilabel:`Program` type and press :guilabel:`OK` button:
#. Select :guilabel:`C/C++` -> :guilabel:`Convert to C/C++ project`:

.. image:: images/eclipse_builder_types.png
:alt: Choose builder type
.. image:: images/eclipse_cdt_cfg2.png
:alt: Configure CDT
:align: center

#. In the :guilabel:`Main` tab fill the following fields:
#. Select :guilabel:`"C++"`, :guilabel:`"Makefile Project"`, :guilabel:`"Other Toolchain"`:

* :guilabel:`Name` - any name for your builder. ("Tutorial 2.1 Builder" in my case.)
.. image:: images/eclipse_cdt_cfg3.png
:alt: Configure CDT
:align: center

.. note:: This name has to be unique for each project in your workspace.
#. Right click on your project in :guilabel:`Package Explorer` window and select :guilabel:`Properties`, then :guilabel:`C/C++ Build` in the left pane.
Uncheck :guilabel:`Use default build command` and put ``ndk-build`` invocation in the :guilabel:`Build command` edit box and click :guilabel:`Apply` :

* :guilabel:`Location` - full path to :command:`ndk-build` tool.
.. image:: images/eclipse_cdt_cfg4.png
:alt: Configure CDT
:align: center

+ *UNIX*
#. Select :guilabel:`Builders` in the left pane, select :guilabel:`"CDT Builder"`, press :guilabel:`Edit` button on the right and set check-boxes as on the picture below for automatic rebuild of JNI part:

Just put full path to :command:`ndk-build` into this filed. Also you can add some options to the :guilabel:`Arguments:guilabel:` fied, for example ``-B`` option.
.. image:: images/eclipse_cdt_cfg5.png
:alt: Configure CDT
:align: center

+ *Cygwin*

- Instead of path to the :command:`ndk-build` tool you need to put full path to *cygwin*\ 's :program:`bash.exe` location. E.g: :file:`C:\\cygwin\\bin\\bash.exe`.
#. Use menu :guilabel:`Project` -> :guilabel:`Clean...` to make sure that NDK build is invoked on the project build:

- Put full path to :command:`ndk-build` into the :guilabel:`Arguments` field E.g. :file:`C:\\Android\\android-ndk-r6\\ndk-build`.

- Go to the :guilabel:`Environment` tab and define an environment variable:

* :envvar:`PATH` - full path to the *cygwin* tools. E.g. :file:`C:\\cygwin\\bin`

.. image:: images/eclipse_windows_environment.png
:alt: Define environment variables
:align: center

* :guilabel:`Working Directory` - put path to your project into this field. Instead of hardcoding full path you can click :guilabel:`Browse Workspace...` button and select your project.

.. image:: images/eclipse_edit_configuration_main.png
:alt: Define environment variables
:align: center

#. Go to the :guilabel:`Refresh` tab and select both :guilabel:`Refresh resources upon completion` and :guilabel:`Recursively include sub-folders`.

Next set the :guilabel:`Specific resources` option and click :guilabel:`Specify resources...` button:

.. image:: images/eclipse_edit_configuration_refresh.png
:alt: Define environment variables
:align: center

#. Select :file:`libs` folder under your project and click :guilabel:`Finish`:

.. image:: images/eclipse_edit_configuration_specify_resources.png
:alt: Select resources folder to refresh automatically
:align: center

#. Go to the last tab :guilabel:`Build options`. Make sure that all checkboxes are set as shown on the next screen:

.. image:: images/eclipse_edit_configuration_build_options.png
:alt: Configure build options
:align: center

#. Next, click the :guilabel:`Specify resources...` button.

#. Select :file:`jni` folder of your project and click the :guilabel:`Finish` button:

.. image:: images/eclipse_edit_configuration_build_resources.png
:alt: Select resources to build
:align: center

#. Finally press :guilabel:`OK` in the builder configuration and project properties dialogs. If you have automatic build turned on then console showing build log should appear:

.. image:: images/eclipse_NDK_build_success.png
:alt: Select resources to build
:align: center
.. image:: images/eclipse_ndk_build.png
:alt: Select resources folder to refresh automatically
:align: center

Theory: The structure of :file:`Android.mk` and :file:`Application.mk` scripts
==============================================================================

@@ -255,65 +229,30 @@ Usually the file :file:`Application.mk` is optional, but in case of project usin
Practice: Build samples from OpenCV binary package
==================================================

OpenCV binary package includes two samples having JNI resources:
OpenCV binary package includes 3 samples having JNI resources:

* *Tutorial 2 Advanced - 1. Add Native OpenCV*
* *Tutorial 3 (Advanced) - Add Native OpenCV*

This sample illustrate how you can use OpenCV in C++ but without OpenCV Java API.
This sample illustrates how you can use OpenCV in C++ but without OpenCV Java API.

* *Tutorial 2 Advanced - 2. Mix Java+Native OpenCV*
* *Tutorial 4 (Advanced) - Mix Java+Native OpenCV*

This sample shows how you can mix OpenCV Java API and native C++ code.

To build these samples you need to:

#. Fulfill all the steps, described in the tutorial :ref:`Android_Binary_Package`.

#. Setup one builder for *"Tutorial 2 Advanced - 1. Add Native OpenCV"* project (as described in :ref:`Android_NDK_integration_with_Eclipse`)

#. Setup second builder for *"Tutorial 2 Advanced - 2. Mix Java+Native OpenCV"* project (repeat the steps from :ref:`Android_NDK_integration_with_Eclipse`)

#. Clean these projects (in the main Eclipse menu: :menuselection:`Project --> Clean...`)

#. Run Eclipse build command (if option :guilabel:`Build Automatically` is not set)
* *Sample - face-detection*

This sample illustrates usage of both simple OpenCV face detector via Java API and advanced detection based face tracker via JNI and C++.

Before OpenCV 2.4.2 for Android these projects are not configured to use CDT for building their native part, so you can do it yourself.

Practice: Create an Android application, which uses OpenCV
==========================================================

To build your own Android application, which uses OpenCV from native part, the following steps should be done:

#. The archive with OpenCV binary package should be downloaded and extracted to some folder (as example, into the home folder)
#. The archive with OpenCV binary package should be downloaded and extracted to some folder (e.g. ``C:\Work\android-opencv\OpenCV-2.4.0``)

#. We recommend to use an environment variable to specify the location of OpenCV package. Full or relative path hardcoded in :file:`jni/Android.mk` will also work.

So, the environment variable :envvar:`OPENCV_PACKAGE_DIR` should be defined.
The value of the variable should points to the folder, where the OpenCV package has been extracted.


As an example, on *UNIX* you can add add the following line into the hidden file :file:`.bashrc` placed in your home folder:

.. code-block:: bash

export OPENCV_PACKAGE_DIR = <path to the extracted OpenCV package>

Then relogin (or better reboot your computer).

.. attention:: without rebooting (or logout) this change might not work.

If you are a *Windows* user, then navigate to:

* *Windows 7 / Windows Vista*

:menuselection:`My Computer (Right Click on Icon) --> Properties (Link) --> Advanced System Settings (Link) --> Advanced (Tab) --> Environment Variables (Button) --> System variables (Section)`

* *Windows XP*

:menuselection:`My Computer (Right Click on Icon) --> Properties (Link) --> Advanced (Tab) --> Environment Variables (Button) --> System variables (Section)`

Create new variable :envvar:`OPENCV_PACKAGE_DIR` and similarly to *UNIX* relogin or reboot.

If you are setting NDK builder as described above in :ref:`Android_NDK_integration_with_Eclipse`, then you can define this variable in builder settings. It can be done on third :guilabel:`Environment` tab of the builder configuration window (we have already added some variables to this tab on *Windows* but skipped it for other platforms).
#. You can use an environment variable to specify the location of OpenCV package or just hardcode full or relative path in the :file:`jni/Android.mk` of your projects.

#. The file :file:`jni/Android.mk` should be written for the current application using the common rules for the file.

@@ -324,7 +263,7 @@ To build your own Android application, which uses OpenCV from native part, the f

.. code-block:: make

include $(OPENCV_PACKAGE_DIR)/share/OpenCV/OpenCV.mk
include C:\Work\android-opencv\OpenCV-2.4.0\share\OpenCV\OpenCV.mk

should be inserted into the :file:`jni/Android.mk` file right after the line

@@ -332,11 +271,25 @@ To build your own Android application, which uses OpenCV from native part, the f

include $(CLEAR_VARS)

.. note:: If your application utilize both native (C++) OpenCV and its Java API you need to put the following line before including :file:`OpenCV.mk` to avoid conflict between C++ and Java builders:
Several variables can be used to customize OpenCV stuff, they should be set **before** the ``"include ...\OpenCV.mk"`` line:

.. code-block:: make
.. code-block:: make

OPENCV_CAMERA_MODULES:=off
OPENCV_INSTALL_MODULES:=on

Copies necessary OpenCV dynamic libs to the project ``libs`` folder in order to include them into the APK.

.. code-block:: make

OPENCV_CAMERA_MODULES:=off

Skip native OpenCV camera related libs copying to the project ``libs`` folder.

.. code-block:: make

OPENCV_LIB_TYPE:=STATIC

Perform static link with OpenCV. By default dynamic link is used and the project JNI lib depends on ``libopencv_java.so``.

#. The file :file:`Application.mk` should exist and should contain lines

@@ -353,25 +306,4 @@ To build your own Android application, which uses OpenCV from native part, the f

is recommended for the applications targeting modern ARMs

#. To build the C++ code the Android NDK script :command:`ndk-build` should be run in the root directory of application.
Then the C++ source code using OpenCV will be built by Android NDK build system.
After that the Java part of the application can be rebuild and the application can be installed on an Android device.

Note that this step requires calling the :command:`ndk-build` script from the console. Instead of this step you can use integration of Android NDK into Eclipse
as stated above in the section :ref:`Android_NDK_integration_with_Eclipse` .


Additional C++ support in Eclipse
==================================

Note that you can install additional C++ plugins in Eclipse:

#. Open :guilabel:`Help / Install New Software`. This shows the :guilabel:`Install` dialog.

#. In the :guilabel:`Work with` drop-down list choose :guilabel:`Helios - http://download.eclipse.org/releases/helios` (or :guilabel:`Indigo - http://download.eclipse.org/releases/indigo` depending on your Eclipse version) and wait while the list of available software is loaded.

#. From the list of available software select :menuselection:`Programming Languages --> C/C++ Development Tools`.

#. Click :guilabel:`Next`, click :guilabel:`Next` again, accept the agreement, and click the :guilabel:`Finish` button.

#. When installation is finished, click :guilabel:`Reload`
#. Either use :ref:`manual <NDK_build_cli>` ``ndk-build`` invocation or :ref:`setup Eclipse CDT Builder <Android_NDK_integration_with_Eclipse>` to build native JNI lib before Java part [re]build and APK creation.
@@ -6,53 +6,37 @@ Installation in iOS
Required packages
==================

* GCC 4.x or later
* CMake 2.8 or higher
* Xcode 4.0 or higher
* CMake 2.8.8 or higher
* Xcode 4.3 or higher

Getting the cutting-edge OpenCV from SourceForge SVN repository
-----------------------------------------------------------------

Launch SVN client and checkout either

a. the current OpenCV snapshot from here: http://code.opencv.org/svn/opencv/trunk

#. or the latest tested OpenCV snapshot from here: http://code.opencv.org/svn/opencv/tags/latest_tested_snapshot
Launch SVN client and checkout the current OpenCV snapshot from here: http://code.opencv.org/svn/opencv/trunk/opencv

In MacOS it can be done using the following command in Terminal:

.. code-block:: bash

cd ~/<my_working _directory>
svn co http://code.opencv.org/svn/opencv/trunk
svn co http://code.opencv.org/svn/opencv/trunk/opencv


Building OpenCV from source using CMake, using the command line
================================================================

#. Create a temporary directory, which we denote as <cmake_binary_dir>, where you want to put the generated Makefiles, project files as well the object filees and output binaries
#. Make symbolic link for Xcode to let OpenCV build scripts find the compiler, header files etc.

#. Enter the <cmake_binary_dir> and type

.. code-block:: bash

cmake [<some optional parameters>] <path to the OpenCV source directory>

For example

.. code-block:: bash
.. code-block:: bash

cd /
sudo ln -s /Applications/Xcode.app/Contents/Developer Developer

cd ~/opencv
cd ..
mkdir release
cd release
cmake -GXcode -DCMAKE_TOOLCHAIN_FILE=../opencv/ios/cmake/Toolchains/Toolchain-iPhoneOS_Xcode.cmake -DCMAKE_INSTALL_PREFIX=../OpenCV_iPhoneOS -DCMAKE_BUILD_TYPE=RELEASE ../opencv


#. Enter the created temporary directory (<cmake_binary_dir>) and proceed with:

.. code-block:: bash

xcodebuild -sdk iphoneos -configuration Release -target ALL_BUILD
xcodebuild -sdk iphoneos -configuration Release -target install install
#. Build OpenCV framework

.. code-block:: bash

cd ~/<my_working_directory>
python opencv/ios/build_framework.py ios

If everything's fine, after a few minutes you will get ~/<my_working_directory>/ios/opencv2.framework. You can add this framework to your Xcode projects.
@@ -16,7 +16,7 @@ Installation by using the pre-built libraries

1. Open up a web browser and go to: http://sourceforge.net/projects/opencvlibrary/files/opencv-win/

#. Open the folder for the latest version (currently this is 2.4).
#. Open the folder for the latest version (currently this is 2.4.1).

#. Choose a build you want to use and download it. The naming conventions used will show what kind of support they offer. For example:

@@ -60,10 +60,10 @@ If you are building your own libraries you can take either the source files from

.. container:: enumeratevisibleitemswithsquare

+ stable and tested build - http://code.opencv.org/svn/opencv/branches/2.4 (the number at the end will change with every new realease, so change it to that)
+ stable and tested build - http://code.opencv.org/svn/opencv/tags/2.4.1 (the number at the end will change with every new release, so change it to that)
+ development build - http://code.opencv.org/svn/opencv/trunk/

While the later one may contain a couple of new and experimental algorithms, performance increases and interface improvements, be aware, that it may also contain many-many bugs. Using the first one is recommended in most of the cases. That is unless you are extending the OpenCV library itself or really need to most up to date version of it.
While the latter one may contain a couple of new and experimental algorithms, performance increases and interface improvements, be aware that it may also contain some bugs. Using the first one is recommended in most of the cases. That is unless you are extending the OpenCV library itself or really need the most up to date version of it.
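For reference (an illustrative sketch, not part of the diff), either source tree can be checked out with a Subversion client; the URLs are the ones quoted above and the target folder names are placeholders:

.. code-block:: bash

   # stable, tested sources (the tag number changes with every release)
   svn co http://code.opencv.org/svn/opencv/tags/2.4.1 opencv
   # or the development build
   svn co http://code.opencv.org/svn/opencv/trunk/ opencv-trunk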
Building the OpenCV library from scratch requires a couple of tools installed beforehand:

@@ -287,11 +287,11 @@ Building the library

+ *BUILD_DOCS* -> It creates two projects for building the documentation of OpenCV (there will be a separate project for building the HTML and the PDF files). Note that these aren't built together with the solution. You need to make an explicit build project command on these to do so.
+ *BUILD_EXAMPLES* -> OpenCV comes with many example applications from which you may learn most of the libraries capabilities. This will also come handy to easily try out if OpenCV is fully functional on your computer.
+ *BUILD_JAVA_SUPPORT* -> At the moment this has no real meaning on the Windows platform. Ignore it.
+ *BUILD_NEW_PYTHON_SUPPORT* -> Self-explanatory. Create the binaries to use OpenCV from the Python language.
+ *BUILD_PACKAGE* -> Prior to version 2.3 with this you could build a project that will build an OpenCV installer. With this you can easily install your OpenCV flavor on other systems. For the latest source files of OpenCV it generates a new project that simply creates zip archive with OpenCV sources.
+ *BUILD_SHARED_LIBS* -> With this you can control to build DLL files (when turned on) or static library files (\*.lib) otherwise.
+ *BUILD_TESTS* -> Each module of OpenCV has a test project assigned to it. Building these test projects is also a good way to try out, that the modules work just as expected on your system too.
+ *BUILD_PERF_TESTS* -> There are also performance tests for many OpenCV functions. If you're concerned about performance, build them and run.
+ *BUILD_opencv_python* -> Self-explanatory. Create the binaries to use OpenCV from the Python language.

Press again the *Configure* button and ensure no errors are reported. If this is the case you can tell CMake to create the project files by pushing the *Generate* button. Go to the build directory and open the created **OpenCV** solution.
Depending on just how much of the above options you have selected the solution may contain quite a lot of projects so be tolerant on the IDE at the startup.
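The same options can also be set non-interactively from the command line instead of cmake-gui (an illustrative sketch, not part of the diff; the generator name and the paths depend on your Visual Studio version and directory layout):

.. code-block:: bash

   cd <build_directory>
   cmake -G "Visual Studio 10" -DBUILD_EXAMPLES=ON -DBUILD_PERF_TESTS=OFF -DBUILD_DOCS=OFF <path to the OpenCV sources>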
|
||||
|
@ -393,7 +393,7 @@ bool computeKsi( int transformType,
|
||||
const Mat& image0, const Mat& cloud0,
|
||||
const Mat& image1, const Mat& dI_dx1, const Mat& dI_dy1,
|
||||
const Mat& corresps, int correspsCount,
|
||||
double fx, double fy, double sobelScale, double normScale, double determinantThreshold,
|
||||
double fx, double fy, double sobelScale, double determinantThreshold,
|
||||
Mat& ksi )
|
||||
{
|
||||
int Cwidth = -1;
|
||||
@ -419,6 +419,7 @@ bool computeKsi( int transformType,
|
||||
Mat C( correspsCount, Cwidth, CV_64FC1 );
|
||||
Mat dI_dt( correspsCount, 1, CV_64FC1 );
|
||||
|
||||
double sigma = 0;
|
||||
int pointCount = 0;
|
||||
for( int v0 = 0; v0 < corresps.rows; v0++ )
|
||||
{
|
||||
@ -428,14 +429,36 @@ bool computeKsi( int transformType,
|
||||
{
|
||||
int u1, v1;
|
||||
get2shorts( corresps.at<int>(v0,u0), u1, v1 );
|
||||
double diff = static_cast<double>(image1.at<uchar>(v1,u1)) -
|
||||
static_cast<double>(image0.at<uchar>(v0,u0));
|
||||
sigma += diff * diff;
|
||||
pointCount++;
|
||||
}
|
||||
}
|
||||
}
|
||||
sigma = std::sqrt(sigma/pointCount);
|
||||
|
||||
pointCount = 0;
|
||||
for( int v0 = 0; v0 < corresps.rows; v0++ )
|
||||
{
|
||||
for( int u0 = 0; u0 < corresps.cols; u0++ )
|
||||
{
|
||||
if( corresps.at<int>(v0,u0) != -1 )
|
||||
{
|
||||
int u1, v1;
|
||||
get2shorts( corresps.at<int>(v0,u0), u1, v1 );
|
||||
|
||||
double diff = static_cast<double>(image1.at<uchar>(v1,u1)) -
|
||||
static_cast<double>(image0.at<uchar>(v0,u0));
|
||||
double w = sigma + std::abs(diff);
|
||||
w = w > DBL_EPSILON ? 1./w : 1.;
|
||||
|
||||
(*computeCFuncPtr)( (double*)C.ptr(pointCount),
|
||||
normScale * sobelScale * dI_dx1.at<short int>(v1,u1),
|
||||
normScale * sobelScale * dI_dy1.at<short int>(v1,u1),
|
||||
w * sobelScale * dI_dx1.at<short int>(v1,u1),
|
||||
w * sobelScale * dI_dy1.at<short int>(v1,u1),
|
||||
cloud0.at<Point3f>(v0,u0), fx, fy);
|
||||
|
||||
dI_dt.at<double>(pointCount) = normScale * (static_cast<double>(image1.at<uchar>(v1,u1)) -
|
||||
static_cast<double>(image0.at<uchar>(v0,u0)));
|
||||
dI_dt.at<double>(pointCount) = w * diff;
|
||||
pointCount++;
|
||||
}
|
||||
}
|
||||
@ -556,8 +579,6 @@ bool cv::RGBDOdometry( cv::Mat& Rt, const Mat& initRt,
|
||||
|
||||
const double fx = levelCameraMatrix.at<double>(0,0);
|
||||
const double fy = levelCameraMatrix.at<double>(1,1);
|
||||
const double avgf = 0.5 *(fx + fy);
|
||||
const double normScale = 1./(255*avgf);
|
||||
const double determinantThreshold = 1e-6;
|
||||
|
||||
Mat corresps( levelImage0.size(), levelImage0.type(), CV_32SC1 );
|
||||
@ -576,7 +597,7 @@ bool cv::RGBDOdometry( cv::Mat& Rt, const Mat& initRt,
|
||||
levelImage0, levelCloud0,
|
||||
levelImage1, level_dI_dx1, level_dI_dy1,
|
||||
corresps, correspsCount,
|
||||
fx, fy, sobelScale, normScale, determinantThreshold,
|
||||
fx, fy, sobelScale, determinantThreshold,
|
||||
ksi );
|
||||
|
||||
if( !solutionExist )
|
||||
|
@ -90,9 +90,9 @@ public:
|
||||
Distance d = Distance()) :
|
||||
dataset_(input_data), index_params_(params), distance_(d)
|
||||
{
|
||||
table_number_ = get_param<unsigned int>(index_params_,"table_number",12);
|
||||
key_size_ = get_param<unsigned int>(index_params_,"key_size",20);
|
||||
multi_probe_level_ = get_param<unsigned int>(index_params_,"multi_probe_level",2);
|
||||
table_number_ = get_param<int>(index_params_,"table_number",12);
|
||||
key_size_ = get_param<int>(index_params_,"key_size",20);
|
||||
multi_probe_level_ = get_param<int>(index_params_,"multi_probe_level",2);
|
||||
|
||||
feature_size_ = (unsigned)dataset_.cols;
|
||||
fill_xor_mask(0, key_size_, multi_probe_level_, xor_masks_);
|
||||
|
@ -940,7 +940,7 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
|
||||
ncvAssertCUDAReturn(cudaMemsetAsync(dv.ptr(), 0, kLevelSizeInBytes, stream), NCV_CUDA_ERROR);
|
||||
|
||||
//texture format descriptor
|
||||
cudaChannelFormatDesc channel_desc = cudaCreateChannelDesc<float>();
|
||||
cudaChannelFormatDesc ch_desc = cudaCreateChannelDesc<float>();
|
||||
|
||||
I0 = *img0Iter;
|
||||
I1 = *img1Iter;
|
||||
@ -948,8 +948,8 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
|
||||
++img0Iter;
|
||||
++img1Iter;
|
||||
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I0, I0->ptr(), channel_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I1, I1->ptr(), channel_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I0, I0->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_I1, I1->ptr(), ch_desc, kLevelWidth, kLevelHeight, kLevelStride*sizeof(float)), NCV_CUDA_ERROR);
|
||||
|
||||
//compute derivatives
|
||||
dim3 dBlocks(iDivUp(kLevelWidth, 32), iDivUp(kLevelHeight, 6));
|
||||
@ -989,20 +989,20 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,
|
||||
ncvAssertReturnNcvStat( nppiStFilterRowBorder_32f_C1R (Iy.ptr(), srcSize, nSrcStep, Ixy.ptr(), srcSize, nSrcStep, oROI,
|
||||
nppStBorderMirror, derivativeFilter.ptr(), kDFilterSize, kDFilterSize/2, 1.0f/12.0f) );
|
||||
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix, Ix.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixx, Ixx.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix0, Ix0.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy, Iy.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iyy, Iyy.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy0, Iy0.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixy, Ixy.ptr(), channel_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix, Ix.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixx, Ixx.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ix0, Ix0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy, Iy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iyy, Iyy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Iy0, Iy0.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
ncvAssertCUDAReturn(cudaBindTexture2D(0, tex_Ixy, Ixy.ptr(), ch_desc, kLevelWidth, kLevelHeight, kPitchTex), NCV_CUDA_ERROR);
|
||||
|
||||
// flow
|
||||
ncvAssertCUDAReturn(cudaBindTexture(0, tex_u, ptrU->ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_v, ptrV->ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_u, ptrU->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_v, ptrV->ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
// flow increments
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

dim3 psor_blocks(iDivUp(kLevelWidth, PSOR_TILE_WIDTH), iDivUp(kLevelHeight, PSOR_TILE_HEIGHT));
dim3 psor_threads(PSOR_TILE_WIDTH, PSOR_TILE_HEIGHT);
@ -1032,37 +1032,37 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,

ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);

ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

prepare_sor_stage_2<<<psor_blocks, psor_threads, 0, stream>>>(denom_u.ptr(), denom_v.ptr(), kLevelWidth, kLevelHeight, kLevelStride);

ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);

// linear system coefficients
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_x, diffusivity_x.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_diffusivity_y, diffusivity_y.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_dudv, num_dudv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_u, num_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_numerator_v, num_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_u, denom_u.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_v, denom_v.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_u, denom_u.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_inv_denominator_v, denom_v.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

//solve linear system
for (Ncv32u solver_iteration = 0; solver_iteration < desc.number_of_solver_iterations; ++solver_iteration)
{
float omega = 1.99f;

ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

sor_pass<0><<<sor_blocks, sor_threads, 0, stream>>>
(du_new.ptr(),
@ -1079,8 +1079,8 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,

ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);

ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du_new.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv_new.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv_new.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);

sor_pass<1><<<sor_blocks, sor_threads, 0, stream>>>
(du.ptr(),
@ -1097,8 +1097,8 @@ NCVStatus NCVBroxOpticalFlow(const NCVBroxOpticalFlowDescriptor desc,

ncvAssertCUDALastErrorReturn(NCV_CUDA_ERROR);

ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), channel_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_du, du.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
ncvAssertCUDAReturn(cudaBindTexture(0, tex_dv, dv.ptr(), ch_desc, kLevelSizeInBytes), NCV_CUDA_ERROR);
}//end of solver loop
}// end of inner loop
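
The solver loop above launches sor_pass<0> and sor_pass<1> once per iteration: a red-black successive over-relaxation scheme with omega = 1.99. As a rough illustration only (not the NCV kernels), a minimal host-side sketch of the same two-colour update pattern on a Laplace-like grid could look like this; the grid size, stencil and boundary values are assumptions:

    #include <cstdio>
    #include <vector>

    // Toy red-black SOR relaxation; it mirrors the two-pass structure
    // (parity 0, then parity 1) of the sor_pass<0>/<1> kernel launches.
    static void sor_iteration(std::vector<float>& u, int w, int h, float omega)
    {
        for (int parity = 0; parity < 2; ++parity)           // "red" cells, then "black" cells
            for (int y = 1; y < h - 1; ++y)
                for (int x = 1; x < w - 1; ++x)
                {
                    if (((x + y) & 1) != parity) continue;   // each pass touches one colour only
                    float gs = 0.25f * (u[(y - 1) * w + x] + u[(y + 1) * w + x] +
                                        u[y * w + x - 1] + u[y * w + x + 1]);
                    u[y * w + x] += omega * (gs - u[y * w + x]);   // over-relaxed update
                }
    }

    int main()
    {
        int w = 16, h = 16;
        std::vector<float> u(w * h, 0.0f);
        for (int x = 0; x < w; ++x) u[x] = 1.0f;             // fixed boundary row
        for (int it = 0; it < 50; ++it) sor_iteration(u, w, h, 1.9f);
        std::printf("u at center: %f\n", u[(h / 2) * w + w / 2]);
        return 0;
    }
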
@ -1622,16 +1622,16 @@ NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
continue;
}

NcvSize32s srcRoi, srcIIRoi, scaledIIRoi, searchRoi;
NcvSize32s srcRoi_, srcIIRo_i, scaledIIRoi, searchRoi;

srcRoi.width = d_srcImg.width();
srcRoi.height = d_srcImg.height();
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();

srcIIRoi.width = srcRoi.width + 1;
srcIIRoi.height = srcRoi.height + 1;
srcIIRo_i.width = srcRoi_.width + 1;
srcIIRo_i.height = srcRoi_.height + 1;

scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
scaledIIRoi.width = srcIIRo_i.width / scale;
scaledIIRoi.height = srcIIRo_i.height / scale;

searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
searchRoi.height = scaledIIRoi.height - haar.ClassifierSize.height;
@ -1659,12 +1659,12 @@ NCVStatus ncvDetectObjectsMultiScale_device(NCVMatrix<Ncv8u> &d_srcImg,
{
Ncv32u scale = scalesVector[i];

NcvSize32u srcRoi, scaledIIRoi, searchRoi;
NcvSize32u srcRoi_, scaledIIRoi, searchRoi;
NcvSize32u srcIIRoi;
srcRoi.width = d_srcImg.width();
srcRoi.height = d_srcImg.height();
srcIIRoi.width = srcRoi.width + 1;
srcIIRoi.height = srcRoi.height + 1;
srcRoi_.width = d_srcImg.width();
srcRoi_.height = d_srcImg.height();
srcIIRoi.width = srcRoi_.width + 1;
srcIIRoi.height = srcRoi_.height + 1;
scaledIIRoi.width = srcIIRoi.width / scale;
scaledIIRoi.height = srcIIRoi.height / scale;
searchRoi.width = scaledIIRoi.width - haar.ClassifierSize.width;
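
The srcRoi_/srcIIRo_i renames above only avoid shadowing outer variables; the ROI chain itself is unchanged: source size, integral image one pixel larger, divide by the scale, then subtract the classifier window so only positions where a full window fits remain. A minimal sketch of that arithmetic (plain C++; the Size struct and the concrete values are assumptions):

    #include <cstdio>

    struct Size { int width, height; };

    int main()
    {
        Size src        = { 640, 480 };   // d_srcImg dimensions (assumed)
        Size classifier = { 24, 24 };     // haar.ClassifierSize (assumed)
        int  scale      = 2;

        Size srcII    = { src.width + 1, src.height + 1 };          // integral image is 1px larger
        Size scaledII = { srcII.width / scale, srcII.height / scale };
        Size search   = { scaledII.width  - classifier.width,       // where a full window still fits
                          scaledII.height - classifier.height };

        std::printf("search ROI at scale %d: %dx%d\n", scale, search.width, search.height);
        return 0;
    }
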
@ -1414,17 +1414,17 @@ NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
//calculate hierarchical partial sums
for (Ncv32u i=1; i<partSumNums.size()-1; i++)
{
dim3 grid(partSumNums[i+1]);
if (grid.x > 65535)
dim3 grid_partial(partSumNums[i+1]);
if (grid_partial.x > 65535)
{
grid.y = (grid.x + 65534) / 65535;
grid.x = 65535;
grid_partial.y = (grid_partial.x + 65534) / 65535;
grid_partial.x = 65535;
}
if (grid.x != 1)
if (grid_partial.x != 1)
{
removePass1Scan
<false, true>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
d_hierSums.ptr() + partSumOffsets[i+1],
@ -1434,7 +1434,7 @@ NCVStatus compactVector_32u_device(Ncv32u *d_src, Ncv32u srcLen,
{
removePass1Scan
<false, false>
<<<grid, block, 0, nppStGetActiveCUDAstream()>>>
<<<grid_partial, block, 0, nppStGetActiveCUDAstream()>>>
(d_hierSums.ptr() + partSumOffsets[i],
partSumNums[i], NULL,
NULL,
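
The grid -> grid_partial rename above avoids reusing the outer `grid` variable; the wrap-around logic is the usual workaround for 1D grids that would exceed the 65535-block limit of the x dimension on older compute capabilities. A minimal sketch of the same arithmetic (plain C++, the required block count is an assumption, and the kernel-side index recovery is shown only as a comment):

    #include <cstdio>

    int main()
    {
        unsigned int blocks = 200000;          // number of blocks the launch needs (assumed)
        unsigned int gx = blocks, gy = 1;

        if (gx > 65535)                        // split an oversized 1D grid into a 2D grid
        {
            gy = (gx + 65534) / 65535;         // ceil(gx / 65535)
            gx = 65535;
        }
        // Inside a kernel the flat block id would be recovered as
        // blockIdx.y * gridDim.x + blockIdx.x, checked against the real block count.
        std::printf("grid = %u x %u (%u launch slots for %u blocks)\n",
                    gx, gy, gx * gy, blocks);
        return 0;
    }
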
@ -723,16 +723,16 @@ static NCVStatus drawRectsWrapperHost(T *h_dst,

if (rect.x < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
{
h_dst[i*dstStride+rect.x] = color;
h_dst[each*dstStride+rect.x] = color;
}
}
if (rect.x+rect.width-1 < dstWidth)
{
for (Ncv32u i=rect.y; i<rect.y+rect.height && i<dstHeight; i++)
for (Ncv32u each=rect.y; each<rect.y+rect.height && each<dstHeight; each++)
{
h_dst[i*dstStride+rect.x+rect.width-1] = color;
h_dst[each*dstStride+rect.x+rect.width-1] = color;
}
}
if (rect.y < dstHeight)

@ -623,11 +623,11 @@ class NCVVectorAlloc : public NCVVector<T>
{
NCVVectorAlloc();
NCVVectorAlloc(const NCVVectorAlloc &);
NCVVectorAlloc& operator=(const NCVVectorAlloc<T>&);
NCVVectorAlloc& operator=(const NCVVectorAlloc<T>&);

public:

NCVVectorAlloc(INCVMemAllocator &allocator_, Ncv32u length)
NCVVectorAlloc(INCVMemAllocator &allocator_, Ncv32u length_)
:
allocator(allocator_)
{
@ -636,11 +636,11 @@ public:
this->clear();
this->allocatedMem.clear();

ncvStat = allocator.alloc(this->allocatedMem, length * sizeof(T));
ncvStat = allocator.alloc(this->allocatedMem, length_ * sizeof(T));
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "NCVVectorAlloc ctor:: alloc failed", );

this->_ptr = (T *)this->allocatedMem.begin.ptr;
this->_length = length;
this->_length = length_;
this->_memtype = this->allocatedMem.begin.memtype;
}

@ -698,15 +698,15 @@ public:
this->bReused = true;
}

NCVVectorReuse(const NCVMemSegment &memSegment, Ncv32u length)
NCVVectorReuse(const NCVMemSegment &memSegment, Ncv32u length_)
{
this->bReused = false;
this->clear();

ncvAssertPrintReturn(length * sizeof(T) <= memSegment.size, \
ncvAssertPrintReturn(length_ * sizeof(T) <= memSegment.size, \
"NCVVectorReuse ctor:: memory binding failed due to size mismatch", );

this->_length = length;
this->_length = length_;
this->_ptr = (T *)memSegment.begin.ptr;
this->_memtype = memSegment.begin.memtype;

@ -841,34 +841,34 @@ class NCVMatrixAlloc : public NCVMatrix<T>
NCVMatrixAlloc& operator=(const NCVMatrixAlloc &);
public:

NCVMatrixAlloc(INCVMemAllocator &allocator, Ncv32u width, Ncv32u height, Ncv32u _pitch=0)
NCVMatrixAlloc(INCVMemAllocator &allocator_, Ncv32u width_, Ncv32u height_, Ncv32u pitch_=0)
:
allocator(allocator)
allocator(allocator_)
{
NCVStatus ncvStat;

this->clear();
this->allocatedMem.clear();

Ncv32u widthBytes = width * sizeof(T);
Ncv32u widthBytes = width_ * sizeof(T);
Ncv32u pitchBytes = alignUp(widthBytes, allocator.alignment());

if (_pitch != 0)
if (pitch_ != 0)
{
ncvAssertPrintReturn(_pitch >= pitchBytes &&
(_pitch & (allocator.alignment() - 1)) == 0,
ncvAssertPrintReturn(pitch_ >= pitchBytes &&
(pitch_ & (allocator.alignment() - 1)) == 0,
"NCVMatrixAlloc ctor:: incorrect pitch passed", );
pitchBytes = _pitch;
pitchBytes = pitch_;
}

Ncv32u requiredAllocSize = pitchBytes * height;
Ncv32u requiredAllocSize = pitchBytes * height_;

ncvStat = allocator.alloc(this->allocatedMem, requiredAllocSize);
ncvAssertPrintReturn(ncvStat == NCV_SUCCESS, "NCVMatrixAlloc ctor:: alloc failed", );

this->_ptr = (T *)this->allocatedMem.begin.ptr;
this->_width = width;
this->_height = height;
this->_width = width_;
this->_height = height_;
this->_pitch = pitchBytes;
this->_memtype = this->allocatedMem.begin.memtype;
}
@ -916,34 +916,34 @@ class NCVMatrixReuse : public NCVMatrix<T>

public:

NCVMatrixReuse(const NCVMemSegment &memSegment, Ncv32u alignment, Ncv32u width, Ncv32u height, Ncv32u pitch=0, NcvBool bSkipPitchCheck=false)
NCVMatrixReuse(const NCVMemSegment &memSegment, Ncv32u alignment, Ncv32u width_, Ncv32u height_, Ncv32u pitch_=0, NcvBool bSkipPitchCheck=false)
{
this->bReused = false;
this->clear();

Ncv32u widthBytes = width * sizeof(T);
Ncv32u widthBytes = width_ * sizeof(T);
Ncv32u pitchBytes = alignUp(widthBytes, alignment);

if (pitch != 0)
if (pitch_ != 0)
{
if (!bSkipPitchCheck)
{
ncvAssertPrintReturn(pitch >= pitchBytes &&
(pitch & (alignment - 1)) == 0,
ncvAssertPrintReturn(pitch_ >= pitchBytes &&
(pitch_ & (alignment - 1)) == 0,
"NCVMatrixReuse ctor:: incorrect pitch passed", );
}
else
{
ncvAssertPrintReturn(pitch >= widthBytes, "NCVMatrixReuse ctor:: incorrect pitch passed", );
ncvAssertPrintReturn(pitch_ >= widthBytes, "NCVMatrixReuse ctor:: incorrect pitch passed", );
}
pitchBytes = pitch;
pitchBytes = pitch_;
}

ncvAssertPrintReturn(pitchBytes * height <= memSegment.size, \
ncvAssertPrintReturn(pitchBytes * height_ <= memSegment.size, \
"NCVMatrixReuse ctor:: memory binding failed due to size mismatch", );

this->_width = width;
this->_height = height;
this->_width = width_;
this->_height = height_;
this->_pitch = pitchBytes;
this->_ptr = (T *)memSegment.begin.ptr;
this->_memtype = memSegment.begin.memtype;
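
Both NCVMatrixAlloc and NCVMatrixReuse derive the row pitch the same way: round the row width in bytes up to the allocator/segment alignment, and accept a caller-supplied pitch only if it is at least that large and itself a multiple of the alignment. A minimal sketch of that check (plain C++; the bit-mask form assumes a power-of-two alignment, and the concrete sizes are assumptions):

    #include <cstdint>
    #include <cstdio>

    // Round x up to the next multiple of a (a must be a power of two).
    static std::uint32_t alignUp(std::uint32_t x, std::uint32_t a)
    {
        return (x + a - 1) & ~(a - 1);
    }

    int main()
    {
        std::uint32_t widthBytes = 1000 * sizeof(float);   // e.g. a 1000-column float matrix
        std::uint32_t alignment  = 128;                    // allocator alignment (assumed)
        std::uint32_t pitchBytes = alignUp(widthBytes, alignment);

        std::uint32_t userPitch = 4096;                    // caller-supplied pitch (assumed)
        bool ok = userPitch >= pitchBytes && (userPitch & (alignment - 1)) == 0;

        std::printf("minimal pitch %u, user pitch %u -> %s\n",
                    pitchBytes, userPitch, ok ? "accepted" : "rejected");
        return 0;
    }
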
@ -188,7 +188,7 @@ elseif(APPLE)
list(APPEND HIGHGUI_LIBRARIES "-framework Carbon" "-framework QuickTime" "-framework CoreFoundation" "-framework QuartzCore")
else()
list(APPEND highgui_srcs src/cap_qtkit.mm)
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore")
list(APPEND HIGHGUI_LIBRARIES "-framework QTKit" "-framework QuartzCore" "-framework AppKit")
endif()
endif()

@ -445,12 +445,12 @@ class videoDevice{
int nFramesForReconnect;
unsigned long nFramesRunning;
int connection;
int storeConn;
int storeConn;
int myID;
long requestedFrameTime; //ie fps

char nDeviceName[255];
WCHAR wDeviceName[255];
char nDeviceName[255];
WCHAR wDeviceName[255];

unsigned char * pixels;
char * pBuffer;
@ -643,7 +643,7 @@ public:

bufferSetup = false;
newFrame = false;
latestBufferLength = 0;
latestBufferLength = 0;

hEvent = CreateEvent(NULL, true, false, NULL);
}
@ -655,7 +655,7 @@ public:
DeleteCriticalSection(&critSection);
CloseHandle(hEvent);
if(bufferSetup){
delete pixels;
delete[] pixels;
}
}
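
The destructor change above (delete -> delete[]) matters because `pixels` is allocated with array new in setupBuffer; releasing an array allocation with scalar delete is undefined behaviour. A minimal sketch of the matched pair (the buffer size is an assumption):

    #include <cstddef>

    int main()
    {
        const std::size_t numBytes = 640 * 480 * 3;              // buffer size (assumed)
        unsigned char* pixels = new unsigned char[numBytes];     // array new, as in setupBuffer()
        // ... fill / consume the buffer ...
        delete[] pixels;                                         // array delete, never `delete pixels`
        return 0;
    }
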
@ -665,11 +665,11 @@ public:
if(bufferSetup){
return false;
}else{
numBytes = numBytesIn;
pixels = new unsigned char[numBytes];
numBytes = numBytesIn;
pixels = new unsigned char[numBytes];
bufferSetup = true;
newFrame = false;
latestBufferLength = 0;
latestBufferLength = 0;
}
return true;
}
@ -796,12 +796,12 @@ void videoDevice::setSize(int w, int h){
}
else
{
width = w;
height = h;
videoSize = w*h*3;
width = w;
height = h;
videoSize = w*h*3;
sizeSet = true;
pixels = new unsigned char[videoSize];
pBuffer = new char[videoSize];
pixels = new unsigned char[videoSize];
pBuffer = new char[videoSize];

memset(pixels, 0 , videoSize);
sgCallback->setupBuffer(videoSize);

@ -657,7 +657,7 @@ Applies a fixed-level threshold to each array element.

.. ocv:pyoldfunction:: cv.Threshold(src, dst, threshold, maxValue, thresholdType)-> None

:param src: Source array (single-channel, 8-bit of 32-bit floating point).
:param src: Source array (single-channel, 8-bit or 32-bit floating point).

:param dst: Destination array of the same size and type as ``src`` .
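
The documentation fix above ("8-bit of" -> "8-bit or") describes cv::threshold, which takes a single-channel 8-bit or 32-bit floating-point array. A minimal C++ usage sketch (file names and threshold values are placeholders):

    #include <opencv2/core/core.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #include <opencv2/highgui/highgui.hpp>

    int main()
    {
        cv::Mat src = cv::imread("input.png", 0);   // load as single-channel 8-bit
        if (src.empty()) return 1;

        cv::Mat dst;
        cv::threshold(src, dst, 128, 255, cv::THRESH_BINARY);   // maxValue applies to THRESH_BINARY/_INV
        cv::imwrite("binarized.png", dst);
        return 0;
    }
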
@ -2839,6 +2839,11 @@ void cv::warpAffine( InputArray _src, OutputArray _dst,
CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 2 && M0.cols == 3 );
M0.convertTo(matM, matM.type());

#ifdef HAVE_TEGRA_OPTIMIZATION
if( tegra::warpAffine(src, dst, M, flags, borderType, borderValue) )
return;
#endif

if( !(flags & WARP_INVERSE_MAP) )
{
double D = M[0]*M[4] - M[1]*M[3];
@ -2851,22 +2856,6 @@ void cv::warpAffine( InputArray _src, OutputArray _dst,
M[2] = b1; M[5] = b2;
}

#ifdef HAVE_TEGRA_OPTIMIZATION
if (borderType == BORDER_REPLICATE)
{
if( tegra::warpAffine(src, dst, M, interpolation, borderType, borderValue) )
return;
}
else
{
double warp_mat[6];
Mat warp_m(2, 3, CV_64F, warp_mat);
M0.convertTo(warp_m, warp_m.type());
if( tegra::warpAffine(src, dst, warp_mat, interpolation, borderType, borderValue) )
return;
}
#endif

int x, y, x1, y1, width = dst.cols, height = dst.rows;
AutoBuffer<int> _abdelta(width*2);
int* adelta = &_abdelta[0], *bdelta = adelta + width;
@ -2995,14 +2984,14 @@ void cv::warpPerspective( InputArray _src, OutputArray _dst, InputArray _M0,
CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 3 && M0.cols == 3 );
M0.convertTo(matM, matM.type());

if( !(flags & WARP_INVERSE_MAP) )
invert(matM, matM);

#ifdef HAVE_TEGRA_OPTIMIZATION
if( tegra::warpPerspective(src, dst, M, interpolation, borderType, borderValue) )
if( tegra::warpPerspective(src, dst, M, flags, borderType, borderValue) )
return;
#endif

if( !(flags & WARP_INVERSE_MAP) )
invert(matM, matM);

int x, y, x1, y1, width = dst.cols, height = dst.rows;

int bh0 = std::min(BLOCK_SZ/2, height);
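
In the warpAffine hunk above, when WARP_INVERSE_MAP is not set the 2x3 matrix is inverted in place: D = M[0]*M[4] - M[1]*M[3] is the determinant of the 2x2 linear part, and the translation is recomputed so the inverted matrix maps destination coordinates back to the source. A small self-contained check of that formula (plain C++; the example coefficients are assumptions, not OpenCV code):

    #include <cstdio>

    int main()
    {
        // M maps (x, y) -> (M0*x + M1*y + M2, M3*x + M4*y + M5)
        double M[6] = { 2, 0, 10,
                        0, 3, -5 };

        double D = M[0]*M[4] - M[1]*M[3];            // determinant of the linear part
        D = D != 0 ? 1.0 / D : 0;
        double A11 =  M[4]*D, A12 = -M[1]*D;
        double A21 = -M[3]*D, A22 =  M[0]*D;
        double b1 = -A11*M[2] - A12*M[5];            // inverse translation = -A_inv * t
        double b2 = -A21*M[2] - A22*M[5];

        // Round-trip check: forward map a point, then apply the inverse.
        double x = 7, y = 11;
        double u  = M[0]*x + M[1]*y + M[2], v  = M[3]*x + M[4]*y + M[5];
        double xi = A11*u + A12*v + b1,     yi = A21*u + A22*v + b2;
        std::printf("(%g, %g) -> (%g, %g) -> (%g, %g)\n", x, y, u, v, xi, yi);
        return 0;
    }
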
@ -60,26 +60,10 @@ thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
}

#ifdef HAVE_TEGRA_OPTIMIZATION
switch( type )
{
case THRESH_BINARY:
if(tegra::thresh_8u_binary(_src, _dst, roi.width, roi.height, thresh, maxval)) return;
break;
case THRESH_BINARY_INV:
if(tegra::thresh_8u_binary_inv(_src, _dst, roi.width, roi.height, thresh, maxval)) return;
break;
case THRESH_TRUNC:
if(tegra::thresh_8u_trunc(_src, _dst, roi.width, roi.height, thresh)) return;
break;
case THRESH_TOZERO:
if(tegra::thresh_8u_tozero(_src, _dst, roi.width, roi.height, thresh)) return;
break;
case THRESH_TOZERO_INV:
if(tegra::thresh_8u_tozero_inv(_src, _dst, roi.width, roi.height, thresh)) return;
break;
}
if (tegra::thresh_8u(_src, _dst, roi.width, roi.height, thresh, maxval, type))
return;
#endif


switch( type )
{
case THRESH_BINARY:
@ -124,7 +108,7 @@ thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
__m128i thresh_s = _mm_set1_epi8(thresh ^ 0x80);
__m128i maxval_ = _mm_set1_epi8(maxval);
j_scalar = roi.width & -8;


for( i = 0; i < roi.height; i++ )
{
const uchar* src = (const uchar*)(_src.data + _src.step*i);
@ -240,7 +224,7 @@ thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
}
}
}
#endif
#endif

if( j_scalar < roi.width )
{
@ -248,8 +232,8 @@ thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
{
const uchar* src = (const uchar*)(_src.data + _src.step*i);
uchar* dst = (uchar*)(_dst.data + _dst.step*i);
j = j_scalar;
#if CV_ENABLE_UNROLLED
j = j_scalar;
#if CV_ENABLE_UNROLLED
for( ; j <= roi.width - 4; j += 4 )
{
uchar t0 = tab[src[j]];
@ -264,7 +248,7 @@ thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
dst[j+2] = t0;
dst[j+3] = t1;
}
#endif
#endif
for( ; j < roi.width; j++ )
dst[j] = tab[src[j]];
}
@ -282,7 +266,7 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
short* dst = (short*)_dst.data;
size_t src_step = _src.step/sizeof(src[0]);
size_t dst_step = _dst.step/sizeof(dst[0]);


#if CV_SSE2
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE);
#endif
@ -293,6 +277,11 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
roi.height = 1;
}

#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::thresh_16s(_src, _dst, roi.width, roi.height, thresh, maxval, type))
return;
#endif

switch( type )
{
case THRESH_BINARY:
@ -344,8 +333,8 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#endif

#endif

for( ; j < roi.width; j++ )
dst[j] = src[j] <= thresh ? maxval : 0;
}
@ -370,8 +359,8 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
_mm_storeu_si128((__m128i*)(dst + j + 8), v1 );
}
}
#endif

#endif

for( ; j < roi.width; j++ )
dst[j] = std::min(src[j], thresh);
}
@ -397,7 +386,7 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
}
}
#endif


for( ; j < roi.width; j++ )
{
short v = src[j];
@ -438,7 +427,7 @@ thresh_16s( const Mat& _src, Mat& _dst, short thresh, short maxval, int type )
}
}



static void
thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
{
@ -449,17 +438,22 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
float* dst = (float*)_dst.data;
size_t src_step = _src.step/sizeof(src[0]);
size_t dst_step = _dst.step/sizeof(dst[0]);


#if CV_SSE2
volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE);
#endif


if( _src.isContinuous() && _dst.isContinuous() )
{
roi.width *= roi.height;
roi.height = 1;
}


#ifdef HAVE_TEGRA_OPTIMIZATION
if (tegra::thresh_32f(_src, _dst, roi.width, roi.height, thresh, maxval, type))
return;
#endif

switch( type )
{
case THRESH_BINARY:
@ -484,12 +478,12 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
}
}
#endif


for( ; j < roi.width; j++ )
dst[j] = src[j] > thresh ? maxval : 0;
}
break;


case THRESH_BINARY_INV:
for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
{
@ -511,13 +505,13 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#endif

#endif

for( ; j < roi.width; j++ )
dst[j] = src[j] <= thresh ? maxval : 0;
}
break;


case THRESH_TRUNC:
for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
{
@ -537,13 +531,13 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
_mm_storeu_ps( dst + j + 4, v1 );
}
}
#endif

#endif

for( ; j < roi.width; j++ )
dst[j] = std::min(src[j], thresh);
}
break;


case THRESH_TOZERO:
for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
{
@ -564,7 +558,7 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
}
}
#endif


for( ; j < roi.width; j++ )
{
float v = src[j];
@ -572,7 +566,7 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
}
}
break;


case THRESH_TOZERO_INV:
for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
{
@ -604,7 +598,7 @@ thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
return CV_Error( CV_StsBadArg, "" );
}
}



static double
getThreshVal_Otsu_8u( const Mat& _src )
@ -620,8 +614,8 @@ getThreshVal_Otsu_8u( const Mat& _src )
for( i = 0; i < size.height; i++ )
{
const uchar* src = _src.data + _src.step*i;
j = 0;
#if CV_ENABLE_UNROLLED
j = 0;
#if CV_ENABLE_UNROLLED
for( ; j <= size.width - 4; j += 4 )
{
int v0 = src[j], v1 = src[j+1];
@ -637,7 +631,7 @@ getThreshVal_Otsu_8u( const Mat& _src )
double mu = 0, scale = 1./(size.width*size.height);
for( i = 0; i < N; i++ )
mu += i*(double)h[i];


mu *= scale;
double mu1 = 0, q1 = 0;
double max_sigma = 0, max_val = 0;
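
getThreshVal_Otsu_8u above accumulates a 256-bin histogram, computes the global mean mu, and then scans thresholds to maximize the between-class variance q1*(1-q1)*(mu1-mu2)^2. A compact stand-alone sketch of that scan (plain C++; the input histogram here is synthetic, not image data):

    #include <cstdio>

    int main()
    {
        const int N = 256;
        double h[N] = {0};
        // Synthetic bimodal histogram: two flat modes around 50 and 200.
        for (int i = 40;  i < 60;  i++) h[i] = 100;
        for (int i = 190; i < 210; i++) h[i] = 100;

        double total = 0, mu = 0;
        for (int i = 0; i < N; i++) { total += h[i]; mu += i * h[i]; }
        mu /= total;                                   // global mean

        double mu1 = 0, q1 = 0, max_sigma = 0, best = 0;
        for (int i = 0; i < N; i++)
        {
            double p_i = h[i] / total;
            double m1q1 = mu1 * q1;                    // class-1 mass-weighted mean so far
            q1 += p_i;
            if (q1 < 1e-12 || q1 > 1.0 - 1e-12) continue;
            mu1 = (m1q1 + i * p_i) / q1;
            double mu2 = (mu - q1 * mu1) / (1.0 - q1);
            double sigma = q1 * (1.0 - q1) * (mu1 - mu2) * (mu1 - mu2);
            if (sigma > max_sigma) { max_sigma = sigma; best = i; }
        }
        std::printf("Otsu threshold ~ %g\n", best);    // prints the bin separating the two modes
        return 0;
    }
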
@ -719,7 +713,7 @@ private:
};

}


double cv::threshold( InputArray _src, OutputArray _dst, double thresh, double maxval, int type )
{
Mat src = _src.getMat();
@ -731,12 +725,12 @@ double cv::threshold( InputArray _src, OutputArray _dst, double thresh, double m
CV_Assert( src.type() == CV_8UC1 );
thresh = getThreshVal_Otsu_8u(src);
}


_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();

int nStripes = 1;
#if defined HAVE_TBB && defined HAVE_TEGRA_OPTIMIZATION
#if defined HAVE_TBB && defined ANDROID
nStripes = 4;
#endif

@ -765,7 +759,6 @@ double cv::threshold( InputArray _src, OutputArray _dst, double thresh, double m
}
else
{
//thresh_8u( src, dst, (uchar)ithresh, (uchar)imaxval, type );
parallel_for(BlockedRange(0, nStripes),
ThresholdRunner(src, dst, nStripes, (uchar)ithresh, (uchar)imaxval, type));
}
@ -778,7 +771,7 @@ double cv::threshold( InputArray _src, OutputArray _dst, double thresh, double m
if( type == THRESH_TRUNC )
imaxval = ithresh;
imaxval = saturate_cast<short>(imaxval);


if( ithresh < SHRT_MIN || ithresh >= SHRT_MAX )
{
if( type == THRESH_BINARY || type == THRESH_BINARY_INV ||
@ -795,14 +788,12 @@ double cv::threshold( InputArray _src, OutputArray _dst, double thresh, double m
}
else
{
//thresh_16s( src, dst, (short)ithresh, (short)imaxval, type );
parallel_for(BlockedRange(0, nStripes),
ThresholdRunner(src, dst, nStripes, (short)ithresh, (short)imaxval, type));
}
}
else if( src.depth() == CV_32F )
{
//thresh_32f( src, dst, (float)thresh, (float)maxval, type );
parallel_for(BlockedRange(0, nStripes),
ThresholdRunner(src, dst, nStripes, (float)thresh, (float)maxval, type));
}
@ -829,7 +820,7 @@ void cv::adaptiveThreshold( InputArray _src, OutputArray _dst, double maxValue,
dst = Scalar(0);
return;
}


Mat mean;

if( src.data != dst.data )
@ -846,7 +837,7 @@ void cv::adaptiveThreshold( InputArray _src, OutputArray _dst, double maxValue,
int i, j;
uchar imaxval = saturate_cast<uchar>(maxValue);
int idelta = type == THRESH_BINARY ? cvCeil(delta) : cvFloor(delta);
uchar tab[768];
uchar tab[768];

if( type == CV_THRESH_BINARY )
for( i = 0; i < 768; i++ )

@ -488,7 +488,7 @@ public:
bool balanced=false );

virtual float predict( const CvMat* sample, bool returnDFVal=false ) const;
virtual float predict( const CvMat* samples, CvMat* results ) const;
virtual float predict( const CvMat* samples, CV_OUT CvMat* results ) const;

#ifndef SWIG
CV_WRAP CvSVM( const cv::Mat& trainData, const cv::Mat& responses,
@ -510,6 +510,7 @@ public:
CvParamGrid degreeGrid = CvSVM::get_default_grid(CvSVM::DEGREE),
bool balanced=false);
CV_WRAP virtual float predict( const cv::Mat& sample, bool returnDFVal=false ) const;
CV_WRAP_AS(predict_all) void predict( cv::InputArray samples, cv::OutputArray results ) const;
#endif

CV_WRAP virtual int get_support_vector_count() const;

@ -1250,7 +1250,7 @@ CvBoost::update_weights( CvBoostTree* tree )
if( have_subsample )
{
float* values = (float*)cur_buf_pos;
cur_buf_pos = (uchar*)(values + data->buf->step);
cur_buf_pos = (uchar*)(values + data->buf->cols);
uchar* missing = cur_buf_pos;
cur_buf_pos = missing + data->buf->step;
CvMat _sample, _mask;

@ -2124,6 +2124,12 @@ float CvSVM::predict(const CvMat* samples, CV_OUT CvMat* results) const
return result;
}

void CvSVM::predict( cv::InputArray _samples, cv::OutputArray _results ) const
{
_results.create(_samples.size().height, 1, CV_32F);
CvMat samples = _samples.getMat(), results = _results.getMat();
predict(&samples, &results);
}

CvSVM::CvSVM( const Mat& _train_data, const Mat& _responses,
const Mat& _var_idx, const Mat& _sample_idx, CvSVMParams _params )
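
The new CvSVM::predict(InputArray, OutputArray) overload above (exported to Python as predict_all) evaluates a whole batch of samples in one call and writes one result per row. A minimal C++ usage sketch; the model file, feature length and sample count are placeholders, not part of the change:

    #include <opencv2/core/core.hpp>
    #include <opencv2/ml/ml.hpp>

    int main()
    {
        CvSVM svm;
        svm.load("svm_model.xml");                       // previously trained model (assumed to exist)

        cv::Mat samples(10, 16, CV_32F, cv::Scalar(0));  // 10 feature vectors of length 16 (assumed)
        cv::Mat results;
        svm.predict(samples, results);                   // one predicted label/value per sample row
        return 0;
    }
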
@ -60,7 +60,7 @@ add_custom_command(
DEPENDS ${opencv_hdrs})

add_library(${the_module} SHARED src2/cv2.cpp ${CMAKE_CURRENT_BINARY_DIR}/generated0.i ${cv2_generated_hdrs} src2/cv2.cv.hpp)
if(PYTHON_DEBUG_LIBRARIES)
if(PYTHON_DEBUG_LIBRARIES AND NOT PYTHON_LIBRARIES MATCHES "optimized.*debug")
target_link_libraries(${the_module} debug ${PYTHON_DEBUG_LIBRARIES} optimized ${PYTHON_LIBRARIES})
else()
target_link_libraries(${the_module} ${PYTHON_LIBRARIES})

@ -13,7 +13,7 @@ else
endif

LOCAL_SRC_FILES := DetectionBasedTracker_jni.cpp
LOCAL_C_INCLUDES := $(LOCAL_PATH)
LOCAL_C_INCLUDES += $(LOCAL_PATH)
LOCAL_LDLIBS += -llog -ldl

LOCAL_MODULE := detection_based_tacker

@ -437,13 +437,20 @@ int build_mlp_classifier( char* data_filename,
cvMat( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
mlp.create( &layer_sizes );
printf( "Training the classifier (may take a few minutes)...\n");
mlp.train( &train_data, new_responses, 0, 0,
CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,300,0.01),

#if 1
CvANN_MLP_TrainParams::BACKPROP,0.001));
int method = CvANN_MLP_TrainParams::BACKPROP;
double method_param = 0.001;
int max_iter = 300;
#else
CvANN_MLP_TrainParams::RPROP,0.05));
int method = CvANN_MLP_TrainParams::RPROP;
double method_param = 0.1;
int max_iter = 1000;
#endif

mlp.train( &train_data, new_responses, 0, 0,
CvANN_MLP_TrainParams(cvTermCriteria(CV_TERMCRIT_ITER,max_iter,0.01),
method, method_param));
cvReleaseMat( &new_responses );
printf("\n");
}

@ -86,7 +86,7 @@ void detectAndDisplay( Mat frame )
for( int j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[i].height)*0.25 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 3, 8, 0 );
}
}

@ -190,3 +190,7 @@ def mosaic(w, imgs):
imgs = it.chain([img0], imgs)
rows = grouper(w, imgs, pad)
return np.vstack(map(np.hstack, rows))

def getsize(img):
h, w = img.shape[:2]
return w, h

@ -76,8 +76,8 @@ if __name__ == '__main__':
img2 = cv2.imread(fn2, 0)

surf = cv2.SURF(1000)
kp1, desc1 = surf.detect(img1, None, False)
kp2, desc2 = surf.detect(img2, None, False)
kp1, desc1 = surf.detectAndCompute(img1, None)
kp2, desc2 = surf.detectAndCompute(img2, None)
desc1.shape = (-1, surf.descriptorSize())
desc2.shape = (-1, surf.descriptorSize())
print 'img1 - %d features, img2 - %d features' % (len(kp1), len(kp2))
samples/python2/lappyr.py (new file, 64 lines)
@ -0,0 +1,64 @@
''' An example of Laplacian Pyramid construction and merging.

Level : Intermediate

Usage : python lappyr.py [<video source>]

References:
  http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.54.299

Alexander Mordvintsev 6/10/12
'''

import numpy as np
import cv2
import video
from common import nothing, getsize

def build_lappyr(img, leveln=6, dtype=np.int16):
    img = dtype(img)
    levels = []
    for i in xrange(leveln-1):
        next_img = cv2.pyrDown(img)
        img1 = cv2.pyrUp(next_img, dstsize=getsize(img))
        levels.append(img-img1)
        img = next_img
    levels.append(img)
    return levels

def merge_lappyr(levels):
    img = levels[-1]
    for lev_img in levels[-2::-1]:
        img = cv2.pyrUp(img, dstsize=getsize(lev_img))
        img += lev_img
    return np.uint8(np.clip(img, 0, 255))


if __name__ == '__main__':
    import sys
    print __doc__

    try: fn = sys.argv[1]
    except: fn = 0
    cap = video.create_capture(fn)

    leveln = 6
    cv2.namedWindow('level control')
    for i in xrange(leveln):
        cv2.createTrackbar('%d'%i, 'level control', 5, 50, nothing)

    while True:
        ret, frame = cap.read()

        pyr = build_lappyr(frame, leveln)
        for i in xrange(leveln):
            v = cv2.getTrackbarPos('%d'%i, 'level control') / 5
            pyr[i] *= v
        res = merge_lappyr(pyr)

        cv2.imshow('laplacian pyramid filter', res)

        if cv2.waitKey(1) == 27:
            break
@ -88,7 +88,7 @@ class SVM(LetterStatModel):
self.model.train(samples, responses, params = params)

def predict(self, samples):
return np.float32( [self.model.predict(s) for s in samples] )
return self.model.predict_all(samples).ravel()


class MLP(LetterStatModel):